@article{SIGMA_2006_2_a38,
  author   = {Torokhti, Anatoli and Howlett, Phil},
  title    = {Combined {Reduced-Rank} {Transform}},
  journal  = {Symmetry, Integrability and Geometry: Methods and Applications},
  year     = {2006},
  volume   = {2},
  language = {en},
  url      = {http://geodesic.mathdoc.fr/item/SIGMA_2006_2_a38/},
}
Anatoli Torokhti; Phil Howlett. Combined Reduced-Rank Transform. Symmetry, integrability and geometry: methods and applications, Tome 2 (2006). http://geodesic.mathdoc.fr/item/SIGMA_2006_2_a38/
[1] Hotelling H., “Analysis of a complex of statistical variables into Principal Components”, J. Educ. Psychol., 24 (1933), 417–441 ; 498–520 | DOI | Zbl | Zbl
[2] Karhunen K., “Über Lineare Methoden in der Wahrscheinlichkeitsrechnung”, Ann. Acad. Sci. Fennicae, Ser. A, 1947:37 (1947), 49 | MR
[3] Loève M., “Fonctions aléatoires de second ordre”, appendix to book: P. Lévy, Processus Stochastiques et Mouvement Brownien, Hermann, Paris, 1948; Loève M., “Analyse harmonique générale d'une fonction aléatoire”, C. R. Acad. Sci. Paris, 220 (1945), 380–382 | MR | Zbl
[4] Jolliffe I. T., Principal component analysis, Springer Verlag, New York, 1986 ; 2 ed., 2002 | MR
[5] Scharf L. L., “The SVD and reduced rank signal processing”, Signal Processing, 25 (1991), 113–133 | DOI | Zbl
[6] Yamashita Y., Ogawa H., “Relative Karhunen–Loève transform”, IEEE Trans. on Signal Processing, 44 (1996), 371–378 | DOI | MR
[7] Hua Y., Liu W. Q., “Generalized Karhunen–Loève transform”, IEEE Signal Processing Letters, 5 (1998), 141–143 | DOI
[8] Vapnik V., Statistical Learning Theory, Wiley, 1998 | MR | Zbl
[9] Ocaña F. A., Aguilera A. M., Valderrama M. J., “Functional principal components analysis by choice of norm”, J. Multivariate Anal., 71 (1999), 262–276 | DOI | MR | Zbl
[10] Tipping M. E., Bishop C. M., “Probabilistic principal component analysis”, J. of the Royal Statistical Society, Ser. B, 61 (1999), 611–619 | DOI | MR
[11] Tipping M. E., Bishop C. M., “Mixtures of probabilistic principal component analysers”, Neural Computation, 11 (1999), 443–482 | DOI
[12] Schölkopf B., Smola A. J., Müller K.-R., “Kernel principal component analysis”, Advances in Kernel Methods. Support Vector Learning, eds. B. Schölkopf, C. J. C. Burges and A. J. Smola, MIT Press, Cambridge, 1999, 327–352
[13] Tenenbaum J. B., de Silva V., Langford J. C., “A global geometric framework for nonlinear dimensionality reduction”, Science, 290:5500 (2000), 2319–2323 | DOI
[14] Roweis S. T., Saul L. K., “Nonlinear dimensionality reduction by locally linear embedding”, Science, 290:5500 (2000), 2323–2326 | DOI
[15] Cristianini N., Shawe-Taylor J., An introduction to support vector machines and other kernel-based learning methods, Cambridge University Press, Cambridge, 2000
[16] Yamada I., Sekiguchi T., Sakaniwa K., “Reduced rank Volterra filter for robust identification of nonlinear systems”, Proc. 2nd Int. Workshop on Multidimensional (ND) Systems (Czocha Castle, 2000), Tech. Univ. Press, Zielona Góra, 2000, 171–175 | MR | Zbl
[17] Hua Y., Nikpour M., Stoica P., “Optimal reduced-rank estimation and filtering”, IEEE Trans. on Signal Processing, 49 (2001), 457–469 | DOI
[18] Kneip A., Utikal K. J., “Inference for density families using functional principal component analysis”, Journal of the American Statistical Association, 96 (2001), 519–542 | DOI | MR | Zbl
[19] Honig M. L., Xiao W., “Performance of reduced-rank linear interference suppression”, IEEE Trans. on Information Theory, 47 (2001), 1928–1946 | DOI | MR | Zbl
[20] Chen W., Mitra U., Schniter P., “On the equivalence of three reduced rank linear estimators with applications to DS-CDMA”, IEEE Trans. on Information Theory, 48 (2002), 2609–2614 | DOI | MR | Zbl
[21] Honig M. L., Goldstein J. S., “Adaptive reduced-rank interference suppression based on multistage Wiener filter”, IEEE Trans. on Communications, 50 (2002), 986–994 | DOI
[22] Stock J. H., Watson M. W., “Forecasting using principal components from a large number of predictors”, Journal of the American Statistical Association, 97 (2002), 1167–1179 | DOI | MR | Zbl
[23] Fukunaga K., Introduction to statistical pattern recognition, Academic Press, Boston, 1990 | MR | Zbl
[24] Kraut S., Anderson R. H., Krolik J. L., “A generalized Karhunen–Loève basis for efficient estimation of tropospheric refractivity using radar clutter”, IEEE Trans. on Signal Processing, 52 (2004), 48–60 | DOI | MR
[25] Torokhti A., Howlett P., “An optimal filter of the second order”, IEEE Trans. on Signal Processing, 49 (2001), 1044–1048 | DOI | MR
[26] Torokhti A., Howlett P., “Optimal fixed rank transform of the second degree”, IEEE Trans. on Circuits and Systems. Part II, Analog Digital Signal Processing, 48:3 (2001), 309–315 | DOI
[27] Torokhti A., Howlett P., Pearce C., “New perspectives on optimal transforms of random vectors”, Optimization, Springer Optimization and Its Applications, 32, Springer, New York, 2009, 245–259 | Zbl
[28] Torokhti A., Howlett P., “Constructing fixed rank optimal estimators with method of recurrent best approximations”, J. Multivariate Analysis, 86 (2002), 293–309 | DOI | MR
[29] Torokhti A., Howlett P., “Best operator approximation in modelling of nonlinear systems”, IEEE Trans. on Circuits and Systems. Part I, Fundamental Theory and Applications, 49 (2002), 1792–1798 | DOI | MR
[30] Torokhti A., Howlett P., “Method of recurrent best estimators of second degree for optimal filtering of random signals”, Signal Processing, 83 (2003), 1013–1024 | DOI | Zbl
[31] Torokhti A., Howlett P., “Best causal mathematical models for a nonlinear system”, IEEE Trans. Circuits Syst. I Regul. Pap., 52:5 (2005), 1013–1020 | DOI | MR
[32] Sontag E. D., Polynomial response maps, Lecture Notes in Control and Information Sciences, 13, Springer-Verlag, Berlin–Heidelberg–New York, 1979 | MR | Zbl
[33] Chen S., Billings S. A., “Representation of non-linear systems: NARMAX model”, Int. J. Control, 49 (1989), 1013–1032 | MR | Zbl
[34] Howlett P. G., Torokhti A. P., Pearce C. E. M., “A philosophy for the modelling of realistic non-linear systems”, Proc. of Amer. Math. Soc., 132 (2003), 353–363 | DOI | MR
[35] Cotlar M., Cignoli R., An introduction to functional analysis, North-Holland Publishing Company, Amsterdam–London, 1974, 114–116 | MR
[36] Perlovsky L. I., Marzetta T. L., “Estimating a covariance matrix from incomplete realizations of a random vector”, IEEE Trans. on Signal Processing, 40 (1992), 2097–2100 | DOI
[37] Kauermann G., Carroll R. J., “A note on the efficiency of Sandwich covariance matrix estimation”, Journal of the American Statistical Association, 96 (2001), 1387–1396 | DOI | MR | Zbl
[38] Schneider M. K., Willsky A. S., “A Krylov subspace method for covariance approximation and simulation of a random process and fields”, Int. J. Multidim. Syst. Signal Processing, 14 (2003), 295–318 | DOI | MR | Zbl
[39] Kubokawa T., Srivastava M. S., “Estimating the covariance matrix: a new approach”, J. Multivariate Analysis, 86 (2003), 28–47 | DOI | MR | Zbl
[40] Ledoit O., Wolf M., “A well-conditioned estimator for large-dimensional covariance matrices”, J. Multivariate Analysis, 88 (2004), 365–411 | DOI | MR | Zbl
[41] Leung P. L., Ng F. Y., “Improved estimation of a covariance matrix in an elliptically contoured matrix distribution”, J. Multivariate Analysis, 88 (2004), 131–137 | DOI | MR | Zbl
[42] Higham N. J., “Stable iterations for the matrix square root”, Numerical Algorithms, 15 (1997), 227–241 | DOI | MR
[43] Golub G. H., van Loan C. F., Matrix computations, Johns Hopkins University Press, Baltimore, 1996 | MR
[44] Kowalski M. A., Sikorski K. A., Stenger F., Selected topics in approximation and computations, Oxford University Press, New York–Oxford, 1995 | MR | Zbl
[45] Ben-Israel A., Greville T. N. E., Generalized inverses: theory and applications, John Wiley \& Sons, New York, 1974 | MR | Zbl
[46] Mathews V. J., Sicuranza G. L., Polynomial signal processing, J. Wiley \& Sons, 2001
[47] Goldstein J. S., Reed I., Scharf L. L., “A multistage representation of the Wiener filter based on orthogonal projections”, IEEE Trans. on Information Theory, 44 (1998), 2943–2959 | DOI | MR | Zbl