View the notice for this article from the Library of Science source
@article{IJAMCS_2019_29_1_a11,
    author = {Blachnik, Marcin},
    title = {Ensembles of instance selection methods: {A} comparative study},
    journal = {International Journal of Applied Mathematics and Computer Science},
    pages = {151--168},
    publisher = {mathdoc},
    volume = {29},
    number = {1},
    year = {2019},
    language = {en},
    url = {http://geodesic.mathdoc.fr/item/IJAMCS_2019_29_1_a11/}
}
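The BibTeX record above can also be consumed programmatically. Below is a minimal Python sketch of reading such a record into a dictionary; it assumes single-line, brace-delimited field values (true for this entry, though not for BibTeX in general), and the parse_bibtex helper is illustrative, not part of any library.

import re

BIBTEX_RECORD = """@article{IJAMCS_2019_29_1_a11,
    author = {Blachnik, Marcin},
    title = {Ensembles of instance selection methods: {A} comparative study},
    year = {2019},
    pages = {151--168}
}"""

def parse_bibtex(record):
    """Return the entry type, citation key, and fields of one BibTeX record."""
    header = re.match(r"@(\w+)\{([^,]+),", record)
    entry = {"type": header.group(1), "key": header.group(2)}
    # One "name = {value}" pair per line; inner braces such as {A} are kept verbatim.
    for name, value in re.findall(r"(\w+)\s*=\s*\{(.*)\},?", record):
        entry[name] = value
    return entry

entry = parse_bibtex(BIBTEX_RECORD)
print(entry["key"], entry["year"])  # IJAMCS_2019_29_1_a11 2019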
TY  - JOUR
AU  - Blachnik, Marcin
TI  - Ensembles of instance selection methods: A comparative study
JO  - International Journal of Applied Mathematics and Computer Science
PY  - 2019
SP  - 151
EP  - 168
VL  - 29
IS  - 1
PB  - mathdoc
UR  - http://geodesic.mathdoc.fr/item/IJAMCS_2019_29_1_a11/
LA  - en
ID  - IJAMCS_2019_29_1_a11
ER  -
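The RIS record follows a fixed "TAG  - value" line layout (two-character tag, two spaces, a dash, a space). A minimal Python sketch of collecting those lines into a dictionary is shown below; the parse_ris helper is a hypothetical illustration, and repeated tags such as AU are gathered into lists.

RIS_RECORD = """TY  - JOUR
AU  - Blachnik, Marcin
TI  - Ensembles of instance selection methods: A comparative study
PY  - 2019
ER  - """

def parse_ris(record):
    """Collect 'TAG  - value' lines into a dict; repeated tags become lists."""
    fields = {}
    for line in record.splitlines():
        # A valid RIS line has the separator "  - " at positions 2..5.
        if len(line) >= 6 and line[2:6] == "  - ":
            fields.setdefault(line[:2], []).append(line[6:].strip())
    return fields

fields = parse_ris(RIS_RECORD)
print(fields["TI"][0])  # Ensembles of instance selection methods: A comparative study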
Blachnik, Marcin. Ensembles of instance selection methods: A comparative study. International Journal of Applied Mathematics and Computer Science, Volume 29 (2019) no. 1, pp. 151-168. http://geodesic.mathdoc.fr/item/IJAMCS_2019_29_1_a11/
[1] Abdi, H. (2010). Holm’s sequential Bonferroni procedure, Encyclopedia of Research Design 1(8): 620–627.
[2] Aha, D., Kibler, D. and Albert, M. (1991). Instance-based learning algorithms, Machine Learning 6(1): 37–66.
[3] Alcalá-Fdez, J., Fernández, A., Luengo, J., Derrac, J., García, S., Sanchez, L. and Herrera, F. (2011). KEEL data-mining software tool: Data set repository, integration of algorithms and experimental analysis framework, Journal of Multiple-Valued Logic and Soft Computing 17: 255–287.
[4] Arnaiz-González, Á., Blachnik, M., Kordos, M. and García-Osorio, C. (2016a). Fusion of instance selection methods in regression tasks, Information Fusion 30: 69–79.
[5] Arnaiz-González, Á., Díez-Pastor, J., Rodríguez, J.J. and García-Osorio, C.I. (2016b). Instance selection for regression: Adapting DROP, Neurocomputing 201: 66–81.
[6] Bauer, E. and Kohavi, R. (1999). An empirical comparison of voting classification algorithms: Bagging, boosting, and variants, Machine Learning 36(1): 105–139.
[7] Bezdek, J.C., Ehrlich, R. and Full, W. (1984). FCM: The fuzzy C-means clustering algorithm, Computers & Geosciences 10(2–3): 191–203.
[8] Bhattacharya, B., Poulsen, R. and Toussaint, G. (1984). Application of proximity graphs to editing nearest neighbor decision rules, International Symposium on Information Theory, Santa Monica, CA, USA, pp. 97–108.
[9] Blachnik, M. (2014). Ensembles of instance selection methods based on feature subset, Procedia Computer Science 35: 388–396.
[10] Blachnik, M. and Kordos, M. (2014). Bagging of instance selection algorithms, International Conference on Artificial Intelligence and Soft Computing, Zakopane, Poland, pp. 40–51.
[11] Brighton, H. and Mellish, C. (2002). Advances in instance selection for instance-based learning algorithms, Data Mining and Knowledge Discovery 6(2): 153–172.
[12] Czarnowski, I. and Jędrzejowicz, P. (2015). Ensemble online classifier based on the one-class base classifiers for mining data streams, Cybernetics and Systems 46(1–2): 51–68.
[13] Freund, Y. and Schapire, R.E. (1996). Experiments with a new boosting algorithm, International Conference on Machine Learning, Bari, Italy, pp. 148–156.
[14] Freund, Y. and Schapire, R.E. (1997). A decision-theoretic generalization of on-line learning and an application to boosting, Journal of Computer and System Sciences 55(1): 119–139.
[15] Galar, M., Fernández, A., Barrenechea, E., Bustince, H. and Herrera, F. (2011). An overview of ensemble methods for binary classifiers in multi-class problems: Experimental study on one-vs-one and one-vs-all schemes, Pattern Recognition 44(8): 1761–1776.
[16] García-Osorio, C., de Haro-García, A. and García-Pedrajas, N. (2010). Democratic instance selection: A linear complexity instance selection algorithm based on classifier ensemble concepts, Artificial Intelligence 174(4–5): 410–441.
[17] García, S., Derrac, J., Cano, J.R. and Herrera, F. (2012). Prototype selection for nearest neighbor classification: Taxonomy and empirical study, IEEE Transactions on Pattern Analysis and Machine Intelligence 34(3): 417–435.
[18] García-Pedrajas, N. (2009). Constructing ensembles of classifiers by means of weighted instance selection, IEEE Transactions on Neural Networks 20(2): 258–277.
[19] García-Pedrajas, N. and De Haro-García, A. (2014). Boosting instance selection algorithms, Knowledge-Based Systems 67: 342–360.
[20] García, S., Luengo, J. and Herrera, F. (2016). Tutorial on practical tips of the most influential data preprocessing algorithms in data mining, Knowledge-Based Systems 98: 1–29.
[21] Grochowski, M. and Jankowski, N. (2004). Comparison of instance selection algorithms. II: Results and comments, International Conference on Artificial Intelligence and Soft Computing, Zakopane, Poland, Vol. 3070, pp. 580–585.
[22] Gunn, I.A., Arnaiz-González, Á. and Kuncheva, L.I. (2018). A taxonomic look at instance-based stream classifiers, Neurocomputing 286: 167–178.
[23] Hart, P. (1968). The condensed nearest neighbor rule, IEEE Transactions on Information Theory 14(3): 515–516.
[24] Herrera, F. (2005). KEEL, knowledge extraction based on evolutionary learning, Spanish National Projects TIC2002-04036-C05, TIN2005-08386-C05 and TIN2008-06681-C06, http://www.keel.es.
[25] Jacobs, R.A., Jordan, M.I., Nowlan, S.J. and Hinton, G.E. (1991). Adaptive mixtures of local experts, Neural Computation 3(1): 79–87.
[26] Jankowski, N. and Grochowski, M. (2004). Comparison of instance selection algorithms. I: Algorithms survey, International Conference on Artificial Intelligence and Soft Computing, Zakopane, Poland, Vol. 3070, pp. 598–603.
[27] Kordos, M. and Blachnik, M. (2012). Instance selection with neural networks for regression problems, International Conference on Artificial Neural Networks, Lausanne, Switzerland, pp. 263–270.
[28] Kordos, M. and Rusiecki, A. (2016). Reducing noise impact on MLP training, Soft Computing 20(1): 49–65.
[29] Kuncheva, L. (2004). Combining Pattern Classifiers: Methods and Algorithms, Wiley, Hoboken, NJ.
[30] Kuncheva, L.I., Bezdek, J.C. and Duin, R.P. (2001). Decision templates for multiple classifier fusion: An experimental comparison, Pattern Recognition 34(2): 299–314.
[31] Marchiori, E. (2008). Hit miss networks with applications to instance selection, Journal of Machine Learning Research 9(Jun): 997–1017.
[32] Marchiori, E. (2010). Class conditional nearest neighbor for large margin instance selection, IEEE Transactions on Pattern Analysis and Machine Intelligence 32(2): 364–370.
[33] Raviv, Y. and Intrator, N. (1996). Bootstrapping with noise: An effective regularization technique, Connection Science 8(3–4): 355–372.
[34] Rokach, L. (2009). Taxonomy for characterizing ensemble methods in classification tasks: A review and annotated bibliography, Computational Statistics & Data Analysis 53(12): 4046–4072.
[35] Schapire, R.E. (1990). The strength of weak learnability, Machine Learning 5(2): 197–227.
[36] Sebban, M., Nock, R. and Lallich, S. (2002). Stopping criterion for boosting-based data reduction techniques: From binary to multiclass problem, Journal of Machine Learning Research 3(Dec): 863–885.
[37] Shaker, A. and Hüllermeier, E. (2012). IBLStreams: A system for instance-based classification and regression on data streams, Evolving Systems 3(4): 235–249.
[38] Skurichina, M. and Duin, R.P. (2001). Bagging and the random subspace method for redundant feature spaces, International Workshop on Multiple Classifier Systems, Cagliari, Italy, pp. 1–10.
[39] Song, Y., Liang, J., Lu, J. and Zhao, X. (2017). An efficient instance selection algorithm for k nearest neighbor regression, Neurocomputing 251: 26–34.
[40] Tomek, I. (1976). An experiment with the edited nearest-neighbor rule, IEEE Transactions on Systems, Man, and Cybernetics 6: 448–452.
[41] Wilson, D. (1972). Asymptotic properties of nearest neighbor rules using edited data, IEEE Transactions on Systems, Man, and Cybernetics 2: 408–421.
[42] Wilson, D. and Martinez, T. (2000). Reduction techniques for instance-based learning algorithms, Machine Learning 38(3): 257–286.
[43] Wolpert, D.H. (1992). Stacked generalization, Neural Networks 5(2): 241–259.
[44] Woźniak, M., Graña, M. and Corchado, E. (2014). A survey of multiple classifier systems as hybrid systems, Information Fusion 16: 3–17.
[45] Zhu, J., Zou, H., Rosset, S. and Hastie, T. (2009). Multi-class AdaBoost, Statistics and Its Interface 2(3): 349–360.