Voir la notice de l'article provenant de la source Library of Science
@article{IJAMCS_2022_32_2_a9,
  author    = {Wawrze{\'n}czyk, Adam and Mielniczuk, Jan},
  title     = {Revisiting strategies for fitting logistic regression for positive and unlabeled data},
  journal   = {International Journal of Applied Mathematics and Computer Science},
  pages     = {299--309},
  publisher = {mathdoc},
  volume    = {32},
  number    = {2},
  year      = {2022},
  language  = {en},
  url       = {http://geodesic.mathdoc.fr/item/IJAMCS_2022_32_2_a9/},
}
TY - JOUR AU - Wawrzeńczyk, Adam AU - Mielniczuk, Jan TI - Revisiting strategies for fitting logistic regression for positive and unlabeled data JO - International Journal of Applied Mathematics and Computer Science PY - 2022 SP - 299 EP - 309 VL - 32 IS - 2 PB - mathdoc UR - http://geodesic.mathdoc.fr/item/IJAMCS_2022_32_2_a9/ LA - en ID - IJAMCS_2022_32_2_a9 ER -
%0 Journal Article %A Wawrzeńczyk, Adam %A Mielniczuk, Jan %T Revisiting strategies for fitting logistic regression for positive and unlabeled data %J International Journal of Applied Mathematics and Computer Science %D 2022 %P 299-309 %V 32 %N 2 %I mathdoc %U http://geodesic.mathdoc.fr/item/IJAMCS_2022_32_2_a9/ %G en %F IJAMCS_2022_32_2_a9
Wawrzeńczyk, Adam; Mielniczuk, Jan. Revisiting strategies for fitting logistic regression for positive and unlabeled data. International Journal of Applied Mathematics and Computer Science, Tome 32 (2022) no. 2, pp. 299-309. http://geodesic.mathdoc.fr/item/IJAMCS_2022_32_2_a9/
[1] [1] Bahorik, A.L., Newhill, C.E., Queen, C.C. and Eack, S.M. (2014). Under-reporting of drug use among individuals with schizophrenia: Prevalence and predictors, Psychological Medicine 44(12): 61–69, DOI: 10.1017/S0033291713000548.
[2] [2] Bekker, J. and Davis, J. (2018). Estimating the class prior in positive and unlabeled data through decision tree induction, Proceedings of the AAAI Conference on Artificial Intelligence, New Orleans, USA 32(1): 2712–2719.
[3] [3] Bekker, J. and Davis, J. (2020). Learning from positive and unlabeled data: A survey, Machine Learning 109(4): 719–760, DOI: 10.1007/s10994-020-05877-5.
[4] [4] Bekker, J., Robberechts, P. and Davis, J. (2019). Beyond the selected completely at random assumption for learning from positive and unlabeled data, in U. Brefeld et al. (Eds), Proceedings of the 2019 European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases, Springer, Cham, pp. 71–85, DOI: 10.1007/978-3-030-46147-8_5.
[5] [5] Cover, T. and Thomas, J. (1991). Elements of Information Theory, Wiley, New York, DOI: 10.1002/047174882X.
[6] [6] Elkan, C. and Noto, K. (2008). Learning classifiers from only positive and unlabeled data, Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, Las Vegas, USA, pp. 213–220, DOI: 10.1145/1401890.1401920.
[7] [7] Łazęcka, M., Mielniczuk, J. and Teisseyre, P. (2021). Estimating the class prior for positive and unlabelled data via logistic regression, Advances in Data Analysis and Classification 15(4): 1039–1068, DOI: 10.1007/s11634-021-00444-9.
[8] [8] Lipp, T. and Boyd, S. (2016). Variations and extension of the convex-concave procedure, Optimization and Engineering 17(2): 263–287, DOI: 10.1007/s11081-015-9294-x.
[9] [9] Liu, B., Dai, Y., Li, X., Lee, W.S. and Yu, P.S. (2003). Building text classifiers using positive and unlabeled examples, Proceedings of the 3rd IEEE International Conference on Data Mining, ICDM’03, Melbourne, USA, pp. 179–186, DOI: 10.1109/ICDM.2003.1250918.
[10] [10] Na, B., Kim, H., Song, K., Joo, W., Kim, Y.-Y. and Moon, I.-C. (2020). Deep generative positive-unlabeled learning under selection bias, Proceedings of the 29th ACM International Conference on Information and Knowledge Management, CIKM’20, Ireland, pp. 1155–1164, DOI: 10.1145/3340531.3411971, (virtual event).
[11] [11] Scott, B., Blanchard, G. and Handy, G. (2013). Classification with asymmetric label noise: Consistency and maximal denoising, Proceedings of Machine Learning Research 30(2013): 1–23.
[12] [12] Sechidis, K., Sperrin, M., Petherick, E.S., Luján, M. and Brown, G. (2017). Dealing with under-reported variables: An information theoretic solution, International Journal of Approximate Reasoning 85(1): 159–177, DOI: 10.1016/j.ijar.2017.04.002.
[13] [13] Shen, X., Diamond, S., Gu, Y. and Boyd, S. (2016). Disciplined convex-concave programming, Proceedings of 2016 IEEE 55th Conference on Decision and Control (CDC), Las Vegas, USA, pp. 1009–1014, DOI: 10.1109/CDC.2016.7798400.
[14] [14] Teisseyre, P., Mielniczuk, J. and Łazęcka, M. (2020). Different strategies of fitting logistic regression for positive and unlabelled data, in V.V. Krzhizhanovskaya et al. (Eds), Proceedings of the International Conference on Computational Science ICCS’20, Springer International Publishing, Cham, pp. 3–17, DOI: 10.1007/978-3-030-50423-6_1.
[15] [15] Ward, G., Hastie, T., Barry, S., Elith, J. and Leathwick, J. (2009). Presence-only data and the EM algorithm, Biometrics 65(2): 554–563, DOI: 10.1111/j.1541-0420.2008.01116.x.
[16] [16] Yang, P., Li, X., Chua, H., Kwoh, C. and Ng, S. (2014). Ensemble positive unlabeled learning for disease gene identification, PLOS ONE 9(5): 1–11, DOI: 10.1371/journal.pone.0097079.
[17] [17] Yuille, A. and Rangarajan, A. (2003). The concave-convex procedure, Neural Computation 15(4): 915–936, DOI: 10.1162/08997660360581958.