@article{ZNSL_2024_540_a10,
author = {A. Sabieva and A. Zhamankhan and N. Zhetessov and A. Kubayeva and I. Akhmetov and A. Pak and D. Akhmetova and A. Zhaxylykova and A. Yelenov},
title = {Survey on the legal question answering problem},
journal = {Zapiski Nauchnykh Seminarov POMI},
pages = {194--213},
year = {2024},
volume = {540},
language = {en},
url = {http://geodesic.mathdoc.fr/item/ZNSL_2024_540_a10/}
}
TY - JOUR
AU - A. Sabieva
AU - A. Zhamankhan
AU - N. Zhetessov
AU - A. Kubayeva
AU - I. Akhmetov
AU - A. Pak
AU - D. Akhmetova
AU - A. Zhaxylykova
AU - A. Yelenov
TI - Survey on the legal question answering problem
JO - Zapiski Nauchnykh Seminarov POMI
PY - 2024
SP - 194
EP - 213
VL - 540
UR - http://geodesic.mathdoc.fr/item/ZNSL_2024_540_a10/
LA - en
ID - ZNSL_2024_540_a10
ER -
%0 Journal Article
%A A. Sabieva
%A A. Zhamankhan
%A N. Zhetessov
%A A. Kubayeva
%A I. Akhmetov
%A A. Pak
%A D. Akhmetova
%A A. Zhaxylykova
%A A. Yelenov
%T Survey on the legal question answering problem
%J Zapiski Nauchnykh Seminarov POMI
%D 2024
%P 194-213
%V 540
%U http://geodesic.mathdoc.fr/item/ZNSL_2024_540_a10/
%G en
%F ZNSL_2024_540_a10
A. Sabieva; A. Zhamankhan; N. Zhetessov; A. Kubayeva; I. Akhmetov; A. Pak; D. Akhmetova; A. Zhaxylykova; A. Yelenov. Survey on the legal question answering problem. Zapiski Nauchnykh Seminarov POMI, Investigations on applied mathematics and informatics. Part IV, Vol. 540 (2024), pp. 194–213. http://geodesic.mathdoc.fr/item/ZNSL_2024_540_a10/