Other formats:
BibTeX
LaTeX
RIS
@inproceedings{1810359,
  author    = {Medveď, Marek and Horák, Aleš and Sabol, Radoslav},
  editor    = {Rocha, Ana Paula and Steels, Luc and van den Herik, Jaap},
  title     = {Comparing {RNN} and Transformer Context Representations in the {Czech} Answer Selection Task},
  booktitle = {Proceedings of the 14th International Conference on Agents and Artificial Intelligence ({ICAART})},
  publisher = {SCITEPRESS},
  address   = {Portugal},
  year      = {2022},
  pages     = {388--394},
  doi       = {10.5220/0010827000003116},
  isbn      = {978-989-758-547-0},
  language  = {eng},
  keywords  = {Question Answering; Answer Context; Answer Selection; Czech; Sentence Embeddings; RNN; BERT},
  note      = {accepted for publication},
}
TY - JOUR ID - 1810359 AU - Medveď, Marek - Horák, Aleš - Sabol, Radoslav PY - 2022 TI - Comparing RNN and Transformer Context Representations in the Czech Answer Selection Task PB - SCITEPRESS CY - Portugal SN - 9789897585470 N1 - accepted for publication KW - Question Answering KW - Answer Context KW - Answer Selection KW - Czech KW - Sentence Embeddings KW - RNN KW - BERT N2 - Open domain question answering now inevitably builds upon advanced neural models processing large unstructured textual sources serving as a kind of underlying knowledge base. In case of non-mainstream highly- inflected languages, the state-of-the-art approaches lack large training datasets emphasizing the need for other improvement techniques. In this paper, we present detailed evaluation of a new technique employing various context representations in the answer selection task where the best answer sentence from a candidate document is identified as the most relevant to the human entered question. The input data here consists not only of each sentence in isolation but also of its preceding sentence(s) as the context. We compare seven different context representations including direct recurrent network (RNN) embeddings and several BERT-model based sentence embedding vectors. All experiments are evaluated with a new version 3.1 of the Czech question answering benchmark dataset SQAD with possible multiple correct answers as a new feature. The comparison shows that the BERT-based sentence embeddings are able to offer the best context representations reaching the mean average precision results of 83.39% which is a new best score for this dataset. ER -
MEDVEĎ, Marek, Aleš HORÁK and Radoslav SABOL. Comparing RNN and Transformer Context Representations in the Czech Answer Selection Task. Online. In Ana Paula Rocha, Luc Steels, Jaap van den Herik. Proceedings of the 14th International Conference on Agents and Artificial Intelligence (ICAART). Portugal: SCITEPRESS, 2022, p. 388-394. ISBN 978-989-758-547-0. Available from: https://dx.doi.org/10.5220/0010827000003116.
|