Skip to content

Commit

Permalink
Update papers.bib
Browse files Browse the repository at this point in the history
  • Loading branch information
wang-h authored May 30, 2024
1 parent 8eb0cfe commit fc78cca
Showing 1 changed file with 11 additions and 17 deletions.
28 changes: 11 additions & 17 deletions _bibliography/papers.bib
Original file line number Diff line number Diff line change
Expand Up @@ -4,26 +4,20 @@
@string{aps = {American Physical Society,}}
@inproceedings{wang-etal-2024-towards-human-like,
  title     = {Towards Human-Like Machine Comprehension: Few-Shot Relational Learning in Visually-Rich Documents},
  author    = {Wang, Hao and
               Li, Tang and
               Chu, Chenhui and
               Wang, Rui and
               Zhu, Pinpin},
  editor    = {Calzolari, Nicoletta and
               Kan, Min-Yen and
               Hoste, Veronique and
               Lenci, Alessandro and
               Sakti, Sakriani and
               Xue, Nianwen},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024, CCF-B)},
  month     = may,
  year      = {2024},
  address   = {Torino, Italia},
  publisher = {ELRA and ICCL},
  url       = {https://aclanthology.org/2024.lrec-main.1439},
  pages     = {16557--16569},
  abstract  = {Key-value relations are prevalent in Visually-Rich Documents (VRDs), often depicted in distinct spatial regions accompanied by specific color and font styles. These non-textual cues serve as important indicators that greatly enhance human comprehension and acquisition of such relation triplets. However, current document AI approaches often fail to consider this valuable prior information related to visual and spatial features, resulting in suboptimal performance, particularly when dealing with limited examples. To address this limitation, our research focuses on few-shot relational learning, specifically targeting the extraction of key-value relation triplets in VRDs. Given the absence of a suitable dataset for this task, we introduce two new few-shot benchmarks built upon existing supervised benchmark datasets. Furthermore, we propose a variational approach that incorporates relational 2D-spatial priors and prototypical rectification techniques. This approach aims to generate relation representations that are more aware of the spatial context and unseen relation in a manner similar to human perception. Experimental results demonstrate the effectiveness of our proposed method by showcasing its ability to outperform existing methods. This study also opens up new possibilities for practical applications.},
}
@article{DBLP:journals/ijseke/GuLW23,
Expand Down

0 comments on commit fc78cca

Please sign in to comment.