@inproceedings{21bcb64c09a741e192d9b2e83f88d4ec,
title = "Word Matters: What Influences Domain Adaptation in Summarization?",
abstract = "Domain adaptation aims to enable Large Language Models (LLMs) to generalize domain datasets unseen effectively during the training phase. However, factors such as the size of the model parameters and the scale of training data are general influencers and do not reflect the nuances of domain adaptation performance. This paper investigates the fine-grained factors affecting domain adaptation performance, analyzing the specific impact of 'words' in training data on summarization tasks. We propose quantifying dataset learning difficulty as the learning difficulty of generative summarization, which is determined by two indicators: word-based compression rate and abstraction level. Our experiments conclude that, when considering dataset learning difficulty, the cross-domain overlap and the performance gain in summarization tasks exhibit an approximate linear relationship, which is not directly related to the number of words. Based on this finding, predicting a model's performance on unknown domain datasets is possible without undergoing training. Source code and scripts are available at https://github.com/li-aolong/Word-Matters.",
author = "Yinghao Li and Siyu Miao and Heyan Huang and Yang Gao",
note = "Publisher Copyright: {\textcopyright} 2024 Association for Computational Linguistics.; 62nd Annual Meeting of the Association for Computational Linguistics, ACL 2024 ; Conference date: 11-08-2024 Through 16-08-2024",
year = "2024",
doi = "10.18653/v1/2024.acl-long.715",
language = "English",
series = "Proceedings of the Annual Meeting of the Association for Computational Linguistics",
publisher = "Association for Computational Linguistics (ACL)",
pages = "13236--13249",
editor = "Lun-Wei Ku and Martins, \{Andre F. T.\} and Vivek Srikumar",
booktitle = "Long Papers",
address = "United States",
}