@inproceedings{c5a795f61c7c444b84f8bfaad057a36b,
  title     = {{MoDification}: Mixture of Depths Made Easy},
  author    = {Zhang, Chen and Zhong, Meizhi and Wang, Qimeng and Lu, Xuantao and Ye, Zheyu and Lu, Chengqiang and Gao, Yan and Hu, Yao and Chen, Kehai and Zhang, Min and Song, Dawei},
  editor    = {Chiruzzo, Luis and Ritter, Alan and Wang, Lu},
  booktitle = {Proceedings of the 2025 Annual Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies: Long Papers, NAACL-HLT 2025},
  publisher = {Association for Computational Linguistics (ACL)},
  year      = {2025},
  pages     = {5137--5149},
  doi       = {10.18653/v1/2025.naacl-long.265},
  language  = {English},
  abstract  = {Long-context efficiency has recently become a trending topic in serving large language models (LLMs). And mixture of depths (MoD) is proposed as a perfect fit to bring down both latency and memory. In this paper, however, we discover that MoD can barely transform existing LLMs without costly training over an extensive number of tokens. To enable the transformations from any LLMs to MoD ones, we showcase top-k operator in MoD should be promoted to threshold-p operator, and refinement to architecture and data should also be crafted along. All these designs form our method termed MoDification. Through a comprehensive set of experiments covering model scales from 3B to 70B, we exhibit MoDification strikes an excellent balance between efficiency and effectiveness. MoDification can achieve up to $\sim$1.2$\times$ speedup in latency and $\sim$1.8$\times$ reduction in memory compared to original LLMs especially in long-context applications.},
  note      = {Publisher Copyright: {\textcopyright} 2025 Association for Computational Linguistics.; 2025 Annual Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2025 ; Conference date: 29-04-2025 Through 04-05-2025},
}