@inproceedings{4a55de7530094513ab643a25d0f34d18,
title = "Dynamic partitioning of scalable cache memory for SMT architectures",
abstract = "The one-level data cache [1], which is optimized for bandwidth, eliminates the overhead to maintain containment and coherence. And it is suitable for future large-scale SMT processor. Although the design has good scalability, large-scale SMT architecture exacerbates the stress on cache, especially for the bank-interleaved data cache referred to in paper [1]. This paper proposes a dynamic partitioning method of scalable cache for large-scale SMT architectures. We extend the scheme proposed in [2] to multi-banking cache. Since memory reference characteristics of threads can change very quickly, our method collects the miss-rate characteristics of simultaneously executing threads at runtime, and partitions the cache among the executing threads. The partitioning scheme has been evaluated using a modified SMT simulator modeling the one-level data cache. The results show a relative improvement in the IPC of up to 18.94% over those generated by the non-partitioned cache using standard least recently used replacement policy.",
keywords = "Bank caching, Cache partitioning, Scalable multi-banking cache memory, Simultaneous multithreading",
author = "Wu, {Jun Min} and Zhu, {Xiao Dong} and Sui, {Xiu Feng} and Jin, {Ying Qi} and Zhao, {Xiao Yu}",
year = "2013",
doi = "10.1007/978-3-642-41591-3_2",
language = "English",
isbn = "9783642415906",
series = "Communications in Computer and Information Science",
publisher = "Springer Verlag",
pages = "12--25",
booktitle = "High Performance Computing - 8th CCF Conference, HPC 2012, Revised Selected Papers",
address = "Germany",
note = "8th CCF Conference on High Performance Computing, HPC 2012 ; Conference date: 29-10-2012 Through 31-10-2012",
}