@inproceedings{60d90598211148b4a3f2ca96c4c57788,
  title     = {Seizing the Bandwidth Scaling of On-Package Interconnect in a Post-{Moore's} Law World},
  abstract  = {The slowing and forecasted end of Moore's Law have forced designers to look beyond simply adding transistors, encouraging them to employ other unused resources as a manner to increase chip performance. At the same time, in recent years, inter-die interconnect technologies made a huge leap forward, dramatically increasing the available bandwidth. While the end of Moore's Law will inevitably slow down the performance advances of single-die setups, interconnect technologies will likely continue to scale. We envision a future where designers must create ways to exploit interconnect utilization for better system performance. As an example of a feature that converts interconnect utilization into performance, we present Meduza - a write-update coherence protocol for future chiplet systems. Meduza extends previous write-update protocols to systems with multi-level cache hierarchies. Meduza improves execution speed in our benchmark suite by 19\% when compared to the MESIF coherence protocol on a chiplet-based system. Moreover, Meduza promises even more advantages in future systems. This work shows that by exploiting excess interconnect bandwidth, there is significant potential for additional performance in modern and future chiplet systems.},
  keywords  = {bandwidth, coherence, interconnect, multi-chiplet, multicore, scaling, write-update},
  author    = {Chirkov, Grigory and Wentzlaff, David},
  note      = {Publisher Copyright: {\textcopyright} 2023 Owner/Author(s).; 37th ACM International Conference on Supercomputing, ICS 2023 ; Conference date: 21-06-2023 Through 23-06-2023},
  year      = {2023},
  month     = jun,
  day       = {21},
  doi       = {10.1145/3577193.3593702},
  language  = {English (US)},
  series    = {Proceedings of the International Conference on Supercomputing},
  publisher = {Association for Computing Machinery},
  pages     = {410--422},
  booktitle = {{ACM} {ICS} 2023 - Proceedings of the International Conference on Supercomputing},
}