@inproceedings{316a440450464bf78e522acf53def374,
title = "REST: Retrieval-Based Speculative Decoding",
abstract = "We introduce Retrieval-Based Speculative Decoding (REST), a novel algorithm designed to speed up language model generation. The key insight driving the development of REST is the observation that the process of text generation often includes certain common phases and patterns. Unlike previous methods that rely on a draft language model for speculative decoding, REST harnesses the power of retrieval to generate draft tokens. This method draws from the reservoir of existing knowledge, retrieving and employing relevant tokens based on the current context. Its plug-and-play nature allows for seamless integration and acceleration of any language model, all without necessitating additional training. When benchmarked on 7B and 13B language models in a single-batch setting, REST achieves a significant speedup of 1.62× to 2.36× on code or text generation. The source code of REST is available at https://github.com/FasterDecoding/REST.",
author = "Zhenyu He and Zexuan Zhong and Tianle Cai and Jason D. Lee and Di He",
note = "Publisher Copyright: {\textcopyright} 2024 Association for Computational Linguistics; 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL 2024; Conference date: 16-06-2024 through 21-06-2024",
year = "2024",
language = "English (US)",
series = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL 2024",
publisher = "Association for Computational Linguistics (ACL)",
pages = "1582--1595",
editor = "Kevin Duh and Helena Gomez and Steven Bethard",
booktitle = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
}