* spec : refactor
* spec : drop support for incompatible vocabs
* spec : update common_speculative_init()
* cont : pass seq_id
* cont : dedup ctx_seq_rm_type
* server : sketch the ctx_dft decode loop
* server : draft prompt cache and checkpoints
* server : improve ctx names
* server, spec : transition to unified spec context
* cont : sync main and drft contexts
* cont : async drft eval when possible
* cont : handle non-ckpt models
* cont : pass correct n_past for drafting
* cont : process images through the draft context
* spec : handle draft running out of context
* server : fix mtmd draft processing
* server : fix URL for draft model
* server : add comment
* server : clean-up + dry
* speculative-simple : update
* spec : fix n_past type
* server : fix slot ctx_drft ptr
* tools : update readme
* naming : improve consistency
* spec : refactor for multi-sequence speculative context
* cont : prepare params
* cont : prepare params
* spec : support parallel drafts
* server : support parallel drafting
* llama : reuse device buffers when possible
* server, spec : clean-up
* cont : clean-up
* cont : minor
* spec : reset `drafting` flag at the end
* spec : introduce `common_speculative_process()`
* spec : allow for multiple spec types (chain of speculators)
* replace the old `type` field (of type common_speculative_type) in the common_params_speculative struct with a vector, so that multiple types can be specified
* introduce common_get_enabled_speculative_impls(const std::vector<enum common_speculative_type>) to figure out which implementations the user has enabled
* introduce common_speculative_type_from_names(const std::vector<std::string> & names) to parse the spec types provided by the user
* all speculators run sequentially, best one wins (we verify its drafted tokens)
* maximize expected accepted tokens for the current round by calculating the product of the probability of accepting the current token (n_acc_tokens / n_gen_drafts) and the draft's length (see the sketch below)

---------

Co-authored-by: Petros Sideris <petros.sideris@nokia.com>
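The selection rule in the last two bullets (run every enabled speculator, verify only the best draft) can be illustrated with a minimal Python sketch. This is an illustration, not the actual C++ implementation: the function name and the shape of the per-speculator statistics are invented for the example; only the n_acc_tokens / n_gen_drafts ratio and the "acceptance probability times draft length" score come from the commit description.

def pick_best_draft(candidates):
    # candidates: list of (draft_tokens, n_acc_tokens, n_gen_drafts), one entry
    # per speculator (hypothetical shape, for illustration only)
    best_tokens, best_score = None, -1.0
    for draft_tokens, n_acc_tokens, n_gen_drafts in candidates:
        # per-token acceptance probability, estimated from previous rounds
        p_accept = n_acc_tokens / n_gen_drafts if n_gen_drafts > 0 else 0.0
        # expected number of accepted tokens for this round
        score = p_accept * len(draft_tokens)
        if score > best_score:
            best_tokens, best_score = draft_tokens, score
    # the winning draft is the one sent to the target model for verification
    return best_tokens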
132 lines
3.5 KiB
Python
import pytest

from utils import *

# We use an F16 MoE gguf as the main model, and q4_0 as the draft model

server = ServerPreset.stories15m_moe()

MODEL_DRAFT_FILE_URL = "https://huggingface.co/ggml-org/tiny-llamas/resolve/main/stories15M-q4_0.gguf"

def create_server():
    global server
    server = ServerPreset.stories15m_moe()
    # set default values
    server.model_draft = download_file(MODEL_DRAFT_FILE_URL)
    server.draft_min = 4
    server.draft_max = 8
    server.fa = "off"

@pytest.fixture(autouse=True)
def fixture_create_server():
    return create_server()

def test_with_and_without_draft():
    global server
    server.model_draft = None  # disable draft model
    server.start()
    res = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "temperature": 0.0,
        "top_k": 1,
        "n_predict": 16,
    })
    assert res.status_code == 200
    content_no_draft = res.body["content"]
    server.stop()

    # create new server with draft model
    create_server()
    server.start()
    res = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "temperature": 0.0,
        "top_k": 1,
        "n_predict": 16,
    })
    assert res.status_code == 200
    content_draft = res.body["content"]

    assert content_no_draft == content_draft

def test_different_draft_min_draft_max():
    global server
    test_values = [
        (1, 2),
        (1, 4),
        (4, 8),
        (4, 12),
        (8, 16),
    ]
    last_content = None
    for draft_min, draft_max in test_values:
        server.stop()
        server.draft_min = draft_min
        server.draft_max = draft_max
        server.start()
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "temperature": 0.0,
            "top_k": 1,
            "n_predict": 16,
        })
        assert res.status_code == 200
        if last_content is not None:
            assert last_content == res.body["content"]
        last_content = res.body["content"]

def test_slot_ctx_not_exceeded():
    global server
    server.n_ctx = 256
    server.start()
    res = server.make_request("POST", "/completion", data={
        "prompt": "Hello " * 248,
        "temperature": 0.0,
        "top_k": 1,
        "speculative.p_min": 0.0,
    })
    assert res.status_code == 200
    assert len(res.body["content"]) > 0

def test_with_ctx_shift():
    global server
    server.n_ctx = 256
    server.enable_ctx_shift = True
    server.start()
    res = server.make_request("POST", "/completion", data={
        "prompt": "Hello " * 248,
        "temperature": 0.0,
        "top_k": 1,
        "n_predict": 256,
        "speculative.p_min": 0.0,
    })
    assert res.status_code == 200
    assert len(res.body["content"]) > 0
    assert res.body["tokens_predicted"] == 256
    assert res.body["truncated"] == True

@pytest.mark.parametrize("n_slots,n_requests", [
|
|
(1, 2),
|
|
(2, 2),
|
|
])
|
|
def test_multi_requests_parallel(n_slots: int, n_requests: int):
|
|
global server
|
|
server.n_slots = n_slots
|
|
server.start()
|
|
tasks = []
|
|
for _ in range(n_requests):
|
|
tasks.append((server.make_request, ("POST", "/completion", {
|
|
"prompt": "I believe the meaning of life is",
|
|
"temperature": 0.0,
|
|
"top_k": 1,
|
|
})))
|
|
results = parallel_function_calls(tasks)
|
|
for res in results:
|
|
assert res.status_code == 200
|
|
assert match_regex("(wise|kind|owl|answer)+", res.body["content"])
|