spec : refactor

Georgi Gerganov
2026-01-25 17:15:46 +02:00
parent af382c384a
commit 924517dd38
10 changed files with 217 additions and 240 deletions

View File

@@ -3438,6 +3438,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
if (value == "none") {
params.speculative.draftless_type = COMMON_SPECULATIVE_TYPE_NONE;
} else if (value == "ngram-cache") {
+ // TODO: this does nothing atm
params.speculative.draftless_type = COMMON_SPECULATIVE_TYPE_NGRAM_CACHE;
} else if (value == "ngram-simple") {
params.speculative.draftless_type = COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE;

View File

@@ -1097,7 +1097,10 @@ common_init_result::common_init_result(common_params & params) :
if (params.fit_params) {
LOG_INF("%s: fitting params to device memory, for bugs during this step try to reproduce them with -fit off, or provide --verbose logs if the bug only occurs with -fit on\n", __func__);
llama_params_fit(params.model.path.c_str(), &mparams, &cparams,
- params.tensor_split, params.tensor_buft_overrides.data(), params.fit_params_target.data(), params.fit_params_min_ctx,
+ params.tensor_split,
+ params.tensor_buft_overrides.data(),
+ params.fit_params_target.data(),
+ params.fit_params_min_ctx,
params.verbosity >= 4 ? GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_ERROR);
}
@@ -1208,10 +1211,6 @@ std::vector<llama_adapter_lora_ptr> & common_init_result::lora() {
return pimpl->lora;
}
- void common_init_result::free_context() {
- pimpl->context.reset();
- }
common_init_result_ptr common_init_from_params(common_params & params) {
common_init_result_ptr res(new common_init_result(params));

View File

@@ -291,6 +291,10 @@ struct common_params_speculative {
std::string lookup_cache_static = ""; // path of static ngram cache file for lookup decoding // NOLINT
std::string lookup_cache_dynamic = ""; // path of dynamic ngram cache file for lookup decoding // NOLINT
+ bool has_dft() const {
+ return !model.path.empty() || !model.hf_repo.empty();
+ }
};
struct common_params_vocoder {
@@ -603,10 +607,6 @@ struct common_params {
// return false from callback to abort model loading or true to continue
llama_progress_callback load_progress_callback = NULL;
void * load_progress_callback_user_data = NULL;
- bool has_speculative() const {
- return !speculative.model.path.empty() || !speculative.model.hf_repo.empty();
- }
};
// call once at the start of a program if it uses libcommon
@@ -742,8 +742,6 @@ struct common_init_result {
std::vector<llama_adapter_lora_ptr> & lora();
- void free_context();
private:
struct impl;
std::unique_ptr<impl> pimpl;

View File

@@ -192,12 +192,12 @@ void common_ngram_cache_draft(
break;
}
LOG(" - draft candidate: token=%d\n", drafted_token);
LOG_DBG(" - draft candidate: token=%d\n", drafted_token);
draft.push_back(drafted_token);
}
}
void common_ngram_cache_save(common_ngram_cache & ngram_cache, std::string & filename) {
void common_ngram_cache_save(common_ngram_cache & ngram_cache, const std::string & filename) {
std::ofstream file_out(filename, std::ios::binary);
for (std::pair<common_ngram, common_ngram_cache_part> item : ngram_cache) {
const common_ngram ngram = item.first;
@@ -217,10 +217,9 @@ void common_ngram_cache_save(common_ngram_cache & ngram_cache, std::string & fil
file_out.write(reinterpret_cast<const char *>(&count), sizeof(int32_t));
}
}
}
common_ngram_cache common_ngram_cache_load(std::string & filename) {
common_ngram_cache common_ngram_cache_load(const std::string & filename) {
std::ifstream hashmap_file(filename, std::ios::binary);
if (!hashmap_file) {
throw std::ifstream::failure("Unable to open file " + filename);

View File

@@ -88,12 +88,12 @@ void common_ngram_cache_draft(
// Save an ngram cache to a file.
// ngram_cache: the ngram cache to save.
// filename: the path under which to save the ngram cache.
- void common_ngram_cache_save(common_ngram_cache & ngram_cache, std::string & filename);
+ void common_ngram_cache_save(common_ngram_cache & ngram_cache, const std::string & filename);
// Load an ngram cache saved with common_ngram_cache_save.
// filename: the path from which to load the ngram cache.
// returns: an ngram cache containing the information saved to filename.
common_ngram_cache common_ngram_cache_load(std::string & filename);
common_ngram_cache common_ngram_cache_load(const std::string & filename);
// Merge two ngram caches.
// ngram_cache_target: the ngram cache to which to add the information from ngram_cache_add.
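A minimal round-trip sketch of the const-ref API above (the file name and setup are illustrative, not from this commit):

// save a cache and load it back; common_ngram_cache_load throws
// std::ifstream::failure if the file cannot be opened
void ngram_cache_roundtrip(common_ngram_cache & cache) {
    const std::string filename = "ngram-cache.bin"; // hypothetical path
    common_ngram_cache_save(cache, filename);
    common_ngram_cache loaded = common_ngram_cache_load(filename);
}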

View File

@@ -57,22 +57,23 @@ struct common_speculative_state {
struct common_speculative_state_draft : public common_speculative_state {
struct llama_context * ctx_tgt; // only used for retokenizing from ctx_dft
struct llama_context * ctx_dft;
- struct common_sampler * smpl;
- llama_batch batch;
- bool vocab_dft_compatible; // whether retokenization is needed
- std::map<std::string, std::string> tgt_dft_replacements = {};
- llama_tokens prompt_dft = {};
+ struct common_sampler * smpl;
+ llama_batch batch;
+ llama_tokens prompt_dft;
+ bool vocab_cmpt = true; // whether the draft vocab is compatible (if not, retokenization is needed)
+ std::unordered_map<std::string, std::string> vocab_map;
common_speculative_state_draft(
enum common_speculative_type type,
struct llama_context * ctx_tgt,
struct llama_context * ctx_dft,
std::map<std::string, std::string> tgt_dft_replacements)
const std::vector<std::pair<std::string, std::string>> & replacements)
: common_speculative_state(type)
, ctx_tgt(ctx_tgt)
, ctx_dft(ctx_dft)
- , tgt_dft_replacements(std::move(tgt_dft_replacements))
{
batch = llama_batch_init(llama_n_batch(ctx_dft), 0, 1);
smpl = nullptr;
@@ -104,18 +105,29 @@ struct common_speculative_state_draft : public common_speculative_state {
smpl = common_sampler_init(llama_get_model(ctx_dft), params);
}
- vocab_dft_compatible = common_speculative_are_compatible(ctx_tgt, ctx_dft);
- LOG_DBG("vocab_dft_compatible = %d\n", vocab_dft_compatible);
+ vocab_cmpt = common_speculative_are_compatible(llama_get_model(ctx_tgt), llama_get_model(ctx_dft));
+ LOG_DBG("vocab_cmpt = %d\n", vocab_cmpt);
+ if (!vocab_cmpt) {
+ LOG_WRN("the target and draft vocabs are not compatible - tokens will be translated between the two\n");
+ for (const auto & pair : replacements) {
+ vocab_map[pair.first] = pair.second;
+ }
+ }
}
~common_speculative_state_draft() override {
- if (smpl != nullptr) {
- common_sampler_free(smpl);
- smpl = nullptr;
- }
+ llama_perf_context_print(ctx_dft);
+ llama_free(ctx_dft);
common_sampler_free(smpl);
llama_batch_free(batch);
}
};
struct common_speculative_state_eagle3 : public common_speculative_state {
common_speculative_state_eagle3(enum common_speculative_type type) : common_speculative_state(type) {}
};
@@ -128,17 +140,19 @@ struct common_speculative_state_ngram_simple : public common_speculative_state {
common_speculative_state_ngram_simple(
enum common_speculative_type type,
common_ngram_simple_state state)
- : common_speculative_state(type), state(std::move(state)) {}
+ : common_speculative_state(type), state(state) {}
};
struct common_speculative_state_ngram_map_k : public common_speculative_state {
- common_ngram_map map; // draft ngram map for speculative decoding without draft model
+ // draft ngram map for speculative decoding without draft model
+ common_ngram_map map;
common_speculative_state_ngram_map_k(
enum common_speculative_type type,
common_ngram_map map)
- : common_speculative_state(type), map(map) {}
+ : common_speculative_state(type), map(std::move(map)) {}
};
struct common_speculative_state_ngram_map_k4v : public common_speculative_state_ngram_map_k {
common_speculative_state_ngram_map_k4v(
enum common_speculative_type type,
@@ -159,8 +173,8 @@ struct common_speculative_state_ngram_cache : public common_speculative_state {
common_speculative_state_ngram_cache(
const enum common_speculative_type type,
std::string & path_static,
std::string & path_dynamic,
const std::string & path_static,
const std::string & path_dynamic,
uint16_t n_draft,
bool save_dynamic,
bool save_static)
@@ -186,21 +200,15 @@ struct common_speculative_state_ngram_cache : public common_speculative_state {
GGML_ABORT("Couldn't read dynamic lookup cache");
}
}
}
};
struct common_speculative {
std::vector<std::unique_ptr<common_speculative_state>> impls; // list of implementations to use and their states
common_speculative_state * curr_impl = nullptr; // current implementation in use (for stats)
};
- common_ngram_map get_common_ngram_map(const common_speculative_config config, uint16_t size_ngram, uint16_t size_mgram);
- struct common_speculative_state_ngram_cache create_state_ngram_cache(
- std::string path_static, std::string path_dynamic,
- common_speculative_config config);
- common_ngram_map get_common_ngram_map(const common_speculative_config config, uint16_t size_ngram, uint16_t size_mgram) {
+ static common_ngram_map get_common_ngram_map(const common_speculative_config & config, uint16_t size_ngram, uint16_t size_mgram) {
uint16_t size_key = size_ngram;
uint16_t size_value = size_mgram;
bool key_only = false;
@@ -237,33 +245,39 @@ common_ngram_map get_common_ngram_map(const common_speculative_config config, ui
return common_ngram_map(size_key, size_value, key_only, check_rate, min_hits);
}
- struct common_speculative_state_ngram_cache create_state_ngram_cache(
- std::string path_static, std::string path_dynamic,
- common_speculative_config config) {
+ static struct common_speculative_state_ngram_cache create_state_ngram_cache(
+ const std::string & path_static, const std::string & path_dynamic,
+ const common_speculative_config & config) {
uint16_t n_draft = 8;
bool save_static = false;
bool save_dynamic = false;
const std::map<std::string, std::string> & cfg = config.config;
if (cfg.find("n_draft") != cfg.end()) {
n_draft = std::stoi(cfg.at("n_draft"));
if (n_draft < 1 || n_draft > 1024) {
throw std::invalid_argument("ngram-cache: n_draft must be between 1 and 1024");
}
}
if (cfg.find("save_static") != cfg.end()) {
save_static = (cfg.at("save_static") == "true");
}
if (cfg.find("save_dynamic") != cfg.end()) {
save_dynamic = (cfg.at("save_dynamic") == "true");
}
- common_speculative_state_ngram_cache state(config.type,
- path_static, path_dynamic, n_draft, save_static, save_dynamic);
+ common_speculative_state_ngram_cache state(config.type, path_static, path_dynamic, n_draft, save_static, save_dynamic);
return state;
}
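For reference, a hedged sketch of driving create_state_ngram_cache through the stringly-typed config map; only the .type and .config members of common_speculative_config are visible in this diff, the rest is assumed:

common_speculative_config config;
config.type = COMMON_SPECULATIVE_TYPE_NGRAM_CACHE;
config.config["n_draft"]      = "16";   // values outside [1, 1024] throw std::invalid_argument
config.config["save_static"]  = "true"; // only the literal string "true" enables saving
config.config["save_dynamic"] = "false";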
std::string common_speculative_type_name_str() {
std::string result = "";
std::string result;
for (size_t i = 0; i < common_speculative_types.size(); i++) {
if (i > 0) {
result += ", ";
@@ -296,13 +310,22 @@ enum common_speculative_type common_speculative_type_from_name(const std::string
// initialization of the speculative decoding system
//
struct common_speculative * common_speculative_init(
struct common_params_speculative & params,
struct llama_context * ctx_tgt,
struct llama_context * ctx_dft
) {
const struct common_params_speculative & params,
struct llama_context * ctx_tgt,
const struct llama_context_params & cparams_dft,
struct llama_model * model_dft) {
llama_context * ctx_dft = nullptr;
if (model_dft) {
ctx_dft = llama_init_from_model(model_dft, cparams_dft);
if (ctx_dft == nullptr) {
LOG_ERR("%s", "failed to create draft context\n");
return nullptr;
}
}
std::vector<std::unique_ptr<common_speculative_state>> implementations = {};
for (const common_speculative_config & config : params.configs) {
LOG_DBG("%s: adding implementation %s\n", __func__, common_speculative_type_to_str(config.type).c_str());
switch (config.type) {
@@ -310,9 +333,9 @@ struct common_speculative * common_speculative_init(
break;
case COMMON_SPECULATIVE_TYPE_DRAFT: {
implementations.push_back(std::make_unique<common_speculative_state_draft>(config.type,
- /* .ctx_tgt = */ ctx_tgt,
- /* .ctx_dft = */ ctx_dft,
- /* .tgt_dft_replacements = */ std::map<std::string, std::string>{}
+ /* .ctx_tgt = */ ctx_tgt,
+ /* .ctx_dft = */ ctx_dft,
+ /* .replacements = */ params.replacements
));
break;
}
@@ -321,11 +344,12 @@ struct common_speculative * common_speculative_init(
break;
}
case COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE: {
- common_ngram_map ngram_map = get_common_ngram_map(config,
- params.spec_ngram_size_n, params.spec_ngram_size_m);
+ common_ngram_map ngram_map = get_common_ngram_map(config, params.spec_ngram_size_n, params.spec_ngram_size_m);
uint16_t ngram_size_key = ngram_map.size_key;
uint16_t mgram_size_value = ngram_map.size_value;
uint16_t check_rate = ngram_map.check_rate;
auto config_simple = common_ngram_simple_config{
/* .size_ngram = */ ngram_size_key,
/* .size_mgram = */ mgram_size_value,
@@ -363,6 +387,12 @@ struct common_speculative * common_speculative_init(
break;
}
}
+ if (implementations.empty()) {
+ LOG_WRN("%s", "no implementations specified for speculative decoding\n");
+ return nullptr;
+ }
auto * result = new common_speculative {
/* .impls = */ std::move(implementations)
};
@@ -379,14 +409,8 @@ void common_speculative_free(struct common_speculative * spec) {
}
bool common_speculative_are_compatible(
- const struct llama_context * ctx_tgt,
- const struct llama_context * ctx_dft) {
- if (ctx_tgt == nullptr && ctx_dft == nullptr) {
- return true;
- }
- const struct llama_model * model_tgt = llama_get_model(ctx_tgt);
- const struct llama_model * model_dft = llama_get_model(ctx_dft);
+ const struct llama_model * model_tgt,
+ const struct llama_model * model_dft) {
const struct llama_vocab * vocab_tgt = llama_model_get_vocab(model_tgt);
const struct llama_vocab * vocab_dft = llama_model_get_vocab(model_dft);
@@ -429,11 +453,12 @@ bool common_speculative_are_compatible(
for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
const char * token_text_tgt = llama_vocab_get_text(vocab_tgt, i);
const char * token_text_dft = llama_vocab_get_text(vocab_dft, i);
if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
LOG_DBG("%s: draft model vocab must match target model to use speculation but ", __func__);
LOG_DBG("token %d content differs - target '%s', draft '%s'\n", i,
- common_token_to_piece(ctx_tgt, i).c_str(),
- common_token_to_piece(ctx_dft, i).c_str());
+ common_token_to_piece(vocab_tgt, i).c_str(),
+ common_token_to_piece(vocab_dft, i).c_str());
return false;
}
}
@@ -442,34 +467,19 @@ bool common_speculative_are_compatible(
return true;
}
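A small usage sketch of the new model-based signature (ctx_tgt and ctx_dft here are assumed to be live contexts owned by the caller):

// compatibility is now checked on the models rather than the contexts
if (!common_speculative_are_compatible(llama_get_model(ctx_tgt), llama_get_model(ctx_dft))) {
    // tokens will be translated between the two vocabs via the replacement map
}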
- void common_speculative_add_replacement_tgt_dft(
- struct common_speculative * spec,
- const char *source, const char *dest) {
- // Iterate through all implementations and add the replacement in the draft model implementation
- for (auto & impl : spec->impls) {
- if (impl->type == COMMON_SPECULATIVE_TYPE_DRAFT) {
- auto * draft_impl = dynamic_cast<struct common_speculative_state_draft *>(impl.get());
- if (draft_impl) {
- draft_impl->tgt_dft_replacements[source] = dest;
- break;
- } else {
- GGML_ABORT("%s: unexpected implementation in type %d", __func__, impl.get()->type);
- }
- }
- }
- }
static std::string replace_to_dft(
struct common_speculative_state_draft * spec,
const std::string& input) {
const std::string & input) {
std::string result = input;
for (const auto & pair : spec->tgt_dft_replacements) {
for (const auto & pair : spec->vocab_map) {
size_t pos = result.find(pair.first);
while (pos != std::string::npos) {
result.replace(pos, pair.first.length(), pair.second);
pos = result.find(pair.first, pos + pair.second.length());
}
}
return result;
}
@@ -477,23 +487,25 @@ static std::string replace_to_tgt(
struct common_speculative_state_draft * spec,
const std::string& input) {
std::string result = input;
for (const auto& pair : spec->tgt_dft_replacements) {
for (const auto & pair : spec->vocab_map) {
size_t pos = result.find(pair.second);
while (pos != std::string::npos) {
result.replace(pos, pair.second.length(), pair.first);
pos = result.find(pair.second, pos + pair.first.length());
}
}
return result;
}
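As a worked example of this round-trip (the replacement pair is illustrative, not from the source): with vocab_map = { "<|assistant|>" -> "<|bot|>" }, replace_to_dft(spec, "a<|assistant|>b") yields "a<|bot|>b", and replace_to_tgt(spec, "a<|bot|>b") restores "a<|assistant|>b".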
- llama_tokens common_speculative_use_draft_model(
+ static llama_tokens common_speculative_use_draft_model(
struct common_speculative_state_draft * spec,
struct common_speculative_params params,
const llama_tokens & prompt_tgt_main_model, // specified in target model vocab
llama_token id_last);
- llama_tokens common_speculative_gen_ngram_cache(
+ static llama_tokens common_speculative_gen_ngram_cache(
common_speculative_state_ngram_cache & state,
const llama_tokens & tokens, llama_token sampled);
@@ -505,14 +517,15 @@ llama_tokens common_speculative_gen_draft(
llama_tokens result = {};
spec->curr_impl = nullptr; // reset current implementation
+ // TODO: avoid dynamic casts
for (auto & impl : spec->impls) {
impl->drafts_call_count++;
// LOG name and call_count
switch (impl->type) {
case COMMON_SPECULATIVE_TYPE_NONE:
{
- break;
- }
+ } break;
case COMMON_SPECULATIVE_TYPE_DRAFT:
{
// Create a draft using a draft model.
@@ -522,13 +535,11 @@ llama_tokens common_speculative_gen_draft(
} else {
GGML_ABORT("unexpected implementation in type %d", impl.get()->type);
}
- break;
- }
+ } break;
case COMMON_SPECULATIVE_TYPE_EAGLE3:
{
// Work in progress: https://github.com/ggml-org/llama.cpp/pull/18039
- break;
- }
+ } break;
case COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE:
{
// Use common_ngram_map_draft to generate a draft from the current context.
@@ -538,8 +549,7 @@ llama_tokens common_speculative_gen_draft(
} else {
GGML_ABORT("unexpected implementation in type %d", impl.get()->type);
}
- break;
- }
+ } break;
case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K:
{
// Use common_ngram_map_draft to generate a draft from the current context.
@@ -549,8 +559,7 @@ llama_tokens common_speculative_gen_draft(
} else {
GGML_ABORT("unexpected implementation in type %d", impl.get()->type);
}
- break;
- }
+ } break;
case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V:
{
// Use common_ngram_map_draft to generate a draft from the current context.
@@ -560,8 +569,7 @@ llama_tokens common_speculative_gen_draft(
} else {
GGML_ABORT("unexpected implementation in type %d", impl.get()->type);
}
- break;
- }
+ } break;
case COMMON_SPECULATIVE_TYPE_NGRAM_CACHE:
{
auto * state = dynamic_cast<common_speculative_state_ngram_cache *>(impl.get());
@@ -570,14 +578,13 @@ llama_tokens common_speculative_gen_draft(
} else {
GGML_ABORT("unexpected implementation in type %d", impl.get()->type);
}
- break;
- }
+ } break;
case COMMON_SPECULATIVE_TYPE_COUNT:
{
GGML_ABORT("invalid speculative type COUNT");
break;
}
}
if (!result.empty()) {
LOG_DBG("%s: called impl %s, hist size = %zu, call_count = %zu, gen = %zu\n", __func__,
common_speculative_type_to_str(impl.get()->type).c_str(),
@@ -597,13 +604,12 @@ llama_tokens common_speculative_gen_draft(
llama_tokens common_speculative_use_draft_model(
struct common_speculative_state_draft * spec,
struct common_speculative_params params,
const llama_tokens & prompt_tgt_main_model, // specified in target model vocab
const llama_tokens & prompt_tgt, // specified in target model vocab
llama_token id_last) {
auto & batch = spec->batch;
auto & ctx_tgt = spec->ctx_tgt;
auto & ctx_dft = spec->ctx_dft;
auto & smpl = spec->smpl;
auto & batch = spec->batch;
auto & ctx_tgt = spec->ctx_tgt;
auto & ctx_dft = spec->ctx_dft;
auto & smpl = spec->smpl;
auto & prompt_dft = spec->prompt_dft;
auto * mem_dft = llama_get_memory(ctx_dft);
@@ -613,13 +619,16 @@ llama_tokens common_speculative_use_draft_model(
const int n_ctx = llama_n_ctx(ctx_dft) - params.n_draft;
- llama_tokens prompt_tgt_draft_model;
- if (!spec->vocab_dft_compatible) {
+ llama_tokens prompt_cnv;
+ if (!spec->vocab_cmpt) {
std::string text;
- text = common_detokenize(ctx_tgt, prompt_tgt_main_model, true);
+ text = common_detokenize(ctx_tgt, prompt_tgt, true);
text = replace_to_dft(spec, text);
LOG_DBG("%s: main->draft detokenized string: '%s'\n", __func__, text.c_str());
- prompt_tgt_draft_model = common_tokenize(ctx_dft, text, false, true);
+ prompt_cnv = common_tokenize(ctx_dft, text, false, true);
// convert id_last to draft vocab. llama_detokenize is called directly to avoid an allocation
const auto * model_tgt = llama_get_model(ctx_tgt);
@@ -627,6 +636,7 @@ llama_tokens common_speculative_use_draft_model(
int32_t n_chars = llama_detokenize(vocab_tgt, &id_last, 1, nullptr, 0, false, false);
GGML_ASSERT(n_chars < 0 && "failed to detokenize id_last");
text.resize(-n_chars);
llama_detokenize(vocab_tgt, &id_last, 1, text.data(), text.size(), false, false);
text = replace_to_dft(spec, text);
@@ -634,23 +644,22 @@ llama_tokens common_speculative_use_draft_model(
LOG_DBG("main->draft detokenized id_last(%d): '%s'\n", id_last, text.c_str());
id_last = common_tokenize(ctx_dft, text, false, true)[0];
}
- // prompt_tgt's tokens will always be compatible with ctx_dft
- const llama_tokens &prompt_tgt =
- spec->vocab_dft_compatible ? prompt_tgt_main_model : prompt_tgt_draft_model;
- const int i_start = std::max<int>(0, (int) prompt_tgt.size() - n_ctx);
+ const llama_tokens & prompt_cur = spec->vocab_cmpt ? prompt_tgt : prompt_cnv;
+ const int i_start = std::max<int>(0, (int) prompt_cur.size() - n_ctx);
// reuse as much as possible from the old draft context
// ideally, the draft context should be as big as the target context and we will always reuse the entire prompt
for (int i = 0; i < (int) prompt_dft.size(); ++i) {
int cur = 0;
while (i_start + cur < (int) prompt_tgt.size() &&
while (i_start + cur < (int) prompt_cur.size() &&
i + cur < (int) prompt_dft.size() &&
- prompt_tgt[i_start + cur] == prompt_dft[i + cur]) {
+ prompt_cur[i_start + cur] == prompt_dft[i + cur]) {
cur++;
}
if ((cur >= params.n_reuse || n_ctx >= (int) prompt_tgt.size()) && cur > reuse_n) {
if ((cur >= 256 || n_ctx >= (int) prompt_cur.size()) && cur > reuse_n) {
reuse_i = i;
reuse_n = cur;
}
@@ -695,11 +704,11 @@ llama_tokens common_speculative_use_draft_model(
// prepare a batch to evaluate any new tokens in the prompt
common_batch_clear(batch);
for (size_t i = i_start + reuse_n; i < prompt_tgt.size(); ++i) {
//LOG_DBG("i = %d, i_start = %d, reuse_n = %d, i - i_start = %d, id = %6d\n", i, i_start, reuse_n, i - i_start, prompt_tgt[i]);
common_batch_add(batch, prompt_tgt[i], i - i_start, { 0 }, false);
for (size_t i = i_start + reuse_n; i < prompt_cur.size(); ++i) {
//LOG_DBG("i = %d, i_start = %d, reuse_n = %d, i - i_start = %d, id = %6d\n", i, i_start, reuse_n, i - i_start, prompt_cur[i]);
common_batch_add(batch, prompt_cur[i], i - i_start, { 0 }, false);
prompt_dft.push_back(prompt_tgt[i]);
prompt_dft.push_back(prompt_cur[i]);
}
// we should rarely end up here during normal decoding
@@ -761,7 +770,7 @@ llama_tokens common_speculative_use_draft_model(
prompt_dft.push_back(id);
}
if (!spec->vocab_dft_compatible) {
if (!spec->vocab_cmpt) {
std::string detokenized = common_detokenize(ctx_dft, result, true);
detokenized = replace_to_tgt(spec, detokenized);
LOG_DBG("draft->main detokenized string: '%s'\n", detokenized.c_str());
@@ -770,22 +779,31 @@ llama_tokens common_speculative_use_draft_model(
result.resize(params.n_draft);
}
}
return result;
}
- void common_speculative_accept(struct common_speculative * spec, const uint16_t n_accepted) {
+ void common_speculative_accept(struct common_speculative * spec, uint16_t n_accepted) {
+ if (n_accepted == 0) {
+ return;
+ }
common_speculative_state * impl = spec->curr_impl;
- if (impl != nullptr) {
- if (n_accepted > 0) {
- impl->drafts_accepted_count++;
- impl->drafts_accepted_tokens += n_accepted;
- }
- if (impl->type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K ||
- impl->type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V) {
- auto * state = dynamic_cast<struct common_speculative_state_ngram_map_k *>(impl);
- if (state) {
- common_ngram_map_accept(state->map, n_accepted);
- }
+ GGML_ASSERT(impl);
+ if (n_accepted > 0) {
+ impl->drafts_accepted_count++;
+ impl->drafts_accepted_tokens += n_accepted;
+ }
+ if (impl->type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K ||
+ impl->type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V) {
+ // TODO: add common_speculative_state::accept() to base class and remove this dynamic cast
+ auto * state = dynamic_cast<struct common_speculative_state_ngram_map_k *>(impl);
+ if (state) {
+ common_ngram_map_accept(state->map, n_accepted);
+ }
}
}
@@ -794,6 +812,7 @@ void common_speculative_print_stats(const struct common_speculative * spec) {
if (spec == nullptr) {
return;
}
for (const auto & impl : spec->impls) {
LOG_INF("statistics %s: #calls = %zu, #gen drafts = %zu, #acc drafts = %zu, #gen tokens = %zu, #acc tokens = %zu\n",
common_speculative_type_to_str(impl->type).c_str(),

View File

@@ -6,10 +6,9 @@
struct common_speculative;
struct common_speculative_params {
int n_draft = 16; // max drafted tokens
- int n_reuse = 256;
float p_min = 0.75f; // min probability required to accept a token in the draft
};
// comma separated list of all types
@@ -22,20 +21,16 @@ enum common_speculative_type common_speculative_type_from_name(const std::string
std::string common_speculative_type_to_str(enum common_speculative_type type);
struct common_speculative * common_speculative_init(
struct common_params_speculative & params,
struct llama_context * ctx_tgt,
struct llama_context * ctx_dft
);
const struct common_params_speculative & params,
struct llama_context * ctx_tgt,
const struct llama_context_params & cparams_dft,
struct llama_model * model_dft);
void common_speculative_free(struct common_speculative * spec);
bool common_speculative_are_compatible(
- const struct llama_context * ctx_tgt,
- const struct llama_context * ctx_dft);
- void common_speculative_add_replacement_tgt_dft(
- struct common_speculative * spec,
- const char *source, const char *dest);
+ const struct llama_model * model_tgt,
+ const struct llama_model * model_dft);
// sample up to n_draft tokens and add them to the batch using the draft model
llama_tokens common_speculative_gen_draft(
@@ -45,9 +40,7 @@ llama_tokens common_speculative_gen_draft(
llama_token id_last);
// informs the speculative decoder that n_accepted tokens were accepted by the target model
- void common_speculative_accept(
- struct common_speculative * spec,
- const uint16_t n_accepted);
+ void common_speculative_accept(struct common_speculative * spec, uint16_t n_accepted);
// print statistics about the speculative decoding
void common_speculative_print_stats(const struct common_speculative * spec);
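Putting the reworked header together, a hedged lifecycle sketch (prompt_tgt, id_last and n_accepted are assumed to come from the caller's decode loop; error handling elided):

common_speculative_params params_spec;
params_spec.n_draft = 16;   // note: n_reuse is gone from the params
params_spec.p_min   = 0.75f;

// the speculator now builds and owns the draft context from (cparams_dft, model_dft)
struct common_speculative * spec = common_speculative_init(params.speculative, ctx_tgt, cparams_dft, model_dft);

llama_tokens draft = common_speculative_gen_draft(spec, params_spec, prompt_tgt, id_last);
// ... evaluate the draft with the target model and count accepted tokens ...
common_speculative_accept(spec, n_accepted);

common_speculative_print_stats(spec);
common_speculative_free(spec); // also frees the internally created draft context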

View File

@@ -34,10 +34,9 @@ int main(int argc, char ** argv) {
llama_numa_init(params.numa);
llama_model * model_tgt = NULL;
- //llama_model * model_dft = NULL;
+ llama_model * model_dft = NULL;
llama_context * ctx_tgt = NULL;
- llama_context * ctx_dft = NULL;
// load the target model
auto llama_init_tgt = common_init_from_params(params);
@@ -63,12 +62,7 @@ int main(int argc, char ** argv) {
auto llama_init_dft = common_init_from_params(params);
//model_dft = llama_init_dft->model();
ctx_dft = llama_init_dft->context();
if (!common_speculative_are_compatible(ctx_tgt, ctx_dft)) {
LOG_INF("the draft model '%s' is not compatible with the target model '%s'. tokens will be translated between the draft and target models.\n", params.speculative.model.path.c_str(), params.model.path.c_str());
}
model_dft = llama_init_dft->model();
// Tokenize the prompt
std::vector<llama_token> inp;
@@ -129,13 +123,9 @@ int main(int argc, char ** argv) {
// init the speculator
struct common_speculative_params params_spec;
params_spec.n_draft = n_draft;
- params_spec.n_reuse = llama_n_ctx(ctx_dft) - n_draft;
params_spec.p_min = p_min;
- struct common_speculative * spec = common_speculative_init(params.speculative, ctx_tgt, ctx_dft);
- for (auto &pair : params.speculative.replacements) {
- common_speculative_add_replacement_tgt_dft(spec, pair.first.c_str(), pair.second.c_str());
- }
+ struct common_speculative * spec = common_speculative_init(params.speculative, ctx_tgt, common_context_params_to_llama(params), model_dft);
llama_batch batch_tgt = llama_batch_init(llama_n_batch(ctx_tgt), 0, 1);
@@ -249,8 +239,6 @@ int main(int argc, char ** argv) {
LOG_INF("\n");
LOG_INF("draft:\n\n");
llama_perf_context_print(ctx_dft);
LOG_INF("\n");
LOG_INF("target:\n\n");
common_perf_print(ctx_tgt, smpl);

View File

@@ -50,7 +50,6 @@ struct server_slot {
// TODO: change to unique_ptrs for consistency:
llama_context * ctx = nullptr;
- llama_context * ctx_dft = nullptr;
// multimodal
mtmd_context * mctx = nullptr;
@@ -256,9 +255,8 @@ struct server_slot {
return state != SLOT_STATE_IDLE;
}
// Checks if a draft model is active, or self-speculation using context tokens
bool can_speculate() const {
return task->params.speculative.configs.size() > 0;
return !!spec;
}
void add_token(const completion_token_output & token) {
@@ -553,18 +551,13 @@ private:
// note: keep these alive - they determine the lifetime of the model, context, etc.
common_init_result_ptr llama_init;
- common_init_result_ptr llama_init_dft;
llama_context * ctx = nullptr;
- bool vocab_dft_compatible = true;
- llama_model * model_dft = nullptr;
- llama_context_params cparams_dft;
llama_batch batch {};
+ llama_model_ptr model_dft;
bool add_bos_token = true;
int32_t n_ctx; // total context for all clients / slots
@@ -597,9 +590,6 @@ private:
// Clear any sampling context
for (server_slot & slot : slots) {
- llama_free(slot.ctx_dft);
- slot.ctx_dft = nullptr;
common_speculative_free(slot.spec);
slot.spec = nullptr;
}
@@ -646,44 +636,26 @@ private:
add_bos_token = llama_vocab_get_add_bos(vocab);
- if (params_base.has_speculative()) {
+ if (params_base.speculative.has_dft()) {
SRV_INF("loading draft model '%s'\n", params_base.speculative.model.path.c_str());
const auto & params_spec = params_base.speculative;
auto params_dft = params_base;
- params_dft.devices = params_base.speculative.devices;
- params_dft.model = params_base.speculative.model;
- params_dft.n_ctx = params_base.speculative.n_ctx == 0 ? llama_n_ctx_seq(ctx) : params_base.speculative.n_ctx;
- params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
- params_dft.n_parallel = 1;
- params_dft.cache_type_k = params_base.speculative.cache_type_k;
- params_dft.cache_type_v = params_base.speculative.cache_type_v;
+ params_dft.devices = params_spec.devices;
+ params_dft.model = params_spec.model;
+ params_dft.n_gpu_layers = params_spec.n_gpu_layers;
- params_dft.cpuparams.n_threads = params_base.speculative.cpuparams.n_threads;
- params_dft.cpuparams_batch.n_threads = params_base.speculative.cpuparams_batch.n_threads;
- params_dft.tensor_buft_overrides = params_base.speculative.tensor_buft_overrides;
+ params_dft.tensor_buft_overrides = params_spec.tensor_buft_overrides;
- llama_init_dft = common_init_from_params(params_dft);
- model_dft = llama_init_dft->model();
+ auto mparams_dft = common_model_params_to_llama(params_dft);
+ model_dft.reset(llama_model_load_from_file(params_dft.model.path.c_str(), mparams_dft));
if (model_dft == nullptr) {
SRV_ERR("failed to load draft model, '%s'\n", params_base.speculative.model.path.c_str());
SRV_ERR("failed to load draft model, '%s'\n", params_spec.model.path.c_str());
return false;
}
- vocab_dft_compatible = common_speculative_are_compatible(ctx, llama_init_dft->context());
- if (!vocab_dft_compatible) {
- SRV_INF("the draft model '%s' is not compatible with the target model '%s'. tokens will be translated between the draft and target models.\n", params_base.speculative.model.path.c_str(), params_base.model.path.c_str());
- }
- const int n_ctx_dft = llama_n_ctx(llama_init_dft->context());
- cparams_dft = common_context_params_to_llama(params_dft);
- cparams_dft.n_batch = n_ctx_dft;
- // the context is not needed - we will create one for each slot
- llama_init_dft->free_context();
}
std::string & mmproj_path = params_base.mmproj.path;
@@ -693,6 +665,7 @@ private:
}
mtmd_context_params mparams = mtmd_context_params_default();
mparams.use_gpu = params_base.mmproj_use_gpu;
mparams.print_timings = false;
mparams.n_threads = params_base.cpuparams.n_threads;
@@ -700,6 +673,7 @@ private:
mparams.warmup = params_base.warmup;
mparams.image_min_tokens = params_base.image_min_tokens;
mparams.image_max_tokens = params_base.image_max_tokens;
mctx = mtmd_init_from_file(mmproj_path.c_str(), model, mparams);
if (mctx == nullptr) {
SRV_ERR("failed to load multimodal model, '%s'\n", mmproj_path.c_str());
@@ -716,11 +690,6 @@ private:
params_base.n_cache_reuse = 0;
SRV_WRN("%s\n", "cache_reuse is not supported by multimodal, it will be disabled");
}
- if (params_base.has_speculative()) {
- SRV_ERR("%s\n", "err: speculative decode is not supported by multimodal");
- return false;
- }
}
if (!llama_memory_can_shift(llama_get_memory(ctx))) {
@@ -755,30 +724,40 @@ private:
for (int i = 0; i < params_base.n_parallel; i++) {
server_slot slot;
slot.id = i;
slot.ctx = ctx;
slot.n_ctx = n_ctx_slot;
slot.mctx = mctx;
slot.prompt.tokens.has_mtmd = mctx != nullptr;
- if (model_dft) {
- // TODO: rework speculative decoding [TAG_SERVER_SPEC_REWORK]
- slot.ctx_dft = llama_init_from_model(model_dft, cparams_dft);
- if (slot.ctx_dft == nullptr) {
- SRV_ERR("%s", "failed to create draft context\n");
- return false;
- }
+ // try speculative decoding
+ {
+ const auto & params_spec = params_base.speculative;
- slot.spec = common_speculative_init(params_base.speculative, slot.ctx, slot.ctx_dft);
- if (slot.spec == nullptr) {
- SRV_ERR("%s", "failed to create speculator\n");
- return false;
+ auto params_dft = params_base;
+ params_dft.n_parallel = 1;
+ params_dft.n_ctx = params_spec.n_ctx == 0 ? llama_n_ctx_seq(ctx) : params_spec.n_ctx;
+ params_dft.n_batch = llama_n_ctx_seq(ctx);
+ params_dft.cache_type_k = params_spec.cache_type_k;
+ params_dft.cache_type_v = params_spec.cache_type_v;
+ params_dft.cpuparams.n_threads = params_spec.cpuparams.n_threads;
+ params_dft.cpuparams_batch.n_threads = params_spec.cpuparams_batch.n_threads;
+ auto cparams_dft = common_context_params_to_llama(params_dft);
+ slot.spec = common_speculative_init(params_base.speculative, slot.ctx, cparams_dft, model_dft.get());
+ if (slot.spec) {
+ if (mctx) {
+ SRV_ERR("%s\n", "speculative decoding is not supported with multimodal");
+ return false;
+ }
+ SRV_WRN("%s", "speculative decoding context initialized\n");
+ } else {
+ SRV_WRN("%s", "speculative decoding context not initialized\n");
+ }
- for (auto & pair : params_base.speculative.replacements) {
- common_speculative_add_replacement_tgt_dft(slot.spec, pair.first.c_str(), pair.second.c_str());
- }
- } else if (params_base.speculative.configs.size() > 0) {
- slot.spec = common_speculative_init(params_base.speculative, nullptr, nullptr);
}
SLT_INF(slot, "new slot, n_ctx = %d\n", slot.n_ctx);
@@ -1057,7 +1036,7 @@ private:
return res;
}
std::vector<common_adapter_lora_info> construct_lora_list(const std::map<int, float> & config) {
std::vector<common_adapter_lora_info> construct_lora_list(const std::map<int, float> & config) const {
std::vector<common_adapter_lora_info> output = params_base.lora_adapters; // copy
for (size_t i = 0; i < output.size(); ++i) {
auto it = config.find(i);
@@ -1160,7 +1139,7 @@ private:
backend_sampling &= task.params.sampling.backend_sampling;
// TODO: speculative decoding requires multiple samples per batch - not supported yet
backend_sampling &= !(slot.ctx_dft && task.params.speculative.n_max > 0);
backend_sampling &= !(slot.spec && task.params.speculative.n_max > 0);
// TODO: getting post/pre sampling logits is not yet supported with backend sampling
backend_sampling &= !need_logits;
@@ -2058,7 +2037,6 @@ private:
struct common_speculative_params params_spec;
params_spec.n_draft = n_draft_max;
- params_spec.n_reuse = slot.ctx_dft ? (llama_n_ctx(slot.ctx_dft) - slot.task->params.speculative.n_max) : 0;
params_spec.p_min = slot.task->params.speculative.p_min;
const llama_tokens & cached_text_tokens = slot.prompt.tokens.get_text_tokens();
llama_tokens draft = common_speculative_gen_draft(slot.spec, params_spec, cached_text_tokens, slot.sampled);

View File

@@ -238,6 +238,8 @@ task_params server_task::params_from_json_cmpl(
params.speculative.n_min = json_value(data, "speculative.n_min", defaults.speculative.n_min);
params.speculative.n_max = json_value(data, "speculative.n_max", defaults.speculative.n_max);
params.speculative.p_min = json_value(data, "speculative.p_min", defaults.speculative.p_min);
+ // TODO: is this needed? remove?
//params.speculative.self_mode = json_value(data, "speculative.self_mode", defaults.speculative.self_mode);
//params.speculative.self_cfg = json_value(data, "speculative.self_cfg", defaults.speculative.self_cfg);
// Set params.speculative.configs. Use json-array "speculative.configs" if provided in data, otherwise use {}