server, webui: support continue generation on reasoning models (#22727)

* server, webui : support continue generation on reasoning models (#22727)

Remove the throw blocking assistant prefill on reasoning models and
orchestrate thinking tags around the prefilled message so the parser
routes the next stream chunks correctly. The WebUI drops the reasoning
guard on the Continue button, sends reasoning_content with the
prefilled message, and persists partial reasoning on stop so the CoT
survives reload and resume.
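
For illustration, a minimal sketch of the Continue request this enables,
built with nlohmann::json as the server code does. The endpoint and the
reasoning_content field match the diff below; the model behavior, user
text, and partial CoT are made up:

    #include <nlohmann/json.hpp>

    using json = nlohmann::ordered_json;

    int main() {
        // body for POST /v1/chat/completions (sketch only)
        json body = {
            {"messages", json::array({
                {{"role", "user"}, {"content", "Why is the sky blue?"}},
                // trailing assistant turn: partial reasoning, no content yet,
                // so the server keeps the thinking block open and the model
                // resumes inside it
                {{"role", "assistant"},
                 {"reasoning_content", "Shorter wavelengths scatter more, so"}}
            })}
        };
        return 0;
    }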

Scope: templates with a simple thinking_start_tag / thinking_end_tag
pair. Channel-based templates like GPT-OSS are out of scope, pending
a per-template prefill API in common/chat.

First step toward #21754.
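
The scope line turns on a distinction the first hunk below probes for. A
sketch of the two template shapes, using a hypothetical template_probe
struct standing in for the probed common_chat_params fields; the tag
strings are illustrative, real values come from each model's chat template:

    #include <string>

    struct template_probe {        // stand-in for the probed fields
        bool        supports_thinking;
        std::string thinking_start_tag;
        std::string thinking_end_tag;
    };

    // tag-pair template (e.g. a <think>-style CoT model): the server can
    // open and close the block around the prefilled reasoning itself
    const template_probe tag_pair { true, "<think>", "</think>" };

    // channel-based template (GPT-OSS / Harmony): reasoning flows through a
    // channel, there is no literal end tag to re-emit after the prefilled
    // CoT, so reasoning prefill is rejected
    const template_probe channel_based { true, "", "" };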

* chore: update webui build output

* server: reject reasoning prefill on channel-based templates
Pascal, 2026-05-13 11:09:51 +02:00 (committed by GitHub)
parent 3796c94bad
commit 5d44db6008
5 changed files with 250 additions and 193 deletions


@@ -1082,11 +1082,12 @@ json oaicompat_chat_params_parse(
         throw std::invalid_argument("Cannot have 2 or more assistant messages at the end of the list.");
     }
     /* TODO: test this properly */
     inputs.reasoning_format = COMMON_REASONING_FORMAT_NONE;
-    if ( inputs.enable_thinking ) {
-        throw std::invalid_argument("Assistant response prefill is incompatible with enable_thinking.");
+    // reject reasoning prefill on channel based templates that do not expose explicit thinking tags
+    if (!last_message.reasoning_content.empty() && inputs.enable_thinking) {
+        auto probe_params = common_chat_templates_apply(opt.tmpls.get(), inputs);
+        if (probe_params.supports_thinking && probe_params.thinking_end_tag.empty()) {
+            throw std::invalid_argument("Assistant prefill with reasoning_content is not supported yet for this template.");
+        }
     }
     inputs.add_generation_prompt = true;
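
Read on its own, the new gate reduces to a single predicate over the
probed template parameters; a hedged restatement, assuming only the
common_chat_params fields visible in the hunk:

    // reasoning prefill is allowed unless the template claims thinking
    // support but exposes no literal end tag (the channel-based case)
    static bool reasoning_prefill_supported(const common_chat_params & params) {
        return !params.supports_thinking || !params.thinking_end_tag.empty();
    }
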
@@ -1098,6 +1099,42 @@ json oaicompat_chat_params_parse(
     /* Append assistant prefilled message */
     if (prefill_assistant_message) {
+        const bool thinking_active = chat_params.supports_thinking && !chat_params.thinking_end_tag.empty();
+        const bool has_reasoning   = !last_message.reasoning_content.empty();
+        const bool has_content     = !last_message.content.empty() || !last_message.content_parts.empty();
+        const bool mid_reasoning   = has_reasoning && !has_content;
+        // some templates inject thinking_start in generation_prompt, others let the model emit it
+        const bool gp_has_think = thinking_active
+            && chat_params.generation_prompt.find(chat_params.thinking_start_tag) != std::string::npos;
+        // open the thinking block when reasoning is present and the template did not inject it
+        if (has_reasoning) {
+            if (thinking_active && !gp_has_think) {
+                chat_params.prompt += chat_params.thinking_start_tag;
+            }
+            chat_params.prompt += last_message.reasoning_content;
+        }
+        if (thinking_active) {
+            if (mid_reasoning) {
+                // model continues inside the thinking block, keep generation_prompt open on think
+                if (!gp_has_think) {
+                    chat_params.generation_prompt += chat_params.thinking_start_tag;
+                }
+            } else {
+                // close thinking block when reasoning is followed by content, or when the template forced it open
+                if (has_reasoning || gp_has_think) {
+                    chat_params.prompt += chat_params.thinking_end_tag;
+                }
+                // strip thinking_start from generation_prompt so the parser routes model output as content
+                auto pos = chat_params.generation_prompt.rfind(chat_params.thinking_start_tag);
+                if (pos != std::string::npos) {
+                    chat_params.generation_prompt = chat_params.generation_prompt.substr(0, pos);
+                }
+            }
+        }
         if (!last_message.content_parts.empty()) {
             for (auto & p : last_message.content_parts) {
                 chat_params.prompt += p.text;
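
Traced on a tag-pair template whose generation prompt does not already
inject the start tag (illustrative <think>/</think> strings, so
gp_has_think == false), the two prefill shapes the branches above handle
come out as:

    // mid-reasoning continue (reasoning_content only, no content):
    //   prompt            += "<think>" + reasoning_content   // block left open
    //   generation_prompt += "<think>"                       // parser stays in reasoning
    //
    // reasoning followed by content:
    //   prompt            += "<think>" + reasoning_content + "</think>" + content
    //   generation_prompt unchanged                          // parser routes output as content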