mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2026-05-14 13:04:08 +00:00
more stable
This commit is contained in:
@@ -5825,7 +5825,10 @@ int main(int argc, char ** argv, char ** envp) {

    LOG_INF("%s: starting the main loop...\n", __func__);

    // optionally, notify router server that this instance is ready
    server_models::notify_router_server_ready(params.hostname, params.model_alias);
    const char * router_port = std::getenv("LLAMA_SERVER_ROUTER_PORT");
    if (router_port != nullptr) {
        server_models::setup_child_server(params.hostname, std::atoi(router_port), params.model_alias, shutdown_handler);
    }

    // this call blocks the main thread until queue_tasks.terminate() is called
    ctx_server.queue_tasks.start_loop();
Reference in New Issue
Block a user