mirror of https://github.com/ggml-org/llama.cpp.git (synced 2026-05-10 11:04:06 +00:00)
cont : cleanup
@@ -202,10 +202,14 @@ static bool run(llama_context * ctx, const common_params & params) {
     print_tokenized_prompt(ctx, tokens, params.prompt);
 
     if (params.save_logits) {
-        output_data output {ctx, model, params};
-        std::filesystem::path model_path{params.model.path};
-        std::string model_name{model_path.stem().string()};
-        save_output_data(output, model_name, params.logits_output_dir);
+        try {
+            output_data output {ctx, model, params};
+            std::filesystem::path model_path{params.model.path};
+            std::string model_name{model_path.stem().string()};
+            save_output_data(output, model_name, params.logits_output_dir);
+        } catch (const std::exception & e) {
+            LOG_ERR("%s : error saving logits: %s\n", __func__, e.what());
+        }
     }
 
     return true;
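The hunk above wraps the existing save calls in a try/catch so that a failed save is logged via LOG_ERR instead of propagating out of run(). Below is a minimal standalone sketch of the same pattern, not llama.cpp code: save_logits_to() is a hypothetical stand-in for save_output_data(), and the model path and output directory are made up for illustration.

// Minimal sketch (not llama.cpp code): derive a model name from its file path
// with std::filesystem::path::stem() and guard the save step so a failure is
// logged instead of aborting.
#include <cstdio>
#include <filesystem>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for save_output_data().
static void save_logits_to(const std::string & name, const std::string & dir) {
    if (dir.empty()) {
        throw std::runtime_error("output directory not set");
    }
    std::printf("would write %s/%s.logits\n", dir.c_str(), name.c_str());
}

int main() {
    // e.g. "models/llama-3.2-1b-instruct.gguf" -> stem "llama-3.2-1b-instruct"
    std::filesystem::path model_path{"models/llama-3.2-1b-instruct.gguf"};
    std::string model_name{model_path.stem().string()};

    try {
        save_logits_to(model_name, "");   // empty dir: throws in this sketch
    } catch (const std::exception & e) {
        std::fprintf(stderr, "error saving logits: %s\n", e.what()); // logged, not fatal
    }
    return 0;
}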
@@ -223,7 +227,7 @@ int main(int argc, char ** argv) {
     llama_backend_init();
     llama_numa_init(params.numa);
 
-    std::optional<base_callback_data> cb_data;
+    std::optional<common_debug_cb_user_data> cb_data;
     if (!params.save_logits) {
         cb_data.emplace(params, params.tensor_filter);
     }
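The second hunk only changes the element type of the std::optional from base_callback_data to common_debug_cb_user_data; the object is still constructed in place with emplace() only when logits are not being saved. A minimal sketch of that conditional-construction pattern follows, using a hypothetical CbData type rather than the actual llama.cpp struct.

// Sketch: construct callback data lazily via std::optional + emplace().
#include <cstdio>
#include <optional>
#include <string>

// Hypothetical stand-in for common_debug_cb_user_data.
struct CbData {
    std::string filter;
    CbData(const std::string & f) : filter(f) {
        std::printf("callback data constructed (filter = \"%s\")\n", filter.c_str());
    }
};

int main() {
    bool save_logits  = false;       // mirrors params.save_logits
    std::string tfilt = "attn";      // mirrors params.tensor_filter

    std::optional<CbData> cb_data;   // empty: nothing constructed yet
    if (!save_logits) {
        cb_data.emplace(tfilt);      // construct in place only when needed
    }

    if (cb_data) {
        std::printf("debug callback active\n");
    }
    return 0;
}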