From c12a78fe4dc1cfe6723bcb5a0502b9ac23555039 Mon Sep 17 00:00:00 2001
From: vansangpfiev
Date: Tue, 1 Oct 2024 17:12:00 +0700
Subject: [PATCH] fix: run cmd (#1380)

---
 engine/commands/run_cmd.cc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/engine/commands/run_cmd.cc b/engine/commands/run_cmd.cc
index 0d2b56455..8d2cdc77e 100644
--- a/engine/commands/run_cmd.cc
+++ b/engine/commands/run_cmd.cc
@@ -72,8 +72,8 @@ void RunCmd::Exec(bool chat_flag) {
   // If it is llamacpp, then check model status first
   {
     if ((mc.engine.find("llamacpp") == std::string::npos) ||
-        !commands::ModelStatusCmd().IsLoaded(host_, port_, model_handle_)) {
-      if (!ModelStartCmd().Exec(host_, port_, model_handle_)) {
+        !commands::ModelStatusCmd().IsLoaded(host_, port_, *model_id)) {
+      if (!ModelStartCmd().Exec(host_, port_, *model_id)) {
        return;
      }
    }
@@ -81,7 +81,7 @@ void RunCmd::Exec(bool chat_flag) {
 
   // Chat
   if (chat_flag) {
-    ChatCompletionCmd().Exec(host_, port_, model_handle_, mc, "");
+    ChatCompletionCmd().Exec(host_, port_, *model_id, mc, "");
   } else {
     CLI_LOG(*model_id << " model started successfully. Use `"
             << commands::GetCortexBinary() << " chat " << *model_id
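
Note on the change (commentary outside the patch): `RunCmd::Exec` resolves the user-supplied `model_handle_` into `model_id` before this block, and the `*model_id` dereference already present in the unchanged `CLI_LOG` context line suggests it is an optional-like value. The fix is to pass that resolved id, rather than the raw handle, to `ModelStatusCmd::IsLoaded`, `ModelStartCmd::Exec`, and `ChatCompletionCmd::Exec`. Below is a minimal, self-contained sketch of the same pattern; `ResolveModelId` and its alias table are hypothetical stand-ins for illustration, not cortex's actual resolution logic.

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for the resolution RunCmd performs before the
// patched block: map a user-typed handle to the canonical model id,
// or return std::nullopt when nothing matches.
std::optional<std::string> ResolveModelId(const std::string& handle) {
  static const std::unordered_map<std::string, std::string> aliases = {
      {"tinyllama", "tinyllama:1b-gguf"},
      {"mistral", "mistral:7b-gguf"},
  };
  auto it = aliases.find(handle);
  if (it == aliases.end()) return std::nullopt;
  return it->second;
}

int main() {
  std::string model_handle = "tinyllama";  // what the user typed
  auto model_id = ResolveModelId(model_handle);
  if (!model_id) {
    std::cerr << "Unknown model: " << model_handle << '\n';
    return 1;
  }
  // The point of the patch: once the handle is resolved, every
  // downstream call must use *model_id, not the raw handle, or the
  // status check and start can target a name the server never loaded.
  std::cout << "Starting " << *model_id << " (requested as \""
            << model_handle << "\")\n";
  return 0;
}
```

The design point mirrored here: dereference the optional once it is known to hold a value, and pass the same resolved id to every downstream call, so the status check, the model start, and the chat all refer to one model name.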