From 75625bbfb89ba482556a3e92fcce6c68bcb00bf4 Mon Sep 17 00:00:00 2001
From: nguyenhoangthuan99
Date: Mon, 30 Dec 2024 12:34:18 +0700
Subject: [PATCH] Fix comment

---
 engine/controllers/server.cc     |  4 ++--
 engine/services/model_service.cc | 15 +++++++--------
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/engine/controllers/server.cc b/engine/controllers/server.cc
index cc5cee54a..961798d2c 100644
--- a/engine/controllers/server.cc
+++ b/engine/controllers/server.cc
@@ -130,7 +130,7 @@ void server::FineTuning(
 void server::Inference(const HttpRequestPtr& req,
                        std::function&& callback) {
   LOG_TRACE << "Start inference";
-  auto q = std::make_shared();
+  auto q = std::make_shared();
   auto ir = inference_svc_->HandleInference(q, req->getJsonObject());
   LOG_DEBUG << "request: " << req->getJsonObject()->toStyledString();
   if (ir.has_error()) {
@@ -156,7 +156,7 @@ void server::RouteRequest(
     std::function&& callback) {

   LOG_TRACE << "Start route request";
-  auto q = std::make_shared();
+  auto q = std::make_shared();
   auto ir = inference_svc_->HandleRouteRequest(q, req->getJsonObject());
   LOG_DEBUG << "request: " << req->getJsonObject()->toStyledString();
   if (ir.has_error()) {
diff --git a/engine/services/model_service.cc b/engine/services/model_service.cc
index 34ca60b3b..d714c6b23 100644
--- a/engine/services/model_service.cc
+++ b/engine/services/model_service.cc
@@ -84,8 +84,7 @@ void ParseGguf(DatabaseService& db_service,
       CTL_ERR("Error adding model to modellist: " + result.error());
     }
   } else {
-    if (auto m = db_service.GetModelInfo(ggufDownloadItem.id);
-        m.has_value()) {
+    if (auto m = db_service.GetModelInfo(ggufDownloadItem.id); m.has_value()) {
       auto upd_m = m.value();
       upd_m.status = cortex::db::ModelStatus::Downloaded;
       if (auto r = db_service.UpdateModelEntry(ggufDownloadItem.id, upd_m);
@@ -472,7 +471,8 @@ cpp::result ModelService::HandleUrl(
       model_size = model_size + item.bytes.value_or(0);
     }
     auto gguf_download_item = finishedTask.items[0];
-    ParseGguf(*db_service_, gguf_download_item, author, std::nullopt, model_size);
+    ParseGguf(*db_service_, gguf_download_item, author, std::nullopt,
+              model_size);
   };

   auto result = download_service_->AddDownloadTask(downloadTask, on_finished);
@@ -653,7 +653,8 @@ cpp::result ModelService::DownloadModelFromCortexso(
   }

   std::string model_id{name + ":" + branch};
-  auto on_finished = [this, branch, model_id](const DownloadTask& finishedTask) {
+  auto on_finished = [this, branch,
+                      model_id](const DownloadTask& finishedTask) {
     const DownloadItem* model_yml_item = nullptr;
     auto need_parse_gguf = true;

@@ -824,8 +825,7 @@ cpp::result ModelService::StartModel(
   constexpr const int kDefautlContextLength = 8192;
   int max_model_context_length = kDefautlContextLength;
   Json::Value json_data;
-
-  auto model_entry = modellist_handler.GetModelInfo(model_handle);
+  auto model_entry = db_service_->GetModelInfo(model_handle);
   if (model_entry.has_error()) {
     CTL_WRN("Error: " + model_entry.error());
     return cpp::fail(model_entry.error());
@@ -842,7 +842,6 @@ cpp::result ModelService::StartModel(

       config::PythonModelConfig python_model_config;
       python_model_config.ReadFromYaml(
-          fmu::ToAbsoluteCortexDataPath(
               fs::path(model_entry.value().path_to_model_yaml))
               .string());

@@ -1051,7 +1050,7 @@ cpp::result ModelService::StopModel(

     // Update for python engine
     if (engine_name == kPythonEngine) {
-      auto model_entry = modellist_handler.GetModelInfo(model_handle);
+      auto model_entry = db_service_->GetModelInfo(model_handle);
       config::PythonModelConfig python_model_config;
       python_model_config.ReadFromYaml(
           fmu::ToAbsoluteCortexDataPath(