chore: some update for engine management #1650

Merged: 7 commits, Nov 8, 2024
Changes shown are from 1 commit.
8 changes: 8 additions & 0 deletions engine/cli/command_line_parser.cc

```diff
@@ -113,6 +113,14 @@ bool CommandLineParser::SetupCommand(int argc, char** argv) {
     }
   }
 #endif
+  auto config = file_manager_utils::GetCortexConfig();
+  if (!config.llamacppVersion.empty() &&
+      config.latestLlamacppRelease != config.llamacppVersion) {
+    CLI_LOG(
+        "\nNew llama.cpp version available: " << config.latestLlamacppRelease);
+    CLI_LOG("To update, run: " << commands::GetCortexBinary()
+                               << " engines update llama-cpp");
+  }
 
   return true;
 }
```
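The notice added above runs on every CLI invocation, but it is purely local: it compares two strings already cached in the config file, so no network request happens on the command path. A minimal standalone sketch of the same pattern, using a hypothetical `Config` struct in place of whatever `file_manager_utils::GetCortexConfig()` actually returns:

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-in for the two config fields the check reads;
// the real struct lives behind file_manager_utils::GetCortexConfig().
struct Config {
  std::string llamacppVersion;        // engine version currently installed
  std::string latestLlamacppRelease;  // release cached by the background check
};

// Print an update hint only when an engine is installed and a newer
// release has already been recorded in the config file.
void MaybePrintUpdateNotice(const Config& config) {
  if (!config.llamacppVersion.empty() &&
      config.latestLlamacppRelease != config.llamacppVersion) {
    std::cout << "\nNew llama.cpp version available: "
              << config.latestLlamacppRelease << "\n"
              << "To update, run: cortex engines update llama-cpp\n";
  }
}

int main() {
  MaybePrintUpdateNotice({"v0.1.25", "v0.1.37"});  // prints the notice
  MaybePrintUpdateNotice({"", "v0.1.37"});         // no engine installed: silent
}
```

The `!config.llamacppVersion.empty()` guard matters: without it, a user who never installed llama-cpp would still be told to update it.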
85 changes: 36 additions & 49 deletions engine/cli/main.cc

```diff
@@ -101,49 +101,49 @@ int main(int argc, char* argv[]) {
 
   RemoveBinaryTempFileIfExists();
 
-  std::thread t1([]() {
-    // TODO: namh current we only check for llamacpp. Need to add support for other engine
-    auto should_check_for_latest_llamacpp_version = true;
-    auto now = std::chrono::system_clock::now();
-
-    // read the yaml to see the last time we check for update
-    auto config = file_manager_utils::GetCortexConfig();
-    if (config.checkedForLlamacppUpdateAt != 0) {
-      // if it passed a day, then we should check
-      auto last_check =
-          std::chrono::system_clock::time_point(
-              std::chrono::milliseconds(config.checkedForLlamacppUpdateAt)) +
-          std::chrono::hours(24);
-      should_check_for_latest_llamacpp_version = now > last_check;
-      CTL_DBG("should_check_for_latest_llamacpp_version: "
-              << should_check_for_latest_llamacpp_version);
-    }
+  auto should_check_for_latest_llamacpp_version = true;
+  auto now = std::chrono::system_clock::now();
+
+  // read the yaml to see the last time we check for update
+  auto config = file_manager_utils::GetCortexConfig();
+  if (config.checkedForLlamacppUpdateAt != 0) {
+    // if it passed a day, then we should check
+    auto last_check =
+        std::chrono::system_clock::time_point(
+            std::chrono::milliseconds(config.checkedForLlamacppUpdateAt)) +
+        std::chrono::hours(24);
+    should_check_for_latest_llamacpp_version = now > last_check;
+  }
 
-    auto get_latest_version = []() -> cpp::result<std::string, std::string> {
-      try {
-        auto res = github_release_utils::GetReleaseByVersion(
-            "janhq", "cortex.llamacpp", "latest");
-        if (res.has_error()) {
-          CTL_ERR("Failed to get latest llama.cpp version: " << res.error());
-          return cpp::fail("Failed to get latest llama.cpp version: " +
-                           res.error());
-        }
-        CTL_INF("Latest llamacpp version: " << res->tag_name);
-        return res->tag_name;
-      } catch (const std::exception& e) {
-        CTL_ERR("Failed to get latest llama.cpp version: " << e.what());
-        return cpp::fail("Failed to get latest llama.cpp version: " +
-                         std::string(e.what()));
-      }
-    };
-
-    if (should_check_for_latest_llamacpp_version) {
-      auto res = get_latest_version();
-      if (res.has_error()) {
-        CTL_ERR("Failed to get latest llama.cpp version: " << res.error());
-        return;
-      }
+  if (should_check_for_latest_llamacpp_version) {
+    std::thread t1([]() {
+      auto config = file_manager_utils::GetCortexConfig();
+      // TODO: namh current we only check for llamacpp. Need to add support for other engine
+      auto get_latest_version = []() -> cpp::result<std::string, std::string> {
+        try {
+          auto res = github_release_utils::GetReleaseByVersion(
+              "janhq", "cortex.llamacpp", "latest");
+          if (res.has_error()) {
+            CTL_ERR("Failed to get latest llama.cpp version: " << res.error());
+            return cpp::fail("Failed to get latest llama.cpp version: " +
+                             res.error());
+          }
+          CTL_INF("Latest llamacpp version: " << res->tag_name);
+          return res->tag_name;
+        } catch (const std::exception& e) {
+          CTL_ERR("Failed to get latest llama.cpp version: " << e.what());
+          return cpp::fail("Failed to get latest llama.cpp version: " +
+                           std::string(e.what()));
+        }
+      };
+
+      auto res = get_latest_version();
+      if (res.has_error()) {
+        CTL_ERR("Failed to get latest llama.cpp version: " << res.error());
+        return;
+      }
 
+      auto now = std::chrono::system_clock::now();
       CTL_DBG("latest llama.cpp version: " << res.value());
       config.checkedForLlamacppUpdateAt =
           std::chrono::duration_cast<std::chrono::milliseconds>(
```
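Two things happen in the hunk above: the "should we check today?" computation is hoisted out of the lambda onto the main thread, and `get_latest_version` keeps its dual error handling, turning both a failed API response and a thrown exception into the error channel of `cpp::result<std::string, std::string>`. A rough sketch of that normalization pattern, using C++23 `std::expected` as a stand-in for the vendored `cpp::result` type and a placeholder for the GitHub API call:

```cpp
#include <expected>   // C++23; plays the role of cpp::result here
#include <iostream>
#include <stdexcept>
#include <string>

// Sketch: wrap a call that can either return an error or throw, and
// funnel every failure mode into a single error channel for the caller.
std::expected<std::string, std::string> GetLatestVersion() {
  try {
    // Placeholder for github_release_utils::GetReleaseByVersion(
    //     "janhq", "cortex.llamacpp", "latest"); we just fake a tag here.
    std::string tag_name = "v0.1.37";
    return tag_name;
  } catch (const std::exception& e) {
    return std::unexpected("Failed to get latest llama.cpp version: " +
                           std::string(e.what()));
  }
}

int main() {
  auto res = GetLatestVersion();
  if (!res.has_value()) {
    std::cerr << res.error() << "\n";  // single place to report any failure
    return 1;
  }
  std::cout << "Latest llama.cpp version: " << *res << "\n";
}
```

Note also that the worker re-reads the config via `GetCortexConfig()` inside the thread rather than capturing the copy made on the main thread, which keeps the lambda capture-free.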
```diff
@@ -159,22 +159,9 @@ int main(int argc, char* argv[]) {
         CTL_INF("Updated config file with latest llama.cpp version: "
                 << res.value());
       }
-    }
-
-    CTL_DBG("latest llama.cpp version: " << config.latestLlamacppRelease);
-    CTL_DBG("llamacpp version: " << config.llamacppVersion);
-    if (config.llamacppVersion.empty()) {
-      return;
-    }
-
-    if (config.latestLlamacppRelease != config.llamacppVersion) {
-      CLI_LOG(
-          "New llama.cpp version available: " << config.latestLlamacppRelease);
-      CLI_LOG("To update, run: " << commands::GetCortexBinary()
-                                 << " engines update llama-cpp\n");
-    }
-  });
-  t1.detach();
+    });
+    t1.detach();
+  }
 
   trantor::FileLogger async_file_logger;
   SetupLogger(async_file_logger, verbose);
```
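The net effect of the refactor is easiest to see in isolation: the 24-hour gate is evaluated up front on the main thread, and the detached worker is only spawned when a check is actually due, instead of unconditionally on every launch. A self-contained sketch of that gating logic, assuming the same epoch-millisecond timestamp convention as `checkedForLlamacppUpdateAt` (with 0 meaning "never checked"):

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>

// True when the persisted timestamp is unset or more than 24h old.
bool ShouldCheckForUpdate(int64_t checked_at_ms,
                          std::chrono::system_clock::time_point now) {
  if (checked_at_ms == 0)
    return true;  // first run: always check
  auto last_check = std::chrono::system_clock::time_point(
                        std::chrono::milliseconds(checked_at_ms)) +
                    std::chrono::hours(24);
  return now > last_check;
}

int main() {
  auto now = std::chrono::system_clock::now();
  int64_t checked_at_ms = 0;  // pretend this came from the config file

  // The PR's key change: gate first, spawn the thread only if needed.
  if (ShouldCheckForUpdate(checked_at_ms, now)) {
    std::thread t([] {
      std::cout << "checking for the latest llama.cpp release...\n";
    });
    t.detach();  // fire-and-forget, mirroring t1.detach() in main.cc
  }

  // Toy-example only: give the detached thread a moment to print
  // before main() returns and the process exits.
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
```

One caveat with `detach()`: if the process exits before the worker finishes, the check is silently abandoned, which seems acceptable here since the result is only cached for the next run anyway.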