From c8afbd2612527aeebf0af76643174d8a328c5d88 Mon Sep 17 00:00:00 2001
From: Roy Shilkrot
Date: Sun, 15 Oct 2023 08:49:05 -0400
Subject: [PATCH] fix warns

---
 src/llm-dock/llama-inference.cpp | 5 +++--
 src/llm-dock/llama-inference.h   | 1 +
 src/llm-dock/llm-config-data.cpp | 2 +-
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/llm-dock/llama-inference.cpp b/src/llm-dock/llama-inference.cpp
index 008b742..6d3612d 100644
--- a/src/llm-dock/llama-inference.cpp
+++ b/src/llm-dock/llama-inference.cpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <cmath>
 
 std::string replace(const std::string &s, const std::string &from, const std::string &to)
 {
@@ -42,10 +43,10 @@ std::vector<llama_token> llama_tokenize(const struct llama_model *model, const s
 	n_tokens = llama_tokenize(model, text.data(), (int)text.length(), result.data(),
 				  (int)result.size(), add_bos);
 	if (n_tokens < 0) {
-		result.resize(-n_tokens);
+		result.resize(std::abs((int)n_tokens));
 		int check = llama_tokenize(model, text.data(), (int)text.length(), result.data(),
 					   (int)result.size(), add_bos);
-		GGML_ASSERT(check == (int)-n_tokens);
+		GGML_ASSERT(check == (int)std::abs((int)n_tokens));
 	} else {
 		result.resize(n_tokens);
 	}
diff --git a/src/llm-dock/llama-inference.h b/src/llm-dock/llama-inference.h
index 0315b49..b3dd5b5 100644
--- a/src/llm-dock/llama-inference.h
+++ b/src/llm-dock/llama-inference.h
@@ -1,6 +1,7 @@
 #include
 #include
+#include
 
 struct llama_context *llama_init_context(const std::string &model_file_path);
 
diff --git a/src/llm-dock/llm-config-data.cpp b/src/llm-dock/llm-config-data.cpp
index 7cb471f..03f0b44 100644
--- a/src/llm-dock/llm-config-data.cpp
+++ b/src/llm-dock/llm-config-data.cpp
@@ -19,7 +19,7 @@ Don't include harmful, unethical, racist, sexist, toxic, dangerous, socially bia
 	global_llm_config.local_model_path = "";
 	global_llm_config.cloud_model_name = "";
 	global_llm_config.cloud_api_key = "";
-	global_llm_config.temperature = 0.9;
+	global_llm_config.temperature = 0.9f;
 	global_llm_config.max_output_tokens = 64;
 	global_llm_config.system_prompt = LLAMA_DEFAULT_SYSTEM_PROMPT;
 }
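
Note: the llama-inference.cpp change targets llama.cpp's two-pass tokenize idiom, where llama_tokenize returns the token count on success and the negated required buffer size when the caller's buffer is too small. The sketch below reconstructs how the wrapper plausibly reads after this patch, for review context only: the llama_tokenize call signature and the two-pass flow are taken from the hunks above, while the wrapper's name (tokenize_text, renamed here to avoid shadowing the C API), its full parameter list, and the initial capacity heuristic are assumptions, not the repository's exact code.

#include <cmath>   // std::abs, added by this patch for the resize fix
#include <string>
#include <vector>
#include <ggml.h>  // GGML_ASSERT
#include <llama.h> // llama_model, llama_token, llama_tokenize (Oct 2023 C API layout)

// Hypothetical reconstruction of the wrapper around the patched hunk.
static std::vector<llama_token> tokenize_text(const struct llama_model *model,
					      const std::string &text, bool add_bos)
{
	// First pass: start from a rough upper bound on the token count.
	std::vector<llama_token> result(text.length() + (add_bos ? 1 : 0));
	int n_tokens = llama_tokenize(model, text.data(), (int)text.length(), result.data(),
				      (int)result.size(), add_bos);
	if (n_tokens < 0) {
		// A negative return is the required size, negated. std::abs makes the
		// non-negative intent explicit before the implicit int -> size_t
		// conversion in resize(), which is the warning this patch quiets.
		result.resize(std::abs(n_tokens));
		// Second pass: with the exact size, the call must now succeed.
		int check = llama_tokenize(model, text.data(), (int)text.length(), result.data(),
					   (int)result.size(), add_bos);
		GGML_ASSERT(check == std::abs(n_tokens));
	} else {
		result.resize(n_tokens);
	}
	return result;
}

The llm-config-data.cpp hunk follows the same reasoning: temperature is presumably a float field, so assigning the double literal 0.9 narrows implicitly, while 0.9f assigns without the double -> float truncation warning.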