Skip to content
This repository has been archived by the owner on Dec 18, 2024. It is now read-only.

Commit

Permalink
Fix compiler warnings
Browse files Browse the repository at this point in the history
  • Loading branch information
royshil committed Oct 15, 2023
1 parent 4b399d2 commit c8afbd2
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 3 deletions.
5 changes: 3 additions & 2 deletions src/llm-dock/llama-inference.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <string>
#include <algorithm>
#include <sstream>
#include <cmath>

std::string replace(const std::string &s, const std::string &from, const std::string &to)
{
Expand Down Expand Up @@ -42,10 +43,10 @@ std::vector<llama_token> llama_tokenize(const struct llama_model *model, const s
n_tokens = llama_tokenize(model, text.data(), (int)text.length(), result.data(),
(int)result.size(), add_bos);
if (n_tokens < 0) {
result.resize(-n_tokens);
result.resize(std::abs((int)n_tokens));
int check = llama_tokenize(model, text.data(), (int)text.length(), result.data(),
(int)result.size(), add_bos);
GGML_ASSERT(check == (int)-n_tokens);
GGML_ASSERT(check == (int)std::abs((int)n_tokens));
} else {
result.resize(n_tokens);
}
Expand Down
1 change: 1 addition & 0 deletions src/llm-dock/llama-inference.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#include <llama.h>

#include <string>
#include <functional>

struct llama_context *llama_init_context(const std::string &model_file_path);

Expand Down
2 changes: 1 addition & 1 deletion src/llm-dock/llm-config-data.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ Don't include harmful, unethical, racist, sexist, toxic, dangerous, socially bia
global_llm_config.local_model_path = "";
global_llm_config.cloud_model_name = "";
global_llm_config.cloud_api_key = "";
global_llm_config.temperature = 0.9;
global_llm_config.temperature = 0.9f;
global_llm_config.max_output_tokens = 64;
global_llm_config.system_prompt = LLAMA_DEFAULT_SYSTEM_PROMPT;
}
Expand Down

0 comments on commit c8afbd2

Please sign in to comment.