Pass pointer to params in llama_init_from_file #1902

Closed · wants to merge 1 commit
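This PR changes llama_init_from_file to take its llama_context_params argument through a const pointer instead of by value, so every call site now passes &lparams rather than lparams. A minimal before/after sketch of the calling convention (the "model.bin" path is a placeholder, not from the PR):

    struct llama_context_params lparams = llama_context_default_params();

    // before this PR: the params struct was passed (and copied) by value
    // struct llama_context * ctx = llama_init_from_file("model.bin", lparams);

    // after this PR: the caller passes the struct's address instead
    struct llama_context * ctx = llama_init_from_file("model.bin", &lparams);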
2 changes: 1 addition & 1 deletion examples/common.cpp
@@ -555,7 +555,7 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     lparams.logits_all = params.perplexity;
     lparams.embedding = params.embedding;

-    llama_context * lctx = llama_init_from_file(params.model.c_str(), lparams);
+    llama_context * lctx = llama_init_from_file(params.model.c_str(), &lparams);

     if (lctx == NULL) {
         fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
2 changes: 1 addition & 1 deletion examples/quantize-stats/quantize-stats.cpp
@@ -330,7 +330,7 @@ int main(int argc, char ** argv) {
     lparams.f16_kv = false;
     lparams.use_mlock = false;

-    ctx = llama_init_from_file(params.model.c_str(), lparams);
+    ctx = llama_init_from_file(params.model.c_str(), &lparams);

     if (ctx == NULL) {
         fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
4 changes: 2 additions & 2 deletions examples/save-load-state/save-load-state.cpp
@@ -35,7 +35,7 @@ int main(int argc, char ** argv) {
     auto last_n_tokens_data = std::vector<llama_token>(params.repeat_last_n, 0);

     // init
-    auto ctx = llama_init_from_file(params.model.c_str(), lparams);
+    auto ctx = llama_init_from_file(params.model.c_str(), &lparams);
     auto tokens = std::vector<llama_token>(params.n_ctx);
     auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), int(tokens.size()), true);
@@ -95,7 +95,7 @@ int main(int argc, char ** argv) {
     llama_free(ctx);

     // load new model
-    auto ctx2 = llama_init_from_file(params.model.c_str(), lparams);
+    auto ctx2 = llama_init_from_file(params.model.c_str(), &lparams);

     // Load state (rng, logits, embedding and kv_cache) from file
     {
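Note that save-load-state passes the same lparams struct to both llama_init_from_file calls. Because the updated implementation (see the llama.cpp hunk below) copies the struct out of the pointer on entry, the caller's lparams is never modified, so reusing it for a second context stays safe. A sketch of that pattern, with a placeholder model path:

    struct llama_context_params lparams = llama_context_default_params();

    struct llama_context * ctx = llama_init_from_file("model.bin", &lparams);
    // ... use ctx, save its state, then release it ...
    llama_free(ctx);

    // lparams was not modified through the const pointer, so it can be reused
    struct llama_context * ctx2 = llama_init_from_file("model.bin", &lparams);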
@@ -3054,7 +3054,7 @@ int main(int argc, char ** argv) {
     struct llama_context_params llama_params = llama_context_default_params();
     llama_params.vocab_only = true;

-    struct llama_context * lctx = llama_init_from_file(params.fn_vocab_model, llama_params);
+    struct llama_context * lctx = llama_init_from_file(params.fn_vocab_model, &llama_params);

     struct llama_vocab vocab;
     {
3 changes: 2 additions & 1 deletion llama.cpp
@@ -2618,8 +2618,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s

 struct llama_context * llama_init_from_file(
         const char * path_model,
-        struct llama_context_params params) {
+        const struct llama_context_params * params_ptr) {
     ggml_time_init();
+    struct llama_context_params params = *params_ptr;

     llama_context * ctx = new llama_context;

Contributor (author), inline on the new params_ptr line:

@howard0su const is being used here? Not sure what you mean.
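The pointer is dereferenced once into a local copy at the top of the function, so the rest of the function body keeps using params by value exactly as before; that is why only one internal line changes. The shape of the pattern, reduced to a sketch with the body elided:

    struct llama_context * llama_init_from_file(
            const char * path_model,
            const struct llama_context_params * params_ptr) {
        ggml_time_init();

        // one copy on entry; everything below continues to read the
        // by-value local, unchanged from the previous implementation
        struct llama_context_params params = *params_ptr;

        llama_context * ctx = new llama_context;
        // ... unchanged body ...
    }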
2 changes: 1 addition & 1 deletion llama.h
@@ -142,7 +142,7 @@ extern "C" {
     // Return NULL on failure
     LLAMA_API struct llama_context * llama_init_from_file(
             const char * path_model,
-            struct llama_context_params params);
+            const struct llama_context_params * params);

     // Frees all allocated memory
     LLAMA_API void llama_free(struct llama_context * ctx);
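With the updated declaration, a caller initializes the params struct as before, passes its address, and checks the documented NULL-on-failure return. A minimal, self-contained usage sketch (the model path is a placeholder; vocab_only mirrors the tokenizer test below):

    #include <stdio.h>
    #include "llama.h"

    int main(void) {
        struct llama_context_params lparams = llama_context_default_params();
        lparams.vocab_only = true; // load only the vocabulary

        struct llama_context * ctx = llama_init_from_file("model.bin", &lparams);
        if (ctx == NULL) {
            fprintf(stderr, "failed to load model\n");
            return 1;
        }

        llama_free(ctx);
        return 0;
    }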
2 changes: 1 addition & 1 deletion tests/test-tokenizer-0.cpp
@@ -36,7 +36,7 @@ int main(int argc, char **argv) {

     lparams.vocab_only = true;

-    ctx = llama_init_from_file(fname.c_str(), lparams);
+    ctx = llama_init_from_file(fname.c_str(), &lparams);

     if (ctx == NULL) {
         fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());