Skip to content

Commit

Permalink
llama : fix Vulkan whitelist (ggml-org#11)
Browse files Browse the repository at this point in the history
  • Loading branch information
cebtenzzre committed Nov 3, 2023
1 parent ffd0624 commit f88b198
Showing 1 changed file with 10 additions and 4 deletions.
14 changes: 10 additions & 4 deletions llama.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6352,9 +6352,11 @@ int64_t llama_time_us(void) {
return ggml_time_us();
}

struct llama_model * llama_load_model_from_file(
const char * path_model,
struct llama_context_params params) {
static struct llama_model * llama_load_model_from_file_internal(
const char * path_model, struct llama_context_params * params_p
) {
auto & params = *params_p;

ggml_time_init();

llama_model * model = new llama_model;
Expand Down Expand Up @@ -6389,6 +6391,10 @@ struct llama_model * llama_load_model_from_file(
return model;
}

// Public API entry point for loading a model from disk.
// Keeps the by-value llama_context_params ABI for callers while the
// internal loader takes a pointer; we simply forward the address of
// our local copy.
struct llama_model * llama_load_model_from_file(
        const char * path_model,
        struct llama_context_params params) {
    return llama_load_model_from_file_internal(path_model, &params);
}

// Destroy a model previously returned by llama_load_model_from_file.
// Accepts NULL (deleting a null pointer is a no-op in C++).
void llama_free_model(struct llama_model * model) {
    delete model;
}
Expand Down Expand Up @@ -6559,7 +6565,7 @@ struct llama_context * llama_new_context_with_model(
static struct llama_context * llama_init_from_file(
const char * path_model,
struct llama_context_params params) {
struct llama_model * model = llama_load_model_from_file(path_model, params);
struct llama_model * model = llama_load_model_from_file_internal(path_model, &params);
if (!model) {
return nullptr;
}
Expand Down

0 comments on commit f88b198

Please sign in to comment.