Remove GPU warnings from option parser
jart committed Dec 28, 2023
1 parent 18c2821 commit 0164146
Showing 2 changed files with 1 addition and 19 deletions.
llama.cpp/common.cpp: 0 additions & 17 deletions
@@ -793,23 +793,6 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
     if (!params.kv_overrides.empty()) {
         params.kv_overrides.emplace_back(llama_model_kv_override());
         params.kv_overrides.back().key[0] = 0;
     }
 
-    if (passed_gpu_flags) {
-        // user is tuning their gpu
-        if (!ggml_metal_supported() && !ggml_cuda_supported()) {
-            fprintf(stderr, "warning: GPU offload not supported on this platform; GPU related options will be ignored\n");
-            fprintf(stderr, "warning: you might need to install xcode (macos) or cuda (windows, linux, etc.) check the output above to see why support wasn't linked\n");
-        }
-    } else {
-        // no gpu flags were passed
-        if (ggml_metal_supported()) {
-            // apple metal gpu doesn't require explicit flags to enable
-        } else {
-            // avoid the >1 second cuda startup latency if cuda isn't being used
-            fprintf(stderr, "protip: pass the --n-gpu-layers N flag to link NVIDIA cuBLAS support\n");
-            ggml_cuda_disable();
-        }
-    }
-
     return true;
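For reference, the deleted block warned when GPU flags were passed on a platform where neither Metal nor CUDA was linked, and otherwise disabled CUDA to skip its startup latency. A caller that still wants that behavior can run the same check itself after parsing. A minimal sketch, assuming the llamafile helpers ggml_metal_supported(), ggml_cuda_supported(), and ggml_cuda_disable() keep the no-argument signatures implied by the diff:

#include <cstdio>

// llamafile-specific helpers used by the deleted block (assumed signatures,
// resolved at link time against the llamafile runtime).
extern "C" bool ggml_metal_supported(void);
extern "C" bool ggml_cuda_supported(void);
extern "C" void ggml_cuda_disable(void);

// Reproduces the removed logic outside the option parser.
// passed_gpu_flags: whether the user supplied any GPU-related option.
static void warn_about_gpu_support(bool passed_gpu_flags) {
    if (passed_gpu_flags) {
        // User is tuning their GPU: warn if no backend was linked.
        if (!ggml_metal_supported() && !ggml_cuda_supported()) {
            fprintf(stderr, "warning: GPU offload not supported on this platform; "
                            "GPU related options will be ignored\n");
        }
    } else if (!ggml_metal_supported()) {
        // No GPU flags and no Metal: avoid the >1 second CUDA startup latency.
        ggml_cuda_disable();
    }
}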
llama.cpp/llava/llava-cli.cpp: 1 addition & 2 deletions
@@ -232,8 +232,7 @@ int llava_cli(int argc, char ** argv) {
         return 1;
     }
     if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
-        gpt_print_usage(argc, argv, params);
-        show_additional_info(argc, argv);
+        fprintf(stderr, "%s: fatal error: --image flag missing\n", argv[0]);
         return 1;
     }
 
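This hunk trades the full usage dump for a single-line diagnostic when --image is missing. The same fail-fast guard generalizes to any required flag; a minimal sketch, with check_required_flag() as a hypothetical helper name that is not part of llamafile:

#include <cstdio>
#include <string>

// Hypothetical helper illustrating the guard pattern above: fail fast with a
// one-line diagnostic instead of printing the full usage text.
static bool check_required_flag(const char *prog, const std::string &value,
                                const char *flag) {
    if (value.empty()) {
        fprintf(stderr, "%s: fatal error: %s flag missing\n", prog, flag);
        return false;
    }
    return true;
}

// Usage inside a main()-style entry point:
//     if (!check_required_flag(argv[0], params.image, "--image"))
//         return 1;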
