From 4aaddce6fb159612d35ee25a1a408c696701b00e Mon Sep 17 00:00:00 2001
From: NanoCode012
Date: Tue, 3 Dec 2024 11:49:53 +0700
Subject: [PATCH] fix(readme): update cuda instructions during preprocess

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index b900a1c5a..cd03abf00 100644
--- a/README.md
+++ b/README.md
@@ -147,7 +147,7 @@ pip3 install -e '.[flash-attn,deepspeed]'
 ### Usage
 ```bash
 # preprocess datasets - optional but recommended
-CUDA_VISIBLE_DEVICES="" python -m axolotl.cli.preprocess examples/openllama-3b/lora.yml
+CUDA_VISIBLE_DEVICES="0" python -m axolotl.cli.preprocess examples/openllama-3b/lora.yml
 
 # finetune lora
 accelerate launch -m axolotl.cli.train examples/openllama-3b/lora.yml
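
For context on the one-line change above: an empty `CUDA_VISIBLE_DEVICES` hides every GPU from the process (forcing CPU-only preprocessing), while `"0"` exposes only the first GPU. The sketch below is not part of the patch; it is a minimal way to confirm what each setting exposes, assuming PyTorch is installed and the machine has at least one GPU:

```bash
# With "" no GPUs are visible, so torch.cuda.device_count() prints 0;
# with "0" only GPU 0 is visible, so it prints 1 on a machine with a GPU.
CUDA_VISIBLE_DEVICES=""  python -c "import torch; print(torch.cuda.device_count())"
CUDA_VISIBLE_DEVICES="0" python -c "import torch; print(torch.cuda.device_count())"
```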