#!/bin/bash
# Runs supervised fine-tuning (SFT) of the Yuan 2.0 2.1B-parameter model.
export CUDA_DEVICE_MAX_CONNECTIONS=1
GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NNODES=1
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
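# A minimal multinode sketch (hypothetical values, for illustration only):
# point MASTER_ADDR at the host running rank 0 and give each node its own rank, e.g.
#   MASTER_ADDR=10.0.0.1   # example address of the rank-0 node
#   NNODES=2
#   NODE_RANK=0            # 0 on the first node, 1 on the second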
CHECKPOINT_PATH=<Specify path>
DATA_PATH=<Specify path and file prefix>_text_document
TOKENIZER_MODEL_PATH=<Specify path to file>
TENSORBOARD_PATH=<Specify path to file>
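# For illustration only, the placeholders above might be filled in like this
# (hypothetical paths; adjust to your environment):
#   CHECKPOINT_PATH=./checkpoints/yuan2.0-2.1B-sft
#   DATA_PATH=./datasets/sft_data_text_document
#   TOKENIZER_MODEL_PATH=./tokenizer/tokenizer.model
#   TENSORBOARD_PATH=./tensorboard/yuan2.0-2.1B-sft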
DISTRIBUTED_ARGS="
--nproc_per_node $GPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
GPT_ARGS="
--tensor-model-parallel-size 1 \
--pipeline-model-parallel-size 1 \
--timing-log-level 2 \
--num-workers 2 \
--num-layers 24 \
--hidden-size 2048 \
--num-attention-heads 32 \
--use-lf-gate \
--lf-conv2d-group 1 \
--lf-conv2d-num-pad 1 \
--position-embedding-type rope \
--no-embedding-dropout \
--flash-attn-drop 0.1 \
--fim-rate 0.5 \
--fim-spm-rate 0.5 \
--norm-dtype RMSNorm \
--attention-dropout 0 \
--hidden-dropout 0 \
--disable-bias-linear \
--reset-position-ids \
--use-flash-attn \
--swiglu \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--seq-length 8192 \
--max-position-embeddings 8192 \
--micro-batch-size 2 \
--global-batch-size 384 \
--lr 0.0002 \
--train-iters 95367 \
--lr-decay-iters 95367 \
--lr-decay-style cosine \
--min-lr 2.0e-5 \
--weight-decay 1e-1 \
--lr-warmup-iters 1900 \
--clip-grad 1.0 \
--recompute-method uniform \
--recompute-granularity full \
--recompute-num-layers 1 \
--bf16 \
--sft-stage \
--override-opt-param-scheduler \
--train-reset \
--finetune
"
DATA_ARGS="
--data-path $DATA_PATH \
--tokenizer-type YuanTokenizer \
--tokenizer-model-path $TOKENIZER_MODEL_PATH \
--data-impl mmap \
--split 10,0,0
"
OUTPUT_ARGS="
--log-interval 1 \
--save-interval 10000 \
--eval-interval 1000000 \
--eval-iters 10
"
LOG_ARGS="
--tensorboard-dir $TENSORBOARD_PATH \
--tensorboard-log-interval 1 \
--tensorboard-queue-size 1000 \
--log-timers-to-tensorboard \
--log-batch-size-to-tensorboard \
--log-memory-to-tensorboard \
--log-world-size-to-tensorboard
"
torchrun $DISTRIBUTED_ARGS pretrain_yuan.py \
$GPT_ARGS \
$DATA_ARGS \
$OUTPUT_ARGS \
$LOG_ARGS \
--distributed-backend nccl \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH
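# Usage sketch: after replacing the <Specify ...> placeholders above, launch
# from the directory containing pretrain_yuan.py (assumed to be the repo root):
#   bash pretrain_yuan2.0_2.1B_sft.sh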