Commit
* add rwkv support
* Update init_functions.py
* rwkv model files
* configs
* kernels
* Cleanup
* Update 760M.yml
* remove preffn and mishglu
* Update NeoXArgs docs automatically
* Add RWKV parallelism assertions
* Update NeoXArgs docs automatically
* pre-commit and config cleanup
* Update NeoXArgs docs automatically
* rwkv logging
* Update NeoXArgs docs automatically
* Add rwkv version dirname, make hdim 3.5x
* pre-commit
* Update NeoXArgs docs automatically
* fix bug and set batch size to 32
* Update NeoXArgs docs automatically

---------

Co-authored-by: Quentin Anthony <[email protected]>
Co-authored-by: github-actions <[email protected]>
1 parent: c814959
Commit: 4bc6670
Showing 13 changed files with 882 additions and 18 deletions.
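One of the squashed commits above adds RWKV parallelism assertions, and the config below likewise notes that parallelism is not yet supported for RWKV. As a rough, hypothetical sketch only (the function name, argument names, and location of the check are assumptions, not taken from this diff), such a guard could look like:

    # Hypothetical sketch of an RWKV parallelism guard; names are illustrative
    # and do not come from this commit.
    def assert_rwkv_parallelism_unsupported(pipe_parallel_size: int, model_parallel_size: int) -> None:
        # RWKV layers are only expected to work without pipeline or tensor parallelism.
        assert pipe_parallel_size <= 1, "RWKV does not yet support pipeline parallelism"
        assert model_parallel_size <= 1, "RWKV does not yet support model (tensor) parallelism"

    # With the values used in the config below, both checks pass.
    assert_rwkv_parallelism_unsupported(pipe_parallel_size=1, model_parallel_size=1)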
@@ -0,0 +1,102 @@
{
  # Parallelism is not yet supported for rwkv
  "pipe_parallel_size": 1,
  "model_parallel_size": 1,

  "num_layers": 12,
  "hidden_size": 768,
  "num_attention_heads": 12, # head_size = dim_att / num_attention_heads.
                             # head_size is 64 for all rwkv models
  "seq_length": 512,
  "max_position_embeddings": 2048,
  "output_layer_parallelism": "column",
  "norm": "rmsnorm",
  "rms_norm_epsilon": 1.0e-5,
  "train_micro_batch_size_per_gpu": 32,

  "attention_config": [[["rwkv"], 12]],

  "activation": "silu",

  # model settings

  #"pos_emb": "rotary",
  "rotary_pct": 0.25,
  "no_weight_tying": true,
  "gpt_j_residual": true,

  # these should provide some speedup but takes a while to build, set to true if desired
  "scaled_upper_triang_masked_softmax_fusion": false,
  "bias_gelu_fusion": false,
  "rope_fusion": false,
  "layernorm_fusion": false,

  # init methods
  "init_method": "small_init",
  "output_layer_init_method": "wang_init",

  # optimizer settings
  "optimizer": {
    "type": "Adam",
    "params": {
      "lr": 0.0008,
      "betas": [0.9, 0.95],
      "eps": 1.0e-8,
    }
  },
  "min_lr": 0.00008,

  # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
  "zero_optimization": {
    "stage": 1,
    "allgather_partitions": True,
    "allgather_bucket_size": 500000000,
    "overlap_comm": True,
    "reduce_scatter": True,
    "reduce_bucket_size": 500000000,
    "contiguous_gradients": True,
  },

  # batch / data settings
  "data_impl": "mmap",
  "num_workers": 1,

  # activation checkpointing
  "checkpoint_activations": true,
  "checkpoint_num_layers": 1,
  "partition_activations": true,
  "synchronize_each_layer": true,

  # regularization
  "gradient_clipping": 1.0,
  "weight_decay": 0.1,
  "hidden_dropout": 0,
  "attention_dropout": 0,

  # precision settings
  "bf16": {
    "bf16": true,
    "enabled": true,
    "loss_scale": 0,
    "loss_scale_window": 1000,
    "initial_scale_power": 12,
    "hysteresis": 2,
    "min_loss_scale": 1,
  },

  # misc. training settings
  "train_iters": 500,
  "lr_decay_iters": 500,
  "distributed_backend": "nccl",
  "lr_decay_style": "constant",
  "warmup": 0.01,
  "checkpoint_factor": 100,
  "eval_interval": 100000,
  "eval_iters": 10,

  # logging
  "log_interval": 10,
  "steps_per_print": 10,
  "wall_clock_breakdown": true,
}
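The num_attention_heads comment above implies head_size = dim_att / num_attention_heads; assuming dim_att defaults to hidden_size (as in upstream RWKV, not stated in this diff), the values here give 768 / 12 = 64, matching the note that head_size is 64 for all RWKV models. A minimal check under that assumption:

    # Minimal sketch verifying the head-size relationship from the config comment.
    # Assumes dim_att defaults to hidden_size; that default is not stated in the diff.
    hidden_size = 768
    num_attention_heads = 12
    head_size = hidden_size // num_attention_heads
    assert head_size == 64  # "head_size is 64 for all rwkv models"

Configs like this one would presumably be passed to the usual NeoX launcher (deepy.py with train.py plus a local setup config); the exact filename and path of this config are not visible in this excerpt.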
Empty file.
@@ -0,0 +1 @@
from .rwkv import RWKVResidualLayerPipe, RWKVResidualLayer