32 b #121
base: main
Changes from 24 commits
```diff
@@ -130,7 +130,7 @@ def build(self, trainer: "Trainer") -> Optional[Callback]:
         eval_batch_size = (
             self.eval_batch_size
             if self.eval_batch_size is not None
-            else trainer.rank_microbatch_size * get_world_size(trainer.dp_process_group)
+            else 2 * trainer.rank_microbatch_size * get_world_size(trainer.dp_process_group)
```
Review comment: nit: you could instead pass an updated evaluator callback in.

Reply: Yeah, but I think this is better. I think we can default to 2x the training batch size. It should always work.
```diff
         )
         dataset = self.eval_dataset.build()
         if not isinstance(dataset, NumpyPaddedFSLDataset):
```
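For context on the reply above about defaulting to 2x the training batch size, here is a minimal sketch of what the new default works out to. The rank micro-batch size is taken from this PR's trainer config; the data-parallel world size is a hypothetical value, not something stated in the PR.

```python
# Minimal sketch of the new default eval batch size, assuming rank_microbatch_size
# is counted in tokens (as in the trainer config in this PR) and a hypothetical
# data-parallel world size.
rank_microbatch_size = 2 * 4096   # tokens per rank per micro-batch (this PR's config)
dp_world_size = 512               # hypothetical number of data-parallel ranks

# New default: 2x the per-step training micro-batch aggregated across all DP ranks.
eval_batch_size = 2 * rank_microbatch_size * dp_world_size
print(f"{eval_batch_size:,} tokens")  # 8,388,608 tokens with these assumed values
```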
```diff
@@ -15,15 +15,16 @@
     TransformerDataParallelConfig,
 )
 from olmo_core.optim import AdamWConfig, OptimGroupOverride
-from olmo_core.train import TrainerConfig
-from olmo_core.train.callbacks import CheckpointerCallback, CometCallback, WandBCallback
+from olmo_core.train import TrainerConfig, Duration, DurationUnit
+from olmo_core.train.callbacks import CheckpointerCallback, CometCallback, WandBCallback, \
+    DownstreamEvaluatorCallbackConfig

 log = logging.getLogger(__name__)


 def build_model_config(common: CommonComponents) -> TransformerConfig:
     compile = True
-    return TransformerConfig.olmo2_26B(
+    return TransformerConfig.olmo2_32B(
         vocab_size=common.tokenizer.padded_vocab_size(),
         compile=compile,
         fused_ops=False,
```
```diff
@@ -32,7 +33,12 @@ def build_model_config(common: CommonComponents) -> TransformerConfig:
             name=DataParallelType.fsdp, param_dtype=DType.bfloat16, reduce_dtype=DType.float32
         ),
         ac_config=TransformerActivationCheckpointingConfig(
-            mode=TransformerActivationCheckpointingMode.full
+            mode=TransformerActivationCheckpointingMode.selected_modules,
+            modules=[
+                f"blocks.{i}"
+                for i in range(64)
+                if i % 4 != 0
+            ]
         ),
         float8_config=Float8Config(compile=compile, enabled=False),
     )
```
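A quick illustration of the activation-checkpointing change above: instead of checkpointing every block, the `selected_modules` mode checkpoints every transformer block whose index is not a multiple of 4, i.e. 48 of the 64 blocks.

```python
# Which blocks get activation checkpointing under the new selected_modules setting:
# every block except those at indices 0, 4, 8, ..., 60.
checkpointed = [f"blocks.{i}" for i in range(64) if i % 4 != 0]
skipped = [f"blocks.{i}" for i in range(64) if i % 4 == 0]

print(len(checkpointed), len(skipped))  # 48 16
print(checkpointed[:4])                 # ['blocks.1', 'blocks.2', 'blocks.3', 'blocks.5']
```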
```diff
@@ -52,20 +58,23 @@ def build_optim_config(common: CommonComponents) -> AdamWConfig:


 def build_trainer_config(common: CommonComponents) -> TrainerConfig:
+    project_name = "peteish32"
     return (
         TrainerConfig(
-            save_folder=common.save_folder,
-            rank_microbatch_size=4 * 4096,
+            save_folder=f"gs://ai2-llm/checkpoints/{project_name}/",
```
Review comment: Why change this?

Reply: It defaults to something under my name? Not what we want for an official run?

Reply: Especially if we swap babysitting responsibilities during the run.
```diff
+            rank_microbatch_size=2 * 4096,
             save_overwrite=True,
             metrics_collect_interval=10,
-            cancel_check_interval=1,
+            cancel_check_interval=10,
             z_loss_multiplier=1e-5,
             compile_loss=True,
             fused_loss=True,
+            max_duration=Duration(int(6.5e12), DurationUnit.tokens)
         )
         .with_callback(
             "checkpointer",
             CheckpointerCallback(
-                save_interval=10_000,
+                save_interval=1000,
+                ephemeral_save_interval=250,
                 save_async=True,
             ),
```
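For rough scale on the new `max_duration` above: it is expressed in tokens, and the number of optimizer steps it implies depends on the global batch size, which is configured elsewhere and not shown in this diff. A back-of-the-envelope calculation under an assumed global batch size:

```python
# Back-of-the-envelope step count implied by max_duration, assuming a hypothetical
# global batch size (the real value is set outside this diff).
max_tokens = int(6.5e12)            # Duration(int(6.5e12), DurationUnit.tokens)
global_batch_size = 2048 * 4096     # hypothetical: tokens per optimizer step
steps = max_tokens // global_batch_size
print(f"~{steps:,} steps")          # ~774,860 steps under this assumption
```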
```diff
@@ -75,7 +84,7 @@ def build_trainer_config(common: CommonComponents) -> TrainerConfig:
             CometCallback(
                 name=common.run_name,
                 workspace="ai2",
-                project="OLMo-core-26B",
+                project=project_name,
                 enabled=True,
                 cancel_check_interval=10,
             ),
```
```diff
@@ -85,10 +94,57 @@ def build_trainer_config(common: CommonComponents) -> TrainerConfig:
             WandBCallback(
                 name=common.run_name,
                 entity="ai2-llm",
-                project="OLMo-core-26B",
+                project=project_name,
                 enabled=False,
```
Review comment: Intentionally disabled still? Just checking.
```diff
                 cancel_check_interval=10,
             ),
-        )
+        ).with_callback(
```
Review comment: We should just add this to the common callbacks.

Reply: I don't know that we want these for everything. The default should probably be only the new, blessed ones.
```diff
+            "downstream_evaluator",
+            DownstreamEvaluatorCallbackConfig(
+                tasks=[
+                    # MMLU for backwards compatibility
+                    "mmlu_stem_mc_5shot",
+                    "mmlu_humanities_mc_5shot",
+                    "mmlu_social_sciences_mc_5shot",
+                    "mmlu_other_mc_5shot",
+
+                    # MMLU test
+                    "mmlu_stem_mc_5shot_test",
+                    "mmlu_humanities_mc_5shot_test",
+                    "mmlu_social_sciences_mc_5shot_test",
+                    "mmlu_other_mc_5shot_test",
+
+                    # Core 12 tasks for backwards compatibility
+                    "arc_challenge",
+                    "arc_easy",
+                    "basic_arithmetic",
+                    "boolq",
+                    "commonsense_qa",
+                    "copa",
+                    "hellaswag",
+                    "openbook_qa",
+                    "piqa",
+                    "sciq",
+                    "social_iqa",
+                    "winogrande",
+
+                    # Core 12 tasks 5-shot
+                    "arc_challenge_rc_5shot",
+                    "arc_easy_rc_5shot",
+                    # "basic_arithmetic_rc_5shot",  # doesn't exist
+                    # "boolq_rc_5shot",  # we don't like it
+                    "csqa_rc_5shot",
+                    # "copa_rc_5shot",  # doesn't exist
+                    "hellaswag_rc_5shot",
+                    "openbookqa_rc_5shot",
+                    "piqa_rc_5shot",
+                    # "sciq_rc_5shot",  # doesn't exist
+                    "socialiqa_rc_5shot",
+                    "winogrande_rc_5shot"
+                ],
+                tokenizer=common.tokenizer,
+                eval_interval=1000,
+            ),
+        )
     )
```
Review comment: This is a very narrow model then... are you sure about that?

Reply: It's a clone of Qwen 32. The tradeoffs are: narrow d_model, wide FFN, GQA, lots of layers.
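To make the "clone of Qwen 32" tradeoff concrete, here is a rough parameter-count sketch for a Qwen2.5-32B-style shape (narrow d_model, wide SwiGLU FFN, GQA, many layers). Every dimension below is an assumption for illustration; none of these values are read from the olmo2_32B config in this PR.

```python
# Back-of-the-envelope parameter count for an assumed Qwen2.5-32B-like shape.
# Norm parameters are ignored; embeddings and LM head are assumed untied.
d_model, n_layers = 5120, 64                  # assumed: narrow width, many layers
ffn_hidden = 27648                            # assumed: wide SwiGLU FFN
n_heads, n_kv_heads, head_dim = 40, 8, 128    # assumed: GQA with 8 KV heads
vocab_size = 100_352                          # assumed padded vocab size

attn = d_model * (n_heads * head_dim)             # Q projection
attn += 2 * d_model * (n_kv_heads * head_dim)     # K and V projections (GQA)
attn += (n_heads * head_dim) * d_model            # output projection
mlp = 3 * d_model * ffn_hidden                    # SwiGLU: gate, up, down
per_layer = attn + mlp

total = n_layers * per_layer + 2 * vocab_size * d_model  # + embeddings and LM head
print(f"~{total / 1e9:.1f}B parameters")  # ~32.2B with these assumed dimensions
```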