Skip to content

Commit

Permalink
add max_length and max_prompt_length
Browse files Browse the repository at this point in the history
If these values are not set explicitly, only the defaults can be used
  • Loading branch information
LIE624 authored Apr 24, 2024
1 parent 4ee9b77 commit 3151591
Showing 1 changed file with 7 additions and 0 deletions.
7 changes: 7 additions & 0 deletions orpo_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,11 @@ class ScriptArguments:
The name of the Causal LM model we wish to fine-tune with DPO
"""
# Model arguments

# Truncation limits forwarded to ORPOConfig in main(); the CLI defaults
# (512 total tokens, 128 prompt tokens) apply when the user does not
# override them. max_length bounds the full tokenized sequence;
# max_prompt_length bounds the prompt portion only.
max_length: Optional[int] = field(default=512,
metadata={"help": "Maximum total input sequence length after tokenization."})
max_prompt_length: Optional[int] = field(default=128, metadata={"help": "Maximum length of prompt sequences."})

model_type: str = field(
default=None,
metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())}
Expand Down Expand Up @@ -415,6 +420,8 @@ def main():
model.config.use_cache = True

training_args = ORPOConfig(
max_length=args.max_length,
max_prompt_length=args.max_prompt_length,
per_device_train_batch_size=args.per_device_train_batch_size,
per_device_eval_batch_size=args.per_device_eval_batch_size,
max_steps=args.max_steps,
Expand Down

0 comments on commit 3151591

Please sign in to comment.