#
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import json
import os
import pprint
import torch
import torch.multiprocessing
import transformers
from datasets.apps_dataset import APPSBaseDataset
from trainers.trainer_rl import Trainer_RL
from transformers import Trainer
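
# Sharing tensors through the file system (rather than via file descriptors) avoids
# "too many open files" errors when many dataloader workers are used.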
torch.multiprocessing.set_sharing_strategy('file_system')


def run_training(args, train_data):
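    """Load the CodeT5 model, build HuggingFace TrainingArguments, and run supervised, critic, or RL training."""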
    if args.model in ['codet5-base', 'codet5-large']:
        model_path = args.model_path if args.model_path is not None else 'Salesforce/{}'.format(args.model)
        print("Loading model from {}...".format(model_path))
        model = transformers.T5ForConditionalGeneration.from_pretrained(
            model_path,
            tuning_mode=args.tuning_mode,
            clone_rl_head=args.clone_rl_head)

        if args.clone_rl_head:
            # Optional: clone a separate RL head and initialize its weights from the finetuned LM head
            print("Initializing RL head with finetuned LM head...")
            lm_head_params = model.lm_head.weight.detach().numpy()
            model.rl_head.weight = torch.nn.Parameter(torch.tensor(lm_head_params))
    else:
        raise NotImplementedError(f"{args.model=}")

    print('Finished loading model {}'.format(args.model))

    start_iteration = 0
    train_data.start_iteration = start_iteration
    print("Starting main loop")
    training_args = transformers.TrainingArguments(
        output_dir=args.save_dir,
        overwrite_output_dir=True,
        do_train=True,
        do_eval=False,
        do_predict=True,
        evaluation_strategy='no',
        eval_steps=0,
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size_per_replica,
        gradient_accumulation_steps=args.grad_acc_steps,
        learning_rate=args.lr,
        weight_decay=0.05,
        lr_scheduler_type='constant_with_warmup',
        logging_dir=args.save_dir,
        logging_first_step=True,
        logging_steps=args.log_freq,
        save_steps=args.save_freq,
        save_total_limit=args.save_total_limit,
        dataloader_drop_last=True,
        dataloader_num_workers=0 if args.db else 8,
        local_rank=args.local_rank,
        deepspeed=args.deepspeed,
        fp16=args.fp16,
    )
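
    # The 'critic' and 'rl' tuning modes use the repo's custom Trainer_RL; any other mode
    # falls back to the standard HuggingFace Trainer.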
    if args.tuning_mode in ['critic', 'rl']:
        trainer = Trainer_RL(
            model=model,
            args=training_args,
            train_dataset=train_data,
            tuning_mode=args.tuning_mode,
        )
    else:
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_data,
        )

    trainer.train()
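
    # Save the final checkpoint from the main process only, so distributed replicas
    # do not write duplicate copies.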
    if args.local_rank == 0:
        model.save_pretrained(os.path.join(args.save_dir, "final_checkpoint"))


def get_dataset(args):
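    """Build the APPS training dataset from the problem directories under args.train_path."""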
    fnames = os.listdir(args.train_path)

    # train in debugging mode with small data split
    if args.db:
        fnames = fnames[:50]
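
    # Sequence-length limits depend on the model family; for CodeT5, source and target are
    # truncated separately, while max_src_tokens = -1 presumably means no separate source cap.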
    if args.model in ['codet5-base', 'codet5-large']:
        max_tokens = 512
        max_src_tokens = 600
    else:
        max_tokens = 1024
        max_src_tokens = -1

    train_data = APPSBaseDataset(
        dataroot=args.train_path,
        problem_dirs=fnames,
        model=args.model,
        max_tokens=max_tokens,
        max_src_tokens=max_src_tokens,
        sample_mode=args.sample_mode,
        tuning_mode=args.tuning_mode,
        relative_returns=args.relative_returns
    )

    return train_data


def main(args):
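    """Build the dataset, save the run arguments, and launch training."""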
    argsdict = vars(args)
    print(pprint.pformat(argsdict))

    os.makedirs(args.save_dir, exist_ok=True)

    # Load dataset
    train_data = get_dataset(args)

    # Save args to file
    with open(os.path.join(args.save_dir, "args.json"), 'w') as file:
        json.dump(argsdict, file)

    # Load and train model; save model checkpoints
    run_training(args, train_data)


if __name__ == "__main__":
    from configs.train_configs import *
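    # The star import above is expected to define `args` (the parsed command-line arguments).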
    main(args)