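"""Training script for the GRECO model (see models.py).

The script wires together three HfArgumentParser dataclasses
(ModelArguments, DataTrainingArguments, TrainingArguments) and trains GRECO
with a combination of word-level, gap-level, ranking, and F0.5-estimation
losses whose coefficients are set by --alpha, --beta, --gamma, and --epsilon.

Example invocation (illustrative only; the backbone model, data file, and
output directory below are assumptions, not project defaults):

    python train.py \
        --model_name_or_path microsoft/deberta-v3-large \
        --data data/train.json \
        --data_mode serial \
        --do_train \
        --output_dir outputs/greco

Alternatively, a single path to a .json config file can be passed and is
parsed into the same three dataclasses (see main()).
"""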
import argparse
import functools
import json
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional, List
import torch
from datasets import load_dataset
from transformers import (
    AutoModel,
    AutoTokenizer,
    DataCollatorForTokenClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint

from utils.data import tokenize, CustomDataCollator, TrainDataset
from models import GRECO


@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
freeze_lm: Optional[bool] = field(
default=False, metadata={"help": "Freeze the lm model"}
)
pretrained_path: Optional[str] = field(
default=None, metadata={"help": "Path to pretrained weight of the model"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
estimator_loss: Optional[str] = field(
default='h_listnet', metadata={"help": "F0.5 estimator loss"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
no_gap_prediction: Optional[bool] = field(
default=False, metadata={"help": "Disable the prediction of gaps between hypothesis tokens"}
)
word_dropout: Optional[float] = field(
default=0.25, metadata={"help": "Dropout before word projection layer"},
)
alpha: Optional[float] = field(
default=1.0, metadata={"help": "Coefficient for the word prediction loss"},
)
beta: Optional[float] = field(
default=1.0, metadata={"help": "Coefficient for the gap prediction loss"},
)
gamma: Optional[float] = field(
default=0.2, metadata={"help": "Coefficient for the rank loss"},
)
epsilon: Optional[float] = field(
default=0, metadata={"help": "Coefficient for the label aggregation F0.5 loss"},
)
label_weight: Optional[str] = field(
default=None, metadata={"help": "JSON string of the weight for each label class"},
)
gap_weight: Optional[str] = field(
default=None, metadata={"help": "JSON string of the weight for each gap label class"},
)
edit_weight: Optional[float] = field(
default=None, metadata={"help": "scalar multiplier for loss of edit tokens and gaps"},
)
rank_multiplier: Optional[int] = field(
default=5, metadata={"help": "Multiplier of ranking loss sigmoid"},
)
rank_sample: Optional[int] = field(
default=None, metadata={"help": "Number of samples to be calculated in rank loss"},
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
)
data_mode: Optional[str] = field(
default='serial', metadata={"help": "Type of data supplied in the data argument"}
)
mask_source: bool = field(
default=False, metadata={"help": ""}
)
label_smoothing: Optional[float] = field(
default=0, metadata={"help": "Add label smoothing"}
)
additional_mask: Optional[float] = field(
default=0, metadata={"help": "downsample by adding additional masks"}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
label_all_tokens: bool = field(
default=True,
metadata={
"help": "Whether to put the label for one word on all tokens of generated by that word or just on the "
"first one (in which case the other tokens will have a padding index)."
},
)
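
# Instead of individual flags, main() also accepts a single path to a .json
# config file. A minimal illustrative config (values are assumptions, not
# project defaults) could look like:
#
#     {
#         "model_name_or_path": "microsoft/deberta-v3-large",
#         "data": "data/train.json",
#         "data_mode": "serial",
#         "do_train": true,
#         "output_dir": "outputs/greco",
#         "per_device_train_batch_size": 8,
#         "num_train_epochs": 3
#     }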
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    print(model_args, '\n', data_args, '\n', training_args)
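    # Seed the Python and PyTorch RNGs with the --seed value from TrainingArguments.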
    torch.manual_seed(training_args.seed)
    random.seed(training_args.seed)

    device_str = 'cpu'
    if torch.cuda.is_available():
        device_str = 'cuda:{}'.format(0)
    device = torch.device(device_str)
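    # Resolve the loss coefficients: the gap-prediction term is switched off
    # entirely when --no_gap_prediction is set. Per-class label/gap weights are
    # passed on the command line as JSON strings mapping class index -> weight.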
    gamma = 0 if model_args.no_gap_prediction else model_args.gamma
    if model_args.label_weight is not None:
        label_weight = json.loads(model_args.label_weight)
        label_weight = {int(k): v for k, v in label_weight.items()}
    else:
        label_weight = None
    if model_args.gap_weight is not None:
        gap_weight = json.loads(model_args.gap_weight)
        gap_weight = {int(k): v for k, v in gap_weight.items()}
    else:
        gap_weight = None
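    # Build the GRECO model; alpha, beta, gamma, and epsilon weight the word
    # prediction, gap prediction, ranking, and F0.5 label-aggregation losses.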
    model = GRECO(
        lm=model_args.model_name_or_path,
        tokenizer=model_args.tokenizer_name,
        dropout=model_args.word_dropout,
        alpha=model_args.alpha,
        beta=model_args.beta,
        gamma=gamma,
        estimator_loss=model_args.estimator_loss,
        epsilon=model_args.epsilon,
        label_weight=label_weight,
        gap_weight=gap_weight,
        edit_weight=model_args.edit_weight,
        rank_multiplier=model_args.rank_multiplier,
        rank_sample=model_args.rank_sample,
    )
    if model_args.pretrained_path is not None:
        model.load_state_dict(torch.load(model_args.pretrained_path))
    tokenizer = model.tokenizer
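    # Two data layouts are supported: 'serial' loads a flat JSON file with the
    # `datasets` library and tokenizes it on the fly, while 'hierarchical' uses
    # TrainDataset, which exposes a `bucket_size` that the model and the batch
    # sizes below must account for.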
    if training_args.do_train:
        if data_args.data_mode == 'serial':
            train_dataset = load_dataset("json", data_files=data_args.data)
            train_dataset = train_dataset.map(
                functools.partial(tokenize, tokenizer, mask_source=data_args.mask_source),
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
            train_dataset = train_dataset["train"]
        elif data_args.data_mode == 'hierarchical':
            train_dataset = TrainDataset(data_args.data, tokenizer,
                                         mask_source=data_args.mask_source,
                                         label_smoothing=data_args.label_smoothing,
                                         additional_mask=data_args.additional_mask,)
            bucket_size = train_dataset.bucket_size
            model.set_bucket_size(bucket_size)
        else:
            raise ValueError("{} data mode is not supported.".format(data_args.data_mode))
        data_collator = CustomDataCollator(tokenizer,
                                           serialize=data_args.data_mode == 'hierarchical',
                                           pad_to_multiple_of=8 if training_args.fp16 else None)

        # Initialize our Trainer
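        # In hierarchical mode each dataset item expands into bucket_size
        # sequences, so the configured batch sizes are divided by bucket_size
        # to keep the number of sequences per device roughly as configured.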
        if data_args.data_mode == 'hierarchical':
            training_args.per_device_train_batch_size = \
                training_args.per_device_train_batch_size // bucket_size
            if training_args.per_gpu_train_batch_size is not None:
                training_args.per_gpu_train_batch_size = \
                    training_args.per_gpu_train_batch_size // bucket_size
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            tokenizer=tokenizer,
            data_collator=data_collator,
        )
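        # Pick up an existing checkpoint in output_dir so interrupted runs can
        # resume; an explicit --resume_from_checkpoint takes precedence over it.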
        last_checkpoint = None
        if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
            last_checkpoint = get_last_checkpoint(training_args.output_dir)
            if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
                raise ValueError(
                    f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                    "Use --overwrite_output_dir to overcome."
                )
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()  # Saves the tokenizer too for easy upload

        metrics["train_samples"] = len(train_dataset)
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    else:
        raise ValueError("This script is for training only; run it with --do_train.")


if __name__ == "__main__":
    main()