Dev #140

Merged 5 commits on Aug 1, 2023
1 change: 0 additions & 1 deletion merge_peft_adapter.py
@@ -65,7 +65,6 @@ def main():
raise ValueError("chatglm does not support sequence classification")
base_model = AutoModelForSequenceClassification.from_pretrained(
base_model_path,
num_labels=1,
load_in_8bit=False,
torch_dtype=torch.float16,
trust_remote_code=True,
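
For context, merge_peft_adapter.py loads the reward base model and folds a trained adapter back into it; the one-line change above simply drops num_labels=1 from that load. A minimal sketch of the surrounding merge flow, assuming a LoRA-style adapter trained with the peft library (paths and the save location are placeholders):

import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification

# Placeholder paths; substitute the actual base model and adapter checkpoints.
base_model_path = "path/to/base-model"
adapter_path = "path/to/lora-adapter"

base_model = AutoModelForSequenceClassification.from_pretrained(
    base_model_path,
    load_in_8bit=False,
    torch_dtype=torch.float16,
    trust_remote_code=True,
)
# Attach the adapter, then fold its weights into the base model and save the result.
model = PeftModel.from_pretrained(base_model, adapter_path)
model = model.merge_and_unload()
model.save_pretrained("path/to/merged-model")

Since the saved reward-model config already records the head size, respecifying num_labels at merge time is redundant, which is presumably why the argument is removed here.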
3 changes: 1 addition & 2 deletions reward_modeling.py
@@ -376,6 +376,7 @@ def main():
model_args.device_map = {"": int(os.environ["LOCAL_RANK"]) or 0}
config = config_class.from_pretrained(
model_args.model_name_or_path,
num_labels=1,
torch_dtype=torch_dtype,
trust_remote_code=model_args.trust_remote_code,
cache_dir=model_args.cache_dir
@@ -384,7 +385,6 @@ def main():
model = model_class.from_pretrained(
model_args.model_name_or_path,
config=config,
num_labels=1,
load_in_8bit=model_args.load_in_8bit,
device_map=model_args.device_map,
trust_remote_code=model_args.trust_remote_code,
@@ -394,7 +394,6 @@ def main():
model = model_class.from_pretrained(
model_args.model_name_or_path,
config=config,
num_labels=1,
cache_dir=model_args.cache_dir,
ignore_mismatched_sizes=True
)
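
The reward_modeling.py change moves num_labels=1 off the individual model from_pretrained calls and onto the config load. A minimal sketch of that pattern with the standard Hugging Face API (the path is a placeholder):

import torch
from transformers import AutoConfig, AutoModelForSequenceClassification

model_name_or_path = "path/to/base-model"  # placeholder

# Declare the single-score regression head on the config once...
config = AutoConfig.from_pretrained(
    model_name_or_path,
    num_labels=1,
    trust_remote_code=True,
)

# ...so every model load that receives this config gets the same head size.
model = AutoModelForSequenceClassification.from_pretrained(
    model_name_or_path,
    config=config,
    torch_dtype=torch.float16,
    trust_remote_code=True,
)
print(model.config.num_labels)  # 1

Setting it once on the config keeps both load paths shown in the diff (the 8-bit/device-mapped one and the ignore_mismatched_sizes one) consistent without repeating the argument.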
21 changes: 17 additions & 4 deletions supervised_finetuning.py
@@ -121,7 +121,7 @@ def __post_init__(self):
if self.model_name_or_path is None:
raise ValueError("You must specify a valid model_name_or_path to run training.")
if self.model_max_length < 256:
raise ValueError("You must set model_max_length more than 256, default is 512")
raise ValueError("You must specify a valid model_max_length >= 256 to run training.")


@dataclass
@@ -726,6 +726,8 @@ def preprocess_function(examples):
# Mask targets. Only compute loss on the assistant outputs.
sep = conv.sep + conv.roles[1] + ": "
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())

turns = conversation.split(conv.sep2)
cur_len = 1
target[:cur_len] = IGNORE_INDEX
@@ -738,13 +740,22 @@
if len(parts) != 2:
break
parts[0] += sep
# "-2" is hardcoded for the LLaMA tokenizer to make the offset correct.
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
instruction_len = len(tokenizer(parts[0]).input_ids)
if model_args.model_type in ['llama']:
# "-2" is hardcoded for the LLaMA tokenizer to make the offset correct.
instruction_len = instruction_len - 2

# Ignore the user instructions
target[cur_len: cur_len + instruction_len] = IGNORE_INDEX
cur_len += turn_len

target[cur_len:] = IGNORE_INDEX

if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
logger.warning(f"tokenization mismatch: {cur_len} vs. {total_len}. (ignored)")

return dict(
input_ids=input_ids,
labels=targets,
@@ -891,7 +902,9 @@ def preprocess_function(examples):
# Training
if training_args.do_train:
logger.info("*** Train ***")
logger.debug(f"Train dataloader example: {next(iter(trainer.get_train_dataloader()))}")
sample = next(iter(trainer.get_train_dataloader()))
logger.debug(f"Train dataloader example: {sample}")
logger.debug(f"Details: \ninput_ids: {list(sample['input_ids'])}, \nlabels: {list(sample['labels'])}")
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
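
The masking hunk in preprocess_function above keeps the loss on assistant replies only, and now additionally drops an example when the accumulated turn lengths do not match the number of non-padding tokens. A simplified, self-contained sketch of that pattern (the helper name and the pre-computed turn/instruction lengths are hypothetical; the real code derives them from the conversation template and tokenizer, and only runs the mismatch check when the sequence was not truncated at model_max_length):

import torch

IGNORE_INDEX = -100  # label id ignored by the cross-entropy loss

def mask_targets(target: torch.Tensor, turn_lens, instruction_lens, pad_token_id: int) -> torch.Tensor:
    """Mask user instructions so the loss is computed on assistant outputs only."""
    total_len = int(target.ne(pad_token_id).sum())  # count of non-padding tokens
    cur_len = 1                                     # skip the leading BOS token
    target[:cur_len] = IGNORE_INDEX
    for turn_len, instruction_len in zip(turn_lens, instruction_lens):
        # Hide the user instruction of this turn from the loss.
        target[cur_len: cur_len + instruction_len] = IGNORE_INDEX
        cur_len += turn_len
    target[cur_len:] = IGNORE_INDEX                 # mask everything past the last turn
    if cur_len != total_len:
        # Tokenization drifted from the expected turn lengths: ignore the whole example.
        target[:] = IGNORE_INDEX
    return target

The same hunk also makes the hardcoded "-2" offset on the instruction length apply only when model_type is 'llama', since other tokenizers do not need that correction.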