cciwon-code-review-cli 2.0.2 → 2.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/api-client.js +1 -1
- package/lib/chat-mode.js +7 -2
- package/package.json +1 -1
- package/unsloth_compiled_cache/.locks/.lock.AqlmLoraLinear_peft_forward.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.AwqLoraLinear_peft_forward.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.BatchNorm1d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.BatchNorm2d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.BatchNorm3d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.Conv1d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.Conv2d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.Conv3d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.ConvTranspose1d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.ConvTranspose2d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.ConvTranspose3d.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.GPTQLoraLinear_peft_forward.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.GroupNorm.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.LayerNorm.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.Linear4bit_peft_forward.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.Linear8bitLt_peft_forward.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.Linear_peft_forward.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.LoraParallelLinear_peft_forward.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.RMSNorm.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothBCOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothCPOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothDPOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothGKDTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothGRPOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothKTOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothNashMDTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothORPOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothOnlineDPOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothPPOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothPRMTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothRLOOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothRewardTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothSFTTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.UnslothXPOTrainer.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.unsloth_compiled_module_qwen3_moe.py +0 -0
- package/unsloth_compiled_cache/.locks/.lock.unsloth_compiled_module_siglip.py +0 -0
- package/unsloth_compiled_cache/AqlmLoraLinear_peft_forward.py +88 -0
- package/unsloth_compiled_cache/AwqLoraLinear_peft_forward.py +87 -0
- package/unsloth_compiled_cache/BatchNorm1d.py +117 -0
- package/unsloth_compiled_cache/BatchNorm2d.py +117 -0
- package/unsloth_compiled_cache/BatchNorm3d.py +117 -0
- package/unsloth_compiled_cache/Conv1d.py +70 -0
- package/unsloth_compiled_cache/Conv2d.py +70 -0
- package/unsloth_compiled_cache/Conv3d.py +70 -0
- package/unsloth_compiled_cache/ConvTranspose1d.py +97 -0
- package/unsloth_compiled_cache/ConvTranspose2d.py +106 -0
- package/unsloth_compiled_cache/ConvTranspose3d.py +98 -0
- package/unsloth_compiled_cache/GPTQLoraLinear_peft_forward.py +95 -0
- package/unsloth_compiled_cache/GroupNorm.py +70 -0
- package/unsloth_compiled_cache/LayerNorm.py +72 -0
- package/unsloth_compiled_cache/Linear4bit_peft_forward.py +115 -0
- package/unsloth_compiled_cache/Linear8bitLt_peft_forward.py +113 -0
- package/unsloth_compiled_cache/Linear_peft_forward.py +104 -0
- package/unsloth_compiled_cache/LoraParallelLinear_peft_forward.py +91 -0
- package/unsloth_compiled_cache/RMSNorm.py +73 -0
- package/unsloth_compiled_cache/UnslothBCOTrainer.py +2026 -0
- package/unsloth_compiled_cache/UnslothCPOTrainer.py +1806 -0
- package/unsloth_compiled_cache/UnslothDPOTrainer.py +2750 -0
- package/unsloth_compiled_cache/UnslothGKDTrainer.py +1157 -0
- package/unsloth_compiled_cache/UnslothGRPOTrainer.py +3607 -0
- package/unsloth_compiled_cache/UnslothKTOTrainer.py +2220 -0
- package/unsloth_compiled_cache/UnslothNashMDTrainer.py +1210 -0
- package/unsloth_compiled_cache/UnslothORPOTrainer.py +1730 -0
- package/unsloth_compiled_cache/UnslothOnlineDPOTrainer.py +2313 -0
- package/unsloth_compiled_cache/UnslothPPOTrainer.py +1504 -0
- package/unsloth_compiled_cache/UnslothPRMTrainer.py +979 -0
- package/unsloth_compiled_cache/UnslothRLOOTrainer.py +2674 -0
- package/unsloth_compiled_cache/UnslothRewardTrainer.py +1197 -0
- package/unsloth_compiled_cache/UnslothSFTTrainer.py +1416 -0
- package/unsloth_compiled_cache/UnslothXPOTrainer.py +1255 -0
- package/unsloth_compiled_cache/__pycache__/AqlmLoraLinear_peft_forward.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/AwqLoraLinear_peft_forward.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/BatchNorm1d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/BatchNorm2d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/BatchNorm3d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/Conv1d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/Conv2d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/Conv3d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/ConvTranspose1d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/ConvTranspose2d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/ConvTranspose3d.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/GPTQLoraLinear_peft_forward.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/GroupNorm.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/LayerNorm.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/Linear4bit_peft_forward.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/Linear8bitLt_peft_forward.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/Linear_peft_forward.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/LoraParallelLinear_peft_forward.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/RMSNorm.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothBCOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothCPOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothDPOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothGKDTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothGRPOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothKTOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothNashMDTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothORPOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothOnlineDPOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothPPOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothPRMTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothRLOOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothRewardTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothSFTTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/UnslothXPOTrainer.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/unsloth_compiled_module_qwen3_moe.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/__pycache__/unsloth_compiled_module_siglip.cpython-312.pyc +0 -0
- package/unsloth_compiled_cache/unsloth_compiled_module_qwen3_moe.py +726 -0
- package/unsloth_compiled_cache/unsloth_compiled_module_siglip.py +534 -0
|
@@ -0,0 +1,2026 @@
|
|
|
1
|
+
"""
|
|
2
|
+
2025.12.6
|
|
3
|
+
2025.12.7
|
|
4
|
+
4.57.1
|
|
5
|
+
0.24.0
|
|
6
|
+
__UNSLOTH_VERSIONING__
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
# Unsloth auto generated code
|
|
10
|
+
# Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
|
|
11
|
+
#
|
|
12
|
+
# This program is free software: you can redistribute it and/or modify
|
|
13
|
+
# it under the terms of the GNU Lesser General Public License as published by
|
|
14
|
+
# the Free Software Foundation, either version 3 of the License, or
|
|
15
|
+
# (at your option) any later version.
|
|
16
|
+
#
|
|
17
|
+
# This program is distributed in the hope that it will be useful,
|
|
18
|
+
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
19
|
+
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
20
|
+
# GNU General Public License for more details.
|
|
21
|
+
#
|
|
22
|
+
# You should have received a copy of the GNU Lesser General Public License
|
|
23
|
+
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
24
|
+
|
|
25
|
+
from torch import Tensor
|
|
26
|
+
import torch
|
|
27
|
+
import torch.nn as nn
|
|
28
|
+
from torch.nn import functional as F
|
|
29
|
+
from typing import Any, List, Optional, Tuple, Union, Dict, Set, Callable
|
|
30
|
+
from trl.trainer.bco_trainer import (Any, AutoModelForCausalLM, BCOConfig, BCOTrainer, BaseImageProcessor, BaseTrainer, CLF_NAME, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, Literal, Optional, PartialState, Path, PeftModel, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, RUNNING_NAME, RunningMoments, SequentialSampler, TrainerCallback, TrainingArguments, Union, _process_tokens, _tokenize, autocast, contextmanager, create_reference_model, defaultdict, disable_dropout_in_model, has_length, inspect, is_comet_available, is_joblib_available, is_peft_available, is_sklearn_available, is_wandb_available, itemgetter, log_table_to_comet_experiment, logger, logging, maybe_apply_chat_template, maybe_extract_prompt, maybe_unpair_preference_dataset, nn, np, nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_deepspeed, prepare_model_for_kbit_training, random, selective_log_softmax, textwrap, torch, tqdm, warnings, F, Optional, PeftModel, PreTrainedModel, is_peft_available, logger, os, torch)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
import os
|
|
34
|
+
from typing import *
|
|
35
|
+
from dataclasses import dataclass, field
|
|
36
|
+
from packaging.version import Version
|
|
37
|
+
import torch
|
|
38
|
+
import numpy as np
|
|
39
|
+
from contextlib import nullcontext
|
|
40
|
+
from torch.nn import functional as F
|
|
41
|
+
import inspect
|
|
42
|
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling
|
|
43
|
+
from transformers.training_args import ParallelMode
|
|
44
|
+
|
|
45
|
+
# Wrap trainer with padding to right and enable training mode
|
|
46
|
+
# Also patches W&B since multiple runs must use wandb.finish()
|
|
47
|
+
import functools
|
|
48
|
+
from types import MethodType
|
|
49
|
+
def prepare_for_training_mode(f):
|
|
50
|
+
@functools.wraps(f)
|
|
51
|
+
def wrapper(self, *args, **kwargs):
|
|
52
|
+
# Enable training mode
|
|
53
|
+
if hasattr(self, 'model') and hasattr(self.model, "for_training"):
|
|
54
|
+
self.model.for_training()
|
|
55
|
+
output = f(self, *args, **kwargs)
|
|
56
|
+
# Return inference mode
|
|
57
|
+
if hasattr(self, 'model') and hasattr(self.model, "for_inference"):
|
|
58
|
+
self.model.for_inference()
|
|
59
|
+
# Patch W&B to enable logging on future runs, otherwise it'll overwrite the first run
|
|
60
|
+
try:
|
|
61
|
+
import wandb
|
|
62
|
+
wandb.finish()
|
|
63
|
+
except:
|
|
64
|
+
pass
|
|
65
|
+
return output
|
|
66
|
+
return wrapper
|
|
67
|
+
pass
|
|
68
|
+
|
|
69
|
+
torch_compile_options = {
|
|
70
|
+
"epilogue_fusion" : True,
|
|
71
|
+
"max_autotune" : False,
|
|
72
|
+
"shape_padding" : True,
|
|
73
|
+
"trace.enabled" : False,
|
|
74
|
+
"triton.cudagraphs" : False,
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
|
|
78
|
+
def chunked_selective_log_softmax(logits, index):
|
|
79
|
+
# Split into 4 chunks only
|
|
80
|
+
chunked_logits = torch.chunk(logits.reshape(-1, logits.shape[-1]), chunks = 4, dim = 0)
|
|
81
|
+
chunked_index = torch.chunk(index.reshape(-1), chunks = 4, dim = 0)
|
|
82
|
+
all_per_token_logps = []
|
|
83
|
+
# Below loop does the same as selective_log_softmax(chunk_logits, chunk_index)
|
|
84
|
+
for chunk_logits, chunk_index in zip(chunked_logits, chunked_index):
|
|
85
|
+
chunk_logits = chunk_logits.to(torch.float32)
|
|
86
|
+
selected_logits = torch.gather(chunk_logits, dim = -1, index = chunk_index.unsqueeze(-1)).squeeze(-1)
|
|
87
|
+
logsumexp_values = torch.logsumexp(chunk_logits, dim = -1)
|
|
88
|
+
per_token_logps = selected_logits - logsumexp_values
|
|
89
|
+
all_per_token_logps.append(per_token_logps)
|
|
90
|
+
pass
|
|
91
|
+
all_per_token_logps = torch.concat(all_per_token_logps)
|
|
92
|
+
all_per_token_logps = all_per_token_logps.reshape((logits.shape[0], logits.shape[1]))
|
|
93
|
+
return all_per_token_logps
|
|
94
|
+
|
|
95
|
+
def calculate_pad_tokens_in_prompt(
|
|
96
|
+
input_ids: torch.Tensor,
|
|
97
|
+
logits_to_keep: int,
|
|
98
|
+
pad_token_id: int
|
|
99
|
+
) -> torch.Tensor:
|
|
100
|
+
"""
|
|
101
|
+
Given prompt tensor, it returns all the left padded tokens in that sequence. so [pad, pad, pad, cat] = 3 tokens
|
|
102
|
+
"""
|
|
103
|
+
if logits_to_keep >= input_ids.shape[1]:
|
|
104
|
+
raise ValueError("logits_to_keep must be smaller than the sequence length.")
|
|
105
|
+
|
|
106
|
+
prompt_section = input_ids[:, :-logits_to_keep]
|
|
107
|
+
|
|
108
|
+
padding_mask = (prompt_section == pad_token_id)
|
|
109
|
+
|
|
110
|
+
pad_token_counts = padding_mask.sum(dim=1)
|
|
111
|
+
|
|
112
|
+
return pad_token_counts
|
|
113
|
+
|
|
114
|
+
def create_completion_attention_mask(
|
|
115
|
+
completion_input_ids: torch.Tensor,
|
|
116
|
+
left_pad_tokens_per_prompt: torch.Tensor,
|
|
117
|
+
max_left_pad: int,
|
|
118
|
+
pad_token_id: int
|
|
119
|
+
) -> torch.Tensor:
|
|
120
|
+
"""
|
|
121
|
+
Given that we have a sequence, [p,p,p,c,c,c,pad,pad,pad]
|
|
122
|
+
|
|
123
|
+
Where p are extra prompt tokens we got from slicing the torch tensor, c is completion tokens
|
|
124
|
+
and pad are pad tokens, this function would make a completion mask that would 0 out the pad
|
|
125
|
+
and p tokens. so in this example [0,0,0,1,1,1,0,0,0]
|
|
126
|
+
"""
|
|
127
|
+
batch_size, completion_len = completion_input_ids.shape
|
|
128
|
+
device = completion_input_ids.device
|
|
129
|
+
|
|
130
|
+
num_tokens_to_mask = max_left_pad - left_pad_tokens_per_prompt
|
|
131
|
+
|
|
132
|
+
indices = torch.arange(completion_len, device=device).unsqueeze(0)
|
|
133
|
+
shift_mask = indices >= num_tokens_to_mask.unsqueeze(1)
|
|
134
|
+
|
|
135
|
+
non_padding_mask = (completion_input_ids != pad_token_id)
|
|
136
|
+
|
|
137
|
+
final_mask = shift_mask & non_padding_mask
|
|
138
|
+
|
|
139
|
+
return final_mask
|
|
140
|
+
|
|
141
|
+
def left_pack_padding(tensor: torch.Tensor, pad_id: int) -> torch.Tensor:
|
|
142
|
+
"""
|
|
143
|
+
Moves all padding tokens in each sequence of a batch to the right.
|
|
144
|
+
"""
|
|
145
|
+
mask = (tensor != pad_id)
|
|
146
|
+
# Must do stable=True since binary mark is unordered
|
|
147
|
+
sorted_indices = torch.argsort(mask, dim=1, descending=True, stable=True)
|
|
148
|
+
packed_tensor = torch.gather(tensor, 1, sorted_indices)
|
|
149
|
+
return packed_tensor
|
|
150
|
+
|
|
151
|
+
def align_logprobs_with_mask(
|
|
152
|
+
logprob_tensor: torch.Tensor,
|
|
153
|
+
attention_mask: torch.Tensor,
|
|
154
|
+
pad_value: float = 0.0
|
|
155
|
+
) -> torch.Tensor:
|
|
156
|
+
"""
|
|
157
|
+
Aligns a log probability tensor with a given attention mask.
|
|
158
|
+
"""
|
|
159
|
+
|
|
160
|
+
device = logprob_tensor.device
|
|
161
|
+
batch_size, logprob_seq_len = logprob_tensor.shape
|
|
162
|
+
mask_seq_len = attention_mask.shape[1]
|
|
163
|
+
|
|
164
|
+
padded_logprobs = torch.full(
|
|
165
|
+
attention_mask.shape,
|
|
166
|
+
fill_value=pad_value,
|
|
167
|
+
dtype=logprob_tensor.dtype,
|
|
168
|
+
device=device
|
|
169
|
+
)
|
|
170
|
+
|
|
171
|
+
left_pad_counts = torch.argmax(attention_mask, dim=1)
|
|
172
|
+
|
|
173
|
+
cols = torch.arange(logprob_seq_len, device=device)
|
|
174
|
+
dest_indices = left_pad_counts.unsqueeze(1) + cols
|
|
175
|
+
|
|
176
|
+
# Create destination row indices
|
|
177
|
+
# Shape: [batch_size, logprob_seq_len]
|
|
178
|
+
row_indices = torch.arange(batch_size, device=device).unsqueeze(1).expand_as(dest_indices)
|
|
179
|
+
|
|
180
|
+
# --- 4. Filter out-of-bounds indices and perform assignment ---
|
|
181
|
+
# Create a mask to identify only the indices that are within the bounds
|
|
182
|
+
# of the target tensor's sequence length.
|
|
183
|
+
valid_mask = dest_indices < mask_seq_len
|
|
184
|
+
|
|
185
|
+
# Use this mask to select only the valid row indices, column indices,
|
|
186
|
+
# and the corresponding values from the logprob tensor.
|
|
187
|
+
# This flattens the selected elements into 1D tensors.
|
|
188
|
+
valid_rows = row_indices[valid_mask]
|
|
189
|
+
valid_cols = dest_indices[valid_mask]
|
|
190
|
+
valid_vals = logprob_tensor[valid_mask]
|
|
191
|
+
|
|
192
|
+
# Place the valid values into their correct positions in the padded tensor
|
|
193
|
+
# using a single, efficient advanced indexing operation.
|
|
194
|
+
padded_logprobs[valid_rows, valid_cols] = valid_vals
|
|
195
|
+
|
|
196
|
+
return padded_logprobs
|
|
197
|
+
@dataclass
|
|
198
|
+
class UnslothBCOConfig(BCOConfig):
|
|
199
|
+
"""
|
|
200
|
+
|
|
201
|
+
Configuration class for the [`BCOTrainer`].
|
|
202
|
+
|
|
203
|
+
This class includes only the parameters that are specific to BCO training. For a full list of training arguments,
|
|
204
|
+
please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
|
|
205
|
+
differ from those in [`~transformers.TrainingArguments`].
|
|
206
|
+
|
|
207
|
+
Using [`~transformers.HfArgumentParser`] we can turn this class into
|
|
208
|
+
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
|
|
209
|
+
command line.
|
|
210
|
+
|
|
211
|
+
Parameters:
|
|
212
|
+
max_length (`int` or `None`, *optional*, defaults to `1024`):
|
|
213
|
+
Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
|
|
214
|
+
to use the default data collator.
|
|
215
|
+
max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
|
|
216
|
+
Maximum length of the prompt. This argument is required if you want to use the default data collator.
|
|
217
|
+
max_completion_length (`int`, *optional*):
|
|
218
|
+
Maximum length of the completion. This argument is required if you want to use the default data collator
|
|
219
|
+
and your model is an encoder-decoder.
|
|
220
|
+
beta (`float`, *optional*, defaults to `0.1`):
|
|
221
|
+
Parameter controlling the deviation from the reference model. Higher β means less deviation from the
|
|
222
|
+
reference model.
|
|
223
|
+
label_pad_token_id (`int`, *optional*, defaults to `-100`):
|
|
224
|
+
Label pad token id. This argument is required if you want to use the default data collator.
|
|
225
|
+
padding_value (`int`, *optional*):
|
|
226
|
+
Padding value to use. If `None`, the padding value of the tokenizer is used.
|
|
227
|
+
truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
|
|
228
|
+
Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
|
|
229
|
+
This argument is required if you want to use the default data collator.
|
|
230
|
+
disable_dropout (`bool`, *optional*, defaults to `True`):
|
|
231
|
+
Whether to disable dropout in the model and reference model.
|
|
232
|
+
generate_during_eval (`bool`, *optional*, defaults to `False`):
|
|
233
|
+
If `True`, generates and logs completions from both the model and the reference model to W&B or Comet
|
|
234
|
+
during evaluation.
|
|
235
|
+
is_encoder_decoder (`bool`, *optional*):
|
|
236
|
+
When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
|
|
237
|
+
you need to specify if the model returned by the callable is an encoder-decoder model.
|
|
238
|
+
precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
|
|
239
|
+
Whether to precompute reference model log probabilities for training and evaluation datasets. This is
|
|
240
|
+
useful when training without the reference model to reduce the total GPU memory needed.
|
|
241
|
+
model_init_kwargs (`dict[str, Any]`, *optional*):
|
|
242
|
+
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
|
|
243
|
+
string.
|
|
244
|
+
ref_model_init_kwargs (`dict[str, Any]`, *optional*):
|
|
245
|
+
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
|
|
246
|
+
from a string.
|
|
247
|
+
dataset_num_proc (`int`, *optional*):
|
|
248
|
+
Number of processes to use for processing the dataset.
|
|
249
|
+
prompt_sample_size (`int`, *optional*, defaults to `1024`):
|
|
250
|
+
Number of prompts that are fed to density ratio classifier.
|
|
251
|
+
min_density_ratio (`float`, *optional*, defaults to `0.5`):
|
|
252
|
+
Minimum value of the density ratio. The estimated density ratio is clamped to this value.
|
|
253
|
+
max_density_ratio (`float`, *optional*, defaults to `10.0`):
|
|
254
|
+
Maximum value of the density ratio. The estimated density ratio is clamped to this value.
|
|
255
|
+
|
|
256
|
+
"""
|
|
257
|
+
vllm_sampling_params: Optional[Any] = field(
|
|
258
|
+
default = None,
|
|
259
|
+
metadata = {'help': 'vLLM SamplingParams'},
|
|
260
|
+
)
|
|
261
|
+
unsloth_num_chunks : Optional[int] = field(
|
|
262
|
+
default = -1,
|
|
263
|
+
metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
|
|
264
|
+
)
|
|
265
|
+
max_seq_length : Optional[int] = field(
|
|
266
|
+
default = None,
|
|
267
|
+
metadata = {'help': 'Maximum sequence length to truncate to.'},
|
|
268
|
+
)
|
|
269
|
+
def __init__(
|
|
270
|
+
self,
|
|
271
|
+
output_dir = None,
|
|
272
|
+
overwrite_output_dir = None,
|
|
273
|
+
do_train = False,
|
|
274
|
+
do_eval = False,
|
|
275
|
+
do_predict = False,
|
|
276
|
+
eval_strategy = 'no',
|
|
277
|
+
prediction_loss_only = False,
|
|
278
|
+
per_device_train_batch_size = 4,
|
|
279
|
+
per_device_eval_batch_size = 4,
|
|
280
|
+
per_gpu_train_batch_size = None,
|
|
281
|
+
per_gpu_eval_batch_size = None,
|
|
282
|
+
gradient_accumulation_steps = 2,
|
|
283
|
+
eval_accumulation_steps = 2,
|
|
284
|
+
eval_delay = 0,
|
|
285
|
+
torch_empty_cache_steps = 250,
|
|
286
|
+
learning_rate = 5e-05,
|
|
287
|
+
weight_decay = 0.01,
|
|
288
|
+
adam_beta1 = 0.9,
|
|
289
|
+
adam_beta2 = 0.999,
|
|
290
|
+
adam_epsilon = 1e-08,
|
|
291
|
+
max_grad_norm = 1.0,
|
|
292
|
+
num_train_epochs = 3.0,
|
|
293
|
+
max_steps = -1,
|
|
294
|
+
lr_scheduler_type = 'linear',
|
|
295
|
+
warmup_ratio = 0.1,
|
|
296
|
+
warmup_steps = 0,
|
|
297
|
+
log_level = 'passive',
|
|
298
|
+
log_level_replica = 'warning',
|
|
299
|
+
log_on_each_node = True,
|
|
300
|
+
logging_dir = None,
|
|
301
|
+
logging_strategy = 'steps',
|
|
302
|
+
logging_first_step = False,
|
|
303
|
+
logging_steps = 1,
|
|
304
|
+
logging_nan_inf_filter = False,
|
|
305
|
+
save_strategy = 'steps',
|
|
306
|
+
save_steps = 500,
|
|
307
|
+
save_total_limit = None,
|
|
308
|
+
save_safetensors = True,
|
|
309
|
+
save_on_each_node = False,
|
|
310
|
+
save_only_model = False,
|
|
311
|
+
restore_callback_states_from_checkpoint = False,
|
|
312
|
+
no_cuda = False,
|
|
313
|
+
use_cpu = False,
|
|
314
|
+
use_mps_device = False,
|
|
315
|
+
seed = 3407,
|
|
316
|
+
data_seed = 3407,
|
|
317
|
+
jit_mode_eval = False,
|
|
318
|
+
bf16 = False,
|
|
319
|
+
fp16 = False,
|
|
320
|
+
fp16_opt_level = 'O1',
|
|
321
|
+
half_precision_backend = 'auto',
|
|
322
|
+
bf16_full_eval = False,
|
|
323
|
+
fp16_full_eval = False,
|
|
324
|
+
tf32 = None,
|
|
325
|
+
local_rank = -1,
|
|
326
|
+
ddp_backend = None,
|
|
327
|
+
tpu_num_cores = None,
|
|
328
|
+
tpu_metrics_debug = False,
|
|
329
|
+
debug = '',
|
|
330
|
+
dataloader_drop_last = False,
|
|
331
|
+
eval_steps = None,
|
|
332
|
+
dataloader_num_workers = 0,
|
|
333
|
+
dataloader_prefetch_factor = None,
|
|
334
|
+
past_index = -1,
|
|
335
|
+
run_name = None,
|
|
336
|
+
disable_tqdm = None,
|
|
337
|
+
remove_unused_columns = True,
|
|
338
|
+
label_names = None,
|
|
339
|
+
load_best_model_at_end = False,
|
|
340
|
+
metric_for_best_model = None,
|
|
341
|
+
greater_is_better = None,
|
|
342
|
+
ignore_data_skip = False,
|
|
343
|
+
fsdp = None,
|
|
344
|
+
fsdp_min_num_params = 0,
|
|
345
|
+
fsdp_config = None,
|
|
346
|
+
fsdp_transformer_layer_cls_to_wrap = None,
|
|
347
|
+
accelerator_config = None,
|
|
348
|
+
parallelism_config = None,
|
|
349
|
+
deepspeed = None,
|
|
350
|
+
label_smoothing_factor = 0.0,
|
|
351
|
+
optim = 'adamw_8bit',
|
|
352
|
+
optim_args = None,
|
|
353
|
+
adafactor = False,
|
|
354
|
+
group_by_length = False,
|
|
355
|
+
length_column_name = 'length',
|
|
356
|
+
report_to = 'none',
|
|
357
|
+
project = 'huggingface',
|
|
358
|
+
trackio_space_id = 'trackio',
|
|
359
|
+
ddp_find_unused_parameters = None,
|
|
360
|
+
ddp_bucket_cap_mb = None,
|
|
361
|
+
ddp_broadcast_buffers = None,
|
|
362
|
+
dataloader_pin_memory = True,
|
|
363
|
+
dataloader_persistent_workers = False,
|
|
364
|
+
skip_memory_metrics = True,
|
|
365
|
+
use_legacy_prediction_loop = False,
|
|
366
|
+
push_to_hub = False,
|
|
367
|
+
resume_from_checkpoint = None,
|
|
368
|
+
hub_model_id = None,
|
|
369
|
+
hub_strategy = 'every_save',
|
|
370
|
+
hub_token = None,
|
|
371
|
+
hub_private_repo = None,
|
|
372
|
+
hub_always_push = False,
|
|
373
|
+
hub_revision = None,
|
|
374
|
+
gradient_checkpointing = True,
|
|
375
|
+
gradient_checkpointing_kwargs = None,
|
|
376
|
+
include_inputs_for_metrics = False,
|
|
377
|
+
eval_do_concat_batches = True,
|
|
378
|
+
fp16_backend = 'auto',
|
|
379
|
+
push_to_hub_model_id = None,
|
|
380
|
+
push_to_hub_organization = None,
|
|
381
|
+
push_to_hub_token = None,
|
|
382
|
+
mp_parameters = '',
|
|
383
|
+
auto_find_batch_size = False,
|
|
384
|
+
full_determinism = False,
|
|
385
|
+
torchdynamo = None,
|
|
386
|
+
ray_scope = 'last',
|
|
387
|
+
ddp_timeout = 1800,
|
|
388
|
+
torch_compile = False,
|
|
389
|
+
torch_compile_backend = None,
|
|
390
|
+
torch_compile_mode = None,
|
|
391
|
+
include_tokens_per_second = False,
|
|
392
|
+
include_num_input_tokens_seen = False,
|
|
393
|
+
neftune_noise_alpha = None,
|
|
394
|
+
optim_target_modules = None,
|
|
395
|
+
batch_eval_metrics = False,
|
|
396
|
+
eval_on_start = False,
|
|
397
|
+
use_liger_kernel = False,
|
|
398
|
+
liger_kernel_config = None,
|
|
399
|
+
eval_use_gather_object = False,
|
|
400
|
+
average_tokens_across_devices = True,
|
|
401
|
+
max_length = 1024,
|
|
402
|
+
max_prompt_length = 512,
|
|
403
|
+
max_completion_length = None,
|
|
404
|
+
beta = 0.1,
|
|
405
|
+
label_pad_token_id = -100,
|
|
406
|
+
padding_value = None,
|
|
407
|
+
truncation_mode = 'keep_end',
|
|
408
|
+
disable_dropout = True,
|
|
409
|
+
generate_during_eval = False,
|
|
410
|
+
is_encoder_decoder = None,
|
|
411
|
+
precompute_ref_log_probs = False,
|
|
412
|
+
model_init_kwargs = None,
|
|
413
|
+
ref_model_init_kwargs = None,
|
|
414
|
+
dataset_num_proc = None,
|
|
415
|
+
prompt_sample_size = 1024,
|
|
416
|
+
min_density_ratio = 0.5,
|
|
417
|
+
max_density_ratio = 10.0,
|
|
418
|
+
vllm_sampling_params = None,
|
|
419
|
+
unsloth_num_chunks = -1,
|
|
420
|
+
max_seq_length = None,
|
|
421
|
+
**kwargs,
|
|
422
|
+
):
|
|
423
|
+
if learning_rate < 1e-7: print(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
|
|
424
|
+
if learning_rate > 1: print(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
|
|
425
|
+
if output_dir is None and save_strategy == 'steps' and save_steps == 500:
|
|
426
|
+
output_dir = 'unsloth_training_checkpoints'
|
|
427
|
+
save_strategy = 'no'
|
|
428
|
+
if dataset_num_proc is None:
|
|
429
|
+
from multiprocessing import cpu_count
|
|
430
|
+
dataset_num_proc = min(max(cpu_count()+4, 2), 64)
|
|
431
|
+
|
|
432
|
+
super().__init__(
|
|
433
|
+
output_dir = output_dir,
|
|
434
|
+
overwrite_output_dir = overwrite_output_dir,
|
|
435
|
+
do_train = do_train,
|
|
436
|
+
do_eval = do_eval,
|
|
437
|
+
do_predict = do_predict,
|
|
438
|
+
eval_strategy = eval_strategy,
|
|
439
|
+
prediction_loss_only = prediction_loss_only,
|
|
440
|
+
per_device_train_batch_size = per_device_train_batch_size,
|
|
441
|
+
per_device_eval_batch_size = per_device_eval_batch_size,
|
|
442
|
+
per_gpu_train_batch_size = per_gpu_train_batch_size,
|
|
443
|
+
per_gpu_eval_batch_size = per_gpu_eval_batch_size,
|
|
444
|
+
gradient_accumulation_steps = gradient_accumulation_steps,
|
|
445
|
+
eval_accumulation_steps = eval_accumulation_steps,
|
|
446
|
+
eval_delay = eval_delay,
|
|
447
|
+
torch_empty_cache_steps = torch_empty_cache_steps,
|
|
448
|
+
learning_rate = learning_rate,
|
|
449
|
+
weight_decay = weight_decay,
|
|
450
|
+
adam_beta1 = adam_beta1,
|
|
451
|
+
adam_beta2 = adam_beta2,
|
|
452
|
+
adam_epsilon = adam_epsilon,
|
|
453
|
+
max_grad_norm = max_grad_norm,
|
|
454
|
+
num_train_epochs = num_train_epochs,
|
|
455
|
+
max_steps = max_steps,
|
|
456
|
+
lr_scheduler_type = lr_scheduler_type,
|
|
457
|
+
warmup_ratio = warmup_ratio,
|
|
458
|
+
warmup_steps = warmup_steps,
|
|
459
|
+
log_level = log_level,
|
|
460
|
+
log_level_replica = log_level_replica,
|
|
461
|
+
log_on_each_node = log_on_each_node,
|
|
462
|
+
logging_dir = logging_dir,
|
|
463
|
+
logging_strategy = logging_strategy,
|
|
464
|
+
logging_first_step = logging_first_step,
|
|
465
|
+
logging_steps = logging_steps,
|
|
466
|
+
logging_nan_inf_filter = logging_nan_inf_filter,
|
|
467
|
+
save_strategy = save_strategy,
|
|
468
|
+
save_steps = save_steps,
|
|
469
|
+
save_total_limit = save_total_limit,
|
|
470
|
+
save_safetensors = save_safetensors,
|
|
471
|
+
save_on_each_node = save_on_each_node,
|
|
472
|
+
save_only_model = save_only_model,
|
|
473
|
+
restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
|
|
474
|
+
no_cuda = no_cuda,
|
|
475
|
+
use_cpu = use_cpu,
|
|
476
|
+
use_mps_device = use_mps_device,
|
|
477
|
+
seed = seed,
|
|
478
|
+
data_seed = data_seed,
|
|
479
|
+
jit_mode_eval = jit_mode_eval,
|
|
480
|
+
bf16 = bf16,
|
|
481
|
+
fp16 = fp16,
|
|
482
|
+
fp16_opt_level = fp16_opt_level,
|
|
483
|
+
half_precision_backend = half_precision_backend,
|
|
484
|
+
bf16_full_eval = bf16_full_eval,
|
|
485
|
+
fp16_full_eval = fp16_full_eval,
|
|
486
|
+
tf32 = tf32,
|
|
487
|
+
local_rank = local_rank,
|
|
488
|
+
ddp_backend = ddp_backend,
|
|
489
|
+
tpu_num_cores = tpu_num_cores,
|
|
490
|
+
tpu_metrics_debug = tpu_metrics_debug,
|
|
491
|
+
debug = debug,
|
|
492
|
+
dataloader_drop_last = dataloader_drop_last,
|
|
493
|
+
eval_steps = eval_steps,
|
|
494
|
+
dataloader_num_workers = dataloader_num_workers,
|
|
495
|
+
dataloader_prefetch_factor = dataloader_prefetch_factor,
|
|
496
|
+
past_index = past_index,
|
|
497
|
+
run_name = run_name,
|
|
498
|
+
disable_tqdm = disable_tqdm,
|
|
499
|
+
remove_unused_columns = remove_unused_columns,
|
|
500
|
+
label_names = label_names,
|
|
501
|
+
load_best_model_at_end = load_best_model_at_end,
|
|
502
|
+
metric_for_best_model = metric_for_best_model,
|
|
503
|
+
greater_is_better = greater_is_better,
|
|
504
|
+
ignore_data_skip = ignore_data_skip,
|
|
505
|
+
fsdp = fsdp,
|
|
506
|
+
fsdp_min_num_params = fsdp_min_num_params,
|
|
507
|
+
fsdp_config = fsdp_config,
|
|
508
|
+
fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
|
|
509
|
+
accelerator_config = accelerator_config,
|
|
510
|
+
parallelism_config = parallelism_config,
|
|
511
|
+
deepspeed = deepspeed,
|
|
512
|
+
label_smoothing_factor = label_smoothing_factor,
|
|
513
|
+
optim = optim,
|
|
514
|
+
optim_args = optim_args,
|
|
515
|
+
adafactor = adafactor,
|
|
516
|
+
group_by_length = group_by_length,
|
|
517
|
+
length_column_name = length_column_name,
|
|
518
|
+
report_to = report_to,
|
|
519
|
+
project = project,
|
|
520
|
+
trackio_space_id = trackio_space_id,
|
|
521
|
+
ddp_find_unused_parameters = ddp_find_unused_parameters,
|
|
522
|
+
ddp_bucket_cap_mb = ddp_bucket_cap_mb,
|
|
523
|
+
ddp_broadcast_buffers = ddp_broadcast_buffers,
|
|
524
|
+
dataloader_pin_memory = dataloader_pin_memory,
|
|
525
|
+
dataloader_persistent_workers = dataloader_persistent_workers,
|
|
526
|
+
skip_memory_metrics = skip_memory_metrics,
|
|
527
|
+
use_legacy_prediction_loop = use_legacy_prediction_loop,
|
|
528
|
+
push_to_hub = push_to_hub,
|
|
529
|
+
resume_from_checkpoint = resume_from_checkpoint,
|
|
530
|
+
hub_model_id = hub_model_id,
|
|
531
|
+
hub_strategy = hub_strategy,
|
|
532
|
+
hub_token = hub_token,
|
|
533
|
+
hub_private_repo = hub_private_repo,
|
|
534
|
+
hub_always_push = hub_always_push,
|
|
535
|
+
hub_revision = hub_revision,
|
|
536
|
+
gradient_checkpointing = gradient_checkpointing,
|
|
537
|
+
gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
|
|
538
|
+
include_inputs_for_metrics = include_inputs_for_metrics,
|
|
539
|
+
eval_do_concat_batches = eval_do_concat_batches,
|
|
540
|
+
fp16_backend = fp16_backend,
|
|
541
|
+
push_to_hub_model_id = push_to_hub_model_id,
|
|
542
|
+
push_to_hub_organization = push_to_hub_organization,
|
|
543
|
+
push_to_hub_token = push_to_hub_token,
|
|
544
|
+
mp_parameters = mp_parameters,
|
|
545
|
+
auto_find_batch_size = auto_find_batch_size,
|
|
546
|
+
full_determinism = full_determinism,
|
|
547
|
+
torchdynamo = torchdynamo,
|
|
548
|
+
ray_scope = ray_scope,
|
|
549
|
+
ddp_timeout = ddp_timeout,
|
|
550
|
+
torch_compile = torch_compile,
|
|
551
|
+
torch_compile_backend = torch_compile_backend,
|
|
552
|
+
torch_compile_mode = torch_compile_mode,
|
|
553
|
+
include_tokens_per_second = include_tokens_per_second,
|
|
554
|
+
include_num_input_tokens_seen = include_num_input_tokens_seen,
|
|
555
|
+
neftune_noise_alpha = neftune_noise_alpha,
|
|
556
|
+
optim_target_modules = optim_target_modules,
|
|
557
|
+
batch_eval_metrics = batch_eval_metrics,
|
|
558
|
+
eval_on_start = eval_on_start,
|
|
559
|
+
use_liger_kernel = use_liger_kernel,
|
|
560
|
+
liger_kernel_config = liger_kernel_config,
|
|
561
|
+
eval_use_gather_object = eval_use_gather_object,
|
|
562
|
+
average_tokens_across_devices = average_tokens_across_devices,
|
|
563
|
+
max_length = max_length,
|
|
564
|
+
max_prompt_length = max_prompt_length,
|
|
565
|
+
max_completion_length = max_completion_length,
|
|
566
|
+
beta = beta,
|
|
567
|
+
label_pad_token_id = label_pad_token_id,
|
|
568
|
+
padding_value = padding_value,
|
|
569
|
+
truncation_mode = truncation_mode,
|
|
570
|
+
disable_dropout = disable_dropout,
|
|
571
|
+
generate_during_eval = generate_during_eval,
|
|
572
|
+
is_encoder_decoder = is_encoder_decoder,
|
|
573
|
+
precompute_ref_log_probs = precompute_ref_log_probs,
|
|
574
|
+
model_init_kwargs = model_init_kwargs,
|
|
575
|
+
ref_model_init_kwargs = ref_model_init_kwargs,
|
|
576
|
+
dataset_num_proc = dataset_num_proc,
|
|
577
|
+
prompt_sample_size = prompt_sample_size,
|
|
578
|
+
min_density_ratio = min_density_ratio,
|
|
579
|
+
max_density_ratio = max_density_ratio,**kwargs)
|
|
580
|
+
self.vllm_sampling_params = vllm_sampling_params
|
|
581
|
+
self.unsloth_num_chunks = unsloth_num_chunks
|
|
582
|
+
self.max_seq_length = max_seq_length
|
|
583
|
+
pass
|
|
584
|
+
|
|
585
|
+
class _UnslothBCOTrainer(BaseTrainer):
|
|
586
|
+
r""""""
|
|
587
|
+
|
|
588
|
+
_tag_names = ["trl", "bco"]
|
|
589
|
+
_name = "BCO"
|
|
590
|
+
_paper = {
|
|
591
|
+
"title": "Binary Classifier Optimization for Large Language Model Alignment",
|
|
592
|
+
"id": "2404.04656",
|
|
593
|
+
# docstyle-ignore
|
|
594
|
+
"citation": textwrap.dedent("""\
|
|
595
|
+
@article{jung2024binary,
|
|
596
|
+
title = {{Binary Classifier Optimization for Large Language Model Alignment}},
|
|
597
|
+
author = {Seungjae Jung and Gunsoo Han and Daniel Wontae Nam and Kyoung{-}Woon On},
|
|
598
|
+
year = 2024,
|
|
599
|
+
eprint = {arXiv:2404.04656}
|
|
600
|
+
}"""),
|
|
601
|
+
}
|
|
602
|
+
|
|
603
|
+
def __init__(
|
|
604
|
+
self,
|
|
605
|
+
model: Union[PreTrainedModel, nn.Module, str] = None,
|
|
606
|
+
ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
|
|
607
|
+
args: BCOConfig = None,
|
|
608
|
+
train_dataset: Optional[Dataset] = None,
|
|
609
|
+
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
|
|
610
|
+
processing_class: Optional[
|
|
611
|
+
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
|
|
612
|
+
] = None,
|
|
613
|
+
data_collator: Optional[DataCollator] = None,
|
|
614
|
+
model_init: Optional[Callable[[], PreTrainedModel]] = None,
|
|
615
|
+
callbacks: Optional[list[TrainerCallback]] = None,
|
|
616
|
+
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
|
|
617
|
+
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
|
|
618
|
+
peft_config: Optional[dict] = None,
|
|
619
|
+
compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
|
|
620
|
+
model_adapter_name: Optional[str] = None,
|
|
621
|
+
ref_adapter_name: Optional[str] = None,
|
|
622
|
+
embedding_func: Optional[Callable] = None,
|
|
623
|
+
embedding_tokenizer: Optional[PreTrainedTokenizerBase] = None,
|
|
624
|
+
):
|
|
625
|
+
if not os.environ.get("TRL_EXPERIMENTAL_SILENCE"):
|
|
626
|
+
warnings.warn(
|
|
627
|
+
"This trainer will soon be moved to trl.experimental and is a candidate for removal. If you rely on "
|
|
628
|
+
"it and want it to remain, please share your comments here: "
|
|
629
|
+
"https://github.com/huggingface/trl/issues/4223. Silence this warning by setting environment variable "
|
|
630
|
+
"TRL_EXPERIMENTAL_SILENCE=1."
|
|
631
|
+
)
|
|
632
|
+
if embedding_func is not None and not (is_sklearn_available() and is_joblib_available()):
|
|
633
|
+
raise ImportError(
|
|
634
|
+
"BCOTrainer with UDM requires the scikit-learn and joblib libraries. Please install it with `pip install scikit-learn joblib`."
|
|
635
|
+
)
|
|
636
|
+
|
|
637
|
+
if type(args) is TrainingArguments:
|
|
638
|
+
raise ValueError("Please use `BCOConfig` instead `TrainingArguments`.")
|
|
639
|
+
|
|
640
|
+
if not isinstance(model, str) and model is not None and ref_model is model:
|
|
641
|
+
raise ValueError(
|
|
642
|
+
"`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
|
|
643
|
+
"same as `model`, you must mass a copy of it, or `None` if you use peft."
|
|
644
|
+
)
|
|
645
|
+
|
|
646
|
+
if args.model_init_kwargs is None:
|
|
647
|
+
model_init_kwargs = {}
|
|
648
|
+
elif not isinstance(model, str):
|
|
649
|
+
raise ValueError("You passed model_kwargs to the BCOTrainer. But your model is already instantiated.")
|
|
650
|
+
else:
|
|
651
|
+
model_init_kwargs = args.model_init_kwargs
|
|
652
|
+
dtype = model_init_kwargs.get("dtype")
|
|
653
|
+
if dtype is not None:
|
|
654
|
+
# Convert to `torch.dtype` if an str is passed
|
|
655
|
+
if isinstance(dtype, str) and dtype != "auto":
|
|
656
|
+
dtype = getattr(torch, dtype)
|
|
657
|
+
if dtype != "auto" and not isinstance(dtype, torch.dtype):
|
|
658
|
+
raise ValueError(
|
|
659
|
+
f"Invalid `dtype` passed to the BCOConfig. Expected a string with either `torch.dtype` or 'auto', but got {dtype}."
|
|
660
|
+
)
|
|
661
|
+
model_init_kwargs["dtype"] = dtype
|
|
662
|
+
|
|
663
|
+
if args.ref_model_init_kwargs is None:
|
|
664
|
+
ref_model_init_kwargs = {}
|
|
665
|
+
elif not isinstance(ref_model, str):
|
|
666
|
+
raise ValueError(
|
|
667
|
+
"You passed ref_model_kwargs to the BCOTrainer. But your ref_model is already instantiated."
|
|
668
|
+
)
|
|
669
|
+
else:
|
|
670
|
+
ref_model_init_kwargs = args.ref_model_init_kwargs
|
|
671
|
+
dtype = ref_model_init_kwargs.get("dtype")
|
|
672
|
+
if dtype is not None:
|
|
673
|
+
# Convert to `torch.dtype` if an str is passed
|
|
674
|
+
if isinstance(dtype, str) and dtype != "auto":
|
|
675
|
+
dtype = getattr(torch, dtype)
|
|
676
|
+
if dtype != "auto" and not isinstance(dtype, torch.dtype):
|
|
677
|
+
raise ValueError(
|
|
678
|
+
f"Invalid `dtype` passed to the BCOConfig. Expected a string with either `torch.dtype` or 'auto', but got {dtype}."
|
|
679
|
+
)
|
|
680
|
+
ref_model_init_kwargs["dtype"] = dtype
|
|
681
|
+
|
|
682
|
+
if isinstance(model, str):
|
|
683
|
+
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
|
|
684
|
+
|
|
685
|
+
if isinstance(ref_model, str):
|
|
686
|
+
ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)
|
|
687
|
+
|
|
688
|
+
# Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
|
|
689
|
+
# has been called in order to properly call autocast if needed.
|
|
690
|
+
self._peft_has_been_casted_to_bf16 = False
|
|
691
|
+
|
|
692
|
+
if not is_peft_available() and peft_config is not None:
|
|
693
|
+
raise ValueError(
|
|
694
|
+
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it with `pip install peft` to use the PEFT models"
|
|
695
|
+
)
|
|
696
|
+
elif is_peft_available() and peft_config is not None:
|
|
697
|
+
# if model is a peft model and we have a peft_config, we merge and unload it first
|
|
698
|
+
if isinstance(model, PeftModel):
|
|
699
|
+
model = model.merge_and_unload()
|
|
700
|
+
|
|
701
|
+
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
|
|
702
|
+
_support_gc_kwargs = hasattr(
|
|
703
|
+
args, "gradient_checkpointing_kwargs"
|
|
704
|
+
) and "gradient_checkpointing_kwargs" in list(
|
|
705
|
+
inspect.signature(prepare_model_for_kbit_training).parameters
|
|
706
|
+
)
|
|
707
|
+
|
|
708
|
+
prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
|
|
709
|
+
|
|
710
|
+
if _support_gc_kwargs:
|
|
711
|
+
prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
|
|
712
|
+
|
|
713
|
+
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
|
|
714
|
+
elif args.gradient_checkpointing:
|
|
715
|
+
# For backward compatibility with older versions of transformers
|
|
716
|
+
if hasattr(model, "enable_input_require_grads"):
|
|
717
|
+
model.enable_input_require_grads()
|
|
718
|
+
else:
|
|
719
|
+
|
|
720
|
+
def make_inputs_require_grad(module, input, output):
|
|
721
|
+
output.requires_grad_(True)
|
|
722
|
+
|
|
723
|
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
|
724
|
+
|
|
725
|
+
# get peft model with the given config
|
|
726
|
+
model = model
|
|
727
|
+
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
|
|
728
|
+
peft_module_casting_to_bf16(model)
|
|
729
|
+
# If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
|
|
730
|
+
self._peft_has_been_casted_to_bf16 = True
|
|
731
|
+
|
|
732
|
+
# For models that use gradient_checkpointing, we need to attach a hook that enables input
|
|
733
|
+
# to explicitly have `requires_grad=True`, otherwise training will either silently
|
|
734
|
+
# fail or completely fail.
|
|
735
|
+
elif args.gradient_checkpointing:
|
|
736
|
+
# For backward compatibility with older versions of transformers
|
|
737
|
+
if hasattr(model, "enable_input_require_grads"):
|
|
738
|
+
model.enable_input_require_grads()
|
|
739
|
+
else:
|
|
740
|
+
|
|
741
|
+
def make_inputs_require_grad(module, input, output):
|
|
742
|
+
output.requires_grad_(True)
|
|
743
|
+
|
|
744
|
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
|
745
|
+
|
|
746
|
+
if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
|
|
747
|
+
raise ValueError(
|
|
748
|
+
"`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
|
|
749
|
+
" Please install `wandb` or `comet-ml` to resolve."
|
|
750
|
+
)
|
|
751
|
+
|
|
752
|
+
if model is not None:
|
|
753
|
+
self.is_encoder_decoder = model.config.is_encoder_decoder
|
|
754
|
+
elif args.is_encoder_decoder is None:
|
|
755
|
+
raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
|
|
756
|
+
else:
|
|
757
|
+
self.is_encoder_decoder = args.is_encoder_decoder
|
|
758
|
+
|
|
759
|
+
self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
|
|
760
|
+
self.model_adapter_name = model_adapter_name
|
|
761
|
+
self.ref_adapter_name = ref_adapter_name
|
|
762
|
+
|
|
763
|
+
if ref_model:
|
|
764
|
+
self.ref_model = ref_model
|
|
765
|
+
elif self.is_peft_model or args.precompute_ref_log_probs:
|
|
766
|
+
# The `model` with adapters turned off will be used as the reference model
|
|
767
|
+
self.ref_model = None
|
|
768
|
+
else:
|
|
769
|
+
self.ref_model = create_reference_model(model)
|
|
770
|
+
|
|
771
|
+
if processing_class is None:
|
|
772
|
+
raise ValueError(
|
|
773
|
+
"max_length or a processing_class must be specified when using the default DPODataCollatorWithPadding"
|
|
774
|
+
)
|
|
775
|
+
if args.max_length is None:
|
|
776
|
+
logger.warning(
|
|
777
|
+
"When using DPODataCollatorWithPadding, you should set `max_length` in the `BCOConfig`. "
|
|
778
|
+
"It will be set to `512` by default, but you should do it yourself in the future.",
|
|
779
|
+
)
|
|
780
|
+
max_length = 512
|
|
781
|
+
if args.max_length is not None:
|
|
782
|
+
max_length = args.max_length
|
|
783
|
+
|
|
784
|
+
if args.max_prompt_length is None:
|
|
785
|
+
logger.warning(
|
|
786
|
+
"When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the `BCOConfig`. "
|
|
787
|
+
"It will be set to `128` by default, but you should do it yourself in the future.",
|
|
788
|
+
)
|
|
789
|
+
max_prompt_length = 128
|
|
790
|
+
if args.max_prompt_length is not None:
|
|
791
|
+
max_prompt_length = args.max_prompt_length
|
|
792
|
+
|
|
793
|
+
max_completion_length = None
|
|
794
|
+
if args.max_completion_length is None and self.is_encoder_decoder:
|
|
795
|
+
logger.warning(
|
|
796
|
+
"When using DPODataCollatorWithPadding with an encoder decoder architecture, you should set `max_completion_length` in the BCOTrainer's init"
|
|
797
|
+
" it will be set to `128` by default, but you should do it yourself in the future.",
|
|
798
|
+
)
|
|
799
|
+
max_completion_length = 128
|
|
800
|
+
if args.max_completion_length is not None and self.is_encoder_decoder:
|
|
801
|
+
max_completion_length = args.max_completion_length
|
|
802
|
+
|
|
803
|
+
if data_collator is None:
|
|
804
|
+
data_collator = DPODataCollatorWithPadding(
|
|
805
|
+
pad_token_id=processing_class.pad_token_id,
|
|
806
|
+
label_pad_token_id=args.label_pad_token_id,
|
|
807
|
+
is_encoder_decoder=self.is_encoder_decoder,
|
|
808
|
+
)
|
|
809
|
+
|
|
810
|
+
if args.remove_unused_columns:
|
|
811
|
+
args.remove_unused_columns = False
|
|
812
|
+
# warn users
|
|
813
|
+
logger.warning(
|
|
814
|
+
"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your BCOConfig"
|
|
815
|
+
" we have set it for you, but you should do it yourself in the future.",
|
|
816
|
+
)
|
|
817
|
+
|
|
818
|
+
self.use_dpo_data_collator = True
|
|
819
|
+
else:
|
|
820
|
+
self.use_dpo_data_collator = False
|
|
821
|
+
|
|
822
|
+
# Disable dropout in the model and reference model
|
|
823
|
+
if args.disable_dropout:
|
|
824
|
+
disable_dropout_in_model(model)
|
|
825
|
+
if self.ref_model is not None:
|
|
826
|
+
disable_dropout_in_model(self.ref_model)
|
|
827
|
+
|
|
828
|
+
self.max_length = max_length
|
|
829
|
+
self.generate_during_eval = args.generate_during_eval
|
|
830
|
+
self.label_pad_token_id = args.label_pad_token_id
|
|
831
|
+
self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
|
|
832
|
+
self.max_prompt_length = max_prompt_length
|
|
833
|
+
self.truncation_mode = args.truncation_mode
|
|
834
|
+
self.max_completion_length = max_completion_length
|
|
835
|
+
self.precompute_ref_log_probs = args.precompute_ref_log_probs
|
|
836
|
+
|
|
837
|
+
# Since ref_logs are precomputed on the first call to get_train/eval_dataloader
|
|
838
|
+
# keep track of first called to avoid computation of future calls
|
|
839
|
+
self._precomputed_train_ref_log_probs = False
|
|
840
|
+
self._precomputed_eval_ref_log_probs = False
|
|
841
|
+
|
|
842
|
+
# metric
|
|
843
|
+
self._stored_metrics = defaultdict(lambda: defaultdict(list))
|
|
844
|
+
|
|
845
|
+
# BCO parameter
|
|
846
|
+
self.beta = args.beta
|
|
847
|
+
self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
|
|
848
|
+
self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
|
|
849
|
+
if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
|
|
850
|
+
logger.warning(
|
|
851
|
+
"You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
|
|
852
|
+
"`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
|
|
853
|
+
"greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
|
|
854
|
+
"loss.",
|
|
855
|
+
)
|
|
856
|
+
|
|
857
|
+
# Underlying Distribution Matching argument
|
|
858
|
+
self.embedding_func = embedding_func
|
|
859
|
+
self.embedding_tokenizer = embedding_tokenizer
|
|
860
|
+
|
|
861
|
+
# The trainer estimates the number of FLOPs [floating-point operations] using the number of elements in the
|
|
862
|
+
# input tensor associated with the key "input_ids". However, in BCO, the sampled data does not include the
|
|
863
|
+
# "input_ids" key. Instead, the available keys are "prompt_input_ids" and "completion_input_ids". As a result,
|
|
864
|
+
# the trainer issues the warning: "Could not estimate the number of tokens of the input, floating-point
|
|
865
|
+
# operations will not be computed." To suppress this warning, we set the "estimate_tokens" key in the model's
|
|
866
|
+
# "warnings_issued" dictionary to True. This acts as a flag to indicate that the warning has already been
|
|
867
|
+
# issued.
|
|
868
|
+
model.warnings_issued["estimate_tokens"] = True
|
|
869
|
+
|
|
870
|
+
with PartialState().main_process_first():
|
|
871
|
+
# Extract the prompt if needed
|
|
872
|
+
train_dataset = train_dataset.map(
|
|
873
|
+
maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from train dataset"
|
|
874
|
+
)
|
|
875
|
+
# Unpair the dataset if needed
|
|
876
|
+
train_dataset = maybe_unpair_preference_dataset(
|
|
877
|
+
train_dataset, args.dataset_num_proc, desc="Unpairing train dataset"
|
|
878
|
+
)
|
|
879
|
+
# Apply the chat template if needed
|
|
880
|
+
train_dataset = train_dataset.map(
|
|
881
|
+
maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc
|
|
882
|
+
)
|
|
883
|
+
if eval_dataset is not None:
|
|
884
|
+
# Extract the prompt if needed
|
|
885
|
+
eval_dataset = eval_dataset.map(
|
|
886
|
+
maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from eval dataset"
|
|
887
|
+
)
|
|
888
|
+
# Unpair the dataset if needed
|
|
889
|
+
eval_dataset = maybe_unpair_preference_dataset(
|
|
890
|
+
eval_dataset, args.dataset_num_proc, desc="Unpairing eval dataset"
|
|
891
|
+
)
|
|
892
|
+
eval_dataset = eval_dataset.map(
|
|
893
|
+
maybe_apply_chat_template,
|
|
894
|
+
fn_kwargs={"tokenizer": processing_class},
|
|
895
|
+
num_proc=args.dataset_num_proc,
|
|
896
|
+
)
|
|
897
|
+
|
|
898
|
+
# Tokenize and prepare the training datasets
|
|
899
|
+
train_dataset = train_dataset.map(
|
|
900
|
+
_tokenize,
|
|
901
|
+
batched=True,
|
|
902
|
+
fn_kwargs={"tokenizer": processing_class, "embedding_tokenizer": self.embedding_tokenizer},
|
|
903
|
+
num_proc=args.dataset_num_proc,
|
|
904
|
+
desc="Tokenizing train dataset",
|
|
905
|
+
)
|
|
906
|
+
|
|
907
|
+
# Prepare the datasets
|
|
908
|
+
fn_kwargs = {
|
|
909
|
+
"prefix": "",
|
|
910
|
+
"is_encoder_decoder": self.is_encoder_decoder,
|
|
911
|
+
"tokenizer": processing_class,
|
|
912
|
+
"max_length": self.max_length,
|
|
913
|
+
"truncation_mode": self.truncation_mode,
|
|
914
|
+
"label_pad_token_id": self.label_pad_token_id,
|
|
915
|
+
"max_prompt_length": self.max_prompt_length,
|
|
916
|
+
"max_completion_length": self.max_completion_length,
|
|
917
|
+
}
|
|
918
|
+
train_dataset = train_dataset.map(
|
|
919
|
+
_process_tokens,
|
|
920
|
+
fn_kwargs=fn_kwargs,
|
|
921
|
+
num_proc=args.dataset_num_proc,
|
|
922
|
+
desc="Processing tokenized train dataset",
|
|
923
|
+
)
|
|
924
|
+
|
|
925
|
+
if eval_dataset is not None:
|
|
926
|
+
# Tokenize
|
|
927
|
+
eval_dataset = eval_dataset.map(
|
|
928
|
+
_tokenize,
|
|
929
|
+
fn_kwargs={"tokenizer": processing_class, "embedding_tokenizer": self.embedding_tokenizer},
|
|
930
|
+
batched=True,
|
|
931
|
+
num_proc=args.dataset_num_proc,
|
|
932
|
+
desc="Tokenizing eval dataset",
|
|
933
|
+
)
|
|
934
|
+
|
|
935
|
+
# Process
|
|
936
|
+
fn_kwargs = {
|
|
937
|
+
"prefix": "",
|
|
938
|
+
"is_encoder_decoder": self.is_encoder_decoder,
|
|
939
|
+
"tokenizer": processing_class,
|
|
940
|
+
"max_length": self.max_length,
|
|
941
|
+
"truncation_mode": self.truncation_mode,
|
|
942
|
+
"label_pad_token_id": self.label_pad_token_id,
|
|
943
|
+
"max_prompt_length": self.max_prompt_length,
|
|
944
|
+
"max_completion_length": self.max_completion_length,
|
|
945
|
+
}
|
|
946
|
+
eval_dataset = eval_dataset.map(
|
|
947
|
+
_process_tokens,
|
|
948
|
+
fn_kwargs=fn_kwargs,
|
|
949
|
+
num_proc=args.dataset_num_proc,
|
|
950
|
+
desc="Processing tokenized eval dataset",
|
|
951
|
+
)
|
|
952
|
+
|
|
953
|
+
desirable = train_dataset.filter(
|
|
954
|
+
lambda x: x["label"], num_proc=args.dataset_num_proc, desc="Filtering desirable examples"
|
|
955
|
+
)
|
|
956
|
+
undesirable = train_dataset.filter(
|
|
957
|
+
lambda x: not x["label"], num_proc=args.dataset_num_proc, desc="Filtering undesirable examples"
|
|
958
|
+
)
|
|
959
|
+
|
|
960
|
+
super().__init__(
|
|
961
|
+
model=model,
|
|
962
|
+
args=args,
|
|
963
|
+
data_collator=data_collator,
|
|
964
|
+
train_dataset=train_dataset,
|
|
965
|
+
eval_dataset=eval_dataset,
|
|
966
|
+
processing_class=processing_class,
|
|
967
|
+
model_init=model_init,
|
|
968
|
+
compute_metrics=compute_metrics,
|
|
969
|
+
callbacks=callbacks,
|
|
970
|
+
optimizers=optimizers,
|
|
971
|
+
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
|
|
972
|
+
)
|
|
973
|
+
|
|
974
|
+
# Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
|
|
975
|
+
# model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
|
|
976
|
+
# self.model_accepts_loss_kwargs to False to enable scaling.
|
|
977
|
+
self.model_accepts_loss_kwargs = False
|
|
978
|
+
|
|
979
|
+
# Add tags for models that have been loaded with the correct transformers version
|
|
980
|
+
if hasattr(self.model, "add_model_tags"):
|
|
981
|
+
self.model.add_model_tags(self._tag_names)
|
|
982
|
+
|
|
983
|
+
if not hasattr(self, "accelerator"):
|
|
984
|
+
raise AttributeError(
|
|
985
|
+
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
|
|
986
|
+
)
|
|
987
|
+
|
|
988
|
+
# Deepspeed Zero-3 does not support precompute_ref_log_probs
|
|
989
|
+
if self.is_deepspeed_enabled:
|
|
990
|
+
if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs:
|
|
991
|
+
raise ValueError(
|
|
992
|
+
"You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`."
|
|
993
|
+
)
|
|
994
|
+
|
|
995
|
+
if self.ref_model is None:
|
|
996
|
+
if not (self.is_peft_model or self.precompute_ref_log_probs):
|
|
997
|
+
raise ValueError(
|
|
998
|
+
"No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`"
|
|
999
|
+
)
|
|
1000
|
+
else:
|
|
1001
|
+
if self.is_deepspeed_enabled:
|
|
1002
|
+
self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator)
|
|
1003
|
+
else:
|
|
1004
|
+
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
|
|
1005
|
+
|
|
1006
|
+
self.running = RunningMoments(accelerator=self.accelerator)
|
|
1007
|
+
|
|
1008
|
+
if self.embedding_func is None or args.resume_from_checkpoint:
|
|
1009
|
+
return
|
|
1010
|
+
|
|
1011
|
+
chosen_embeddings = self._get_sample_prompt_embeddings(desirable, sample_size=self.args.prompt_sample_size)
|
|
1012
|
+
rejected_embeddings = self._get_sample_prompt_embeddings(undesirable, sample_size=self.args.prompt_sample_size)
|
|
1013
|
+
|
|
1014
|
+
embeddings = torch.cat((chosen_embeddings, rejected_embeddings), dim=0)
|
|
1015
|
+
labels = torch.cat(
|
|
1016
|
+
(torch.ones_like(chosen_embeddings[:, 0]), torch.zeros_like(rejected_embeddings[:, 0])), dim=0
|
|
1017
|
+
)
|
|
1018
|
+
|
|
1019
|
+
self.clf = LogisticRegression(class_weight="balanced").fit(
|
|
1020
|
+
embeddings.cpu().float().numpy(), labels.cpu().numpy()
|
|
1021
|
+
)
|
|
1022
|
+
chosen_mean = self.clf.score(
|
|
1023
|
+
chosen_embeddings.cpu().float().numpy(), torch.ones_like(chosen_embeddings[:, 0]).cpu().numpy()
|
|
1024
|
+
)
|
|
1025
|
+
rejected_mean = self.clf.score(
|
|
1026
|
+
rejected_embeddings.cpu().float().numpy(), torch.zeros_like(rejected_embeddings[:, 0]).cpu().numpy()
|
|
1027
|
+
)
|
|
1028
|
+
logger.info(f"UDM classifier training scores: chosen: {chosen_mean}, rejected: {rejected_mean}")
|
|
1029
|
+
|
|
1030
|
+
@property
def match_underlying_distribution(self):
    return self.embedding_func is not None and self.embedding_tokenizer is not None
|
|
1033
|
+
|
|
1034
|
+
def _get_chosen_prob(self, prompt_embeddings: torch.FloatTensor) -> torch.FloatTensor:
    """
    Calculates the probability that a given prompt embedding comes from the desirable dataset. The
    probability is computed on each process and then ensembled across processes.
    """
|
|
1039
|
+
dtype = prompt_embeddings.dtype
|
|
1040
|
+
device = prompt_embeddings.device
|
|
1041
|
+
rank = self.accelerator.process_index
|
|
1042
|
+
|
|
1043
|
+
padded_prompt_embeddings = self.accelerator.pad_across_processes(
|
|
1044
|
+
prompt_embeddings, pad_index=self.embedding_tokenizer.pad_token_id
|
|
1045
|
+
)
|
|
1046
|
+
sample_size = padded_prompt_embeddings.shape[0]
|
|
1047
|
+
nonzero = padded_prompt_embeddings.mean(dim=1) != self.embedding_tokenizer.pad_token_id
|
|
1048
|
+
prompt_embeddings = self.accelerator.gather(padded_prompt_embeddings)
|
|
1049
|
+
|
|
1050
|
+
# cannot predict for all empty values
|
|
1051
|
+
if prompt_embeddings.shape[0] == 0:
|
|
1052
|
+
return torch.tensor([], device=device, dtype=dtype)
|
|
1053
|
+
|
|
1054
|
+
prob = self.clf.predict_proba(prompt_embeddings.cpu().float().numpy())[:, 1]
|
|
1055
|
+
prob = torch.as_tensor(prob, dtype=dtype, device=device)
|
|
1056
|
+
prob = self.accelerator.reduce(prob, reduction="mean")
|
|
1057
|
+
|
|
1058
|
+
prob = prob[sample_size * rank : sample_size * (rank + 1)]
|
|
1059
|
+
prob = prob[nonzero]
|
|
1060
|
+
|
|
1061
|
+
return prob
|
|
1062
|
+
|
|
1063
|
+
def _vectorize_prompt(self, input_ids: torch.LongTensor, attention_mask: torch.LongTensor) -> torch.FloatTensor:
    """
    Replaces `processing_class.pad_token_id` with `embedding_tokenizer.pad_token_id` and applies `self.embedding_func`.
    """
|
|
1067
|
+
input_ids = torch.where(
|
|
1068
|
+
input_ids == self.processing_class.pad_token_id,
|
|
1069
|
+
self.embedding_tokenizer.pad_token_id,
|
|
1070
|
+
input_ids,
|
|
1071
|
+
)
|
|
1072
|
+
|
|
1073
|
+
with torch.no_grad():
|
|
1074
|
+
embeddings = self.embedding_func(
|
|
1075
|
+
input_ids=input_ids,
|
|
1076
|
+
attention_mask=attention_mask,
|
|
1077
|
+
)
|
|
1078
|
+
|
|
1079
|
+
return embeddings
|
|
1080
|
+
|
|
1081
|
+
def _get_prompt_embeddings(
|
|
1082
|
+
self, batch: dict[str, Union[list, torch.LongTensor]]
|
|
1083
|
+
) -> tuple[torch.FloatTensor, torch.FloatTensor]:
|
|
1084
|
+
"""Extract embeddings from frozen embedding model"""
|
|
1085
|
+
|
|
1086
|
+
if not self.match_underlying_distribution:
|
|
1087
|
+
return None, None
|
|
1088
|
+
|
|
1089
|
+
embeddings = self._vectorize_prompt(
|
|
1090
|
+
input_ids=batch["embedding_input_ids"],
|
|
1091
|
+
attention_mask=batch["embedding_attention_mask"],
|
|
1092
|
+
)
|
|
1093
|
+
|
|
1094
|
+
labels = torch.tensor(batch["label"], dtype=torch.bool, device=embeddings.device)
|
|
1095
|
+
chosen_idx = torch.where(labels)[0]
|
|
1096
|
+
rejected_idx = torch.where(~labels)[0]
|
|
1097
|
+
|
|
1098
|
+
chosen_embeddings = embeddings[chosen_idx, ...]
|
|
1099
|
+
rejected_embeddings = embeddings[rejected_idx, ...]
|
|
1100
|
+
|
|
1101
|
+
return (chosen_embeddings, rejected_embeddings)
|
|
1102
|
+
|
|
1103
|
+
def _get_sample_prompt_embeddings(self, dataset: Dataset, sample_size: int = 512) -> torch.FloatTensor:
|
|
1104
|
+
"""
|
|
1105
|
+
Sample instances from dataset and get prompt embeddings. Used for density ratio classifier training.
|
|
1106
|
+
"""
|
|
1107
|
+
n_samples = min(len(dataset), sample_size)
|
|
1108
|
+
rand_indices = np.random.choice(len(dataset), size=(n_samples,))
|
|
1109
|
+
|
|
1110
|
+
embedding_dataset = dataset.select(rand_indices)
|
|
1111
|
+
|
|
1112
|
+
dataloader_params = {
|
|
1113
|
+
"batch_size": self.args.per_device_train_batch_size,
|
|
1114
|
+
"collate_fn": self.data_collator,
|
|
1115
|
+
"num_workers": self.args.dataloader_num_workers,
|
|
1116
|
+
"pin_memory": self.args.dataloader_pin_memory,
|
|
1117
|
+
"shuffle": False,
|
|
1118
|
+
}
|
|
1119
|
+
|
|
1120
|
+
# prepare dataloader
|
|
1121
|
+
data_loader = self.accelerator.prepare(DataLoader(embedding_dataset, **dataloader_params))
|
|
1122
|
+
|
|
1123
|
+
with torch.no_grad():
|
|
1124
|
+
all_embeddings = torch.empty(0)
|
|
1125
|
+
for padded_batch in tqdm(iterable=data_loader, desc="Building sample prompt embeddings"):
|
|
1126
|
+
embeddings = self._vectorize_prompt(
|
|
1127
|
+
input_ids=padded_batch["embedding_input_ids"],
|
|
1128
|
+
attention_mask=padded_batch["embedding_attention_mask"],
|
|
1129
|
+
)
|
|
1130
|
+
embeddings = self.accelerator.gather_for_metrics(embeddings)
|
|
1131
|
+
all_embeddings = torch.cat((all_embeddings, embeddings.cpu()))
|
|
1132
|
+
|
|
1133
|
+
return all_embeddings
|
|
1134
|
+
|
|
1135
|
+
def _save_optimizer_and_scheduler(self, output_dir):
|
|
1136
|
+
output_dir = output_dir if output_dir is not None else self.args.output_dir
|
|
1137
|
+
super()._save_optimizer_and_scheduler(output_dir)
|
|
1138
|
+
|
|
1139
|
+
if self.accelerator.is_main_process:
|
|
1140
|
+
# When saving optimizer and scheduler to checkpoint, save also the running delta object.
|
|
1141
|
+
self.running.save_to_json(os.path.join(output_dir, RUNNING_NAME))
|
|
1142
|
+
|
|
1143
|
+
if self.match_underlying_distribution:
|
|
1144
|
+
joblib.dump(self.clf, os.path.join(output_dir, CLF_NAME), compress=True)
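# A minimal standalone sketch (hypothetical file name and toy classifier) of the joblib round-trip used
# above to persist the UDM classifier alongside the optimizer and scheduler.
def _joblib_roundtrip_sketch(output_dir):
    import os
    import joblib
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    clf = LogisticRegression().fit(np.random.randn(8, 4), np.array([0, 1] * 4))
    path = os.path.join(output_dir, "udm_clf.joblib")   # hypothetical name; the trainer uses CLF_NAME
    joblib.dump(clf, path, compress=True)
    return joblib.load(path)                            # restored the same way `_load_optimizer_and_scheduler` does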
|
|
1145
|
+
|
|
1146
|
+
def _load_optimizer_and_scheduler(self, checkpoint):
|
|
1147
|
+
if checkpoint is None:
|
|
1148
|
+
logger.warning_once(f"Missing Checkpoint {checkpoint}")
|
|
1149
|
+
return
|
|
1150
|
+
|
|
1151
|
+
super()._load_optimizer_and_scheduler(checkpoint)
|
|
1152
|
+
|
|
1153
|
+
# when loading optimizer and scheduler from checkpoint, also load the running delta object.
|
|
1154
|
+
running_file = os.path.join(checkpoint, RUNNING_NAME)
|
|
1155
|
+
if os.path.isfile(running_file):
|
|
1156
|
+
self.running = RunningMoments.load_from_json(self.accelerator, running_file)
|
|
1157
|
+
|
|
1158
|
+
if self.match_underlying_distribution:
|
|
1159
|
+
clf_file = os.path.join(checkpoint, CLF_NAME)
|
|
1160
|
+
if os.path.isfile(clf_file):
|
|
1161
|
+
self.clf = joblib.load(clf_file)
|
|
1162
|
+
|
|
1163
|
+
@contextmanager
|
|
1164
|
+
def null_ref_context(self):
|
|
1165
|
+
"""Context manager for handling null reference model (that is, peft adapter manipulation)."""
|
|
1166
|
+
with (
|
|
1167
|
+
self.accelerator.unwrap_model(self.model).disable_adapter()
|
|
1168
|
+
if self.is_peft_model and not self.ref_adapter_name
|
|
1169
|
+
else nullcontext()
|
|
1170
|
+
):
|
|
1171
|
+
if self.ref_adapter_name:
|
|
1172
|
+
self.model.set_adapter(self.ref_adapter_name)
|
|
1173
|
+
yield
|
|
1174
|
+
if self.ref_adapter_name:
|
|
1175
|
+
self.model.set_adapter(self.model_adapter_name or "default")
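# A minimal standalone sketch (hypothetical flag and dummy context manager) of the pattern `null_ref_context`
# relies on: disable the PEFT adapter when there is one, otherwise fall back to `nullcontext()`.
def _null_context_sketch(has_adapter: bool):
    from contextlib import contextmanager, nullcontext

    @contextmanager
    def disable_adapter():   # stand-in for `unwrap_model(self.model).disable_adapter()`
        print("adapter disabled")
        yield
        print("adapter restored")

    with disable_adapter() if has_adapter else nullcontext():
        pass   # the "reference" forward pass would run here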
|
|
1176
|
+
|
|
1177
|
+
def get_train_dataloader(self) -> DataLoader:
|
|
1178
|
+
"""
|
|
1179
|
+
Returns the training [`~torch.utils.data.DataLoader`].
|
|
1180
|
+
|
|
1181
|
+
Subclass of transformers.src.transformers.trainer.get_train_dataloader to precompute `ref_log_probs`.
|
|
1182
|
+
"""
|
|
1183
|
+
|
|
1184
|
+
if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs:
|
|
1185
|
+
dataloader_params = {
|
|
1186
|
+
"batch_size": self.args.per_device_train_batch_size,
|
|
1187
|
+
"collate_fn": self.data_collator,
|
|
1188
|
+
"num_workers": self.args.dataloader_num_workers,
|
|
1189
|
+
"pin_memory": self.args.dataloader_pin_memory,
|
|
1190
|
+
"shuffle": False,
|
|
1191
|
+
}
|
|
1192
|
+
|
|
1193
|
+
# prepare dataloader
|
|
1194
|
+
data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params))
|
|
1195
|
+
reference_completion_logps = []
|
|
1196
|
+
|
|
1197
|
+
for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"):
|
|
1198
|
+
reference_completion_logp = self.compute_reference_log_probs(padded_batch)
|
|
1199
|
+
|
|
1200
|
+
reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp)
|
|
1201
|
+
reference_completion_logps.append(reference_completion_logp.cpu())
|
|
1202
|
+
|
|
1203
|
+
self.train_dataset = self.train_dataset.add_column(
|
|
1204
|
+
name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy()
|
|
1205
|
+
)
|
|
1206
|
+
|
|
1207
|
+
self._precomputed_train_ref_log_probs = True
|
|
1208
|
+
|
|
1209
|
+
return super().get_train_dataloader()
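# A minimal standalone sketch (toy dataset and made-up values) of the precompute-then-attach pattern used
# above: compute a per-example statistic once and store it as a new `reference_logps` column.
def _precompute_column_sketch():
    import torch
    from datasets import Dataset

    toy = Dataset.from_dict({"completion_input_ids": [[1, 2], [3, 4], [5, 6]]})
    ref_logps = torch.tensor([-1.2, -0.7, -2.3])   # stand-in for compute_reference_log_probs over batches
    toy = toy.add_column(name="reference_logps", column=ref_logps.float().numpy())
    return toy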
|
|
1210
|
+
|
|
1211
|
+
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
|
|
1212
|
+
"""
|
|
1213
|
+
Returns the evaluation [`~torch.utils.data.DataLoader`].
|
|
1214
|
+
|
|
1215
|
+
Subclass of transformers.src.transformers.trainer.get_eval_dataloader to precompute `ref_log_probs`.
|
|
1216
|
+
|
|
1217
|
+
Args:
|
|
1218
|
+
eval_dataset (`torch.utils.data.Dataset`, *optional*):
|
|
1219
|
+
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
|
|
1220
|
+
by the `model.forward()` method are automatically removed. It must implement `__len__`.
|
|
1221
|
+
"""
|
|
1222
|
+
if eval_dataset is None and self.eval_dataset is None:
|
|
1223
|
+
raise ValueError("Trainer: evaluation requires an eval_dataset.")
|
|
1224
|
+
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
|
|
1225
|
+
|
|
1226
|
+
if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs:
|
|
1227
|
+
dataloader_params = {
|
|
1228
|
+
"batch_size": self.args.per_device_eval_batch_size,
|
|
1229
|
+
"collate_fn": self.data_collator,
|
|
1230
|
+
"num_workers": self.args.dataloader_num_workers,
|
|
1231
|
+
"pin_memory": self.args.dataloader_pin_memory,
|
|
1232
|
+
"shuffle": False,
|
|
1233
|
+
}
|
|
1234
|
+
|
|
1235
|
+
# prepare dataloader
|
|
1236
|
+
data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params))
|
|
1237
|
+
|
|
1238
|
+
reference_completion_logps = []
|
|
1239
|
+
|
|
1240
|
+
for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"):
|
|
1241
|
+
reference_completion_logp = self.compute_reference_log_probs(padded_batch)
|
|
1242
|
+
|
|
1243
|
+
reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp)
|
|
1244
|
+
reference_completion_logps.append(reference_completion_logp.cpu())
|
|
1245
|
+
|
|
1246
|
+
eval_dataset = eval_dataset.add_column(
|
|
1247
|
+
name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy()
|
|
1248
|
+
)
|
|
1249
|
+
|
|
1250
|
+
# Save calculated reference_chosen_logps and reference_rejected_logps to the eval_dataset for subsequent runs
|
|
1251
|
+
if self.eval_dataset is not None:
|
|
1252
|
+
self.eval_dataset = eval_dataset
|
|
1253
|
+
self._precomputed_eval_ref_log_probs = True
|
|
1254
|
+
|
|
1255
|
+
return super().get_eval_dataloader(eval_dataset=eval_dataset)
|
|
1256
|
+
|
|
1257
|
+
def compute_reference_log_probs(self, padded_batch: dict) -> dict:
|
|
1258
|
+
"""Computes log probabilities of the reference model for a single padded batch of a BCO specific dataset."""
|
|
1259
|
+
with torch.no_grad():
|
|
1260
|
+
if self.ref_model is None:
|
|
1261
|
+
with self.null_ref_context():
|
|
1262
|
+
if self.is_encoder_decoder:
|
|
1263
|
+
completion_logits = self.model(
|
|
1264
|
+
padded_batch["prompt_input_ids"],
|
|
1265
|
+
attention_mask=padded_batch["prompt_attention_mask"],
|
|
1266
|
+
decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
|
|
1267
|
+
labels=padded_batch["completion_labels"],
|
|
1268
|
+
).logits
|
|
1269
|
+
|
|
1270
|
+
else:
|
|
1271
|
+
completion_logits = self.model(
|
|
1272
|
+
padded_batch["completion_input_ids"],
|
|
1273
|
+
attention_mask=padded_batch["completion_attention_mask"],
|
|
1274
|
+
).logits
|
|
1275
|
+
|
|
1276
|
+
else:
|
|
1277
|
+
if self.is_encoder_decoder:
|
|
1278
|
+
completion_logits = self.ref_model(
|
|
1279
|
+
padded_batch["prompt_input_ids"],
|
|
1280
|
+
attention_mask=padded_batch["prompt_attention_mask"],
|
|
1281
|
+
decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
|
|
1282
|
+
labels=padded_batch["completion_labels"],
|
|
1283
|
+
).logits
|
|
1284
|
+
|
|
1285
|
+
else:
|
|
1286
|
+
completion_logits = self.ref_model(
|
|
1287
|
+
padded_batch["completion_input_ids"], attention_mask=padded_batch["completion_attention_mask"]
|
|
1288
|
+
).logits
|
|
1289
|
+
|
|
1290
|
+
completion_logps = self.get_batch_logps(
|
|
1291
|
+
completion_logits,
|
|
1292
|
+
padded_batch["completion_labels"],
|
|
1293
|
+
average_log_prob=False,
|
|
1294
|
+
is_encoder_decoder=self.is_encoder_decoder,
|
|
1295
|
+
label_pad_token_id=self.label_pad_token_id,
|
|
1296
|
+
)
|
|
1297
|
+
|
|
1298
|
+
return completion_logps
|
|
1299
|
+
|
|
1300
|
+
@staticmethod
|
|
1301
|
+
def get_batch_logps(
|
|
1302
|
+
logits: torch.FloatTensor,
|
|
1303
|
+
labels: torch.LongTensor,
|
|
1304
|
+
average_log_prob: bool = False,
|
|
1305
|
+
label_pad_token_id: int = -100,
|
|
1306
|
+
is_encoder_decoder: bool = False,
|
|
1307
|
+
) -> torch.FloatTensor:
|
|
1308
|
+
"""Compute the log probabilities of the given labels under the given logits.
|
|
1309
|
+
|
|
1310
|
+
Args:
|
|
1311
|
+
logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
|
|
1312
|
+
labels:
|
|
1313
|
+
Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are
|
|
1314
|
+
ignored. Shape: (batch_size, sequence_length)
|
|
1315
|
+
average_log_prob:
|
|
1316
|
+
If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the
|
|
1317
|
+
log probabilities of the (non-masked) tokens.
|
|
1318
|
+
label_pad_token_id:
|
|
1319
|
+
The label value to ignore when computing log probabilities.
|
|
1320
|
+
is_encoder_decoder:
|
|
1321
|
+
Whether the model is an encoder-decoder model. If True, the labels are not shifted, and the logits are
|
|
1322
|
+
assumed to already be aligned with the labels. If False, the labels are shifted to the right by one
|
|
1323
|
+
position, and the logits are assumed to be aligned with the shifted labels.
|
|
1324
|
+
|
|
1325
|
+
Returns:
|
|
1326
|
+
A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the
|
|
1327
|
+
given logits.
|
|
1328
|
+
"""
|
|
1329
|
+
if logits.shape[:-1] != labels.shape:
|
|
1330
|
+
raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
|
|
1331
|
+
|
|
1332
|
+
if not is_encoder_decoder:
|
|
1333
|
+
labels = labels[:, 1:].clone()
|
|
1334
|
+
logits = logits[:, :-1, :]
|
|
1335
|
+
else:
    # Fixes enc-dec RuntimeError
    labels = labels.clone()
|
|
1338
|
+
|
|
1339
|
+
loss_mask = labels != label_pad_token_id
|
|
1340
|
+
|
|
1341
|
+
# dummy token; we'll ignore the losses on these tokens later
|
|
1342
|
+
labels[labels == label_pad_token_id] = 0
|
|
1343
|
+
|
|
1344
|
+
per_token_logps = selective_log_softmax(logits, labels)
|
|
1345
|
+
|
|
1346
|
+
if average_log_prob:
|
|
1347
|
+
return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
|
|
1348
|
+
else:
|
|
1349
|
+
return (per_token_logps * loss_mask).sum(-1)
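# A minimal standalone sketch (toy shapes, plain log_softmax + gather instead of `selective_log_softmax`,
# and the decoder-only label shift omitted) of what `get_batch_logps` computes.
def _batch_logps_sketch():
    import torch

    logits = torch.randn(2, 3, 5)                             # (batch, seq, vocab)
    labels = torch.tensor([[1, 2, -100], [0, -100, -100]])    # -100 marks ignored positions
    loss_mask = labels != -100
    labels = labels.masked_fill(~loss_mask, 0)                # dummy index for masked positions
    per_token_logps = torch.log_softmax(logits, dim=-1).gather(-1, labels.unsqueeze(-1)).squeeze(-1)
    return (per_token_logps * loss_mask).sum(-1)              # summed log-prob per sequence, shape (batch,)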
|
|
1350
|
+
|
|
1351
|
+
def forward(
|
|
1352
|
+
self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
|
|
1353
|
+
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
|
|
1354
|
+
model_kwargs = (
|
|
1355
|
+
{
|
|
1356
|
+
"labels": batch["completion_labels"],
|
|
1357
|
+
"decoder_input_ids": batch.get("completion_decoder_input_ids"),
|
|
1358
|
+
}
|
|
1359
|
+
if self.is_encoder_decoder
|
|
1360
|
+
else {}
|
|
1361
|
+
)
|
|
1362
|
+
if self.aux_loss_enabled:
|
|
1363
|
+
model_kwargs["output_router_logits"] = True
|
|
1364
|
+
|
|
1365
|
+
outputs = model(
|
|
1366
|
+
batch["completion_input_ids"],
|
|
1367
|
+
attention_mask=batch["completion_attention_mask"],
|
|
1368
|
+
**model_kwargs,
|
|
1369
|
+
)
|
|
1370
|
+
completion_logits = outputs.logits
|
|
1371
|
+
|
|
1372
|
+
completion_logps = self.get_batch_logps(
|
|
1373
|
+
completion_logits,
|
|
1374
|
+
batch["completion_labels"],
|
|
1375
|
+
average_log_prob=False,
|
|
1376
|
+
is_encoder_decoder=self.is_encoder_decoder,
|
|
1377
|
+
label_pad_token_id=self.label_pad_token_id,
|
|
1378
|
+
)
|
|
1379
|
+
|
|
1380
|
+
if completion_logps.shape[0] != len(batch["label"]):
|
|
1381
|
+
raise ValueError(
|
|
1382
|
+
"There is a mismatch between the number of examples in this batch and the number of "
|
|
1383
|
+
"examples for which an output sequence was predicted."
|
|
1384
|
+
)
|
|
1385
|
+
|
|
1386
|
+
chosen_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is True]
|
|
1387
|
+
rejected_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is False]
|
|
1388
|
+
|
|
1389
|
+
chosen_logps = completion_logps[chosen_idx, ...]
|
|
1390
|
+
rejected_logps = completion_logps[rejected_idx, ...]
|
|
1391
|
+
|
|
1392
|
+
chosen_logits = completion_logits[chosen_idx, ...]
|
|
1393
|
+
rejected_logits = completion_logits[rejected_idx, ...]
|
|
1394
|
+
|
|
1395
|
+
if self.aux_loss_enabled:
|
|
1396
|
+
return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, outputs.aux_loss)
|
|
1397
|
+
else:
|
|
1398
|
+
return (chosen_logps, rejected_logps, chosen_logits, rejected_logits)
|
|
1399
|
+
|
|
1400
|
+
def _get_udm_weight(self, rejected_embeddings: torch.FloatTensor) -> torch.FloatTensor:
    prob_desirable = self._get_chosen_prob(rejected_embeddings)
    min_ratio = self.args.min_density_ratio
    max_ratio = self.args.max_density_ratio

    weight = (prob_desirable / (1 - prob_desirable + 1e-8)).clamp(min=min_ratio, max=max_ratio)

    return weight
|
|
1408
|
+
|
|
1409
|
+
def bco_loss(
|
|
1410
|
+
self,
|
|
1411
|
+
policy_chosen_logps: torch.FloatTensor,
|
|
1412
|
+
policy_rejected_logps: torch.FloatTensor,
|
|
1413
|
+
reference_chosen_logps: torch.FloatTensor,
|
|
1414
|
+
reference_rejected_logps: torch.FloatTensor,
|
|
1415
|
+
chosen_embeddings: Optional[torch.FloatTensor],
|
|
1416
|
+
rejected_embeddings: Optional[torch.FloatTensor],
|
|
1417
|
+
do_train: bool = True,
|
|
1418
|
+
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
|
|
1419
|
+
"""Compute the BCO loss for a batch of policy and reference model log probabilities.
|
|
1420
|
+
|
|
1421
|
+
Args:
|
|
1422
|
+
policy_chosen_logps:
|
|
1423
|
+
Log probabilities of the policy model for the chosen responses. Shape: (num(chosen) in batch_size,)
|
|
1424
|
+
policy_rejected_logps:
|
|
1425
|
+
Log probabilities of the policy model for the rejected responses. Shape: (num(rejected) in batch_size,)
|
|
1426
|
+
reference_chosen_logps:
|
|
1427
|
+
Log probabilities of the reference model for the chosen responses. Shape: (num(chosen) in batch_size,)
|
|
1428
|
+
reference_rejected_logps:
|
|
1429
|
+
Log probabilities of the reference model for the rejected responses. Shape: (num(rejected) in
|
|
1430
|
+
batch_size,)
|
|
1431
|
+
chosen_embeddings: embeddings of desirable prompts
|
|
1432
|
+
rejected_embeddings: embeddings of undesirable prompts
|
|
1433
|
+
do_train: whether to update the running delta value. Default is True.
|
|
1434
|
+
|
|
1435
|
+
Returns:
|
|
1436
|
+
A tuple of four tensors: (losses, chosen_rewards, rejected_rewards, delta). The losses tensor contains the
|
|
1437
|
+
BCO loss for each example in the batch. The chosen_rewards and rejected_rewards tensors contain the rewards
|
|
1438
|
+
for the chosen and rejected responses, respectively. The delta value contains the moving average of all
|
|
1439
|
+
implicit rewards.
|
|
1440
|
+
"""
|
|
1441
|
+
|
|
1442
|
+
chosen_logratios = policy_chosen_logps - reference_chosen_logps
|
|
1443
|
+
chosen_rewards = self.beta * chosen_logratios
|
|
1444
|
+
|
|
1445
|
+
rejected_logratios = policy_rejected_logps - reference_rejected_logps
|
|
1446
|
+
rejected_rewards = self.beta * rejected_logratios
|
|
1447
|
+
|
|
1448
|
+
if do_train:
|
|
1449
|
+
self.running.update(torch.cat((chosen_rewards, rejected_rewards), 0).detach())
|
|
1450
|
+
delta = torch.as_tensor(self.running.mean, device=chosen_rewards.device)
|
|
1451
|
+
|
|
1452
|
+
chosen_losses = -F.logsigmoid(chosen_rewards - delta)
|
|
1453
|
+
rejected_losses = -F.logsigmoid(-(rejected_rewards - delta))
|
|
1454
|
+
|
|
1455
|
+
if self.match_underlying_distribution:
|
|
1456
|
+
chosen_weight = torch.ones_like(chosen_losses)
|
|
1457
|
+
rejected_weight = self._get_udm_weight(rejected_embeddings)
|
|
1458
|
+
|
|
1459
|
+
losses = torch.cat((chosen_weight * chosen_losses, rejected_weight * rejected_losses), dim=0)
|
|
1460
|
+
else:
|
|
1461
|
+
losses = torch.cat((chosen_losses, rejected_losses), dim=0)
|
|
1462
|
+
|
|
1463
|
+
return losses, chosen_rewards, rejected_rewards, delta
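# A minimal standalone sketch (made-up beta and log-probabilities; the real delta comes from `self.running`)
# of the reward/delta arithmetic in `bco_loss` above.
def _bco_loss_sketch():
    import torch
    import torch.nn.functional as F

    beta = 0.1
    policy_chosen, ref_chosen = torch.tensor([-10.0, -12.0]), torch.tensor([-11.0, -12.5])
    policy_rejected, ref_rejected = torch.tensor([-9.0]), torch.tensor([-8.0])

    chosen_rewards = beta * (policy_chosen - ref_chosen)
    rejected_rewards = beta * (policy_rejected - ref_rejected)
    delta = torch.cat((chosen_rewards, rejected_rewards)).mean()   # running mean in the real trainer

    chosen_losses = -F.logsigmoid(chosen_rewards - delta)
    rejected_losses = -F.logsigmoid(-(rejected_rewards - delta))
    return torch.cat((chosen_losses, rejected_losses)).mean()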
|
|
1464
|
+
|
|
1465
|
+
def get_batch_loss_metrics(
|
|
1466
|
+
self,
|
|
1467
|
+
model,
|
|
1468
|
+
batch: dict[str, Union[list, torch.LongTensor]],
|
|
1469
|
+
do_train: bool = True,
|
|
1470
|
+
):
|
|
1471
|
+
"""Compute the BCO loss and other metrics for the given batch of inputs for train or test."""
|
|
1472
|
+
metrics = {}
|
|
1473
|
+
batch = {k: (v.to(self.accelerator.device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
|
|
1474
|
+
|
|
1475
|
+
forward_output = self.forward(model, batch)
|
|
1476
|
+
(
|
|
1477
|
+
policy_chosen_logps,
|
|
1478
|
+
policy_rejected_logps,
|
|
1479
|
+
policy_chosen_logits,
|
|
1480
|
+
policy_rejected_logits,
|
|
1481
|
+
) = forward_output[:4]
|
|
1482
|
+
if self.aux_loss_enabled:
|
|
1483
|
+
aux_loss = forward_output[4]
|
|
1484
|
+
|
|
1485
|
+
# if reference_logps in batch use them, otherwise use the reference model
|
|
1486
|
+
if "reference_logps" in batch:
|
|
1487
|
+
chosen_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is True]
|
|
1488
|
+
rejected_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is False]
|
|
1489
|
+
|
|
1490
|
+
reference_chosen_logps = batch["reference_logps"][chosen_idx, ...]
|
|
1491
|
+
reference_rejected_logps = batch["reference_logps"][rejected_idx, ...]
|
|
1492
|
+
else:
|
|
1493
|
+
with torch.no_grad():
|
|
1494
|
+
if self.ref_model is None:
|
|
1495
|
+
with self.null_ref_context():
|
|
1496
|
+
(
|
|
1497
|
+
reference_chosen_logps,
|
|
1498
|
+
reference_rejected_logps,
|
|
1499
|
+
_,
|
|
1500
|
+
_,
|
|
1501
|
+
) = self.forward(self.model, batch)[:4]
|
|
1502
|
+
else:
|
|
1503
|
+
(
|
|
1504
|
+
reference_chosen_logps,
|
|
1505
|
+
reference_rejected_logps,
|
|
1506
|
+
_,
|
|
1507
|
+
_,
|
|
1508
|
+
) = self.forward(self.ref_model, batch)[:4]
|
|
1509
|
+
|
|
1510
|
+
chosen_embeddings, rejected_embeddings = self._get_prompt_embeddings(batch)
|
|
1511
|
+
|
|
1512
|
+
losses, chosen_rewards, rejected_rewards, delta = self.bco_loss(
|
|
1513
|
+
policy_chosen_logps,
|
|
1514
|
+
policy_rejected_logps,
|
|
1515
|
+
reference_chosen_logps,
|
|
1516
|
+
reference_rejected_logps,
|
|
1517
|
+
chosen_embeddings,
|
|
1518
|
+
rejected_embeddings,
|
|
1519
|
+
do_train=do_train,
|
|
1520
|
+
)
|
|
1521
|
+
metrics["delta"] = self.accelerator.gather_for_metrics(delta).mean().item()
|
|
1522
|
+
|
|
1523
|
+
num_chosen = torch.Tensor([len(chosen_rewards)]).to(self.accelerator.device)
|
|
1524
|
+
num_rejected = torch.Tensor([len(rejected_rewards)]).to(self.accelerator.device)
|
|
1525
|
+
|
|
1526
|
+
all_num_chosen = self.accelerator.gather_for_metrics(num_chosen).sum().item()
|
|
1527
|
+
all_num_rejected = self.accelerator.gather_for_metrics(num_rejected).sum().item()
|
|
1528
|
+
|
|
1529
|
+
if all_num_chosen > 0:
|
|
1530
|
+
metrics["rewards/chosen_sum"] = (
|
|
1531
|
+
self.accelerator.gather_for_metrics(chosen_rewards.nansum()).nansum().item()
|
|
1532
|
+
)
|
|
1533
|
+
metrics["logps/chosen_sum"] = (
|
|
1534
|
+
self.accelerator.gather_for_metrics(policy_chosen_logps.nansum()).nansum().item()
|
|
1535
|
+
)
|
|
1536
|
+
metrics["logits/chosen_sum"] = (
|
|
1537
|
+
self.accelerator.gather_for_metrics(policy_chosen_logits.nansum()).nansum().item()
|
|
1538
|
+
)
|
|
1539
|
+
metrics["count/chosen"] = all_num_chosen
|
|
1540
|
+
|
|
1541
|
+
if all_num_rejected > 0:
|
|
1542
|
+
metrics["rewards/rejected_sum"] = (
|
|
1543
|
+
self.accelerator.gather_for_metrics(rejected_rewards.nansum()).nansum().item()
|
|
1544
|
+
)
|
|
1545
|
+
metrics["logps/rejected_sum"] = (
|
|
1546
|
+
self.accelerator.gather_for_metrics(policy_rejected_logps.nansum()).nansum().item()
|
|
1547
|
+
)
|
|
1548
|
+
metrics["logits/rejected_sum"] = (
|
|
1549
|
+
self.accelerator.gather_for_metrics(policy_rejected_logits.nansum()).nansum().item()
|
|
1550
|
+
)
|
|
1551
|
+
metrics["count/rejected"] = all_num_rejected
|
|
1552
|
+
|
|
1553
|
+
loss = losses.nanmean()
|
|
1554
|
+
if self.aux_loss_enabled:
|
|
1555
|
+
loss += self.aux_loss_coef * aux_loss
|
|
1556
|
+
|
|
1557
|
+
return loss, metrics
|
|
1558
|
+
|
|
1559
|
+
def compute_loss(
|
|
1560
|
+
self,
|
|
1561
|
+
model: Union[PreTrainedModel, nn.Module],
|
|
1562
|
+
inputs: dict[str, Union[torch.Tensor, Any]],
|
|
1563
|
+
return_outputs=False,
|
|
1564
|
+
num_items_in_batch=None,
|
|
1565
|
+
) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
|
|
1566
|
+
compute_loss_context_manager = (
|
|
1567
|
+
autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
|
|
1568
|
+
)
|
|
1569
|
+
|
|
1570
|
+
with compute_loss_context_manager:
|
|
1571
|
+
loss, metrics = self.get_batch_loss_metrics(model, inputs)
|
|
1572
|
+
|
|
1573
|
+
# Make sure the loss ends up on the same device as the accumulating loss held by the parent `Trainer` class:
loss = loss.to(self.args.device)
|
|
1575
|
+
# force log the metrics
|
|
1576
|
+
if self.accelerator.is_main_process:
|
|
1577
|
+
self.store_metrics(metrics, train_eval="train")
|
|
1578
|
+
|
|
1579
|
+
if return_outputs:
|
|
1580
|
+
return (loss, metrics)
|
|
1581
|
+
return loss
|
|
1582
|
+
|
|
1583
|
+
def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
    for key, value in metrics.items():
        self._stored_metrics[train_eval][key].append(value)
|
|
1586
|
+
|
|
1587
|
+
def _get_train_sampler(self, dataset: Optional[Dataset] = None) -> Optional[torch.utils.data.Sampler]:
|
|
1588
|
+
if dataset is None:
|
|
1589
|
+
dataset = self.train_dataset
|
|
1590
|
+
if dataset is None or not has_length(dataset):
|
|
1591
|
+
return None
|
|
1592
|
+
return SequentialSampler(dataset)
|
|
1593
|
+
|
|
1594
|
+
def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[str, str]:
|
|
1595
|
+
"""Generate samples from the model and reference model for the given batch of inputs."""
|
|
1596
|
+
|
|
1597
|
+
# If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
# the torch amp context manager, as some hidden states are silently cast to full precision.
|
|
1599
|
+
generate_context_manager = (
|
|
1600
|
+
autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
|
|
1601
|
+
)
|
|
1602
|
+
with generate_context_manager:
|
|
1603
|
+
policy_output = model.generate(
|
|
1604
|
+
input_ids=batch["prompt_input_ids"],
|
|
1605
|
+
attention_mask=batch["prompt_attention_mask"],
|
|
1606
|
+
max_length=self.max_length,
|
|
1607
|
+
do_sample=True,
|
|
1608
|
+
pad_token_id=self.processing_class.pad_token_id,
|
|
1609
|
+
)
|
|
1610
|
+
|
|
1611
|
+
# if reference_output in batch use that otherwise use the reference model
|
|
1612
|
+
if "reference_output" in batch:
|
|
1613
|
+
reference_output = batch["reference_output"]
|
|
1614
|
+
else:
|
|
1615
|
+
if self.ref_model is None:
|
|
1616
|
+
with self.null_ref_context():
|
|
1617
|
+
reference_output = self.model.generate(
|
|
1618
|
+
input_ids=batch["prompt_input_ids"],
|
|
1619
|
+
attention_mask=batch["prompt_attention_mask"],
|
|
1620
|
+
max_length=self.max_length,
|
|
1621
|
+
do_sample=True,
|
|
1622
|
+
pad_token_id=self.processing_class.pad_token_id,
|
|
1623
|
+
)
|
|
1624
|
+
else:
|
|
1625
|
+
reference_output = self.ref_model.generate(
|
|
1626
|
+
input_ids=batch["prompt_input_ids"],
|
|
1627
|
+
attention_mask=batch["prompt_attention_mask"],
|
|
1628
|
+
max_length=self.max_length,
|
|
1629
|
+
do_sample=True,
|
|
1630
|
+
pad_token_id=self.processing_class.pad_token_id,
|
|
1631
|
+
)
|
|
1632
|
+
|
|
1633
|
+
policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
|
|
1634
|
+
policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)
|
|
1635
|
+
|
|
1636
|
+
reference_output = pad_to_length(reference_output, self.max_length, self.processing_class.pad_token_id)
|
|
1637
|
+
reference_output_decoded = self.processing_class.batch_decode(reference_output, skip_special_tokens=True)
|
|
1638
|
+
|
|
1639
|
+
return policy_output_decoded, reference_output_decoded
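# A minimal standalone sketch (toy tensor and lengths) of padding generations to a fixed length, in the
# spirit of the `pad_to_length` helper used above; the real code uses the TRL utility, not this function.
def _pad_to_length_sketch():
    import torch
    import torch.nn.functional as F

    output = torch.tensor([[5, 6, 7]])
    max_length, pad_token_id = 6, 0
    return F.pad(output, (0, max_length - output.shape[1]), value=pad_token_id)   # tensor([[5, 6, 7, 0, 0, 0]])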
|
|
1640
|
+
|
|
1641
|
+
def prediction_step(
|
|
1642
|
+
self,
|
|
1643
|
+
model: Union[PreTrainedModel, nn.Module],
|
|
1644
|
+
inputs: dict[str, Union[torch.Tensor, Any]],
|
|
1645
|
+
prediction_loss_only: bool,
|
|
1646
|
+
ignore_keys: Optional[list[str]] = None,
|
|
1647
|
+
):
|
|
1648
|
+
if ignore_keys is None:
|
|
1649
|
+
if hasattr(model, "config"):
|
|
1650
|
+
ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
|
|
1651
|
+
else:
|
|
1652
|
+
ignore_keys = []
|
|
1653
|
+
|
|
1654
|
+
prediction_context_manager = (
|
|
1655
|
+
autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
|
|
1656
|
+
)
|
|
1657
|
+
with torch.no_grad(), prediction_context_manager:
|
|
1658
|
+
loss, metrics = self.get_batch_loss_metrics(model, inputs, do_train=False)
|
|
1659
|
+
|
|
1660
|
+
# force log the metrics
|
|
1661
|
+
if self.accelerator.is_main_process:
|
|
1662
|
+
self.store_metrics(metrics, train_eval="eval")
|
|
1663
|
+
|
|
1664
|
+
if prediction_loss_only:
|
|
1665
|
+
return (loss.detach(), None, None)
|
|
1666
|
+
|
|
1667
|
+
# logits for the chosen and rejected samples from model
|
|
1668
|
+
logits_dict = {}
|
|
1669
|
+
if "logits/chosen_sum" in metrics:
|
|
1670
|
+
logits_dict["eval_logits/chosen"] = metrics["logits/chosen_sum"]
|
|
1671
|
+
if "logits/rejected_sum" in metrics:
|
|
1672
|
+
logits_dict["eval_logits/rejected"] = metrics["logits/rejected_sum"]
|
|
1673
|
+
logits = [v for k, v in logits_dict.items() if k not in ignore_keys]
|
|
1674
|
+
logits = torch.tensor(logits, device=self.accelerator.device)
|
|
1675
|
+
labels = torch.zeros(logits.shape[0], device=self.accelerator.device)
|
|
1676
|
+
|
|
1677
|
+
return (loss.detach(), logits, labels)
|
|
1678
|
+
|
|
1679
|
+
def evaluation_loop(
|
|
1680
|
+
self,
|
|
1681
|
+
dataloader: DataLoader,
|
|
1682
|
+
description: str,
|
|
1683
|
+
prediction_loss_only: Optional[bool] = None,
|
|
1684
|
+
ignore_keys: Optional[list[str]] = None,
|
|
1685
|
+
metric_key_prefix: str = "eval",
|
|
1686
|
+
) -> EvalLoopOutput:
|
|
1687
|
+
"""
|
|
1688
|
+
Overriding built-in evaluation loop to store metrics for each batch. Prediction/evaluation loop, shared by
|
|
1689
|
+
`Trainer.evaluate()` and `Trainer.predict()`.
|
|
1690
|
+
|
|
1691
|
+
Works both with or without labels.
|
|
1692
|
+
"""
|
|
1693
|
+
|
|
1694
|
+
# Sample and save to game log if requested (for one batch to save time)
|
|
1695
|
+
if self.generate_during_eval:
|
|
1696
|
+
# Generate random indices within the range of the total number of samples
|
|
1697
|
+
num_samples = len(dataloader.dataset)
|
|
1698
|
+
random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)
|
|
1699
|
+
|
|
1700
|
+
# Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
|
|
1701
|
+
random_batch_dataset = dataloader.dataset.select(random_indices)
|
|
1702
|
+
random_batch = self.data_collator(random_batch_dataset)
|
|
1703
|
+
random_batch = self._prepare_inputs(random_batch)
|
|
1704
|
+
|
|
1705
|
+
target_labels = torch.tensor(random_batch["label"], dtype=torch.bool, device=self.accelerator.device)
|
|
1706
|
+
target_indices = torch.where(~target_labels)[0]
|
|
1707
|
+
target_batch = {
|
|
1708
|
+
"prompt_input_ids": random_batch["prompt_input_ids"][target_indices],
|
|
1709
|
+
"prompt_attention_mask": random_batch["prompt_attention_mask"][target_indices],
|
|
1710
|
+
"prompt": itemgetter(*target_indices)(random_batch["prompt"]),
|
|
1711
|
+
}
|
|
1712
|
+
policy_output_decoded, ref_output_decoded = self.generate_from_model_and_ref(self.model, target_batch)
|
|
1713
|
+
|
|
1714
|
+
table = pd.DataFrame(
|
|
1715
|
+
columns=["Prompt", "Policy", "Ref Model"],
|
|
1716
|
+
data=[
|
|
1717
|
+
[prompt, pol[len(prompt) :], ref[len(prompt) :]]
|
|
1718
|
+
for prompt, pol, ref in zip(target_batch["prompt"], policy_output_decoded, ref_output_decoded)
|
|
1719
|
+
],
|
|
1720
|
+
)
|
|
1721
|
+
if "wandb" in self.args.report_to:
|
|
1722
|
+
wandb.log({"game_log": wandb.Table(data=table)})
|
|
1723
|
+
|
|
1724
|
+
if "comet_ml" in self.args.report_to:
|
|
1725
|
+
log_table_to_comet_experiment(
|
|
1726
|
+
name="game_log.csv",
|
|
1727
|
+
table=table,
|
|
1728
|
+
)
|
|
1729
|
+
|
|
1730
|
+
# Base evaluation
|
|
1731
|
+
initial_output = super().evaluation_loop(
|
|
1732
|
+
dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
|
|
1733
|
+
)
|
|
1734
|
+
|
|
1735
|
+
return initial_output
|
|
1736
|
+
|
|
1737
|
+
def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
|
|
1738
|
+
"""
|
|
1739
|
+
Log `logs` on the various objects watching training, including stored metrics.
|
|
1740
|
+
|
|
1741
|
+
Args:
|
|
1742
|
+
logs (`dict[str, float]`):
|
|
1743
|
+
The values to log.
|
|
1744
|
+
start_time (`float`, *optional*):
|
|
1745
|
+
Start time of the training.
|
|
1746
|
+
"""
|
|
1747
|
+
# logs either has 'loss' or 'eval_loss'
|
|
1748
|
+
train_eval = "train" if "loss" in logs else "eval"
|
|
1749
|
+
# train metrics should have no prefix, eval should have 'eval_'
|
|
1750
|
+
prefix = "eval_" if train_eval == "eval" else ""
|
|
1751
|
+
# accumulate average metrics from sums and lengths
|
|
1752
|
+
for split in ["chosen", "rejected"]:
|
|
1753
|
+
if f"count/{split}" in self._stored_metrics[train_eval]:
|
|
1754
|
+
count_sum = torch.Tensor(self._stored_metrics[train_eval][f"count/{split}"]).sum().item()
|
|
1755
|
+
for metric in ["rewards", "logps", "logits"]:
|
|
1756
|
+
logs[f"{prefix}{metric}/{split}"] = (
|
|
1757
|
+
torch.Tensor(self._stored_metrics[train_eval][f"{metric}/{split}_sum"]).sum().item()
|
|
1758
|
+
/ count_sum
|
|
1759
|
+
)
|
|
1760
|
+
# delete obsolete metric
|
|
1761
|
+
del self._stored_metrics[train_eval][f"{metric}/{split}_sum"]
|
|
1762
|
+
del self._stored_metrics[train_eval][f"count/{split}"]
|
|
1763
|
+
# calculate reward margin
|
|
1764
|
+
if f"{prefix}rewards/chosen" in logs and f"{prefix}rewards/rejected" in logs:
|
|
1765
|
+
logs[f"{prefix}rewards/margins"] = logs[f"{prefix}rewards/chosen"] - logs[f"{prefix}rewards/rejected"]
|
|
1766
|
+
# Add averaged stored metrics to logs
|
|
1767
|
+
for key, metrics in self._stored_metrics[train_eval].items():
|
|
1768
|
+
logs[f"{prefix}{key}"] = torch.Tensor(metrics).mean().item()
|
|
1769
|
+
del self._stored_metrics[train_eval]
|
|
1770
|
+
return super().log(logs, start_time)
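# A minimal standalone sketch (made-up stored values) of how `log` above turns per-step sums and counts
# into averaged metrics.
def _metric_averaging_sketch():
    import torch

    stored = {"rewards/chosen_sum": [2.0, 4.0], "count/chosen": [8, 8]}
    count_sum = torch.Tensor(stored["count/chosen"]).sum().item()
    return torch.Tensor(stored["rewards/chosen_sum"]).sum().item() / count_sum   # (2 + 4) / 16 = 0.375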
|
|
1771
|
+
|
|
1772
|
+
# Ensure the model card is saved along with the checkpoint
|
|
1773
|
+
def _save_checkpoint(self, model, trial):
|
|
1774
|
+
if self.args.hub_model_id is None:
|
|
1775
|
+
model_name = Path(self.args.output_dir).name
|
|
1776
|
+
else:
|
|
1777
|
+
model_name = self.args.hub_model_id.split("/")[-1]
|
|
1778
|
+
self.create_model_card(model_name=model_name)
|
|
1779
|
+
super()._save_checkpoint(model, trial)
|
|
1780
|
+
class UnslothBCOTrainer(_UnslothBCOTrainer):
|
|
1781
|
+
"""
|
|
1782
|
+
|
|
1783
|
+
Initialize BCOTrainer from [BCO](https://huggingface.co/papers/2404.04656) paper.
|
|
1784
|
+
|
|
1785
|
+
Args:
|
|
1786
|
+
model ([`~transformers.PreTrainedModel`]):
|
|
1787
|
+
The model to train, preferably an [`~transformers.AutoModelForSequenceClassification`].
|
|
1788
|
+
ref_model ([`PreTrainedModelWrapper`]):
    Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation
    and loss. If no reference model is provided, the trainer will create a reference model with the same
    architecture as the model to be optimized.
|
|
1792
|
+
args ([`BCOConfig`]):
|
|
1793
|
+
The arguments to use for training.
|
|
1794
|
+
train_dataset ([`~datasets.Dataset`]):
|
|
1795
|
+
The dataset to use for training.
|
|
1796
|
+
eval_dataset ([`~datasets.Dataset`]):
|
|
1797
|
+
The dataset to use for evaluation.
|
|
1798
|
+
processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*):
|
|
1799
|
+
Processing class used to process the data. If provided, will be used to automatically process the inputs
|
|
1800
|
+
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
|
|
1801
|
+
reuse the fine-tuned model.
|
|
1802
|
+
data_collator ([`~transformers.DataCollator`], *optional*):
|
|
1803
|
+
The data collator to use for training. If None is specified, the default data collator
|
|
1804
|
+
([`DPODataCollatorWithPadding`]) will be used which will pad the sequences to the maximum length of the
|
|
1805
|
+
sequences in the batch, given a dataset of paired sequences.
|
|
1806
|
+
model_init (`Callable[[], transformers.PreTrainedModel]`):
|
|
1807
|
+
The model initializer to use for training. If None is specified, the default model initializer will be
|
|
1808
|
+
used.
|
|
1809
|
+
callbacks (`list[transformers.TrainerCallback]`):
|
|
1810
|
+
The callbacks to use for training.
|
|
1811
|
+
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
|
|
1812
|
+
The optimizer and scheduler to use for training.
|
|
1813
|
+
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
|
|
1814
|
+
The function to use to preprocess the logits before computing the metrics.
|
|
1815
|
+
peft_config (`dict`, defaults to `None`):
|
|
1816
|
+
The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in
|
|
1817
|
+
a PEFT model.
|
|
1818
|
+
compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
    The function to use to compute the metrics. Must take an `EvalPrediction` and return a dictionary mapping
    strings to metric values.
|
|
1821
|
+
model_adapter_name (`str`, defaults to `None`):
|
|
1822
|
+
Name of the train target PEFT adapter, when using LoRA with multiple adapters.
|
|
1823
|
+
ref_adapter_name (`str`, defaults to `None`):
|
|
1824
|
+
Name of the reference PEFT adapter, when using LoRA with multiple adapters.
|
|
1825
|
+
|
|
1826
|
+
"""
|
|
1827
|
+
def __init__(
|
|
1828
|
+
self,
|
|
1829
|
+
model = None,
|
|
1830
|
+
ref_model = None,
|
|
1831
|
+
args = None,
|
|
1832
|
+
train_dataset = None,
|
|
1833
|
+
eval_dataset = None,
|
|
1834
|
+
processing_class = None,
|
|
1835
|
+
data_collator = None,
|
|
1836
|
+
model_init = None,
|
|
1837
|
+
callbacks = None,
|
|
1838
|
+
preprocess_logits_for_metrics = None,
|
|
1839
|
+
peft_config = None,
|
|
1840
|
+
compute_metrics = None,
|
|
1841
|
+
model_adapter_name = None,
|
|
1842
|
+
ref_adapter_name = None,
|
|
1843
|
+
embedding_func = None,
|
|
1844
|
+
embedding_tokenizer = None,
|
|
1845
|
+
**kwargs
|
|
1846
|
+
):
|
|
1847
|
+
if args is None: args = UnslothBCOConfig()
|
|
1848
|
+
use_bf16 = getattr(args, 'bf16', False)
|
|
1849
|
+
if type(use_bf16) is not bool: use_bf16 = False
|
|
1850
|
+
use_fp16 = getattr(args, 'fp16', False)
|
|
1851
|
+
if type(use_fp16) is not bool: use_fp16 = False
|
|
1852
|
+
force_float32 = False
|
|
1853
|
+
full_finetuning = os.environ.get('UNSLOTH_ENABLE_FULL_FINETUNING', '0') == '1'
|
|
1854
|
+
if not full_finetuning and (os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1'):
|
|
1855
|
+
print('Unsloth: Switching to float32 training since model cannot work with float16')
|
|
1856
|
+
force_float32 = True
|
|
1857
|
+
mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
|
|
1858
|
+
dtype = getattr(model.config, 'dtype', None) or getattr(model.config, 'torch_dtype', None)
|
|
1859
|
+
if dtype is None: dtype = model.get_input_embeddings().weight.dtype
|
|
1860
|
+
from unsloth_zoo.utils import _get_dtype
|
|
1861
|
+
dtype = _get_dtype(dtype)
|
|
1862
|
+
float16 = dtype == torch.float16
|
|
1863
|
+
if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
|
|
1864
|
+
if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
|
|
1865
|
+
if force_float32:
|
|
1866
|
+
# Forced float32 training
|
|
1867
|
+
args.fp16 = False
|
|
1868
|
+
args.bf16 = False
|
|
1869
|
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
|
|
1870
|
+
if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'
|
|
1871
|
+
# args.mixed_precision is a new argument which needs to be set now
|
|
1872
|
+
elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
|
|
1873
|
+
# Mixed precision training
|
|
1874
|
+
args.fp16 = float16
|
|
1875
|
+
args.bf16 = not float16
|
|
1876
|
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
|
|
1877
|
+
if hasattr(args, 'mixed_precision'): args.mixed_precision = 'fp16' if float16 else 'bf16'
|
|
1878
|
+
# args.mixed_precision is a new argument which needs to be set now
|
|
1879
|
+
elif mixed_precision_dtype == 'bfloat16':
|
|
1880
|
+
# Both False since bfloat16 full finetuning doesn't do any autocasting.
|
|
1881
|
+
args.fp16 = False
|
|
1882
|
+
args.bf16 = False
|
|
1883
|
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
|
|
1884
|
+
if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'
|
|
1885
|
+
# args.mixed_precision is a new argument which needs to be set now
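# A minimal standalone sketch (hypothetical helper; it mirrors only the mixed-precision branch above) of
# the dtype-to-precision mapping: float16 weights train with fp16 autocast, everything else with bf16,
# unless float32 is forced.
def _mixed_precision_sketch(model_dtype, force_float32=False):
    import torch

    if force_float32:
        return {"fp16": False, "bf16": False, "ACCELERATE_MIXED_PRECISION": "no"}
    use_fp16 = model_dtype == torch.float16
    return {
        "fp16": use_fp16,
        "bf16": not use_fp16,
        "ACCELERATE_MIXED_PRECISION": "fp16" if use_fp16 else "bf16",
    }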
|
|
1886
|
+
|
|
1887
|
+
if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
|
|
1888
|
+
args.eval_strategy = 'steps'
|
|
1889
|
+
if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
|
|
1890
|
+
ga_steps = getattr(args, 'gradient_accumulation_steps', None)
|
|
1891
|
+
if ga_steps is not None and ga_steps > 1:
|
|
1892
|
+
from transformers import __version__ as transformers_version
|
|
1893
|
+
if Version(transformers_version) <= Version('4.45.2'):
|
|
1894
|
+
print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
|
|
1895
|
+
'`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
|
|
1896
|
+
if getattr(args, 'eval_strategy', 'no') != 'no':
|
|
1897
|
+
eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
|
|
1898
|
+
if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
|
|
1899
|
+
if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
|
|
1900
|
+
fp16_full_eval = getattr(args, 'fp16_full_eval', False)
|
|
1901
|
+
if type(fp16_full_eval) is not bool: fp16_full_eval = False
|
|
1902
|
+
bf16_full_eval = getattr(args, 'bf16_full_eval', False)
|
|
1903
|
+
if type(bf16_full_eval) is not bool: bf16_full_eval = False
|
|
1904
|
+
if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
|
|
1905
|
+
if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
|
|
1906
|
+
if force_float32:
|
|
1907
|
+
args.bf16_full_eval = False
|
|
1908
|
+
args.fp16_full_eval = False
|
|
1909
|
+
elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
|
|
1910
|
+
args.bf16_full_eval = True
|
|
1911
|
+
args.fp16_full_eval = False
|
|
1912
|
+
elif not bf16_full_eval and not fp16_full_eval:
|
|
1913
|
+
args.bf16_full_eval = args.bf16
|
|
1914
|
+
args.fp16_full_eval = args.fp16
|
|
1915
|
+
_output_logits = False
|
|
1916
|
+
if locals().get('compute_metrics', None) is not None: _output_logits = True
|
|
1917
|
+
if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
|
|
1918
|
+
if _output_logits:
|
|
1919
|
+
os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
|
|
1920
|
+
if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
|
|
1921
|
+
pass
|
|
1922
|
+
else:
|
|
1923
|
+
model_max_seq_length = getattr(model, 'max_seq_length', None)
|
|
1924
|
+
args_max_seq_length = getattr(args, 'max_seq_length', None)
|
|
1925
|
+
if args_max_seq_length is None and model_max_seq_length is not None:
|
|
1926
|
+
max_seq_length = model.max_seq_length
|
|
1927
|
+
if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
|
|
1928
|
+
if model is not None and hasattr(model, 'for_training'):
|
|
1929
|
+
model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
|
|
1930
|
+
if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
|
|
1931
|
+
if 'processing_class' in locals():
|
|
1932
|
+
if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
|
|
1933
|
+
if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
|
|
1934
|
+
__tokenizer = processing_class if 'processing_class' in locals() else tokenizer
|
|
1935
|
+
from unsloth_zoo.vision_utils import UnslothVisionDataCollator
|
|
1936
|
+
if not isinstance(data_collator, UnslothVisionDataCollator):
|
|
1937
|
+
if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
|
|
1938
|
+
data_collator = TransformersDataCollatorForLanguageModeling(
|
|
1939
|
+
__tokenizer,
|
|
1940
|
+
mlm = False,
|
|
1941
|
+
mlm_probability = 0.0,
|
|
1942
|
+
pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
|
|
1943
|
+
)
|
|
1944
|
+
elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
|
|
1945
|
+
data_collator = DataCollatorForSeq2Seq(
|
|
1946
|
+
__tokenizer,
|
|
1947
|
+
pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
|
|
1948
|
+
)
|
|
1949
|
+
else:
|
|
1950
|
+
if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
|
|
1951
|
+
if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
|
|
1952
|
+
if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
|
|
1953
|
+
if not isinstance(data_collator, UnslothVisionDataCollator):
|
|
1954
|
+
if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
|
|
1955
|
+
if isinstance(data_collator, DataCollatorForSeq2Seq):
|
|
1956
|
+
data_collator = DataCollatorForSeq2Seq(
|
|
1957
|
+
__tokenizer.tokenizer,
|
|
1958
|
+
pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
|
|
1959
|
+
)
|
|
1960
|
+
else:
|
|
1961
|
+
data_collator = TransformersDataCollatorForLanguageModeling(
|
|
1962
|
+
__tokenizer.tokenizer,
|
|
1963
|
+
mlm = False,
|
|
1964
|
+
mlm_probability = 0.0,
|
|
1965
|
+
pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
|
|
1966
|
+
)
|
|
1967
|
+
other_metrics = []
|
|
1968
|
+
|
|
1969
|
+
from unsloth_zoo.logging_utils import PatchRLStatistics
|
|
1970
|
+
PatchRLStatistics('bco_trainer', other_metrics)
|
|
1971
|
+
|
|
1972
|
+
# [TODO] Fix up DataParallel multiplying batch sizes
|
|
1973
|
+
# [TODO] DDP works, but DP seems to not work? [TODO]
|
|
1974
|
+
if getattr(args, "parallel_mode", None) == ParallelMode.NOT_DISTRIBUTED and args.n_gpu > 1:
|
|
1975
|
+
if getattr(args, "_n_gpu", 1) != 1:
|
|
1976
|
+
args._n_gpu = 1
|
|
1977
|
+
if "model" in locals() and hasattr(model, "for_training"):
|
|
1978
|
+
model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
|
|
1979
|
+
super().__init__(
|
|
1980
|
+
model = model,
|
|
1981
|
+
ref_model = ref_model,
|
|
1982
|
+
args = args,
|
|
1983
|
+
train_dataset = train_dataset,
|
|
1984
|
+
eval_dataset = eval_dataset,
|
|
1985
|
+
processing_class = processing_class,
|
|
1986
|
+
data_collator = data_collator,
|
|
1987
|
+
model_init = model_init,
|
|
1988
|
+
callbacks = callbacks,
|
|
1989
|
+
preprocess_logits_for_metrics = preprocess_logits_for_metrics,
|
|
1990
|
+
peft_config = peft_config,
|
|
1991
|
+
compute_metrics = compute_metrics,
|
|
1992
|
+
model_adapter_name = model_adapter_name,
|
|
1993
|
+
ref_adapter_name = ref_adapter_name,
|
|
1994
|
+
embedding_func = embedding_func,
|
|
1995
|
+
embedding_tokenizer = embedding_tokenizer,**kwargs)
|
|
1996
|
+
if "model" in locals() and hasattr(model, "for_inference"):
|
|
1997
|
+
model.for_inference()
|
|
1998
|
+
if hasattr(self, 'neftune_hook_handle'):
|
|
1999
|
+
self.neftune_hook_handle.remove()
|
|
2000
|
+
if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
|
|
2001
|
+
if getattr(args, 'neftune_noise_alpha', None) is not None:
|
|
2002
|
+
model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
|
|
2003
|
+
pass
|
|
2004
|
+
if hasattr(self, 'accelerator'):
|
|
2005
|
+
scaler = self.accelerator.scaler
|
|
2006
|
+
current_model = model
|
|
2007
|
+
while hasattr(current_model, 'model'):
|
|
2008
|
+
current_model.accelerator_scaler = scaler
|
|
2009
|
+
current_model = current_model.model
|
|
2010
|
+
current_model.accelerator_scaler = scaler
|
|
2011
|
+
pass
|
|
2012
|
+
if hasattr(self, 'train'):
|
|
2013
|
+
self.train = MethodType(prepare_for_training_mode(self.__class__.train), self)
|
|
2014
|
+
pass
|
|
2015
|
+
|
|
2016
|
+
pass
|
|
2017
|
+
|
|
2018
|
+
|
|
2019
|
+
if hasattr(logger, "addFilter"):
|
|
2020
|
+
import logging
|
|
2021
|
+
class HideLoggingMessage(logging.Filter):
|
|
2022
|
+
def __init__(self, text): self.text = text
|
|
2023
|
+
def filter(self, x): return not (self.text in x.getMessage())
|
|
2024
|
+
pass
|
|
2025
|
+
logger.addFilter(HideLoggingMessage("`use_cache=True`"))
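# A minimal standalone sketch (throwaway logger name, local copy of the filter) showing the effect of the
# filter installed above: records containing the target text are dropped, others pass through.
def _logging_filter_sketch():
    import logging

    class HideText(logging.Filter):   # same idea as HideLoggingMessage
        def __init__(self, text): self.text = text
        def filter(self, record): return self.text not in record.getMessage()

    demo_logger = logging.getLogger("unsloth_filter_demo")
    demo_logger.addFilter(HideText("`use_cache=True`"))
    demo_logger.warning("`use_cache=True` is incompatible with gradient checkpointing")   # dropped
    demo_logger.warning("this message still goes through")                                # logged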