cciwon-code-review-cli 2.0.2 → 2.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/lib/chat-mode.js +7 -2
  2. package/package.json +1 -1
  3. package/unsloth_compiled_cache/.locks/.lock.AqlmLoraLinear_peft_forward.py +0 -0
  4. package/unsloth_compiled_cache/.locks/.lock.AwqLoraLinear_peft_forward.py +0 -0
  5. package/unsloth_compiled_cache/.locks/.lock.BatchNorm1d.py +0 -0
  6. package/unsloth_compiled_cache/.locks/.lock.BatchNorm2d.py +0 -0
  7. package/unsloth_compiled_cache/.locks/.lock.BatchNorm3d.py +0 -0
  8. package/unsloth_compiled_cache/.locks/.lock.Conv1d.py +0 -0
  9. package/unsloth_compiled_cache/.locks/.lock.Conv2d.py +0 -0
  10. package/unsloth_compiled_cache/.locks/.lock.Conv3d.py +0 -0
  11. package/unsloth_compiled_cache/.locks/.lock.ConvTranspose1d.py +0 -0
  12. package/unsloth_compiled_cache/.locks/.lock.ConvTranspose2d.py +0 -0
  13. package/unsloth_compiled_cache/.locks/.lock.ConvTranspose3d.py +0 -0
  14. package/unsloth_compiled_cache/.locks/.lock.GPTQLoraLinear_peft_forward.py +0 -0
  15. package/unsloth_compiled_cache/.locks/.lock.GroupNorm.py +0 -0
  16. package/unsloth_compiled_cache/.locks/.lock.LayerNorm.py +0 -0
  17. package/unsloth_compiled_cache/.locks/.lock.Linear4bit_peft_forward.py +0 -0
  18. package/unsloth_compiled_cache/.locks/.lock.Linear8bitLt_peft_forward.py +0 -0
  19. package/unsloth_compiled_cache/.locks/.lock.Linear_peft_forward.py +0 -0
  20. package/unsloth_compiled_cache/.locks/.lock.LoraParallelLinear_peft_forward.py +0 -0
  21. package/unsloth_compiled_cache/.locks/.lock.RMSNorm.py +0 -0
  22. package/unsloth_compiled_cache/.locks/.lock.UnslothBCOTrainer.py +0 -0
  23. package/unsloth_compiled_cache/.locks/.lock.UnslothCPOTrainer.py +0 -0
  24. package/unsloth_compiled_cache/.locks/.lock.UnslothDPOTrainer.py +0 -0
  25. package/unsloth_compiled_cache/.locks/.lock.UnslothGKDTrainer.py +0 -0
  26. package/unsloth_compiled_cache/.locks/.lock.UnslothGRPOTrainer.py +0 -0
  27. package/unsloth_compiled_cache/.locks/.lock.UnslothKTOTrainer.py +0 -0
  28. package/unsloth_compiled_cache/.locks/.lock.UnslothNashMDTrainer.py +0 -0
  29. package/unsloth_compiled_cache/.locks/.lock.UnslothORPOTrainer.py +0 -0
  30. package/unsloth_compiled_cache/.locks/.lock.UnslothOnlineDPOTrainer.py +0 -0
  31. package/unsloth_compiled_cache/.locks/.lock.UnslothPPOTrainer.py +0 -0
  32. package/unsloth_compiled_cache/.locks/.lock.UnslothPRMTrainer.py +0 -0
  33. package/unsloth_compiled_cache/.locks/.lock.UnslothRLOOTrainer.py +0 -0
  34. package/unsloth_compiled_cache/.locks/.lock.UnslothRewardTrainer.py +0 -0
  35. package/unsloth_compiled_cache/.locks/.lock.UnslothSFTTrainer.py +0 -0
  36. package/unsloth_compiled_cache/.locks/.lock.UnslothXPOTrainer.py +0 -0
  37. package/unsloth_compiled_cache/.locks/.lock.unsloth_compiled_module_qwen3_moe.py +0 -0
  38. package/unsloth_compiled_cache/.locks/.lock.unsloth_compiled_module_siglip.py +0 -0
  39. package/unsloth_compiled_cache/AqlmLoraLinear_peft_forward.py +88 -0
  40. package/unsloth_compiled_cache/AwqLoraLinear_peft_forward.py +87 -0
  41. package/unsloth_compiled_cache/BatchNorm1d.py +117 -0
  42. package/unsloth_compiled_cache/BatchNorm2d.py +117 -0
  43. package/unsloth_compiled_cache/BatchNorm3d.py +117 -0
  44. package/unsloth_compiled_cache/Conv1d.py +70 -0
  45. package/unsloth_compiled_cache/Conv2d.py +70 -0
  46. package/unsloth_compiled_cache/Conv3d.py +70 -0
  47. package/unsloth_compiled_cache/ConvTranspose1d.py +97 -0
  48. package/unsloth_compiled_cache/ConvTranspose2d.py +106 -0
  49. package/unsloth_compiled_cache/ConvTranspose3d.py +98 -0
  50. package/unsloth_compiled_cache/GPTQLoraLinear_peft_forward.py +95 -0
  51. package/unsloth_compiled_cache/GroupNorm.py +70 -0
  52. package/unsloth_compiled_cache/LayerNorm.py +72 -0
  53. package/unsloth_compiled_cache/Linear4bit_peft_forward.py +115 -0
  54. package/unsloth_compiled_cache/Linear8bitLt_peft_forward.py +113 -0
  55. package/unsloth_compiled_cache/Linear_peft_forward.py +104 -0
  56. package/unsloth_compiled_cache/LoraParallelLinear_peft_forward.py +91 -0
  57. package/unsloth_compiled_cache/RMSNorm.py +73 -0
  58. package/unsloth_compiled_cache/UnslothBCOTrainer.py +2026 -0
  59. package/unsloth_compiled_cache/UnslothCPOTrainer.py +1806 -0
  60. package/unsloth_compiled_cache/UnslothDPOTrainer.py +2750 -0
  61. package/unsloth_compiled_cache/UnslothGKDTrainer.py +1157 -0
  62. package/unsloth_compiled_cache/UnslothGRPOTrainer.py +3607 -0
  63. package/unsloth_compiled_cache/UnslothKTOTrainer.py +2220 -0
  64. package/unsloth_compiled_cache/UnslothNashMDTrainer.py +1210 -0
  65. package/unsloth_compiled_cache/UnslothORPOTrainer.py +1730 -0
  66. package/unsloth_compiled_cache/UnslothOnlineDPOTrainer.py +2313 -0
  67. package/unsloth_compiled_cache/UnslothPPOTrainer.py +1504 -0
  68. package/unsloth_compiled_cache/UnslothPRMTrainer.py +979 -0
  69. package/unsloth_compiled_cache/UnslothRLOOTrainer.py +2674 -0
  70. package/unsloth_compiled_cache/UnslothRewardTrainer.py +1197 -0
  71. package/unsloth_compiled_cache/UnslothSFTTrainer.py +1416 -0
  72. package/unsloth_compiled_cache/UnslothXPOTrainer.py +1255 -0
  73. package/unsloth_compiled_cache/__pycache__/AqlmLoraLinear_peft_forward.cpython-312.pyc +0 -0
  74. package/unsloth_compiled_cache/__pycache__/AwqLoraLinear_peft_forward.cpython-312.pyc +0 -0
  75. package/unsloth_compiled_cache/__pycache__/BatchNorm1d.cpython-312.pyc +0 -0
  76. package/unsloth_compiled_cache/__pycache__/BatchNorm2d.cpython-312.pyc +0 -0
  77. package/unsloth_compiled_cache/__pycache__/BatchNorm3d.cpython-312.pyc +0 -0
  78. package/unsloth_compiled_cache/__pycache__/Conv1d.cpython-312.pyc +0 -0
  79. package/unsloth_compiled_cache/__pycache__/Conv2d.cpython-312.pyc +0 -0
  80. package/unsloth_compiled_cache/__pycache__/Conv3d.cpython-312.pyc +0 -0
  81. package/unsloth_compiled_cache/__pycache__/ConvTranspose1d.cpython-312.pyc +0 -0
  82. package/unsloth_compiled_cache/__pycache__/ConvTranspose2d.cpython-312.pyc +0 -0
  83. package/unsloth_compiled_cache/__pycache__/ConvTranspose3d.cpython-312.pyc +0 -0
  84. package/unsloth_compiled_cache/__pycache__/GPTQLoraLinear_peft_forward.cpython-312.pyc +0 -0
  85. package/unsloth_compiled_cache/__pycache__/GroupNorm.cpython-312.pyc +0 -0
  86. package/unsloth_compiled_cache/__pycache__/LayerNorm.cpython-312.pyc +0 -0
  87. package/unsloth_compiled_cache/__pycache__/Linear4bit_peft_forward.cpython-312.pyc +0 -0
  88. package/unsloth_compiled_cache/__pycache__/Linear8bitLt_peft_forward.cpython-312.pyc +0 -0
  89. package/unsloth_compiled_cache/__pycache__/Linear_peft_forward.cpython-312.pyc +0 -0
  90. package/unsloth_compiled_cache/__pycache__/LoraParallelLinear_peft_forward.cpython-312.pyc +0 -0
  91. package/unsloth_compiled_cache/__pycache__/RMSNorm.cpython-312.pyc +0 -0
  92. package/unsloth_compiled_cache/__pycache__/UnslothBCOTrainer.cpython-312.pyc +0 -0
  93. package/unsloth_compiled_cache/__pycache__/UnslothCPOTrainer.cpython-312.pyc +0 -0
  94. package/unsloth_compiled_cache/__pycache__/UnslothDPOTrainer.cpython-312.pyc +0 -0
  95. package/unsloth_compiled_cache/__pycache__/UnslothGKDTrainer.cpython-312.pyc +0 -0
  96. package/unsloth_compiled_cache/__pycache__/UnslothGRPOTrainer.cpython-312.pyc +0 -0
  97. package/unsloth_compiled_cache/__pycache__/UnslothKTOTrainer.cpython-312.pyc +0 -0
  98. package/unsloth_compiled_cache/__pycache__/UnslothNashMDTrainer.cpython-312.pyc +0 -0
  99. package/unsloth_compiled_cache/__pycache__/UnslothORPOTrainer.cpython-312.pyc +0 -0
  100. package/unsloth_compiled_cache/__pycache__/UnslothOnlineDPOTrainer.cpython-312.pyc +0 -0
  101. package/unsloth_compiled_cache/__pycache__/UnslothPPOTrainer.cpython-312.pyc +0 -0
  102. package/unsloth_compiled_cache/__pycache__/UnslothPRMTrainer.cpython-312.pyc +0 -0
  103. package/unsloth_compiled_cache/__pycache__/UnslothRLOOTrainer.cpython-312.pyc +0 -0
  104. package/unsloth_compiled_cache/__pycache__/UnslothRewardTrainer.cpython-312.pyc +0 -0
  105. package/unsloth_compiled_cache/__pycache__/UnslothSFTTrainer.cpython-312.pyc +0 -0
  106. package/unsloth_compiled_cache/__pycache__/UnslothXPOTrainer.cpython-312.pyc +0 -0
  107. package/unsloth_compiled_cache/__pycache__/unsloth_compiled_module_qwen3_moe.cpython-312.pyc +0 -0
  108. package/unsloth_compiled_cache/__pycache__/unsloth_compiled_module_siglip.cpython-312.pyc +0 -0
  109. package/unsloth_compiled_cache/unsloth_compiled_module_qwen3_moe.py +726 -0
  110. package/unsloth_compiled_cache/unsloth_compiled_module_siglip.py +534 -0
package/unsloth_compiled_cache/UnslothCPOTrainer.py
@@ -0,0 +1,1806 @@
+ """
+ 2025.12.6
+ 2025.12.7
+ 4.57.1
+ 0.24.0
+ __UNSLOTH_VERSIONING__
+ """
+
+ # Unsloth auto generated code
+ # Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ from torch import Tensor
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+ from typing import Any, List, Optional, Tuple, Union, Dict, Set, Callable
+ from trl.trainer.cpo_trainer import (Any, AutoModelForCausalLM, BaseImageProcessor, BaseTrainer, CPOConfig, CPOTrainer, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, Literal, Optional, PartialState, Path, PeftModel, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, TrainerCallback, Union, add_bos_token_if_needed, add_eos_token_if_needed, autocast, defaultdict, disable_dropout_in_model, inspect, is_comet_available, is_peft_available, is_torch_fx_proxy, is_wandb_available, log_table_to_comet_experiment, logger, logging, maybe_apply_chat_template, maybe_extract_prompt, nn, np, nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_model_for_kbit_training, random, selective_log_softmax, textwrap, torch, warnings, F, Optional, PeftModel, PreTrainedModel, is_peft_available, logger, os, torch)
+
+
+ import os
+ from typing import *
+ from dataclasses import dataclass, field
+ from packaging.version import Version
+ import torch
+ import numpy as np
+ from contextlib import nullcontext
+ from torch.nn import functional as F
+ import inspect
+ from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling
+ from transformers.training_args import ParallelMode
+
+ # Wrap trainer with padding to right and enable training mode
+ # Also patches W&B since multiple runs must use wandb.finish()
+ import functools
+ from types import MethodType
+ def prepare_for_training_mode(f):
+     @functools.wraps(f)
+     def wrapper(self, *args, **kwargs):
+         # Enable training mode
+         if hasattr(self, 'model') and hasattr(self.model, "for_training"):
+             self.model.for_training()
+         output = f(self, *args, **kwargs)
+         # Return to inference mode
+         if hasattr(self, 'model') and hasattr(self.model, "for_inference"):
+             self.model.for_inference()
+         # Patch W&B to enable logging on future runs, otherwise it'll overwrite the first run
+         try:
+             import wandb
+             wandb.finish()
+         except:
+             pass
+         return output
+     return wrapper
+ pass
+
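For context, here is a minimal, self-contained sketch of the wrapper pattern `prepare_for_training_mode` implements (the W&B patching is omitted); the `DummyModel`/`DummyTrainer` classes are illustrative stand-ins, not package code:

import functools

def prepare_for_training_mode(f):
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if hasattr(self, "model") and hasattr(self.model, "for_training"):
            self.model.for_training()   # switch the model into training mode
        output = f(self, *args, **kwargs)
        if hasattr(self, "model") and hasattr(self.model, "for_inference"):
            self.model.for_inference()  # restore inference mode afterwards
        return output
    return wrapper

class DummyModel:
    def __init__(self):
        self.mode = "inference"
    def for_training(self):
        self.mode = "training"
    def for_inference(self):
        self.mode = "inference"

class DummyTrainer:
    def __init__(self):
        self.model = DummyModel()
    @prepare_for_training_mode
    def train(self):
        return self.model.mode  # mode observed while the wrapped call runs

t = DummyTrainer()
assert t.train() == "training"      # training mode inside the call
assert t.model.mode == "inference"  # inference mode restored afterwards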
+ torch_compile_options = {
+     "epilogue_fusion" : True,
+     "max_autotune" : False,
+     "shape_padding" : True,
+     "trace.enabled" : False,
+     "triton.cudagraphs" : False,
+ }
+
+ @torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
+ def chunked_selective_log_softmax(logits, index):
+     # Split into 4 chunks only
+     chunked_logits = torch.chunk(logits.reshape(-1, logits.shape[-1]), chunks = 4, dim = 0)
+     chunked_index = torch.chunk(index.reshape(-1), chunks = 4, dim = 0)
+     all_per_token_logps = []
+     # Below loop does the same as selective_log_softmax(chunk_logits, chunk_index)
+     for chunk_logits, chunk_index in zip(chunked_logits, chunked_index):
+         chunk_logits = chunk_logits.to(torch.float32)
+         selected_logits = torch.gather(chunk_logits, dim = -1, index = chunk_index.unsqueeze(-1)).squeeze(-1)
+         logsumexp_values = torch.logsumexp(chunk_logits, dim = -1)
+         per_token_logps = selected_logits - logsumexp_values
+         all_per_token_logps.append(per_token_logps)
+     pass
+     all_per_token_logps = torch.concat(all_per_token_logps)
+     all_per_token_logps = all_per_token_logps.reshape((logits.shape[0], logits.shape[1]))
+     return all_per_token_logps
+
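The chunking is purely a memory optimization; an unchunked computation gives the same values. A small sketch, assuming only CPU PyTorch, that checks the equivalence without `torch.compile`:

import torch

def selective_log_softmax_reference(logits, index):
    # Direct computation: log-softmax over the vocab, then gather the chosen ids
    logps = torch.log_softmax(logits.to(torch.float32), dim=-1)
    return torch.gather(logps, dim=-1, index=index.unsqueeze(-1)).squeeze(-1)

batch, seq, vocab = 2, 8, 16
logits = torch.randn(batch, seq, vocab)
index = torch.randint(0, vocab, (batch, seq))

ref = selective_log_softmax_reference(logits, index)

# Same computation, chunked exactly as in chunked_selective_log_softmax above
chunked_logits = torch.chunk(logits.reshape(-1, vocab).to(torch.float32), 4, dim=0)
chunked_index = torch.chunk(index.reshape(-1), 4, dim=0)
out = torch.cat([
    torch.gather(c, -1, i.unsqueeze(-1)).squeeze(-1) - torch.logsumexp(c, dim=-1)
    for c, i in zip(chunked_logits, chunked_index)
]).reshape(batch, seq)

assert torch.allclose(out, ref, atol=1e-5)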
+ def calculate_pad_tokens_in_prompt(
+     input_ids: torch.Tensor,
+     logits_to_keep: int,
+     pad_token_id: int
+ ) -> torch.Tensor:
+     """
+     Given a prompt tensor, returns the number of left-pad tokens in each sequence, so [pad, pad, pad, cat] = 3 tokens.
+     """
+     if logits_to_keep >= input_ids.shape[1]:
+         raise ValueError("logits_to_keep must be smaller than the sequence length.")
+
+     prompt_section = input_ids[:, :-logits_to_keep]
+
+     padding_mask = (prompt_section == pad_token_id)
+
+     pad_token_counts = padding_mask.sum(dim=1)
+
+     return pad_token_counts
+
+ def create_completion_attention_mask(
+     completion_input_ids: torch.Tensor,
+     left_pad_tokens_per_prompt: torch.Tensor,
+     max_left_pad: int,
+     pad_token_id: int
+ ) -> torch.Tensor:
+     """
+     Given a sequence [p, p, p, c, c, c, pad, pad, pad], where p are extra prompt tokens
+     left over from slicing the tensor, c are completion tokens, and pad are pad tokens,
+     build a completion mask that zeroes out the pad and p tokens: in this example,
+     [0, 0, 0, 1, 1, 1, 0, 0, 0].
+     """
+     batch_size, completion_len = completion_input_ids.shape
+     device = completion_input_ids.device
+
+     num_tokens_to_mask = max_left_pad - left_pad_tokens_per_prompt
+
+     indices = torch.arange(completion_len, device=device).unsqueeze(0)
+     shift_mask = indices >= num_tokens_to_mask.unsqueeze(1)
+
+     non_padding_mask = (completion_input_ids != pad_token_id)
+
+     final_mask = shift_mask & non_padding_mask
+
+     return final_mask
+
+ def left_pack_padding(tensor: torch.Tensor, pad_id: int) -> torch.Tensor:
+     """
+     Moves all padding tokens in each sequence of a batch to the right.
+     """
+     mask = (tensor != pad_id)
+     # Must use stable=True since a binary mask is unordered
+     sorted_indices = torch.argsort(mask, dim=1, descending=True, stable=True)
+     packed_tensor = torch.gather(tensor, 1, sorted_indices)
+     return packed_tensor
+
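A tiny worked example of two of the helpers above, using made-up token ids with pad id 0:

import torch

pad = 0
input_ids = torch.tensor([[pad, pad, 5, 6, 7, 8],
                          [pad,   3, 4, 5, 6, 7]])
logits_to_keep = 3  # the last 3 positions are the completion

# calculate_pad_tokens_in_prompt: count left pads in the prompt section
prompt_section = input_ids[:, :-logits_to_keep]
print((prompt_section == pad).sum(dim=1))  # tensor([2, 1])

# left_pack_padding: move every pad token to the right of each row
mask = (input_ids != pad)
sorted_indices = torch.argsort(mask, dim=1, descending=True, stable=True)
print(torch.gather(input_ids, 1, sorted_indices))
# tensor([[5, 6, 7, 8, 0, 0],
#         [3, 4, 5, 6, 7, 0]])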
+ def align_logprobs_with_mask(
+     logprob_tensor: torch.Tensor,
+     attention_mask: torch.Tensor,
+     pad_value: float = 0.0
+ ) -> torch.Tensor:
+     """
+     Aligns a log probability tensor with a given attention mask.
+     """
+
+     device = logprob_tensor.device
+     batch_size, logprob_seq_len = logprob_tensor.shape
+     mask_seq_len = attention_mask.shape[1]
+
+     padded_logprobs = torch.full(
+         attention_mask.shape,
+         fill_value=pad_value,
+         dtype=logprob_tensor.dtype,
+         device=device
+     )
+
+     left_pad_counts = torch.argmax(attention_mask, dim=1)
+
+     cols = torch.arange(logprob_seq_len, device=device)
+     dest_indices = left_pad_counts.unsqueeze(1) + cols
+
+     # Create destination row indices
+     # Shape: [batch_size, logprob_seq_len]
+     row_indices = torch.arange(batch_size, device=device).unsqueeze(1).expand_as(dest_indices)
+
+     # --- Filter out-of-bounds indices and perform assignment ---
+     # Create a mask to identify only the indices that are within the bounds
+     # of the target tensor's sequence length.
+     valid_mask = dest_indices < mask_seq_len
+
+     # Use this mask to select only the valid row indices, column indices,
+     # and the corresponding values from the logprob tensor.
+     # This flattens the selected elements into 1D tensors.
+     valid_rows = row_indices[valid_mask]
+     valid_cols = dest_indices[valid_mask]
+     valid_vals = logprob_tensor[valid_mask]
+
+     # Place the valid values into their correct positions in the padded tensor
+     # using a single, efficient advanced indexing operation.
+     padded_logprobs[valid_rows, valid_cols] = valid_vals
+
+     return padded_logprobs
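In effect, each row of log-probs is shifted right by its mask's left-pad count. A hand-rolled illustration with made-up values:

import torch

logps = torch.tensor([[-0.5, -1.0, -2.0]])        # one row of 3 log-probs
attention_mask = torch.tensor([[0, 0, 1, 1, 1]])  # that row has 2 left pads

left_pad = torch.argmax(attention_mask, dim=1)    # index of first 1 -> tensor([2])
out = torch.zeros(attention_mask.shape, dtype=logps.dtype)
dest = left_pad.unsqueeze(1) + torch.arange(logps.shape[1])  # tensor([[2, 3, 4]])
out[torch.tensor([0, 0, 0]), dest[0]] = logps[0]
print(out)  # tensor([[ 0.0000,  0.0000, -0.5000, -1.0000, -2.0000]])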
+ @dataclass
+ class UnslothCPOConfig(CPOConfig):
+     """
+
+     Configuration class for the [`CPOTrainer`].
+
+     This class includes only the parameters that are specific to CPO training. For a full list of training arguments,
+     please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
+     differ from those in [`~transformers.TrainingArguments`].
+
+     Using [`~transformers.HfArgumentParser`] we can turn this class into
+     [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
+     command line.
+
+     Parameters:
+         max_length (`int` or `None`, *optional*, defaults to `1024`):
+             Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
+             to use the default data collator.
+         max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
+             Maximum length of the prompt. This argument is required if you want to use the default data collator.
+         max_completion_length (`int`, *optional*):
+             Maximum length of the completion. This argument is required if you want to use the default data collator
+             and your model is an encoder-decoder.
+         beta (`float`, *optional*, defaults to `0.1`):
+             Parameter controlling the deviation from the reference model. Higher β means less deviation from the
+             reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in
+             the [paper](https://huggingface.co/papers/2310.12036).
+         label_smoothing (`float`, *optional*, defaults to `0.0`):
+             Label smoothing factor. This argument is required if you want to use the default data collator.
+         loss_type (`str`, *optional*, defaults to `"sigmoid"`):
+             Type of loss to use. Possible values are:
+
+             - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper.
+             - `"hinge"`: hinge loss on the normalized likelihood from the
+               [SLiC](https://huggingface.co/papers/2305.10425) paper.
+             - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper.
+             - `"simpo"`: SimPO loss from the [SimPO](https://huggingface.co/papers/2405.14734) paper.
+             - `"alphapo"`: AlphaPO loss from the [AlphaPO](https://huggingface.co/papers/2501.03884) paper. This
+               automatically sets `loss_type="simpo"` and `cpo_alpha=0.0`.
+
+         disable_dropout (`bool`, *optional*, defaults to `True`):
+             Whether to disable dropout in the model.
+         cpo_alpha (`float`, *optional*, defaults to `1.0`):
+             Weight of the BC regularizer in CPO training.
+         simpo_gamma (`float`, *optional*, defaults to `0.5`):
+             Target reward margin for the SimPO loss, used only when the `loss_type="simpo"`.
+         alpha (`float`, *optional*, defaults to `0.0`):
+             Alpha parameter that controls reward function shape across all loss types. When alpha=0 (default), uses
+             standard log probability rewards. When `alpha != 0`, applies AlphaPO transformation: `r = (1 - p^(-alpha))
+             / alpha` from the [AlphaPO paper](https://huggingface.co/papers/2501.03884). This parameter works with all
+             loss types.
+         label_pad_token_id (`int`, *optional*, defaults to `-100`):
+             Label pad token id. This argument is required if you want to use the default data collator.
+         padding_value (`int`, *optional*):
+             Padding value to use. If `None`, the padding value of the tokenizer is used.
+         truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
+             Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
+             This argument is required if you want to use the default data collator.
+         generate_during_eval (`bool`, *optional*, defaults to `False`):
+             If `True`, generates and logs completions from the model to W&B or Comet during evaluation.
+         is_encoder_decoder (`bool`, *optional*):
+             When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
+             you need to specify if the model returned by the callable is an encoder-decoder model.
+         model_init_kwargs (`dict[str, Any]`, *optional*):
+             Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
+             string.
+         dataset_num_proc (`int`, *optional*):
+             Number of processes to use for processing the dataset.
+
+     """
+     vllm_sampling_params: Optional[Any] = field(
+         default = None,
+         metadata = {'help': 'vLLM SamplingParams'},
+     )
+     unsloth_num_chunks : Optional[int] = field(
+         default = -1,
+         metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
+     )
+     max_seq_length : Optional[int] = field(
+         default = None,
+         metadata = {'help': 'Maximum sequence length to truncate to.'},
+     )
+     def __init__(
+         self,
+         output_dir = None,
+         overwrite_output_dir = None,
+         do_train = False,
+         do_eval = False,
+         do_predict = False,
+         eval_strategy = 'no',
+         prediction_loss_only = False,
+         per_device_train_batch_size = 4,
+         per_device_eval_batch_size = 4,
+         per_gpu_train_batch_size = None,
+         per_gpu_eval_batch_size = None,
+         gradient_accumulation_steps = 2,
+         eval_accumulation_steps = 2,
+         eval_delay = 0,
+         torch_empty_cache_steps = 250,
+         learning_rate = 5e-05,
+         weight_decay = 0.01,
+         adam_beta1 = 0.9,
+         adam_beta2 = 0.999,
+         adam_epsilon = 1e-08,
+         max_grad_norm = 1.0,
+         num_train_epochs = 3.0,
+         max_steps = -1,
+         lr_scheduler_type = 'linear',
+         warmup_ratio = 0.1,
+         warmup_steps = 0,
+         log_level = 'passive',
+         log_level_replica = 'warning',
+         log_on_each_node = True,
+         logging_dir = None,
+         logging_strategy = 'steps',
+         logging_first_step = False,
+         logging_steps = 1,
+         logging_nan_inf_filter = False,
+         save_strategy = 'steps',
+         save_steps = 500,
+         save_total_limit = None,
+         save_safetensors = True,
+         save_on_each_node = False,
+         save_only_model = False,
+         restore_callback_states_from_checkpoint = False,
+         no_cuda = False,
+         use_cpu = False,
+         use_mps_device = False,
+         seed = 3407,
+         data_seed = 3407,
+         jit_mode_eval = False,
+         bf16 = False,
+         fp16 = False,
+         fp16_opt_level = 'O1',
+         half_precision_backend = 'auto',
+         bf16_full_eval = False,
+         fp16_full_eval = False,
+         tf32 = None,
+         local_rank = -1,
+         ddp_backend = None,
+         tpu_num_cores = None,
+         tpu_metrics_debug = False,
+         debug = '',
+         dataloader_drop_last = False,
+         eval_steps = None,
+         dataloader_num_workers = 0,
+         dataloader_prefetch_factor = None,
+         past_index = -1,
+         run_name = None,
+         disable_tqdm = None,
+         remove_unused_columns = True,
+         label_names = None,
+         load_best_model_at_end = False,
+         metric_for_best_model = None,
+         greater_is_better = None,
+         ignore_data_skip = False,
+         fsdp = None,
+         fsdp_min_num_params = 0,
+         fsdp_config = None,
+         fsdp_transformer_layer_cls_to_wrap = None,
+         accelerator_config = None,
+         parallelism_config = None,
+         deepspeed = None,
+         label_smoothing_factor = 0.0,
+         optim = 'adamw_8bit',
+         optim_args = None,
+         adafactor = False,
+         group_by_length = False,
+         length_column_name = 'length',
+         report_to = 'none',
+         project = 'huggingface',
+         trackio_space_id = 'trackio',
+         ddp_find_unused_parameters = None,
+         ddp_bucket_cap_mb = None,
+         ddp_broadcast_buffers = None,
+         dataloader_pin_memory = True,
+         dataloader_persistent_workers = False,
+         skip_memory_metrics = True,
+         use_legacy_prediction_loop = False,
+         push_to_hub = False,
+         resume_from_checkpoint = None,
+         hub_model_id = None,
+         hub_strategy = 'every_save',
+         hub_token = None,
+         hub_private_repo = None,
+         hub_always_push = False,
+         hub_revision = None,
+         gradient_checkpointing = True,
+         gradient_checkpointing_kwargs = None,
+         include_inputs_for_metrics = False,
+         eval_do_concat_batches = True,
+         fp16_backend = 'auto',
+         push_to_hub_model_id = None,
+         push_to_hub_organization = None,
+         push_to_hub_token = None,
+         mp_parameters = '',
+         auto_find_batch_size = False,
+         full_determinism = False,
+         torchdynamo = None,
+         ray_scope = 'last',
+         ddp_timeout = 1800,
+         torch_compile = False,
+         torch_compile_backend = None,
+         torch_compile_mode = None,
+         include_tokens_per_second = False,
+         include_num_input_tokens_seen = False,
+         neftune_noise_alpha = None,
+         optim_target_modules = None,
+         batch_eval_metrics = False,
+         eval_on_start = False,
+         use_liger_kernel = False,
+         liger_kernel_config = None,
+         eval_use_gather_object = False,
+         average_tokens_across_devices = True,
+         max_length = 1024,
+         max_prompt_length = 512,
+         max_completion_length = None,
+         beta = 0.1,
+         label_smoothing = 0.0,
+         loss_type = 'sigmoid',
+         disable_dropout = True,
+         cpo_alpha = 1.0,
+         simpo_gamma = 0.5,
+         alpha = 0.0,
+         label_pad_token_id = -100,
+         padding_value = None,
+         truncation_mode = 'keep_end',
+         generate_during_eval = False,
+         is_encoder_decoder = None,
+         model_init_kwargs = None,
+         dataset_num_proc = None,
+         vllm_sampling_params = None,
+         unsloth_num_chunks = -1,
+         max_seq_length = None,
+         **kwargs,
+     ):
+         if learning_rate < 1e-7: print(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
+         if learning_rate > 1: print(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
+         if output_dir is None and save_strategy == 'steps' and save_steps == 500:
+             output_dir = 'unsloth_training_checkpoints'
+             save_strategy = 'no'
+         if dataset_num_proc is None:
+             from multiprocessing import cpu_count
+             dataset_num_proc = min(max(cpu_count()+4, 2), 64)
+
+         super().__init__(
+             output_dir = output_dir,
+             overwrite_output_dir = overwrite_output_dir,
+             do_train = do_train,
+             do_eval = do_eval,
+             do_predict = do_predict,
+             eval_strategy = eval_strategy,
+             prediction_loss_only = prediction_loss_only,
+             per_device_train_batch_size = per_device_train_batch_size,
+             per_device_eval_batch_size = per_device_eval_batch_size,
+             per_gpu_train_batch_size = per_gpu_train_batch_size,
+             per_gpu_eval_batch_size = per_gpu_eval_batch_size,
+             gradient_accumulation_steps = gradient_accumulation_steps,
+             eval_accumulation_steps = eval_accumulation_steps,
+             eval_delay = eval_delay,
+             torch_empty_cache_steps = torch_empty_cache_steps,
+             learning_rate = learning_rate,
+             weight_decay = weight_decay,
+             adam_beta1 = adam_beta1,
+             adam_beta2 = adam_beta2,
+             adam_epsilon = adam_epsilon,
+             max_grad_norm = max_grad_norm,
+             num_train_epochs = num_train_epochs,
+             max_steps = max_steps,
+             lr_scheduler_type = lr_scheduler_type,
+             warmup_ratio = warmup_ratio,
+             warmup_steps = warmup_steps,
+             log_level = log_level,
+             log_level_replica = log_level_replica,
+             log_on_each_node = log_on_each_node,
+             logging_dir = logging_dir,
+             logging_strategy = logging_strategy,
+             logging_first_step = logging_first_step,
+             logging_steps = logging_steps,
+             logging_nan_inf_filter = logging_nan_inf_filter,
+             save_strategy = save_strategy,
+             save_steps = save_steps,
+             save_total_limit = save_total_limit,
+             save_safetensors = save_safetensors,
+             save_on_each_node = save_on_each_node,
+             save_only_model = save_only_model,
+             restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
+             no_cuda = no_cuda,
+             use_cpu = use_cpu,
+             use_mps_device = use_mps_device,
+             seed = seed,
+             data_seed = data_seed,
+             jit_mode_eval = jit_mode_eval,
+             bf16 = bf16,
+             fp16 = fp16,
+             fp16_opt_level = fp16_opt_level,
+             half_precision_backend = half_precision_backend,
+             bf16_full_eval = bf16_full_eval,
+             fp16_full_eval = fp16_full_eval,
+             tf32 = tf32,
+             local_rank = local_rank,
+             ddp_backend = ddp_backend,
+             tpu_num_cores = tpu_num_cores,
+             tpu_metrics_debug = tpu_metrics_debug,
+             debug = debug,
+             dataloader_drop_last = dataloader_drop_last,
+             eval_steps = eval_steps,
+             dataloader_num_workers = dataloader_num_workers,
+             dataloader_prefetch_factor = dataloader_prefetch_factor,
+             past_index = past_index,
+             run_name = run_name,
+             disable_tqdm = disable_tqdm,
+             remove_unused_columns = remove_unused_columns,
+             label_names = label_names,
+             load_best_model_at_end = load_best_model_at_end,
+             metric_for_best_model = metric_for_best_model,
+             greater_is_better = greater_is_better,
+             ignore_data_skip = ignore_data_skip,
+             fsdp = fsdp,
+             fsdp_min_num_params = fsdp_min_num_params,
+             fsdp_config = fsdp_config,
+             fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
+             accelerator_config = accelerator_config,
+             parallelism_config = parallelism_config,
+             deepspeed = deepspeed,
+             label_smoothing_factor = label_smoothing_factor,
+             optim = optim,
+             optim_args = optim_args,
+             adafactor = adafactor,
+             group_by_length = group_by_length,
+             length_column_name = length_column_name,
+             report_to = report_to,
+             project = project,
+             trackio_space_id = trackio_space_id,
+             ddp_find_unused_parameters = ddp_find_unused_parameters,
+             ddp_bucket_cap_mb = ddp_bucket_cap_mb,
+             ddp_broadcast_buffers = ddp_broadcast_buffers,
+             dataloader_pin_memory = dataloader_pin_memory,
+             dataloader_persistent_workers = dataloader_persistent_workers,
+             skip_memory_metrics = skip_memory_metrics,
+             use_legacy_prediction_loop = use_legacy_prediction_loop,
+             push_to_hub = push_to_hub,
+             resume_from_checkpoint = resume_from_checkpoint,
+             hub_model_id = hub_model_id,
+             hub_strategy = hub_strategy,
+             hub_token = hub_token,
+             hub_private_repo = hub_private_repo,
+             hub_always_push = hub_always_push,
+             hub_revision = hub_revision,
+             gradient_checkpointing = gradient_checkpointing,
+             gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
+             include_inputs_for_metrics = include_inputs_for_metrics,
+             eval_do_concat_batches = eval_do_concat_batches,
+             fp16_backend = fp16_backend,
+             push_to_hub_model_id = push_to_hub_model_id,
+             push_to_hub_organization = push_to_hub_organization,
+             push_to_hub_token = push_to_hub_token,
+             mp_parameters = mp_parameters,
+             auto_find_batch_size = auto_find_batch_size,
+             full_determinism = full_determinism,
+             torchdynamo = torchdynamo,
+             ray_scope = ray_scope,
+             ddp_timeout = ddp_timeout,
+             torch_compile = torch_compile,
+             torch_compile_backend = torch_compile_backend,
+             torch_compile_mode = torch_compile_mode,
+             include_tokens_per_second = include_tokens_per_second,
+             include_num_input_tokens_seen = include_num_input_tokens_seen,
+             neftune_noise_alpha = neftune_noise_alpha,
+             optim_target_modules = optim_target_modules,
+             batch_eval_metrics = batch_eval_metrics,
+             eval_on_start = eval_on_start,
+             use_liger_kernel = use_liger_kernel,
+             liger_kernel_config = liger_kernel_config,
+             eval_use_gather_object = eval_use_gather_object,
+             average_tokens_across_devices = average_tokens_across_devices,
+             max_length = max_length,
+             max_prompt_length = max_prompt_length,
+             max_completion_length = max_completion_length,
+             beta = beta,
+             label_smoothing = label_smoothing,
+             loss_type = loss_type,
+             disable_dropout = disable_dropout,
+             cpo_alpha = cpo_alpha,
+             simpo_gamma = simpo_gamma,
+             alpha = alpha,
+             label_pad_token_id = label_pad_token_id,
+             padding_value = padding_value,
+             truncation_mode = truncation_mode,
+             generate_during_eval = generate_during_eval,
+             is_encoder_decoder = is_encoder_decoder,
+             model_init_kwargs = model_init_kwargs,
+             dataset_num_proc = dataset_num_proc,**kwargs)
+         self.vllm_sampling_params = vllm_sampling_params
+         self.unsloth_num_chunks = unsloth_num_chunks
+         self.max_seq_length = max_seq_length
+ pass
+
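One behavioral note on the constructor above: when `dataset_num_proc` is left as `None`, it is resolved from the machine's CPU count. A sketch of that arithmetic:

from multiprocessing import cpu_count

# cpu_count() + 4, clamped to the range [2, 64]
dataset_num_proc = min(max(cpu_count() + 4, 2), 64)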
+ class _UnslothCPOTrainer(BaseTrainer):
+     r""""""
+
+     _tag_names = ["trl", "cpo"]
+     _name = "CPO"
+     _paper = {
+         "title": "Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation",
+         "id": "2401.08417",
+         # docstyle-ignore
+         "citation": textwrap.dedent("""\
+             @inproceedings{xu2024contrastive,
+                 title = {{Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation}},
+                 author = {Haoran Xu and Amr Sharaf and Yunmo Chen and Weiting Tan and Lingfeng Shen and Benjamin Van Durme and Kenton Murray and Young Jin Kim},
+                 year = 2024,
+                 booktitle = {Forty-first International Conference on Machine Learning, {ICML} 2024, Vienna, Austria, July 21-27, 2024},
+                 publisher = {OpenReview.net},
+                 url = {https://openreview.net/forum?id=51iwkioZpn}
+             }"""),
+     }
+
+     def __init__(
+         self,
+         model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
+         args: Optional[CPOConfig] = None,
+         data_collator: Optional[DataCollator] = None,
+         train_dataset: Optional[Dataset] = None,
+         eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
+         processing_class: Optional[
+             Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
+         ] = None,
+         model_init: Optional[Callable[[], PreTrainedModel]] = None,
+         callbacks: Optional[list[TrainerCallback]] = None,
+         optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
+         preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
+         peft_config: Optional[dict] = None,
+         compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
+     ):
+         if not os.environ.get("TRL_EXPERIMENTAL_SILENCE"):
+             warnings.warn(
+                 "This trainer will soon be moved to trl.experimental and is a candidate for removal. If you rely on "
+                 "it and want it to remain, please share your comments here: "
+                 "https://github.com/huggingface/trl/issues/4223. Silence this warning by setting environment variable "
+                 "TRL_EXPERIMENTAL_SILENCE=1."
+             )
+         if args.model_init_kwargs is None:
+             model_init_kwargs = {}
+         elif not isinstance(model, str):
+             raise ValueError("You passed model_kwargs to the CPOTrainer. But your model is already instantiated.")
+         else:
+             model_init_kwargs = args.model_init_kwargs
+             dtype = model_init_kwargs.get("dtype")
+             if dtype is not None:
+                 # Convert to `torch.dtype` if an str is passed
+                 if isinstance(dtype, str) and dtype != "auto":
+                     dtype = getattr(torch, dtype)
+                 if dtype != "auto" and not isinstance(dtype, torch.dtype):
+                     raise ValueError(
+                         f"Invalid `dtype` passed to the CPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {dtype}."
+                     )
+                 model_init_kwargs["dtype"] = dtype
+
+         if isinstance(model, str):
+             model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
+
+         # Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
+         # has been called in order to properly call autocast if needed.
+         self._peft_has_been_casted_to_bf16 = False
+
+         if not is_peft_available() and peft_config is not None:
+             raise ValueError(
+                 "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
+             )
+         elif is_peft_available() and peft_config is not None:
+             # if model is a peft model and we have a peft_config, we merge and unload it first
+             if isinstance(model, PeftModel):
+                 model = model.merge_and_unload()
+
+             if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
+                 _support_gc_kwargs = hasattr(
+                     args, "gradient_checkpointing_kwargs"
+                 ) and "gradient_checkpointing_kwargs" in list(
+                     inspect.signature(prepare_model_for_kbit_training).parameters
+                 )
+
+                 prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
+
+                 if _support_gc_kwargs:
+                     prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
+
+                 model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
+             elif args.gradient_checkpointing:
+                 # For backward compatibility with older versions of transformers
+                 if hasattr(model, "enable_input_require_grads"):
+                     model.enable_input_require_grads()
+                 else:
+
+                     def make_inputs_require_grad(module, input, output):
+                         output.requires_grad_(True)
+
+                     model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+             # get peft model with the given config
+             model = model
+             if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
+                 peft_module_casting_to_bf16(model)
+                 # If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
+                 self._peft_has_been_casted_to_bf16 = True
+
+         # For models that use gradient_checkpointing, we need to attach a hook that enables input
+         # to explicitly have `requires_grad=True`, otherwise training will either silently
+         # fail or completely fail.
+         elif args.gradient_checkpointing:
+             # For backward compatibility with older versions of transformers
+             if hasattr(model, "enable_input_require_grads"):
+                 model.enable_input_require_grads()
+             else:
+
+                 def make_inputs_require_grad(module, input, output):
+                     output.requires_grad_(True)
+
+                 model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+         if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
+             raise ValueError(
+                 "`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
+                 " Please install `wandb` or `comet-ml` to resolve."
+             )
+
+         if model is not None:
+             self.is_encoder_decoder = model.config.is_encoder_decoder
+         elif args.is_encoder_decoder is None:
+             raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
+         else:
+             self.is_encoder_decoder = args.is_encoder_decoder
+
+         if self.is_encoder_decoder:
+             self.decoder_start_token_id = model.config.decoder_start_token_id
+             self.pad_token_id = model.config.pad_token_id
+
+         if processing_class is None:
+             raise ValueError("processing_class must be specified to tokenize a CPO dataset.")
+         if args.max_length is None:
+             logger.warning(
+                 "`max_length` is not set in the CPOConfig's init"
+                 " it will default to `512` by default, but you should do it yourself in the future.",
+             )
+             max_length = 512
+         else:
+             max_length = args.max_length
+         if args.max_prompt_length is None:
+             logger.warning(
+                 "`max_prompt_length` is not set in the CPOConfig's init"
+                 " it will default to `128` by default, but you should do it yourself in the future.",
+             )
+             max_prompt_length = 128
+         else:
+             max_prompt_length = args.max_prompt_length
+
+         if not max_prompt_length < max_length:
+             raise ValueError(
+                 f"max_prompt_length ({max_prompt_length}) should be strictly less than max_length ({max_length})."
+             )
+
+         if args.max_completion_length is None and self.is_encoder_decoder:
+             logger.warning(
+                 "When using an encoder decoder architecture, you should set `max_completion_length` in the CPOConfig's init"
+                 " it will default to `128` by default, but you should do it yourself in the future.",
+             )
+             max_completion_length = 128
+         else:
+             max_completion_length = args.max_completion_length
+
+         if data_collator is None:
+             data_collator = DPODataCollatorWithPadding(
+                 pad_token_id=processing_class.pad_token_id,
+                 label_pad_token_id=args.label_pad_token_id,
+                 is_encoder_decoder=self.is_encoder_decoder,
+             )
+
+             if args.remove_unused_columns:
+                 args.remove_unused_columns = False
+                 # warn users
+                 logger.warning(
+                     "When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments"
+                     " we have set it for you, but you should do it yourself in the future.",
+                 )
+
+             self.use_dpo_data_collator = True
+         else:
+             self.use_dpo_data_collator = False
+
+         # Disable dropout in the model
+         if args.disable_dropout:
+             disable_dropout_in_model(model)
+
+         self.max_length = max_length
+         self.generate_during_eval = args.generate_during_eval
+         self.label_pad_token_id = args.label_pad_token_id
+         self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
+         self.max_prompt_length = max_prompt_length
+         self.truncation_mode = args.truncation_mode
+         self.max_completion_length = max_completion_length
+         self.processing_class = processing_class
+
+         if args.loss_type in ["hinge", "ipo"] and args.label_smoothing > 0:
+             logger.warning(
+                 f"You are using the {args.loss_type} loss type that does not support label smoothing. The "
+                 "`label_smoothing` parameter will be ignored. Set `label_smoothing` to `0.0` to remove this warning.",
+             )
+         if args.loss_type == "kto_pair":
+             raise ValueError("Support for kto_pair has been removed in CPOTrainer. Please use KTOTrainer.")
+
+         self.beta = args.beta
+         self.label_smoothing = args.label_smoothing
+         self.loss_type = args.loss_type
+         self.cpo_alpha = args.cpo_alpha
+         self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
+         self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
+         if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
+             logger.warning(
+                 "You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
+                 "`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
+                 "greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
+                 "loss.",
+             )
+
+         if args.loss_type == "simpo":
+             self.simpo_gamma = args.simpo_gamma
+
+         # AlphaPO parameter for reward shaping
+         self.alpha = args.alpha
+
+         self._stored_metrics = defaultdict(lambda: defaultdict(list))
+
+         # The trainer estimates the number of FLOPs [floating-point operations] using the number of elements in the
+         # input tensor associated with the key "input_ids". However, in CPO, the sampled data does not include the
+         # "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and
+         # "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens
+         # of the input, floating-point operations will not be computed." To suppress this warning, we set the
+         # "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
+         # that the warning has already been issued.
+         model.warnings_issued["estimate_tokens"] = True
+
+         # Compute that only on the main process for faster data processing.
+         # see: https://github.com/huggingface/trl/pull/1255
+         with PartialState().main_process_first():
+             # Extract the prompt if needed, and apply the chat template if needed
+             train_dataset = train_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
+             train_dataset = train_dataset.map(
+                 maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc
+             )
+             if eval_dataset is not None:
+                 eval_dataset = eval_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
+                 eval_dataset = eval_dataset.map(
+                     maybe_apply_chat_template,
+                     fn_kwargs={"tokenizer": processing_class},
+                     num_proc=args.dataset_num_proc,
+                 )
+
+             # tokenize the dataset
+             train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
+             if eval_dataset is not None:
+                 eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
+
+         super().__init__(
+             model=model,
+             args=args,
+             data_collator=data_collator,
+             train_dataset=train_dataset,
+             eval_dataset=eval_dataset,
+             processing_class=processing_class,
+             model_init=model_init,
+             compute_metrics=compute_metrics,
+             callbacks=callbacks,
+             optimizers=optimizers,
+             preprocess_logits_for_metrics=preprocess_logits_for_metrics,
+         )
+
+         # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
+         # model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
+         # self.model_accepts_loss_kwargs to False to enable scaling.
+         self.model_accepts_loss_kwargs = False
+
+         # Add tags for models that have been loaded with the correct transformers version
+         if hasattr(self.model, "add_model_tags"):
+             self.model.add_model_tags(self._tag_names)
+
+         if not hasattr(self, "accelerator"):
+             raise AttributeError(
+                 "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
+             )
+
+     def build_tokenized_answer(self, prompt, answer):
+         """
+         Llama tokenizer does not satisfy `enc(a + b) = enc(a) + enc(b)`. It does ensure `enc(a + b) = enc(a) + enc(a +
+         b)[len(enc(a)):]`. Reference:
+         https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
+         """
+
+         full_tokenized = self.processing_class(prompt + answer, add_special_tokens=False)
+         prompt_input_ids = self.processing_class(prompt, add_special_tokens=False)["input_ids"]
+
+         answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :]
+         answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :]
+
+         # Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
+         full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids])
+
+         # Prepare input tokens for token by token comparison
+         full_input_ids = np.array(full_tokenized["input_ids"])
+
+         if len(full_input_ids) != len(full_concat_input_ids):
+             raise ValueError("Prompt input ids and answer input ids should have the same length.")
+
+         # On some tokenizers, like Llama-2 tokenizer, there are occasions where tokens
+         # can be merged together when tokenizing prompt+answer. This could result
+         # on the last token from the prompt being different when tokenized on its own
+         # vs when done as prompt+answer.
+         response_token_ids_start_idx = len(prompt_input_ids)
+
+         # If tokenized prompt is different than both prompt+answer, then it means the
+         # last token has changed due to merging.
+         if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]:
+             response_token_ids_start_idx -= 1
+
+         prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx]
+         prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx]
+
+         if len(prompt_input_ids) != len(prompt_attention_mask):
+             raise ValueError("Prompt input ids and attention mask should have the same length.")
+
+         answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:]
+         answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:]
+
+         return dict(
+             prompt_input_ids=prompt_input_ids,
+             prompt_attention_mask=prompt_attention_mask,
+             input_ids=answer_input_ids,
+             attention_mask=answer_attention_mask,
+         )
+
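A condensed sketch of the boundary-repair logic above; the `tokenizer` argument stands for any Hugging Face tokenizer, and nothing here is package code:

def split_answer(tokenizer, prompt, answer):
    full = tokenizer(prompt + answer, add_special_tokens=False)["input_ids"]
    prompt_ids = tokenizer(prompt, add_special_tokens=False)["input_ids"]
    start = len(prompt_ids)
    # If the prompt's last token merged with the answer's first token,
    # shrink the boundary by one so prompt + answer reproduces `full`.
    if prompt_ids != full[:start]:
        start -= 1
    return full[:start], full[start:]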
936
+ def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]] = None) -> dict:
937
+ """Tokenize a single row from a CPO specific dataset.
938
+
939
+ At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation in case the prompt +
940
+ chosen or prompt + rejected responses is/are too long. First we truncate the prompt; if we're still too long,
941
+ we truncate the chosen/rejected.
942
+
943
+ We also create the labels for the chosen/rejected responses, which are of length equal to the sum of the length
944
+ of the prompt and the chosen/rejected response, with label_pad_token_id for the prompt tokens.
945
+ """
946
+ batch = {}
947
+ prompt = feature["prompt"]
948
+ chosen = feature["chosen"]
949
+ rejected = feature["rejected"]
950
+
951
+ if not self.is_encoder_decoder:
952
+ # Check issues below for more details
953
+ # 1. https://github.com/huggingface/trl/issues/907
954
+ # 2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
955
+ # 3. https://github.com/LianjiaTech/BELLE/issues/337
956
+
957
+ if not isinstance(prompt, str):
958
+ raise ValueError(f"prompt should be an str but got {type(prompt)}")
959
+ prompt_tokens = self.processing_class(prompt, add_special_tokens=False)
960
+ prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()}
961
+
962
+ if not isinstance(chosen, str):
963
+ raise ValueError(f"chosen should be an str but got {type(chosen)}")
964
+ chosen_tokens = self.build_tokenized_answer(prompt, chosen)
965
+
966
+ if not isinstance(rejected, str):
967
+ raise ValueError(f"rejected should be an str but got {type(rejected)}")
968
+ rejected_tokens = self.build_tokenized_answer(prompt, rejected)
969
+
970
+ # Last prompt token might get merged by tokenizer and
971
+ # it should not be included for generation if that happens
972
+ prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"])
973
+
974
+ chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"])
975
+ rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"])
976
+ prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids)
977
+
978
+ for k, v in prompt_tokens.items():
979
+ prompt_tokens[k] = v[:prompt_len_input_ids]
980
+
981
+ # Make sure prompts only have one different token at most an
982
+ # and length only differs by 1 at most
983
+ num_diff_tokens = sum(
984
+ a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])
985
+ )
986
+ num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids)
987
+ if num_diff_tokens > 1 or num_diff_len > 1:
988
+ raise ValueError(
989
+ "Chosen and rejected prompt_input_ids might only differ on the "
990
+ "last token due to tokenizer merge ops."
991
+ )
992
+
993
+ # add BOS token to head of prompt. Avoid adding if it's already there
994
+ prompt_tokens, chosen_tokens, rejected_tokens = add_bos_token_if_needed(
995
+ self.processing_class.bos_token_id,
996
+ prompt_len_input_ids,
997
+ prompt_tokens,
998
+ chosen_prompt_len_input_ids,
999
+ chosen_tokens,
1000
+ rejected_prompt_len_input_ids,
1001
+ rejected_tokens,
1002
+ )
1003
+
1004
+             # add EOS token to end of answer. Avoid adding if it's already there
+             chosen_tokens, rejected_tokens = add_eos_token_if_needed(
+                 self.processing_class.eos_token_id, chosen_tokens, rejected_tokens
+             )
+
+             longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))
+
+             # if combined sequence is too long, truncate the prompt
+             for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
+                 if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
+                     if self.truncation_mode == "keep_start":
+                         for k in ["prompt_input_ids", "prompt_attention_mask"]:
+                             answer_tokens[k] = answer_tokens[k][: self.max_prompt_length]
+                     elif self.truncation_mode == "keep_end":
+                         for k in ["prompt_input_ids", "prompt_attention_mask"]:
+                             answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :]
+                     else:
+                         raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")
+
+             # if that's still too long, truncate the response
+             for answer_tokens in [chosen_tokens, rejected_tokens]:
+                 if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
+                     for k in ["input_ids", "attention_mask"]:
+                         answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length]
+
+             # Create labels
+             chosen_sequence_tokens = {
+                 k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"]
+             }
+             rejected_sequence_tokens = {
+                 k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"]
+             }
+             chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
+             chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [
+                 self.label_pad_token_id
+             ] * len(chosen_tokens["prompt_input_ids"])
+             rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
+             rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [
+                 self.label_pad_token_id
+             ] * len(rejected_tokens["prompt_input_ids"])
+
+             for k, toks in {
+                 "chosen_": chosen_sequence_tokens,
+                 "rejected_": rejected_sequence_tokens,
+                 "": prompt_tokens,
+             }.items():
+                 for type_key, tokens in toks.items():
+                     if type_key == "token_type_ids":
+                         continue
+                     batch[f"{k}{type_key}"] = tokens
+
+         else:
+             chosen_tokens = self.processing_class(
+                 chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
+             )
+             rejected_tokens = self.processing_class(
+                 rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
+             )
+             prompt_tokens = self.processing_class(
+                 prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True
+             )
+
+             batch["chosen_labels"] = chosen_tokens["input_ids"]
+             batch["rejected_labels"] = rejected_tokens["input_ids"]
+             batch["prompt_input_ids"] = prompt_tokens["input_ids"]
+             batch["prompt_attention_mask"] = prompt_tokens["attention_mask"]
+
+             if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
+                 batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
+                     labels=torch.tensor(batch["rejected_labels"])
+                 )
+                 batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
+                     labels=torch.tensor(batch["chosen_labels"])
+                 )
+
+         return batch
+
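+     # Worked example of the truncation budget above (illustrative numbers):
+     # with max_length=512 and max_prompt_length=128, an overlong prompt keeps
+     # its first 128 tokens ("keep_start") or its last 128 ("keep_end"), and
+     # each response is then capped at 512 - 128 = 384 tokens, so prompt plus
+     # the longer response always fits within max_length.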
+     @staticmethod
+     def concatenated_inputs(
+         batch: dict[str, Union[list, torch.LongTensor]],
+         is_encoder_decoder: bool = False,
+         label_pad_token_id: int = -100,
+         padding_value: int = 0,
+         device: Optional[torch.device] = None,
+     ) -> dict[str, torch.LongTensor]:
+         """Concatenate the chosen and rejected inputs into a single tensor.
+
+         Args:
+             batch:
+                 A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors
+                 of shape (batch_size, sequence_length).
+             is_encoder_decoder:
+                 Whether the model is an encoder-decoder model.
+             label_pad_token_id:
+                 The label pad token id.
+             padding_value:
+                 The padding value to use for the concatenated input_ids.
+             device:
+                 The device for the concatenated inputs.
+
+         Returns:
+             A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
+         """
+         concatenated_batch = {}
+
+         if is_encoder_decoder:
+             max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1])
+         else:
+             max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])
+
+         for k in batch:
+             if k.startswith("chosen") and isinstance(batch[k], torch.Tensor):
+                 if "labels" in k or is_encoder_decoder:
+                     pad_value = label_pad_token_id
+                 elif k.endswith("_input_ids"):
+                     pad_value = padding_value
+                 elif k.endswith("_attention_mask"):
+                     pad_value = 0
+                 concatenated_key = k.replace("chosen", "concatenated")
+                 concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
+         for k in batch:
+             if k.startswith("rejected") and isinstance(batch[k], torch.Tensor):
+                 if "labels" in k or is_encoder_decoder:
+                     pad_value = label_pad_token_id
+                 elif k.endswith("_input_ids"):
+                     pad_value = padding_value
+                 elif k.endswith("_attention_mask"):
+                     pad_value = 0
+                 concatenated_key = k.replace("rejected", "concatenated")
+                 concatenated_batch[concatenated_key] = torch.cat(
+                     (
+                         concatenated_batch[concatenated_key],
+                         pad_to_length(batch[k], max_length, pad_value=pad_value),
+                     ),
+                     dim=0,
+                 ).to(device=device)
+
+         if is_encoder_decoder:
+             concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device)
+             concatenated_batch["concatenated_attention_mask"] = (
+                 batch["prompt_attention_mask"].repeat(2, 1).to(device=device)
+             )
+
+         return concatenated_batch
+
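+     # Shape sketch: for a batch of B preference pairs, the decoder-only path
+     # returns concatenated_* tensors of shape (2*B, max_length), with the B
+     # chosen rows stacked before the B rejected rows; the encoder-decoder path
+     # instead repeats the prompt twice as the encoder input.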
+     def cpo_loss(
+         self,
+         policy_chosen_logps: torch.FloatTensor,
+         policy_rejected_logps: torch.FloatTensor,
+     ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
+         """Compute the CPO loss for a batch of policy model log probabilities.
+
+         Args:
+             policy_chosen_logps:
+                 Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
+             policy_rejected_logps:
+                 Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
+
+         Returns:
+             A tuple of three tensors: (losses, chosen_rewards, rejected_rewards). The losses tensor contains the CPO
+             loss for each example in the batch. The chosen_rewards and rejected_rewards tensors contain the rewards for
+             the chosen and rejected responses, respectively.
+         """
+         # Apply AlphaPO reward transformation if alpha != 0
+         if self.alpha != 0.0:
+             # Compute probabilities
+             chosen_probs = torch.exp(policy_chosen_logps)
+             rejected_probs = torch.exp(policy_rejected_logps)
+
+             # Apply AlphaPO transformation: r = (1 - p^(-alpha)) / alpha
+             policy_chosen_rewards = (1 - chosen_probs.pow(-self.alpha)) / self.alpha
+             policy_rejected_rewards = (1 - rejected_probs.pow(-self.alpha)) / self.alpha
+
+             logits = (policy_chosen_rewards - policy_rejected_rewards).to(self.accelerator.device)
+         else:
+             # Standard log probability rewards when alpha = 0
+             logits = (policy_chosen_logps - policy_rejected_logps).to(self.accelerator.device)
+
+         # The beta is a temperature parameter for the CPO loss, typically something in the range of 0.1 to 0.5.
+         # We ignore the reference model as beta -> 0. The label_smoothing parameter encodes our uncertainty about
+         # the labels and calculates a conservative CPO loss.
+
+         if self.loss_type == "simpo":
+             gamma_logratios = self.simpo_gamma / self.beta
+             logits = logits - gamma_logratios
+             # This reduces to Equation 3 from the CPO paper when label_smoothing -> 0.
+             losses = (
+                 -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
+                 - F.logsigmoid(-self.beta * logits) * self.label_smoothing
+             )
+         elif self.loss_type == "sigmoid":
+             # This reduces to Equation 3 from the CPO paper when label_smoothing -> 0.
+             losses = (
+                 -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
+                 - F.logsigmoid(-self.beta * logits) * self.label_smoothing
+             )
+         elif self.loss_type == "hinge":
+             losses = torch.relu(1 - self.beta * logits)
+         elif self.loss_type == "ipo":
+             # eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper.
+             losses = (logits - 1 / (2 * self.beta)) ** 2
+         else:
+             raise ValueError(
+                 f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'simpo']"
+             )
+
+         # Calculate rewards for logging
+         if self.alpha != 0.0:
+             # When using AlphaPO transformation, use the transformed rewards
+             chosen_rewards = self.beta * policy_chosen_rewards.to(self.accelerator.device).detach()
+             rejected_rewards = self.beta * policy_rejected_rewards.to(self.accelerator.device).detach()
+         else:
+             # Standard log probability rewards
+             chosen_rewards = self.beta * (policy_chosen_logps.to(self.accelerator.device)).detach()
+             rejected_rewards = self.beta * (policy_rejected_logps.to(self.accelerator.device)).detach()
+
+         return losses, chosen_rewards, rejected_rewards
+
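+     # In effect, for loss_type="sigmoid" the per-example loss is
+     #     -(1 - eps) * logsigmoid(beta * d) - eps * logsigmoid(-beta * d)
+     # with d = logp(chosen) - logp(rejected) and eps = label_smoothing;
+     # "simpo" first subtracts simpo_gamma / beta from d, "hinge" uses
+     # max(0, 1 - beta * d), and "ipo" uses (d - 1 / (2 * beta)) ** 2.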
+     @staticmethod
+     def get_batch_logps(
+         logits: torch.FloatTensor,
+         labels: torch.LongTensor,
+         average_log_prob: bool = False,
+         label_pad_token_id: int = -100,
+         is_encoder_decoder: bool = False,
+     ) -> torch.FloatTensor:
+         """Compute the log probabilities of the given labels under the given logits.
+
+         Args:
+             logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
+             labels:
+                 Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are
+                 ignored. Shape: (batch_size, sequence_length)
+             average_log_prob:
+                 If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the
+                 log probabilities of the (non-masked) tokens.
+             label_pad_token_id: The label pad token id.
+             is_encoder_decoder: Whether the model is an encoder-decoder model.
+
+         Returns:
+             A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the
+             given logits.
+         """
+         if logits.shape[:-1] != labels.shape:
+             raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
+
+         if not is_encoder_decoder:
+             labels = labels[:, 1:].clone()
+             logits = logits[:, :-1, :]
+         loss_mask = labels != label_pad_token_id
+
+         # dummy token; we'll ignore the losses on these tokens later
+         labels[labels == label_pad_token_id] = 0
+
+         per_token_logps = selective_log_softmax(logits, labels)
+
+         if average_log_prob:
+             return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
+         else:
+             return (per_token_logps * loss_mask).sum(-1)
+
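+     # Sketch: for decoder-only models, logits and labels are shifted by one so
+     # position t predicts token t + 1; padded positions are masked out, so
+     # per-token logps of e.g. [-1.0, -2.0, <pad>] give a sum of -3.0, or a
+     # mean of -1.5 when average_log_prob=True (as used for "ipo" and "simpo").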
+     def concatenated_forward(
+         self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
+     ) -> tuple[torch.FloatTensor, ...]:
+         """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.
+
+         We do this to avoid doing two forward passes, because it's faster for FSDP.
+         """
+         concatenated_batch = self.concatenated_inputs(
+             batch,
+             is_encoder_decoder=self.is_encoder_decoder,
+             label_pad_token_id=self.label_pad_token_id,
+             padding_value=self.padding_value,
+             device=self.accelerator.device,
+         )
+         len_chosen = batch["chosen_labels"].shape[0]
+
+         model_kwargs = (
+             {
+                 "decoder_input_ids": self._shift_right(concatenated_batch["concatenated_labels"]),
+             }
+             if self.is_encoder_decoder
+             else {}
+         )
+
+         if self.aux_loss_enabled:
+             model_kwargs["output_router_logits"] = True
+
+         outputs = model(
+             concatenated_batch["concatenated_input_ids"],
+             attention_mask=concatenated_batch["concatenated_attention_mask"],
+             use_cache=False,
+             **model_kwargs,
+         )
+         all_logits = outputs.logits
+
+         def cross_entropy_loss(logits, labels):
+             if not self.is_encoder_decoder:
+                 # Shift so that tokens < n predict n
+                 logits = logits[..., :-1, :].contiguous()
+                 labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = nn.CrossEntropyLoss()
+             logits = logits.view(-1, logits.shape[-1])
+             labels = labels.view(-1)
+             # Enable model parallelism
+             labels = labels.to(logits.device)
+             loss = loss_fct(logits, labels)
+             return loss
+
+         labels = concatenated_batch["concatenated_labels"].clone()
+
+         if self.cpo_alpha == 0:
+             nll_loss = torch.tensor(0.0).to(self.accelerator.device)
+         else:
+             nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen])
+
+         all_logps = self.get_batch_logps(
+             all_logits,
+             concatenated_batch["concatenated_labels"],
+             average_log_prob=self.loss_type in ["ipo", "simpo"],
+             is_encoder_decoder=self.is_encoder_decoder,
+             label_pad_token_id=self.label_pad_token_id,
+         )
+
+         chosen_logps = all_logps[:len_chosen]
+         rejected_logps = all_logps[len_chosen:]
+
+         chosen_logits = all_logits[:len_chosen]
+         rejected_logits = all_logits[len_chosen:]
+
+         if self.aux_loss_enabled:
+             return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss, outputs.aux_loss)
+
+         return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss)
+
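+     # The first len_chosen rows of all_logits/all_logps correspond to the
+     # chosen responses and the remainder to the rejected ones; nll_loss is a
+     # plain cross-entropy term on the chosen half only, skipped entirely when
+     # cpo_alpha == 0.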
+     def get_batch_loss_metrics(
+         self,
+         model,
+         batch: dict[str, Union[list, torch.LongTensor]],
+         train_eval: Literal["train", "eval"] = "train",
+     ):
+         """Compute the CPO loss and other metrics for the given batch of inputs for train or test."""
+         metrics = {}
+
+         forward_output = self.concatenated_forward(model, batch)
+         (
+             policy_chosen_logps,
+             policy_rejected_logps,
+             policy_chosen_logits,
+             policy_rejected_logits,
+             policy_nll_loss,
+         ) = forward_output[:5]
+         if self.aux_loss_enabled:
+             aux_loss = forward_output[5]
+
+         losses, chosen_rewards, rejected_rewards = self.cpo_loss(
+             policy_chosen_logps,
+             policy_rejected_logps,
+         )
+
+         loss = losses.mean() + self.cpo_alpha * policy_nll_loss
+         reward_accuracies = (chosen_rewards > rejected_rewards).float()
+
+         prefix = "eval_" if train_eval == "eval" else ""
+         metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean().item()
+         metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean().item()
+         metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean().item()
+         metrics[f"{prefix}rewards/margins"] = (
+             self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean().item()
+         )
+         metrics[f"{prefix}logps/rejected"] = (
+             self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean().item()
+         )
+         metrics[f"{prefix}logps/chosen"] = (
+             self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean().item()
+         )
+         metrics[f"{prefix}logits/rejected"] = (
+             self.accelerator.gather_for_metrics(policy_rejected_logits.detach().mean()).mean().item()
+         )
+         metrics[f"{prefix}logits/chosen"] = (
+             self.accelerator.gather_for_metrics(policy_chosen_logits.detach().mean()).mean().item()
+         )
+         metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean().item()
+
+         if self.aux_loss_enabled:
+             loss += self.aux_loss_coef * aux_loss
+
+         return loss, metrics
+
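+     # The full objective assembled above is roughly:
+     #     loss = mean(cpo_losses) + cpo_alpha * nll_loss
+     # plus aux_loss_coef * aux_loss when router auxiliary losses are enabled
+     # (mixture-of-experts models); the remaining values are gathered across
+     # processes purely for logging.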
+     def compute_loss(
+         self,
+         model: Union[PreTrainedModel, nn.Module],
+         inputs: dict[str, Union[torch.Tensor, Any]],
+         return_outputs=False,
+         num_items_in_batch=None,
+     ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
+         compute_loss_context_manager = (
+             autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
+         )
+
+         with compute_loss_context_manager:
+             loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train")
+
+         # force log the metrics
+         self.store_metrics(metrics, train_eval="train")
+
+         if return_outputs:
+             return (loss, metrics)
+         return loss
+
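+     # Metrics are not handed back to the Trainer here; they are buffered via
+     # store_metrics and folded into the next log() call as batch averages.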
+     def generate_from_model(self, model, batch: dict[str, torch.LongTensor]) -> str:
+         """Generate samples from the model for the given batch of inputs."""
+
+         # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
+         # the torch amp context manager as some hidden states are silently cast to full precision.
+         generate_context_manager = (
+             autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
+         )
+
+         with generate_context_manager:
+             policy_output = model.generate(
+                 input_ids=batch["prompt_input_ids"],
+                 attention_mask=batch["prompt_attention_mask"],
+                 max_length=self.max_length,
+                 do_sample=True,
+                 pad_token_id=self.processing_class.pad_token_id,
+             )
+
+         policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
+         policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)
+
+         return policy_output_decoded
+
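+     # Generation runs under the same autocast context used for training when
+     # PEFT weights were cast to bf16, and outputs are padded to max_length so
+     # the decoded strings have consistent shapes across the batch.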
+     def prediction_step(
+         self,
+         model: Union[PreTrainedModel, nn.Module],
+         inputs: dict[str, Union[torch.Tensor, Any]],
+         prediction_loss_only: bool,
+         ignore_keys: Optional[list[str]] = None,
+     ):
+         if ignore_keys is None:
+             if hasattr(model, "config"):
+                 ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
+             else:
+                 ignore_keys = []
+
+         prediction_context_manager = (
+             autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
+         )
+
+         with torch.no_grad(), prediction_context_manager:
+             loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval")
+
+         # force log the metrics
+         self.store_metrics(metrics, train_eval="eval")
+
+         if prediction_loss_only:
+             return (loss.detach(), None, None)
+
+         # logits for the chosen and rejected samples from model
+         logits_dict = {
+             "eval_logits/chosen": metrics["eval_logits/chosen"],
+             "eval_logits/rejected": metrics["eval_logits/rejected"],
+         }
+         logits = [v for k, v in logits_dict.items() if k not in ignore_keys]
+         logits = torch.tensor(logits, device=self.accelerator.device)
+         labels = torch.zeros(logits.shape[0], device=self.accelerator.device)
+
+         return (loss.detach(), logits, labels)
+
+     def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
+         for key, value in metrics.items():
+             self._stored_metrics[train_eval][key].append(value)
+
+     def evaluation_loop(
+         self,
+         dataloader: DataLoader,
+         description: str,
+         prediction_loss_only: Optional[bool] = None,
+         ignore_keys: Optional[list[str]] = None,
+         metric_key_prefix: str = "eval",
+     ) -> EvalLoopOutput:
+         """
+         Overriding built-in evaluation loop to store metrics for each batch. Prediction/evaluation loop, shared by
+         `Trainer.evaluate()` and `Trainer.predict()`.
+
+         Works both with and without labels.
+         """
+
+         # Sample and save to game log if requested (for one batch to save time)
+         if self.generate_during_eval:
+             # Generate random indices within the range of the total number of samples
+             num_samples = len(dataloader.dataset)
+             random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)
+
+             # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
+             random_batch_dataset = dataloader.dataset.select(random_indices)
+             random_batch = self.data_collator(random_batch_dataset)
+             random_batch = self._prepare_inputs(random_batch)
+
+             policy_output_decoded = self.generate_from_model(self.model, random_batch)
+
+             table = pd.DataFrame(
+                 columns=["Prompt", "Policy"],
+                 data=[
+                     [prompt, pol[len(prompt) :]] for prompt, pol in zip(random_batch["prompt"], policy_output_decoded)
+                 ],
+             )
+             if "wandb" in self.args.report_to:
+                 wandb.log({"game_log": wandb.Table(data=table)})
+
+             if "comet_ml" in self.args.report_to:
+                 log_table_to_comet_experiment(
+                     name="game_log.csv",
+                     table=table,
+                 )
+
+         # Base evaluation
+         initial_output = super().evaluation_loop(
+             dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
+         )
+
+         return initial_output
+
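+     # Note: generation happens for a single randomly sampled eval batch per
+     # evaluation run, and the decoded completions (with the prompt prefix
+     # stripped) are logged as a "game_log" table to Weights & Biases and/or
+     # Comet when those integrations are enabled.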
+     def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
+         """
+         Log `logs` on the various objects watching training, including stored metrics.
+
+         Args:
+             logs (`dict[str, float]`):
+                 The values to log.
+             start_time (`float`, *optional*):
+                 Start time of the training.
+         """
+         # logs either has 'loss' or 'eval_loss'
+         train_eval = "train" if "loss" in logs else "eval"
+         # Add averaged stored metrics to logs
+         for key, metrics in self._stored_metrics[train_eval].items():
+             logs[key] = torch.tensor(metrics).mean().item()
+         del self._stored_metrics[train_eval]
+         return super().log(logs, start_time)
+
+     def _shift_right(self, input_ids):
+         if self.decoder_start_token_id is None:
+             raise ValueError(
+                 "model.config.decoder_start_token_id has to be defined. It is usually set to the pad_token_id."
+             )
+
+         # shift inputs to the right
+         if is_torch_fx_proxy(input_ids):
+             # Item assignment is not supported natively for proxies.
+             shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id)
+             shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
+         else:
+             shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+             shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+             shifted_input_ids[..., 0] = self.decoder_start_token_id
+
+         if self.pad_token_id is None:
+             raise ValueError("model.config.pad_token_id has to be defined.")
+         # replace possible -100 values in labels by `pad_token_id`
+         shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id)
+
+         return shifted_input_ids
+
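+     # Example: labels [a, b, c] become decoder inputs [start, a, b], and any
+     # -100 loss-padding values are mapped back to pad_token_id so the decoder
+     # never sees an invalid token id.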
+     # Ensure the model card is saved along with the checkpoint
+     def _save_checkpoint(self, model, trial):
+         if self.args.hub_model_id is None:
+             model_name = Path(self.args.output_dir).name
+         else:
+             model_name = self.args.hub_model_id.split("/")[-1]
+         self.create_model_card(model_name=model_name)
+         super()._save_checkpoint(model, trial)
+ class UnslothCPOTrainer(_UnslothCPOTrainer):
+     """
+
+     Initialize CPOTrainer.
+
+     Args:
+         model ([`~transformers.PreTrainedModel`]):
+             The model to train, preferably an [`~transformers.AutoModelForSequenceClassification`].
+         args ([`CPOConfig`]):
+             The CPO config arguments to use for training.
+         data_collator ([`~transformers.DataCollator`]):
+             The data collator to use for training. If None is specified, the default data collator
+             ([`DPODataCollatorWithPadding`]) will be used, which will pad the sequences to the maximum length of the
+             sequences in the batch, given a dataset of paired sequences.
+         train_dataset ([`~datasets.Dataset`]):
+             The dataset to use for training.
+         eval_dataset ([`~datasets.Dataset`]):
+             The dataset to use for evaluation.
+         processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*):
+             Processing class used to process the data. If provided, will be used to automatically process the inputs
+             for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
+             reuse the fine-tuned model.
+         model_init (`Callable[[], transformers.PreTrainedModel]`):
+             The model initializer to use for training. If None is specified, the default model initializer will be
+             used.
+         callbacks (`list[transformers.TrainerCallback]`):
+             The callbacks to use for training.
+         optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
+             The optimizer and scheduler to use for training.
+         preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
+             The function to use to preprocess the logits before computing the metrics.
+         peft_config (`dict`, defaults to `None`):
+             The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in
+             a PEFT model.
+         compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
+             The function to use to compute the metrics. Must take an `EvalPrediction` and return a dictionary mapping
+             strings to metric values.
+
+     """
+     def __init__(
+         self,
+         model = None,
+         args = None,
+         data_collator = None,
+         train_dataset = None,
+         eval_dataset = None,
+         processing_class = None,
+         model_init = None,
+         callbacks = None,
+         preprocess_logits_for_metrics = None,
+         peft_config = None,
+         compute_metrics = None,
+         **kwargs
+     ):
+         if args is None: args = UnslothCPOConfig()
+         use_bf16 = getattr(args, 'bf16', False)
+         if type(use_bf16) is not bool: use_bf16 = False
+         use_fp16 = getattr(args, 'fp16', False)
+         if type(use_fp16) is not bool: use_fp16 = False
+         force_float32 = False
+         full_finetuning = os.environ.get('UNSLOTH_ENABLE_FULL_FINETUNING', '0') == '1'
+         if not full_finetuning and (os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1'):
+             print('Unsloth: Switching to float32 training since model cannot work with float16')
+             force_float32 = True
+         mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
+         dtype = getattr(model.config, 'dtype', None) or getattr(model.config, 'torch_dtype', None)
+         if dtype is None: dtype = model.get_input_embeddings().weight.dtype
+         from unsloth_zoo.utils import _get_dtype
+         dtype = _get_dtype(dtype)
+         float16 = dtype == torch.float16
+         if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
+         if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
+         if force_float32:
+             # Forced float32 training
+             args.fp16 = False
+             args.bf16 = False
+             os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
+             if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'
+             # args.mixed_precision is a new argument which needs to be set now
+         elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
+             # Mixed precision training
+             args.fp16 = float16
+             args.bf16 = not float16
+             os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
+             if hasattr(args, 'mixed_precision'): args.mixed_precision = 'fp16' if float16 else 'bf16'
+             # args.mixed_precision is a new argument which needs to be set now
+         elif mixed_precision_dtype == 'bfloat16':
+             # Both False since bfloat16 full finetuning doesn't do any autocasting.
+             args.fp16 = False
+             args.bf16 = False
+             os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
+             if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'
+             # args.mixed_precision is a new argument which needs to be set now
+
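+         # Net effect of the precision block above, roughly: float16 models
+         # train with fp16 autocast, bfloat16 models with bf16 autocast, and
+         # UNSLOTH_FORCE_FLOAT32 or bfloat16 full finetuning disable mixed
+         # precision entirely via ACCELERATE_MIXED_PRECISION='no'.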
+         if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
+             args.eval_strategy = 'steps'
+             if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
+         ga_steps = getattr(args, 'gradient_accumulation_steps', None)
+         if ga_steps is not None and ga_steps > 1:
+             from transformers import __version__ as transformers_version
+             if Version(transformers_version) <= Version('4.45.2'):
+                 print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
+                       '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
+         if getattr(args, 'eval_strategy', 'no') != 'no':
+             eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
+             if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
+             if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
+         fp16_full_eval = getattr(args, 'fp16_full_eval', False)
+         if type(fp16_full_eval) is not bool: fp16_full_eval = False
+         bf16_full_eval = getattr(args, 'bf16_full_eval', False)
+         if type(bf16_full_eval) is not bool: bf16_full_eval = False
+         if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
+         if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
+         if force_float32:
+             args.bf16_full_eval = False
+             args.fp16_full_eval = False
+         elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
+             args.bf16_full_eval = True
+             args.fp16_full_eval = False
+         elif not bf16_full_eval and not fp16_full_eval:
+             args.bf16_full_eval = args.bf16
+             args.fp16_full_eval = args.fp16
+         _output_logits = False
+         if locals().get('compute_metrics', None) is not None: _output_logits = True
+         if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
+         if _output_logits:
+             os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
+         if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
+             pass
+         else:
+             model_max_seq_length = getattr(model, 'max_seq_length', None)
+             args_max_seq_length = getattr(args, 'max_seq_length', None)
+             if args_max_seq_length is None and model_max_seq_length is not None:
+                 max_seq_length = model.max_seq_length
+                 if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
+         if model is not None and hasattr(model, 'for_training'):
+             model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
+         if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
+         if 'processing_class' in locals():
+             if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
+             if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
+         __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
+         from unsloth_zoo.vision_utils import UnslothVisionDataCollator
+         if not isinstance(data_collator, UnslothVisionDataCollator):
+             if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
+                 data_collator = TransformersDataCollatorForLanguageModeling(
+                     __tokenizer,
+                     mlm = False,
+                     mlm_probability = 0.0,
+                     pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
+                 )
+             elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
+                 data_collator = DataCollatorForSeq2Seq(
+                     __tokenizer,
+                     pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
+                 )
+         else:
+             if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
+             if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
+             if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
+         if not isinstance(data_collator, UnslothVisionDataCollator):
+             if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
+                 if isinstance(data_collator, DataCollatorForSeq2Seq):
+                     data_collator = DataCollatorForSeq2Seq(
+                         __tokenizer.tokenizer,
+                         pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
+                     )
+                 else:
+                     data_collator = TransformersDataCollatorForLanguageModeling(
+                         __tokenizer.tokenizer,
+                         mlm = False,
+                         mlm_probability = 0.0,
+                         pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
+                     )
+         other_metrics = []
+
+         from unsloth_zoo.logging_utils import PatchRLStatistics
+         PatchRLStatistics('cpo_trainer', other_metrics)
+
+         # [TODO] Fix up DataParallel multiplying batch sizes
+         # [TODO] DDP works, but DP seems to not work? [TODO]
+         if getattr(args, "parallel_mode", None) == ParallelMode.NOT_DISTRIBUTED and args.n_gpu > 1:
+             if getattr(args, "_n_gpu", 1) != 1:
+                 args._n_gpu = 1
+         if "model" in locals() and hasattr(model, "for_training"):
+             model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
+         super().__init__(
+             model = model,
+             args = args,
+             data_collator = data_collator,
+             train_dataset = train_dataset,
+             eval_dataset = eval_dataset,
+             processing_class = processing_class,
+             model_init = model_init,
+             callbacks = callbacks,
+             preprocess_logits_for_metrics = preprocess_logits_for_metrics,
+             peft_config = peft_config,
+             compute_metrics = compute_metrics,
+             **kwargs,
+         )
+         if "model" in locals() and hasattr(model, "for_inference"):
+             model.for_inference()
+         if hasattr(self, 'neftune_hook_handle'):
+             self.neftune_hook_handle.remove()
+         if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
+         if getattr(args, 'neftune_noise_alpha', None) is not None:
+             model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
+         pass
+         if hasattr(self, 'accelerator'):
+             scaler = self.accelerator.scaler
+             current_model = model
+             while hasattr(current_model, 'model'):
+                 current_model.accelerator_scaler = scaler
+                 current_model = current_model.model
+             current_model.accelerator_scaler = scaler
+         pass
+         if hasattr(self, 'train'):
+             self.train = MethodType(prepare_for_training_mode(self.__class__.train), self)
+         pass
+
+     pass
+
+
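+ # Minimal usage sketch (illustrative only; assumes a preference dataset with
+ # "prompt", "chosen" and "rejected" columns and an already-loaded model):
+ #
+ #     from trl import CPOConfig
+ #     trainer = UnslothCPOTrainer(
+ #         model=model,
+ #         args=CPOConfig(output_dir="cpo-out", max_length=512, beta=0.1),
+ #         train_dataset=dataset,
+ #         processing_class=tokenizer,
+ #     )
+ #     trainer.train()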
+ if hasattr(logger, "addFilter"):
+     import logging
+     class HideLoggingMessage(logging.Filter):
+         def __init__(self, text): self.text = text
+         def filter(self, x): return not (self.text in x.getMessage())
+     pass
+     logger.addFilter(HideLoggingMessage("`use_cache=True`"))
+