cciwon-code-review-cli 2.0.1 → 2.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. package/bin/code-review.js +1 -1
  2. package/lib/chat-mode.js +7 -2
  3. package/package.json +1 -1
  4. package/unsloth_compiled_cache/.locks/.lock.AqlmLoraLinear_peft_forward.py +0 -0
  5. package/unsloth_compiled_cache/.locks/.lock.AwqLoraLinear_peft_forward.py +0 -0
  6. package/unsloth_compiled_cache/.locks/.lock.BatchNorm1d.py +0 -0
  7. package/unsloth_compiled_cache/.locks/.lock.BatchNorm2d.py +0 -0
  8. package/unsloth_compiled_cache/.locks/.lock.BatchNorm3d.py +0 -0
  9. package/unsloth_compiled_cache/.locks/.lock.Conv1d.py +0 -0
  10. package/unsloth_compiled_cache/.locks/.lock.Conv2d.py +0 -0
  11. package/unsloth_compiled_cache/.locks/.lock.Conv3d.py +0 -0
  12. package/unsloth_compiled_cache/.locks/.lock.ConvTranspose1d.py +0 -0
  13. package/unsloth_compiled_cache/.locks/.lock.ConvTranspose2d.py +0 -0
  14. package/unsloth_compiled_cache/.locks/.lock.ConvTranspose3d.py +0 -0
  15. package/unsloth_compiled_cache/.locks/.lock.GPTQLoraLinear_peft_forward.py +0 -0
  16. package/unsloth_compiled_cache/.locks/.lock.GroupNorm.py +0 -0
  17. package/unsloth_compiled_cache/.locks/.lock.LayerNorm.py +0 -0
  18. package/unsloth_compiled_cache/.locks/.lock.Linear4bit_peft_forward.py +0 -0
  19. package/unsloth_compiled_cache/.locks/.lock.Linear8bitLt_peft_forward.py +0 -0
  20. package/unsloth_compiled_cache/.locks/.lock.Linear_peft_forward.py +0 -0
  21. package/unsloth_compiled_cache/.locks/.lock.LoraParallelLinear_peft_forward.py +0 -0
  22. package/unsloth_compiled_cache/.locks/.lock.RMSNorm.py +0 -0
  23. package/unsloth_compiled_cache/.locks/.lock.UnslothBCOTrainer.py +0 -0
  24. package/unsloth_compiled_cache/.locks/.lock.UnslothCPOTrainer.py +0 -0
  25. package/unsloth_compiled_cache/.locks/.lock.UnslothDPOTrainer.py +0 -0
  26. package/unsloth_compiled_cache/.locks/.lock.UnslothGKDTrainer.py +0 -0
  27. package/unsloth_compiled_cache/.locks/.lock.UnslothGRPOTrainer.py +0 -0
  28. package/unsloth_compiled_cache/.locks/.lock.UnslothKTOTrainer.py +0 -0
  29. package/unsloth_compiled_cache/.locks/.lock.UnslothNashMDTrainer.py +0 -0
  30. package/unsloth_compiled_cache/.locks/.lock.UnslothORPOTrainer.py +0 -0
  31. package/unsloth_compiled_cache/.locks/.lock.UnslothOnlineDPOTrainer.py +0 -0
  32. package/unsloth_compiled_cache/.locks/.lock.UnslothPPOTrainer.py +0 -0
  33. package/unsloth_compiled_cache/.locks/.lock.UnslothPRMTrainer.py +0 -0
  34. package/unsloth_compiled_cache/.locks/.lock.UnslothRLOOTrainer.py +0 -0
  35. package/unsloth_compiled_cache/.locks/.lock.UnslothRewardTrainer.py +0 -0
  36. package/unsloth_compiled_cache/.locks/.lock.UnslothSFTTrainer.py +0 -0
  37. package/unsloth_compiled_cache/.locks/.lock.UnslothXPOTrainer.py +0 -0
  38. package/unsloth_compiled_cache/.locks/.lock.unsloth_compiled_module_qwen3_moe.py +0 -0
  39. package/unsloth_compiled_cache/.locks/.lock.unsloth_compiled_module_siglip.py +0 -0
  40. package/unsloth_compiled_cache/AqlmLoraLinear_peft_forward.py +88 -0
  41. package/unsloth_compiled_cache/AwqLoraLinear_peft_forward.py +87 -0
  42. package/unsloth_compiled_cache/BatchNorm1d.py +117 -0
  43. package/unsloth_compiled_cache/BatchNorm2d.py +117 -0
  44. package/unsloth_compiled_cache/BatchNorm3d.py +117 -0
  45. package/unsloth_compiled_cache/Conv1d.py +70 -0
  46. package/unsloth_compiled_cache/Conv2d.py +70 -0
  47. package/unsloth_compiled_cache/Conv3d.py +70 -0
  48. package/unsloth_compiled_cache/ConvTranspose1d.py +97 -0
  49. package/unsloth_compiled_cache/ConvTranspose2d.py +106 -0
  50. package/unsloth_compiled_cache/ConvTranspose3d.py +98 -0
  51. package/unsloth_compiled_cache/GPTQLoraLinear_peft_forward.py +95 -0
  52. package/unsloth_compiled_cache/GroupNorm.py +70 -0
  53. package/unsloth_compiled_cache/LayerNorm.py +72 -0
  54. package/unsloth_compiled_cache/Linear4bit_peft_forward.py +115 -0
  55. package/unsloth_compiled_cache/Linear8bitLt_peft_forward.py +113 -0
  56. package/unsloth_compiled_cache/Linear_peft_forward.py +104 -0
  57. package/unsloth_compiled_cache/LoraParallelLinear_peft_forward.py +91 -0
  58. package/unsloth_compiled_cache/RMSNorm.py +73 -0
  59. package/unsloth_compiled_cache/UnslothBCOTrainer.py +2026 -0
  60. package/unsloth_compiled_cache/UnslothCPOTrainer.py +1806 -0
  61. package/unsloth_compiled_cache/UnslothDPOTrainer.py +2750 -0
  62. package/unsloth_compiled_cache/UnslothGKDTrainer.py +1157 -0
  63. package/unsloth_compiled_cache/UnslothGRPOTrainer.py +3607 -0
  64. package/unsloth_compiled_cache/UnslothKTOTrainer.py +2220 -0
  65. package/unsloth_compiled_cache/UnslothNashMDTrainer.py +1210 -0
  66. package/unsloth_compiled_cache/UnslothORPOTrainer.py +1730 -0
  67. package/unsloth_compiled_cache/UnslothOnlineDPOTrainer.py +2313 -0
  68. package/unsloth_compiled_cache/UnslothPPOTrainer.py +1504 -0
  69. package/unsloth_compiled_cache/UnslothPRMTrainer.py +979 -0
  70. package/unsloth_compiled_cache/UnslothRLOOTrainer.py +2674 -0
  71. package/unsloth_compiled_cache/UnslothRewardTrainer.py +1197 -0
  72. package/unsloth_compiled_cache/UnslothSFTTrainer.py +1416 -0
  73. package/unsloth_compiled_cache/UnslothXPOTrainer.py +1255 -0
  74. package/unsloth_compiled_cache/__pycache__/AqlmLoraLinear_peft_forward.cpython-312.pyc +0 -0
  75. package/unsloth_compiled_cache/__pycache__/AwqLoraLinear_peft_forward.cpython-312.pyc +0 -0
  76. package/unsloth_compiled_cache/__pycache__/BatchNorm1d.cpython-312.pyc +0 -0
  77. package/unsloth_compiled_cache/__pycache__/BatchNorm2d.cpython-312.pyc +0 -0
  78. package/unsloth_compiled_cache/__pycache__/BatchNorm3d.cpython-312.pyc +0 -0
  79. package/unsloth_compiled_cache/__pycache__/Conv1d.cpython-312.pyc +0 -0
  80. package/unsloth_compiled_cache/__pycache__/Conv2d.cpython-312.pyc +0 -0
  81. package/unsloth_compiled_cache/__pycache__/Conv3d.cpython-312.pyc +0 -0
  82. package/unsloth_compiled_cache/__pycache__/ConvTranspose1d.cpython-312.pyc +0 -0
  83. package/unsloth_compiled_cache/__pycache__/ConvTranspose2d.cpython-312.pyc +0 -0
  84. package/unsloth_compiled_cache/__pycache__/ConvTranspose3d.cpython-312.pyc +0 -0
  85. package/unsloth_compiled_cache/__pycache__/GPTQLoraLinear_peft_forward.cpython-312.pyc +0 -0
  86. package/unsloth_compiled_cache/__pycache__/GroupNorm.cpython-312.pyc +0 -0
  87. package/unsloth_compiled_cache/__pycache__/LayerNorm.cpython-312.pyc +0 -0
  88. package/unsloth_compiled_cache/__pycache__/Linear4bit_peft_forward.cpython-312.pyc +0 -0
  89. package/unsloth_compiled_cache/__pycache__/Linear8bitLt_peft_forward.cpython-312.pyc +0 -0
  90. package/unsloth_compiled_cache/__pycache__/Linear_peft_forward.cpython-312.pyc +0 -0
  91. package/unsloth_compiled_cache/__pycache__/LoraParallelLinear_peft_forward.cpython-312.pyc +0 -0
  92. package/unsloth_compiled_cache/__pycache__/RMSNorm.cpython-312.pyc +0 -0
  93. package/unsloth_compiled_cache/__pycache__/UnslothBCOTrainer.cpython-312.pyc +0 -0
  94. package/unsloth_compiled_cache/__pycache__/UnslothCPOTrainer.cpython-312.pyc +0 -0
  95. package/unsloth_compiled_cache/__pycache__/UnslothDPOTrainer.cpython-312.pyc +0 -0
  96. package/unsloth_compiled_cache/__pycache__/UnslothGKDTrainer.cpython-312.pyc +0 -0
  97. package/unsloth_compiled_cache/__pycache__/UnslothGRPOTrainer.cpython-312.pyc +0 -0
  98. package/unsloth_compiled_cache/__pycache__/UnslothKTOTrainer.cpython-312.pyc +0 -0
  99. package/unsloth_compiled_cache/__pycache__/UnslothNashMDTrainer.cpython-312.pyc +0 -0
  100. package/unsloth_compiled_cache/__pycache__/UnslothORPOTrainer.cpython-312.pyc +0 -0
  101. package/unsloth_compiled_cache/__pycache__/UnslothOnlineDPOTrainer.cpython-312.pyc +0 -0
  102. package/unsloth_compiled_cache/__pycache__/UnslothPPOTrainer.cpython-312.pyc +0 -0
  103. package/unsloth_compiled_cache/__pycache__/UnslothPRMTrainer.cpython-312.pyc +0 -0
  104. package/unsloth_compiled_cache/__pycache__/UnslothRLOOTrainer.cpython-312.pyc +0 -0
  105. package/unsloth_compiled_cache/__pycache__/UnslothRewardTrainer.cpython-312.pyc +0 -0
  106. package/unsloth_compiled_cache/__pycache__/UnslothSFTTrainer.cpython-312.pyc +0 -0
  107. package/unsloth_compiled_cache/__pycache__/UnslothXPOTrainer.cpython-312.pyc +0 -0
  108. package/unsloth_compiled_cache/__pycache__/unsloth_compiled_module_qwen3_moe.cpython-312.pyc +0 -0
  109. package/unsloth_compiled_cache/__pycache__/unsloth_compiled_module_siglip.cpython-312.pyc +0 -0
  110. package/unsloth_compiled_cache/unsloth_compiled_module_qwen3_moe.py +726 -0
  111. package/unsloth_compiled_cache/unsloth_compiled_module_siglip.py +534 -0
package/unsloth_compiled_cache/UnslothKTOTrainer.py (new file, +2220)
@@ -0,0 +1,2220 @@
+ """
+ 2025.12.6
+ 2025.12.7
+ 4.57.1
+ 0.24.0
+ __UNSLOTH_VERSIONING__
+ """
+
+ # Unsloth auto generated code
+ # Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ from torch import Tensor
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+ from typing import Any, List, Optional, Tuple, Union, Dict, Set, Callable
+ from trl.trainer.kto_trainer import (Any, AutoModelForCausalLM, BaseImageProcessor, BaseTrainer, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, KTOConfig, KTOTrainer, Literal, Optional, PartialState, Path, PeftModel, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, SequentialSampler, TrainerCallback, TrainingArguments, Union, _get_kl_dataset, _process_tokens, _tokenize, autocast, concatenate_datasets, contextmanager, create_reference_model, defaultdict, disable_dropout_in_model, has_length, inspect, is_comet_available, is_liger_kernel_available, is_peft_available, is_wandb_available, itemgetter, log_table_to_comet_experiment, logger, logging, maybe_apply_chat_template, maybe_extract_prompt, maybe_unpair_preference_dataset, nn, np, nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_deepspeed, prepare_model_for_kbit_training, random, selective_log_softmax, textwrap, torch, tqdm, warnings, F, Optional, PeftModel, PreTrainedModel, is_peft_available, logger, os, torch)
+
+
+ import os
+ from typing import *
+ from dataclasses import dataclass, field
+ from packaging.version import Version
+ import torch
+ import numpy as np
+ from contextlib import nullcontext
+ from torch.nn import functional as F
+ import inspect
+ from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling
+ from transformers.training_args import ParallelMode
+
+ # Wrap trainer with padding to right and enable training mode
+ # Also patches W&B since multiple runs must use wandb.finish()
+ import functools
+ from types import MethodType
+ def prepare_for_training_mode(f):
+     @functools.wraps(f)
+     def wrapper(self, *args, **kwargs):
+         # Enable training mode
+         if hasattr(self, 'model') and hasattr(self.model, "for_training"):
+             self.model.for_training()
+         output = f(self, *args, **kwargs)
+         # Return inference mode
+         if hasattr(self, 'model') and hasattr(self.model, "for_inference"):
+             self.model.for_inference()
+         # Patch W&B to enable logging on future runs, otherwise it'll overwrite the first run
+         try:
+             import wandb
+             wandb.finish()
+         except:
+             pass
+         return output
+     return wrapper
+ pass
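The `prepare_for_training_mode` decorator above toggles Unsloth's training/inference modes around any trainer method and closes the active W&B run afterwards. A minimal sketch of the wrapping behaviour, using hypothetical `DummyModel`/`DummyTrainer` stand-ins that are not part of this package:

    class DummyModel:
        def for_training(self):  print("for_training called")
        def for_inference(self): print("for_inference called")

    class DummyTrainer:
        def __init__(self): self.model = DummyModel()
        def train(self):     return "done"

    # Wrap the method exactly as the generated code wraps trainer methods
    DummyTrainer.train = prepare_for_training_mode(DummyTrainer.train)
    print(DummyTrainer().train())  # prints both mode switches, then "done"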
+
+ torch_compile_options = {
+     "epilogue_fusion"   : True,
+     "max_autotune"      : False,
+     "shape_padding"     : True,
+     "trace.enabled"     : False,
+     "triton.cudagraphs" : False,
+ }
+
+ @torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
+ def chunked_selective_log_softmax(logits, index):
+     # Split into 4 chunks only
+     chunked_logits = torch.chunk(logits.reshape(-1, logits.shape[-1]), chunks = 4, dim = 0)
+     chunked_index  = torch.chunk(index.reshape(-1), chunks = 4, dim = 0)
+     all_per_token_logps = []
+     # Below loop does the same as selective_log_softmax(chunk_logits, chunk_index)
+     for chunk_logits, chunk_index in zip(chunked_logits, chunked_index):
+         chunk_logits = chunk_logits.to(torch.float32)
+         selected_logits = torch.gather(chunk_logits, dim = -1, index = chunk_index.unsqueeze(-1)).squeeze(-1)
+         logsumexp_values = torch.logsumexp(chunk_logits, dim = -1)
+         per_token_logps = selected_logits - logsumexp_values
+         all_per_token_logps.append(per_token_logps)
+     pass
+     all_per_token_logps = torch.concat(all_per_token_logps)
+     all_per_token_logps = all_per_token_logps.reshape((logits.shape[0], logits.shape[1]))
+     return all_per_token_logps
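`chunked_selective_log_softmax` computes per-token log-probabilities in four float32 chunks to cap peak memory; the result should match an unchunked log-softmax-and-gather. A small sanity sketch, assuming the function above is in scope and a working `torch.compile` backend (the uncompiled body behaves the same):

    import torch

    logits = torch.randn(2, 8, 32)          # [batch, seq, vocab]
    index  = torch.randint(0, 32, (2, 8))   # chosen token ids per position

    # Unchunked reference: log-softmax in float32, then gather the chosen ids
    expected = torch.log_softmax(logits.float(), dim=-1) \
                    .gather(-1, index.unsqueeze(-1)).squeeze(-1)

    out = chunked_selective_log_softmax(logits, index)
    torch.testing.assert_close(out, expected, rtol=1e-4, atol=1e-4)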
+
+ def calculate_pad_tokens_in_prompt(
+     input_ids: torch.Tensor,
+     logits_to_keep: int,
+     pad_token_id: int
+ ) -> torch.Tensor:
+     """
+     Given a prompt tensor, returns the number of left-pad tokens in each sequence, e.g. [pad, pad, pad, cat] -> 3.
+     """
+     if logits_to_keep >= input_ids.shape[1]:
+         raise ValueError("logits_to_keep must be smaller than the sequence length.")
+
+     prompt_section = input_ids[:, :-logits_to_keep]
+
+     padding_mask = (prompt_section == pad_token_id)
+
+     pad_token_counts = padding_mask.sum(dim=1)
+
+     return pad_token_counts
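The count is taken over the prompt section only, i.e. everything before the last `logits_to_keep` positions. A worked example (values chosen for illustration):

    import torch

    pad = 0
    # Two prompts, left-padded to length 4, followed by 3 completion tokens each
    input_ids = torch.tensor([
        [pad, pad, pad, 11, 21, 22, 23],
        [pad,   5,   6,  7, 31, 32, 33],
    ])
    # logits_to_keep = 3 completion positions -> prompt section is the first 4 columns
    print(calculate_pad_tokens_in_prompt(input_ids, logits_to_keep=3, pad_token_id=pad))
    # tensor([3, 1])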
+
+ def create_completion_attention_mask(
+     completion_input_ids: torch.Tensor,
+     left_pad_tokens_per_prompt: torch.Tensor,
+     max_left_pad: int,
+     pad_token_id: int
+ ) -> torch.Tensor:
+     """
+     Given a sequence [p, p, p, c, c, c, pad, pad, pad],
+
+     where p are extra prompt tokens left over from slicing the torch tensor, c are completion
+     tokens, and pad are pad tokens, build a completion mask that zeroes out the p and pad
+     tokens: in this example, [0, 0, 0, 1, 1, 1, 0, 0, 0].
+     """
+     batch_size, completion_len = completion_input_ids.shape
+     device = completion_input_ids.device
+
+     num_tokens_to_mask = max_left_pad - left_pad_tokens_per_prompt
+
+     indices = torch.arange(completion_len, device=device).unsqueeze(0)
+     shift_mask = indices >= num_tokens_to_mask.unsqueeze(1)
+
+     non_padding_mask = (completion_input_ids != pad_token_id)
+
+     final_mask = shift_mask & non_padding_mask
+
+     return final_mask
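Because every row is sliced at the same `max_left_pad` boundary, rows with fewer left pads carry spill-over prompt tokens at the front of the completion slice; those and any right padding get zeroed. A worked example consistent with the code above:

    import torch

    pad = 0
    completion_ids = torch.tensor([
        [41, 42, 43, pad, pad],   # pure completion, then right padding
        [ 6,  7, 51, 52, 53],     # first two slots are spill-over prompt tokens
    ])
    left_pads = torch.tensor([3, 1])  # e.g. from calculate_pad_tokens_in_prompt
    mask = create_completion_attention_mask(completion_ids, left_pads,
                                            max_left_pad=3, pad_token_id=pad)
    print(mask.int())
    # tensor([[1, 1, 1, 0, 0],
    #         [0, 0, 1, 1, 1]])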
+
+ def left_pack_padding(tensor: torch.Tensor, pad_id: int) -> torch.Tensor:
+     """
+     Moves all padding tokens in each sequence of a batch to the right.
+     """
+     mask = (tensor != pad_id)
+     # Must do stable=True since the binary mask is unordered
+     sorted_indices = torch.argsort(mask, dim=1, descending=True, stable=True)
+     packed_tensor = torch.gather(tensor, 1, sorted_indices)
+     return packed_tensor
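The stable argsort on the non-pad mask keeps token order intact while pushing every pad to the tail. For example:

    import torch

    pad = 0
    batch = torch.tensor([
        [pad, pad, 5, 6],
        [7, pad, 8, pad],
    ])
    print(left_pack_padding(batch, pad))
    # tensor([[5, 6, 0, 0],
    #         [7, 8, 0, 0]])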
+
+ def align_logprobs_with_mask(
+     logprob_tensor: torch.Tensor,
+     attention_mask: torch.Tensor,
+     pad_value: float = 0.0
+ ) -> torch.Tensor:
+     """
+     Aligns a log probability tensor with a given attention mask.
+     """
+
+     device = logprob_tensor.device
+     batch_size, logprob_seq_len = logprob_tensor.shape
+     mask_seq_len = attention_mask.shape[1]
+
+     padded_logprobs = torch.full(
+         attention_mask.shape,
+         fill_value=pad_value,
+         dtype=logprob_tensor.dtype,
+         device=device
+     )
+
+     left_pad_counts = torch.argmax(attention_mask, dim=1)
+
+     cols = torch.arange(logprob_seq_len, device=device)
+     dest_indices = left_pad_counts.unsqueeze(1) + cols
+
+     # Create destination row indices
+     # Shape: [batch_size, logprob_seq_len]
+     row_indices = torch.arange(batch_size, device=device).unsqueeze(1).expand_as(dest_indices)
+
+     # --- 4. Filter out-of-bounds indices and perform assignment ---
+     # Create a mask to identify only the indices that are within the bounds
+     # of the target tensor's sequence length.
+     valid_mask = dest_indices < mask_seq_len
+
+     # Use this mask to select only the valid row indices, column indices,
+     # and the corresponding values from the logprob tensor.
+     # This flattens the selected elements into 1D tensors.
+     valid_rows = row_indices[valid_mask]
+     valid_cols = dest_indices[valid_mask]
+     valid_vals = logprob_tensor[valid_mask]
+
+     # Place the valid values into their correct positions in the padded tensor
+     # using a single, efficient advanced indexing operation.
+     padded_logprobs[valid_rows, valid_cols] = valid_vals
+
+     return padded_logprobs
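`align_logprobs_with_mask` scatters each row of log-probs so it starts at the first attended position of the mask, filling the rest with `pad_value` and clipping anything that would fall past the mask's length. For example:

    import torch

    logps = torch.tensor([[-0.5, -1.0, -1.5]])
    attn  = torch.tensor([[0, 0, 1, 1, 1]])  # two left-pad positions
    print(align_logprobs_with_mask(logps, attn))
    # tensor([[ 0.0000,  0.0000, -0.5000, -1.0000, -1.5000]])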
+ @dataclass
+ class UnslothKTOConfig(KTOConfig):
+     """
+
+     Configuration class for the [`KTOTrainer`].
+
+     This class includes only the parameters that are specific to KTO training. For a full list of training arguments,
+     please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
+     differ from those in [`~transformers.TrainingArguments`].
+
+     Using [`~transformers.HfArgumentParser`] we can turn this class into
+     [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
+     command line.
+
+     Parameters:
+         max_length (`int` or `None`, *optional*, defaults to `1024`):
+             Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
+             to use the default data collator.
+         max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
+             Maximum length of the prompt. This argument is required if you want to use the default data collator.
+         max_completion_length (`int`, *optional*):
+             Maximum length of the completion. This argument is required if you want to use the default data collator
+             and your model is an encoder-decoder.
+         beta (`float`, *optional*, defaults to `0.1`):
+             Parameter controlling the deviation from the reference model. Higher β means less deviation from the
+             reference model.
+         loss_type (`str`, *optional*, defaults to `"kto"`):
+             Type of loss to use. Possible values are:
+
+             - `"kto"`: KTO loss from the [KTO](https://huggingface.co/papers/2402.01306) paper.
+             - `"apo_zero_unpaired"`: Unpaired variant of APO-zero loss from the
+               [APO](https://huggingface.co/papers/2408.06266) paper.
+
+         desirable_weight (`float`, *optional*, defaults to `1.0`):
+             Desirable losses are weighed by this factor to counter unequal numbers of desirable and undesirable pairs.
+         undesirable_weight (`float`, *optional*, defaults to `1.0`):
+             Undesirable losses are weighed by this factor to counter unequal numbers of desirable and undesirable pairs.
+         label_pad_token_id (`int`, *optional*, defaults to `-100`):
+             Label pad token id. This argument is required if you want to use the default data collator.
+         padding_value (`int`, *optional*):
+             Padding value to use. If `None`, the padding value of the tokenizer is used.
+         truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
+             Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
+             This argument is required if you want to use the default data collator.
+         generate_during_eval (`bool`, *optional*, defaults to `False`):
+             If `True`, generates and logs completions from both the model and the reference model to W&B or Comet
+             during evaluation.
+         is_encoder_decoder (`bool`, *optional*):
+             When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
+             you need to specify if the model returned by the callable is an encoder-decoder model.
+         precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
+             Whether to precompute reference model log probabilities for training and evaluation datasets. This is
+             useful when training without the reference model to reduce the total GPU memory needed.
+         model_init_kwargs (`dict[str, Any]`, *optional*):
+             Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
+             string.
+         ref_model_init_kwargs (`dict[str, Any]`, *optional*):
+             Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
+             from a string.
+         dataset_num_proc (`int`, *optional*):
+             Number of processes to use for processing the dataset.
+         disable_dropout (`bool`, *optional*, defaults to `True`):
+             Whether to disable dropout in the model and reference model.
+         use_liger_loss (`bool`, *optional*, defaults to `False`):
+             Whether to use Liger loss. It requires liger-kernel to be installed.
+         base_model_attribute_name (`str`, *optional*, defaults to `"model"`):
+             Name of the attribute in the model that contains the base model. This is used to get the base model from
+             the model when the model does not have a `get_decoder` method in the case when `use_liger_loss` is `True`.
+
+     """
+     vllm_sampling_params: Optional[Any] = field(
+         default = None,
+         metadata = {'help': 'vLLM SamplingParams'},
+     )
+     unsloth_num_chunks : Optional[int] = field(
+         default = -1,
+         metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
+     )
+     max_seq_length : Optional[int] = field(
+         default = None,
+         metadata = {'help': 'Maximum sequence length to truncate to.'},
+     )
+     def __init__(
+         self,
+         output_dir = None,
+         overwrite_output_dir = None,
+         do_train = False,
+         do_eval = False,
+         do_predict = False,
+         eval_strategy = 'no',
+         prediction_loss_only = False,
+         per_device_train_batch_size = 4,
+         per_device_eval_batch_size = 4,
+         per_gpu_train_batch_size = None,
+         per_gpu_eval_batch_size = None,
+         gradient_accumulation_steps = 2,
+         eval_accumulation_steps = 2,
+         eval_delay = 0,
+         torch_empty_cache_steps = 250,
+         learning_rate = 5e-05,
+         weight_decay = 0.01,
+         adam_beta1 = 0.9,
+         adam_beta2 = 0.999,
+         adam_epsilon = 1e-08,
+         max_grad_norm = 1.0,
+         num_train_epochs = 3.0,
+         max_steps = -1,
+         lr_scheduler_type = 'linear',
+         warmup_ratio = 0.1,
+         warmup_steps = 0,
+         log_level = 'passive',
+         log_level_replica = 'warning',
+         log_on_each_node = True,
+         logging_dir = None,
+         logging_strategy = 'steps',
+         logging_first_step = False,
+         logging_steps = 1,
+         logging_nan_inf_filter = False,
+         save_strategy = 'steps',
+         save_steps = 500,
+         save_total_limit = None,
+         save_safetensors = True,
+         save_on_each_node = False,
+         save_only_model = False,
+         restore_callback_states_from_checkpoint = False,
+         no_cuda = False,
+         use_cpu = False,
+         use_mps_device = False,
+         seed = 3407,
+         data_seed = 3407,
+         jit_mode_eval = False,
+         bf16 = False,
+         fp16 = False,
+         fp16_opt_level = 'O1',
+         half_precision_backend = 'auto',
+         bf16_full_eval = False,
+         fp16_full_eval = False,
+         tf32 = None,
+         local_rank = -1,
+         ddp_backend = None,
+         tpu_num_cores = None,
+         tpu_metrics_debug = False,
+         debug = '',
+         dataloader_drop_last = False,
+         eval_steps = None,
+         dataloader_num_workers = 0,
+         dataloader_prefetch_factor = None,
+         past_index = -1,
+         run_name = None,
+         disable_tqdm = None,
+         remove_unused_columns = True,
+         label_names = None,
+         load_best_model_at_end = False,
+         metric_for_best_model = None,
+         greater_is_better = None,
+         ignore_data_skip = False,
+         fsdp = None,
+         fsdp_min_num_params = 0,
+         fsdp_config = None,
+         fsdp_transformer_layer_cls_to_wrap = None,
+         accelerator_config = None,
+         parallelism_config = None,
+         deepspeed = None,
+         label_smoothing_factor = 0.0,
+         optim = 'adamw_8bit',
+         optim_args = None,
+         adafactor = False,
+         group_by_length = False,
+         length_column_name = 'length',
+         report_to = 'none',
+         project = 'huggingface',
+         trackio_space_id = 'trackio',
+         ddp_find_unused_parameters = None,
+         ddp_bucket_cap_mb = None,
+         ddp_broadcast_buffers = None,
+         dataloader_pin_memory = True,
+         dataloader_persistent_workers = False,
+         skip_memory_metrics = True,
+         use_legacy_prediction_loop = False,
+         push_to_hub = False,
+         resume_from_checkpoint = None,
+         hub_model_id = None,
+         hub_strategy = 'every_save',
+         hub_token = None,
+         hub_private_repo = None,
+         hub_always_push = False,
+         hub_revision = None,
+         gradient_checkpointing = True,
+         gradient_checkpointing_kwargs = None,
+         include_inputs_for_metrics = False,
+         eval_do_concat_batches = True,
+         fp16_backend = 'auto',
+         push_to_hub_model_id = None,
+         push_to_hub_organization = None,
+         push_to_hub_token = None,
+         mp_parameters = '',
+         auto_find_batch_size = False,
+         full_determinism = False,
+         torchdynamo = None,
+         ray_scope = 'last',
+         ddp_timeout = 1800,
+         torch_compile = False,
+         torch_compile_backend = None,
+         torch_compile_mode = None,
+         include_tokens_per_second = False,
+         include_num_input_tokens_seen = False,
+         neftune_noise_alpha = None,
+         optim_target_modules = None,
+         batch_eval_metrics = False,
+         eval_on_start = False,
+         use_liger_kernel = False,
+         liger_kernel_config = None,
+         eval_use_gather_object = False,
+         average_tokens_across_devices = True,
+         max_length = 1024,
+         max_prompt_length = 512,
+         max_completion_length = None,
+         beta = 0.1,
+         loss_type = 'kto',
+         desirable_weight = 1.0,
+         undesirable_weight = 1.0,
+         label_pad_token_id = -100,
+         padding_value = None,
+         truncation_mode = 'keep_end',
+         generate_during_eval = False,
+         is_encoder_decoder = None,
+         disable_dropout = True,
+         precompute_ref_log_probs = False,
+         model_init_kwargs = None,
+         ref_model_init_kwargs = None,
+         dataset_num_proc = None,
+         use_liger_loss = False,
+         base_model_attribute_name = 'model',
+         vllm_sampling_params = None,
+         unsloth_num_chunks = -1,
+         max_seq_length = None,
+         **kwargs,
+     ):
+         if learning_rate < 1e-7: print(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
+         if learning_rate > 1: print(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
+         if output_dir is None and save_strategy == 'steps' and save_steps == 500:
+             output_dir = 'unsloth_training_checkpoints'
+             save_strategy = 'no'
+         if dataset_num_proc is None:
+             from multiprocessing import cpu_count
+             dataset_num_proc = min(max(cpu_count()+4, 2), 64)
+
+         super().__init__(
+             output_dir = output_dir,
+             overwrite_output_dir = overwrite_output_dir,
+             do_train = do_train,
+             do_eval = do_eval,
+             do_predict = do_predict,
+             eval_strategy = eval_strategy,
+             prediction_loss_only = prediction_loss_only,
+             per_device_train_batch_size = per_device_train_batch_size,
+             per_device_eval_batch_size = per_device_eval_batch_size,
+             per_gpu_train_batch_size = per_gpu_train_batch_size,
+             per_gpu_eval_batch_size = per_gpu_eval_batch_size,
+             gradient_accumulation_steps = gradient_accumulation_steps,
+             eval_accumulation_steps = eval_accumulation_steps,
+             eval_delay = eval_delay,
+             torch_empty_cache_steps = torch_empty_cache_steps,
+             learning_rate = learning_rate,
+             weight_decay = weight_decay,
+             adam_beta1 = adam_beta1,
+             adam_beta2 = adam_beta2,
+             adam_epsilon = adam_epsilon,
+             max_grad_norm = max_grad_norm,
+             num_train_epochs = num_train_epochs,
+             max_steps = max_steps,
+             lr_scheduler_type = lr_scheduler_type,
+             warmup_ratio = warmup_ratio,
+             warmup_steps = warmup_steps,
+             log_level = log_level,
+             log_level_replica = log_level_replica,
+             log_on_each_node = log_on_each_node,
+             logging_dir = logging_dir,
+             logging_strategy = logging_strategy,
+             logging_first_step = logging_first_step,
+             logging_steps = logging_steps,
+             logging_nan_inf_filter = logging_nan_inf_filter,
+             save_strategy = save_strategy,
+             save_steps = save_steps,
+             save_total_limit = save_total_limit,
+             save_safetensors = save_safetensors,
+             save_on_each_node = save_on_each_node,
+             save_only_model = save_only_model,
+             restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
+             no_cuda = no_cuda,
+             use_cpu = use_cpu,
+             use_mps_device = use_mps_device,
+             seed = seed,
+             data_seed = data_seed,
+             jit_mode_eval = jit_mode_eval,
+             bf16 = bf16,
+             fp16 = fp16,
+             fp16_opt_level = fp16_opt_level,
+             half_precision_backend = half_precision_backend,
+             bf16_full_eval = bf16_full_eval,
+             fp16_full_eval = fp16_full_eval,
+             tf32 = tf32,
+             local_rank = local_rank,
+             ddp_backend = ddp_backend,
+             tpu_num_cores = tpu_num_cores,
+             tpu_metrics_debug = tpu_metrics_debug,
+             debug = debug,
+             dataloader_drop_last = dataloader_drop_last,
+             eval_steps = eval_steps,
+             dataloader_num_workers = dataloader_num_workers,
+             dataloader_prefetch_factor = dataloader_prefetch_factor,
+             past_index = past_index,
+             run_name = run_name,
+             disable_tqdm = disable_tqdm,
+             remove_unused_columns = remove_unused_columns,
+             label_names = label_names,
+             load_best_model_at_end = load_best_model_at_end,
+             metric_for_best_model = metric_for_best_model,
+             greater_is_better = greater_is_better,
+             ignore_data_skip = ignore_data_skip,
+             fsdp = fsdp,
+             fsdp_min_num_params = fsdp_min_num_params,
+             fsdp_config = fsdp_config,
+             fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
+             accelerator_config = accelerator_config,
+             parallelism_config = parallelism_config,
+             deepspeed = deepspeed,
+             label_smoothing_factor = label_smoothing_factor,
+             optim = optim,
+             optim_args = optim_args,
+             adafactor = adafactor,
+             group_by_length = group_by_length,
+             length_column_name = length_column_name,
+             report_to = report_to,
+             project = project,
+             trackio_space_id = trackio_space_id,
+             ddp_find_unused_parameters = ddp_find_unused_parameters,
+             ddp_bucket_cap_mb = ddp_bucket_cap_mb,
+             ddp_broadcast_buffers = ddp_broadcast_buffers,
+             dataloader_pin_memory = dataloader_pin_memory,
+             dataloader_persistent_workers = dataloader_persistent_workers,
+             skip_memory_metrics = skip_memory_metrics,
+             use_legacy_prediction_loop = use_legacy_prediction_loop,
+             push_to_hub = push_to_hub,
+             resume_from_checkpoint = resume_from_checkpoint,
+             hub_model_id = hub_model_id,
+             hub_strategy = hub_strategy,
+             hub_token = hub_token,
+             hub_private_repo = hub_private_repo,
+             hub_always_push = hub_always_push,
+             hub_revision = hub_revision,
+             gradient_checkpointing = gradient_checkpointing,
+             gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
+             include_inputs_for_metrics = include_inputs_for_metrics,
+             eval_do_concat_batches = eval_do_concat_batches,
+             fp16_backend = fp16_backend,
+             push_to_hub_model_id = push_to_hub_model_id,
+             push_to_hub_organization = push_to_hub_organization,
+             push_to_hub_token = push_to_hub_token,
+             mp_parameters = mp_parameters,
+             auto_find_batch_size = auto_find_batch_size,
+             full_determinism = full_determinism,
+             torchdynamo = torchdynamo,
+             ray_scope = ray_scope,
+             ddp_timeout = ddp_timeout,
+             torch_compile = torch_compile,
+             torch_compile_backend = torch_compile_backend,
+             torch_compile_mode = torch_compile_mode,
+             include_tokens_per_second = include_tokens_per_second,
+             include_num_input_tokens_seen = include_num_input_tokens_seen,
+             neftune_noise_alpha = neftune_noise_alpha,
+             optim_target_modules = optim_target_modules,
+             batch_eval_metrics = batch_eval_metrics,
+             eval_on_start = eval_on_start,
+             use_liger_kernel = use_liger_kernel,
+             liger_kernel_config = liger_kernel_config,
+             eval_use_gather_object = eval_use_gather_object,
+             average_tokens_across_devices = average_tokens_across_devices,
+             max_length = max_length,
+             max_prompt_length = max_prompt_length,
+             max_completion_length = max_completion_length,
+             beta = beta,
+             loss_type = loss_type,
+             desirable_weight = desirable_weight,
+             undesirable_weight = undesirable_weight,
+             label_pad_token_id = label_pad_token_id,
+             padding_value = padding_value,
+             truncation_mode = truncation_mode,
+             generate_during_eval = generate_during_eval,
+             is_encoder_decoder = is_encoder_decoder,
+             disable_dropout = disable_dropout,
+             precompute_ref_log_probs = precompute_ref_log_probs,
+             model_init_kwargs = model_init_kwargs,
+             ref_model_init_kwargs = ref_model_init_kwargs,
+             dataset_num_proc = dataset_num_proc,
+             use_liger_loss = use_liger_loss,
+             base_model_attribute_name = base_model_attribute_name,**kwargs)
+         self.vllm_sampling_params = vllm_sampling_params
+         self.unsloth_num_chunks = unsloth_num_chunks
+         self.max_seq_length = max_seq_length
+ pass
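Everything above is plumbing: `UnslothKTOConfig` re-declares every `KTOConfig`/`TrainingArguments` field so Unsloth can swap in its own defaults (seed 3407, `adamw_8bit`, `report_to='none'`, warnings on extreme learning rates) and append `vllm_sampling_params`, `unsloth_num_chunks`, and `max_seq_length`. A minimal instantiation sketch, assuming the recent `transformers`/`trl` versions pinned in the file header (4.57.1 / 0.24.0) are installed:

    config = UnslothKTOConfig(
        output_dir = "outputs",
        per_device_train_batch_size = 4,  # the trainer later requires > 1 for the KL term
        learning_rate = 5e-5,
    )
    print(config.beta, config.loss_type, config.optim)  # 0.1 kto adamw_8bit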
+
+ class _UnslothKTOTrainer(BaseTrainer):
+     r""""""
+
+     _tag_names = ["trl", "kto"]
+     _name = "KTO"
+     _paper = {
+         "title": "KTO: Model Alignment as Prospect Theoretic Optimization",
+         "id": "2402.01306",
+         # docstyle-ignore
+         "citation": textwrap.dedent("""\
+         @article{ethayarajh2024kto,
+             title = {{KTO: Model Alignment as Prospect Theoretic Optimization}},
+             author = {Kawin Ethayarajh and Winnie Xu and Niklas Muennighoff and Dan Jurafsky and Douwe Kiela},
+             year = 2024,
+             eprint = {arXiv:2402.01306},
+         }"""),
+     }
+
+     def __init__(
+         self,
+         model: Union[PreTrainedModel, nn.Module, str] = None,
+         ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
+         args: KTOConfig = None,
+         train_dataset: Optional[Dataset] = None,
+         eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
+         processing_class: Optional[
+             Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
+         ] = None,
+         data_collator: Optional[DataCollator] = None,
+         model_init: Optional[Callable[[], PreTrainedModel]] = None,
+         callbacks: Optional[list[TrainerCallback]] = None,
+         optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
+         preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
+         peft_config: Optional[dict] = None,
+         compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
+         model_adapter_name: Optional[str] = None,
+         ref_adapter_name: Optional[str] = None,
+     ):
+         if not os.environ.get("TRL_EXPERIMENTAL_SILENCE"):
+             warnings.warn(
+                 "This trainer will soon be moved to trl.experimental and is a candidate for removal. If you rely on "
+                 "it and want it to remain, please share your comments here: "
+                 "https://github.com/huggingface/trl/issues/4223. Silence this warning by setting environment variable "
+                 "TRL_EXPERIMENTAL_SILENCE=1."
+             )
+         if type(args) is TrainingArguments:
+             raise ValueError("Please use `KTOConfig` instead of `TrainingArguments`.")
+
+         if not isinstance(model, str) and ref_model is model:
+             raise ValueError(
+                 "`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
+                 "same as `model`, you must pass a copy of it, or `None` if you use peft."
+             )
+
+         if args.model_init_kwargs is None:
+             model_init_kwargs = {}
+         elif not isinstance(model, str):
+             raise ValueError("You passed model_kwargs to the KTOTrainer. But your model is already instantiated.")
+         else:
+             model_init_kwargs = args.model_init_kwargs
+             dtype = model_init_kwargs.get("dtype")
+             if dtype is not None:
+                 # Convert to `torch.dtype` if an str is passed
+                 if isinstance(dtype, str) and dtype != "auto":
+                     dtype = getattr(torch, dtype)
+                 if dtype != "auto" and not isinstance(dtype, torch.dtype):
+                     raise ValueError(
+                         f"Invalid `dtype` passed to the KTOConfig. Expected a string with either `torch.dtype` or 'auto', but got {dtype}."
+                     )
+                 model_init_kwargs["dtype"] = dtype
+
+         if args.ref_model_init_kwargs is None:
+             ref_model_init_kwargs = {}
+         elif not isinstance(ref_model, str):
+             raise ValueError(
+                 "You passed ref_model_kwargs to the KTOTrainer. But your ref_model is already instantiated."
+             )
+         else:
+             ref_model_init_kwargs = args.ref_model_init_kwargs
+             dtype = ref_model_init_kwargs.get("dtype")
+             if dtype is not None:
+                 # Convert to `torch.dtype` if an str is passed
+                 if isinstance(dtype, str) and dtype != "auto":
+                     dtype = getattr(torch, dtype)
+                 if dtype != "auto" and not isinstance(dtype, torch.dtype):
+                     raise ValueError(
+                         f"Invalid `dtype` passed to the KTOConfig. Expected a string with either `torch.dtype` or 'auto', but got {dtype}."
+                     )
+                 ref_model_init_kwargs["dtype"] = dtype
+
+         if isinstance(model, str):
+             model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
+
+         if isinstance(ref_model, str):
+             ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)
+
+         # Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
+         # has been called in order to properly call autocast if needed.
+         self._peft_has_been_casted_to_bf16 = False
+
+         if not is_peft_available() and peft_config is not None:
+             raise ValueError(
+                 "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it with `pip install peft` to use the PEFT models"
+             )
+         elif is_peft_available() and peft_config is not None:
+             # if model is a peft model and we have a peft_config, we merge and unload it first
+             if isinstance(model, PeftModel):
+                 model = model.merge_and_unload()
+
+             if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
+                 _support_gc_kwargs = hasattr(
+                     args, "gradient_checkpointing_kwargs"
+                 ) and "gradient_checkpointing_kwargs" in list(
+                     inspect.signature(prepare_model_for_kbit_training).parameters
+                 )
+
+                 prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
+
+                 if _support_gc_kwargs:
+                     prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
+
+                 model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
+             elif args.gradient_checkpointing:
+                 # For backward compatibility with older versions of transformers
+                 if hasattr(model, "enable_input_require_grads"):
+                     model.enable_input_require_grads()
+                 else:
+
+                     def make_inputs_require_grad(module, input, output):
+                         output.requires_grad_(True)
+
+                     model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+             # get peft model with the given config
+             model = model
+             if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
+                 peft_module_casting_to_bf16(model)
+                 # If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
+                 self._peft_has_been_casted_to_bf16 = True
+
+         # For models that use gradient_checkpointing, we need to attach a hook that enables input
+         # to explicitly have `requires_grad=True`, otherwise training will either silently
+         # fail or completely fail.
+         elif args.gradient_checkpointing:
+             # For backward compatibility with older versions of transformers
+             if hasattr(model, "enable_input_require_grads"):
+                 model.enable_input_require_grads()
+             else:
+
+                 def make_inputs_require_grad(module, input, output):
+                     output.requires_grad_(True)
+
+                 model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+         if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
+             raise ValueError(
+                 "`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
+                 " Please install `wandb` or `comet-ml` to resolve."
+             )
+
+         if model is not None:
+             self.is_encoder_decoder = model.config.is_encoder_decoder
+         elif args.is_encoder_decoder is None:
+             raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
+         else:
+             self.is_encoder_decoder = args.is_encoder_decoder
+
+         self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
+         self.model_adapter_name = model_adapter_name
+         self.ref_adapter_name = ref_adapter_name
+
+         if ref_model:
+             self.ref_model = ref_model
+         elif self.is_peft_model or args.precompute_ref_log_probs:
+             # The `model` with adapters turned off will be used as the reference model
+             self.ref_model = None
+         else:
+             self.ref_model = create_reference_model(model)
+
+         if processing_class is None:
+             raise ValueError(
+                 "max_length or a processing_class must be specified when using the default DPODataCollatorWithPadding"
+             )
+         if args.max_length is None:
+             logger.warning(
+                 "When using DPODataCollatorWithPadding, you should set `max_length` in the KTOTrainer's init"
+                 " it will be set to `512` by default, but you should do it yourself in the future.",
+             )
+             max_length = 512
+         if args.max_length is not None:
+             max_length = args.max_length
+
+         if args.max_prompt_length is None:
+             logger.warning(
+                 "When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the KTOTrainer's init"
+                 " it will be set to `128` by default, but you should do it yourself in the future.",
+             )
+             max_prompt_length = 128
+         if args.max_prompt_length is not None:
+             max_prompt_length = args.max_prompt_length
+
+         max_completion_length = None
+         if args.max_completion_length is None and self.is_encoder_decoder:
+             logger.warning(
+                 "When using DPODataCollatorWithPadding with an encoder decoder architecture, you should set `max_completion_length` in the KTOTrainer's init"
+                 " it will be set to `128` by default, but you should do it yourself in the future.",
+             )
+             max_completion_length = 128
+         if args.max_completion_length is not None and self.is_encoder_decoder:
+             max_completion_length = args.max_completion_length
+
+         if data_collator is None:
+             data_collator = DPODataCollatorWithPadding(
+                 pad_token_id=processing_class.pad_token_id,
+                 label_pad_token_id=args.label_pad_token_id,
+                 is_encoder_decoder=self.is_encoder_decoder,
+             )
+
+             if args.remove_unused_columns:
+                 args.remove_unused_columns = False
+                 # warn users
+                 logger.warning(
+                     "When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your KTOConfig"
+                     " we have set it for you, but you should do it yourself in the future.",
+                 )
+
+             self.use_dpo_data_collator = True
+         else:
+             self.use_dpo_data_collator = False
+
+         # Disable dropout in the model and reference model
+         if args.disable_dropout:
+             disable_dropout_in_model(model)
+             if self.ref_model is not None:
+                 disable_dropout_in_model(self.ref_model)
+
+         self.loss_type = args.loss_type
+         self.max_length = max_length
+         self.generate_during_eval = args.generate_during_eval
+         self.label_pad_token_id = args.label_pad_token_id
+         self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
+         self.max_prompt_length = max_prompt_length
+         self.truncation_mode = args.truncation_mode
+         self.max_completion_length = max_completion_length
+         self.processing_class = processing_class
+         self.precompute_ref_log_probs = args.precompute_ref_log_probs
+
+         # Not all losses require a KL calculation
+         self.calculate_KL = True
+         if self.loss_type in ["apo_zero_unpaired"]:
+             self.calculate_KL = False
+
+         # Since ref_logs are precomputed on the first call to get_train/eval_dataloader
+         # keep track of first called to avoid computation of future calls
+         self._precomputed_train_ref_log_probs = False
+         self._precomputed_eval_ref_log_probs = False
+
+         # metric
+         self._stored_metrics = defaultdict(lambda: defaultdict(list))
+
+         # KTO parameter
+         self.beta = args.beta
+         self.desirable_weight = args.desirable_weight
+         self.undesirable_weight = args.undesirable_weight
+         self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
+         self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
+         if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
+             logger.warning(
+                 "You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
+                 "`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
+                 "greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
+                 "loss.",
+             )
+
+         # The trainer estimates the number of FLOPs [floating-point operations] using the number of elements in the
+         # input tensor associated with the key "input_ids". However, in KTO, the sampled data does not include the
+         # "input_ids" key. Instead, the available keys are "prompt_input_ids" and "completion_input_ids". As a result,
+         # the trainer issues the warning: "Could not estimate the number of tokens of the input, floating-point
+         # operations will not be computed." To suppress this warning, we set the "estimate_tokens" key in the model's
+         # "warnings_issued" dictionary to True. This acts as a flag to indicate that the warning has already been
+         # issued.
+         model.warnings_issued["estimate_tokens"] = True
+
+         # Compute that only on the main process for faster data processing.
+         # see: https://github.com/huggingface/trl/pull/1255
+         with PartialState().main_process_first():
+             # Extract the prompt if needed
+             train_dataset = train_dataset.map(
+                 maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from train dataset"
+             )
+             # Unpair the dataset if needed
+             train_dataset = maybe_unpair_preference_dataset(
+                 train_dataset, args.dataset_num_proc, desc="Unpairing train dataset"
+             )
+             # Apply the chat template if needed
+             train_dataset = train_dataset.map(
+                 maybe_apply_chat_template,
+                 fn_kwargs={"tokenizer": processing_class},
+                 num_proc=args.dataset_num_proc,
+                 desc="Applying chat template to train dataset",
+             )
+             if eval_dataset is not None:
+                 eval_dataset = eval_dataset.map(
+                     maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from eval dataset"
+                 )
+                 eval_dataset = maybe_unpair_preference_dataset(
+                     eval_dataset, args.dataset_num_proc, desc="Unpairing eval dataset"
+                 )
+                 eval_dataset = eval_dataset.map(
+                     maybe_apply_chat_template,
+                     fn_kwargs={"tokenizer": processing_class},
+                     num_proc=args.dataset_num_proc,
+                     desc="Applying chat template to eval dataset",
+                 )
+
+             # Tokenize and prepare the training datasets
+             train_dataset = train_dataset.map(
+                 _tokenize,
+                 batched=True,
+                 fn_kwargs={"tokenizer": self.processing_class},
+                 num_proc=args.dataset_num_proc,
+                 desc="Tokenizing train dataset",
+             )
+
+             fn_kwargs = {
+                 "prefix": "",
+                 "is_encoder_decoder": self.is_encoder_decoder,
+                 "tokenizer": self.processing_class,
+                 "max_length": self.max_length,
+                 "truncation_mode": self.truncation_mode,
+                 "label_pad_token_id": self.label_pad_token_id,
+                 "max_prompt_length": self.max_prompt_length,
+                 "max_completion_length": self.max_completion_length,
+             }
+
+             train_dataset = train_dataset.map(
+                 _process_tokens,
+                 fn_kwargs=fn_kwargs,
+                 num_proc=args.dataset_num_proc,
+                 desc="Processing tokenized train dataset",
+             )
+
+             # Tokenize and prepare the eval datasets
+             if eval_dataset is not None:
+                 eval_dataset = eval_dataset.map(
+                     _tokenize,
+                     fn_kwargs={"tokenizer": self.processing_class},
+                     batched=True,
+                     num_proc=args.dataset_num_proc,
+                     desc="Tokenizing eval dataset",
+                 )
+
+                 eval_dataset = eval_dataset.map(
+                     _process_tokens,
+                     fn_kwargs=fn_kwargs,
+                     num_proc=args.dataset_num_proc,
+                     desc="Processing tokenized eval dataset",
+                 )
+
+             # Get KL datasets if needed
+             if self.calculate_KL:
+                 if args.per_device_train_batch_size <= 1:
+                     raise ValueError(
+                         "Actual (not effective) batch size must be > 1. KTO will not work properly because the KL term will be equivalent to the implied reward."
+                     )
+
+                 # create pairs for estimating the KL term by flipping the matched pairs in each batch of size total_batch_size
+                 # i.e., [x_1, y_1], ..., [x_n, y_n] --> [x_1, y_n], ..., [x_n, y_1] = [x'_1, y'_1], ..., [x'_n, y'_n]
+                 train_kl_dataset = train_dataset.map(
+                     _get_kl_dataset,
+                     batched=True,
+                     batch_size=args.per_device_train_batch_size,
+                     num_proc=args.dataset_num_proc,
+                     desc="Extracting KL train dataset",
+                 )
+
+                 fn_kwargs["prefix"] = "KL_"
+                 train_kl_dataset = train_kl_dataset.map(
+                     _process_tokens,
+                     fn_kwargs=fn_kwargs,
+                     num_proc=args.dataset_num_proc,
+                     remove_columns=[c for c in train_kl_dataset.column_names if c in train_dataset.column_names],
+                     desc="Processing tokenized train KL dataset",
+                 )
+
+                 # merge the datasets
+                 train_dataset = concatenate_datasets([train_dataset, train_kl_dataset], axis=1)
+
+                 if eval_dataset is not None:
+                     # Get KL dataset
+                     eval_kl_dataset = eval_dataset.map(
+                         _get_kl_dataset,
+                         batched=True,
+                         batch_size=args.per_device_train_batch_size,
+                         num_proc=args.dataset_num_proc,
+                         desc="Extracting eval KL dataset",
+                     )
+
+                     eval_kl_dataset = eval_kl_dataset.map(
+                         _process_tokens,
+                         fn_kwargs=fn_kwargs,
+                         num_proc=args.dataset_num_proc,
+                         remove_columns=[c for c in eval_kl_dataset.column_names if c in eval_dataset.column_names],
+                         desc="Processing tokenized eval KL dataset",
+                     )
+
+                     # merge the datasets
+                     eval_dataset = concatenate_datasets([eval_dataset, eval_kl_dataset], axis=1)
+
+         # calculate dataset desirability balance
+         num_desirable = max(sum(train_dataset["label"]), 1)
+         num_undesirable = max(len(train_dataset["label"]) - num_desirable, 1)  # "label" is binary
+
+         if num_desirable != num_undesirable:
+             # The lower and upper bounds come from Eq. (8) of https://huggingface.co/papers/2402.01306
+             des_weight_lower_bound = round((num_undesirable * self.undesirable_weight / num_desirable) * 1, 2)
+             des_weight_upper_bound = round((num_undesirable * self.undesirable_weight / num_desirable) * 1.33, 2)
+             und_weight_lower_bound = round((num_desirable * self.desirable_weight / num_undesirable) / 1.33, 2)
+             und_weight_upper_bound = round((num_desirable * self.desirable_weight / num_undesirable) / 1, 2)
+
+             des_weight_in_range = des_weight_lower_bound <= self.desirable_weight <= des_weight_upper_bound
+             und_weight_in_range = und_weight_lower_bound <= self.undesirable_weight <= und_weight_upper_bound
+
+             if not (des_weight_in_range or und_weight_in_range):
+                 logger.warning(
+                     "You have different amounts of desirable/positive and undesirable/negative examples but the "
+                     "weights on the desirable and undesirable losses don't seem to be in an ideal range. Based "
+                     f"on your data, we recommend EITHER "
+                     f"desirable_weight in [{des_weight_lower_bound}, {des_weight_upper_bound}] or "
+                     f"undesirable_weight in [{und_weight_lower_bound}, {und_weight_upper_bound}] (but NOT BOTH). "
+                     "See the documentation on how to optimally set these weights.",
+                 )
+
+         super().__init__(
+             model=model,
+             args=args,
+             data_collator=data_collator,
+             train_dataset=train_dataset,
+             eval_dataset=eval_dataset,
+             processing_class=processing_class,
+             model_init=model_init,
+             compute_metrics=compute_metrics,
+             callbacks=callbacks,
+             optimizers=optimizers,
+             preprocess_logits_for_metrics=preprocess_logits_for_metrics,
+         )
+
+         # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
+         # model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
+         # self.model_accepts_loss_kwargs to False to enable scaling.
+         self.model_accepts_loss_kwargs = False
+
+         # Add tags for models that have been loaded with the correct transformers version
+         if hasattr(self.model, "add_model_tags"):
+             self.model.add_model_tags(self._tag_names)
+
+         if not hasattr(self, "accelerator"):
+             raise AttributeError(
+                 "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
+             )
+
+         # Deepspeed Zero-3 does not support precompute_ref_log_probs
+         if self.is_deepspeed_enabled:
+             if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs:
+                 raise ValueError(
+                     "You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`."
+                 )
+
+         if self.ref_model is None:
+             if not (self.is_peft_model or self.precompute_ref_log_probs):
+                 raise ValueError(
+                     "No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`"
+                 )
+         else:
+             if self.is_deepspeed_enabled:
+                 self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator)
+             else:
+                 self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
+
+         # Import Liger loss if enabled
+         if self.args.use_liger_loss:
+             if not is_liger_kernel_available():
+                 raise ImportError(
+                     "You set `use_liger_loss=True` but the liger kernel is not available. "
+                     "Please install liger-kernel first: `pip install liger-kernel`"
+                 )
+             if self.loss_type in ["apo_zero_unpaired"]:
+                 raise ValueError(
+                     "You cannot set `loss_type='apo_zero_unpaired'` with liger-kernel. "
+                     "Only KTO loss is supported with liger-kernel."
+                 )
+             if self.precompute_ref_log_probs:
+                 raise ValueError(
+                     "You cannot use `precompute_ref_log_probs=True` with liger kernel. Please set "
+                     "`precompute_ref_log_probs=False`."
+                 )
+             if self.is_peft_model or self.ref_adapter_name is not None:
+                 raise ValueError(
+                     "You cannot use `use_liger_loss=True` with Peft models. Please set `use_liger_loss=False`."
+                 )
+             self.kto_loss_fn = LigerFusedLinearKTOLoss(
+                 ignore_index=self.label_pad_token_id, beta=self.beta, use_ref_model=(self.ref_model is not None)
+             )
+
+     @contextmanager
+     def null_ref_context(self):
+         """Context manager for handling null reference model (that is, peft adapter manipulation)."""
+         with (
+             self.accelerator.unwrap_model(self.model).disable_adapter()
+             if self.is_peft_model and not self.ref_adapter_name
+             else nullcontext()
+         ):
+             if self.ref_adapter_name:
+                 self.model.set_adapter(self.ref_adapter_name)
+             yield
+             if self.ref_adapter_name:
+                 self.model.set_adapter(self.model_adapter_name or "default")
1116
+
1117
+ def get_train_dataloader(self) -> DataLoader:
1118
+ """
1119
+ Returns the training [`~torch.utils.data.DataLoader`].
1120
+
1121
+ Subclass of transformers.src.transformers.trainer.get_train_dataloader to precompute `ref_log_probs`.
1122
+ """
1123
+
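+        # One-time pass over the training set: cache per-example reference log-probs as
+        # dataset columns, then defer to the parent implementation. Guarded by
+        # `_precomputed_train_ref_log_probs` so it only runs once.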
+        if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs:
+            dataloader_params = {
+                "batch_size": self.args.per_device_train_batch_size,
+                "collate_fn": self.data_collator,
+                "num_workers": self.args.dataloader_num_workers,
+                "pin_memory": self.args.dataloader_pin_memory,
+                "shuffle": False,
+            }
+
+            # prepare dataloader
+            data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params))
+            reference_completion_logps = []
+            reference_KL_logps = []
+
+            for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"):
+                reference_completion_logp, reference_KL_logp = self.compute_reference_log_probs(padded_batch)
+
+                reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp)
+                reference_completion_logps.append(reference_completion_logp.cpu())
+
+                if self.calculate_KL:
+                    reference_KL_logp = self.accelerator.gather_for_metrics(reference_KL_logp)
+                    reference_KL_logps.append(reference_KL_logp.cpu())
+
+            self.train_dataset = self.train_dataset.add_column(
+                name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy()
+            )
+
+            if self.calculate_KL:
+                self.train_dataset = self.train_dataset.add_column(
+                    name="reference_KL_logps", column=torch.cat(reference_KL_logps).float().numpy()
+                )
+
+            self._precomputed_train_ref_log_probs = True
+
+        return super().get_train_dataloader()
+
+    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
+        """
+        Returns the evaluation [`~torch.utils.data.DataLoader`].
+
+        Overrides `transformers.Trainer.get_eval_dataloader` to precompute `ref_log_probs`.
+
+        Args:
+            eval_dataset (`torch.utils.data.Dataset`, *optional*):
+                If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
+                by the `model.forward()` method are automatically removed. It must implement `__len__`.
+        """
+        if eval_dataset is None and self.eval_dataset is None:
+            raise ValueError("Trainer: evaluation requires an eval_dataset.")
+        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
+
+        if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs:
+            dataloader_params = {
+                "batch_size": self.args.per_device_eval_batch_size,
+                "collate_fn": self.data_collator,
+                "num_workers": self.args.dataloader_num_workers,
+                "pin_memory": self.args.dataloader_pin_memory,
+                "shuffle": False,
+            }
+
+            # prepare dataloader
+            data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params))
+
+            reference_completion_logps = []
+            reference_KL_logps = []
+
+            for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"):
+                reference_completion_logp, reference_KL_logp = self.compute_reference_log_probs(padded_batch)
+
+                reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp)
+                reference_completion_logps.append(reference_completion_logp.cpu())
+
+                if self.calculate_KL:
+                    reference_KL_logp = self.accelerator.gather_for_metrics(reference_KL_logp)
+                    reference_KL_logps.append(reference_KL_logp.cpu())
+
+            eval_dataset = eval_dataset.add_column(
+                name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy()
+            )
+            if self.calculate_KL:
+                eval_dataset = eval_dataset.add_column(
+                    name="reference_KL_logps", column=torch.cat(reference_KL_logps).float().numpy()
+                )
+
+            # Save calculated reference_chosen_logps and reference_rejected_logps to the eval_dataset for subsequent runs
+            if self.eval_dataset is not None:
+                self.eval_dataset = eval_dataset
+            self._precomputed_eval_ref_log_probs = True
+
+        return super().get_eval_dataloader(eval_dataset=eval_dataset)
+
+    def compute_reference_log_probs(self, padded_batch: dict) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
+        """Computes log probabilities of the reference model for a single padded batch of a KTO-specific dataset."""
+        with torch.no_grad():
+            if self.ref_model is None:
+                with self.null_ref_context():
+                    if self.is_encoder_decoder:
+                        completion_logits = self.model(
+                            padded_batch["prompt_input_ids"],
+                            attention_mask=padded_batch["prompt_attention_mask"],
+                            decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
+                            labels=padded_batch["completion_labels"],
+                        ).logits
+
+                        if self.calculate_KL:
+                            KL_logits = self.model(
+                                padded_batch["KL_prompt_input_ids"],
+                                attention_mask=padded_batch["KL_prompt_attention_mask"],
+                                decoder_input_ids=padded_batch.get("KL_completion_decoder_input_ids"),
+                                labels=padded_batch["KL_completion_labels"],
+                            ).logits
+                    else:
+                        completion_logits = self.model(
+                            padded_batch["completion_input_ids"],
+                            attention_mask=padded_batch["completion_attention_mask"],
+                        ).logits
+
+                        if self.calculate_KL:
+                            KL_logits = self.model(
+                                padded_batch["KL_completion_input_ids"],
+                                attention_mask=padded_batch["KL_completion_attention_mask"],
+                            ).logits
+            else:
+                if self.is_encoder_decoder:
+                    completion_logits = self.ref_model(
+                        padded_batch["prompt_input_ids"],
+                        attention_mask=padded_batch["prompt_attention_mask"],
+                        decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
+                        labels=padded_batch["completion_labels"],
+                    ).logits
+
+                    if self.calculate_KL:
+                        KL_logits = self.ref_model(
+                            padded_batch["KL_prompt_input_ids"],
+                            attention_mask=padded_batch["KL_prompt_attention_mask"],
+                            decoder_input_ids=padded_batch.get("KL_completion_decoder_input_ids"),
+                            labels=padded_batch["KL_completion_labels"],
+                        ).logits
+                else:
+                    completion_logits = self.ref_model(
+                        padded_batch["completion_input_ids"], attention_mask=padded_batch["completion_attention_mask"]
+                    ).logits
+
+                    if self.calculate_KL:
+                        KL_logits = self.ref_model(
+                            padded_batch["KL_completion_input_ids"],
+                            attention_mask=padded_batch["KL_completion_attention_mask"],
+                        ).logits
+
+        completion_logps = self.get_batch_logps(
+            completion_logits,
+            padded_batch["completion_labels"],
+            average_log_prob=False,
+            is_encoder_decoder=self.is_encoder_decoder,
+            label_pad_token_id=self.label_pad_token_id,
+        )
+
+        if self.calculate_KL:
+            KL_logps = self.get_batch_logps(
+                KL_logits,
+                padded_batch["KL_completion_labels"],
+                average_log_prob=False,
+                is_encoder_decoder=self.is_encoder_decoder,
+                label_pad_token_id=self.label_pad_token_id,
+            )
+        else:
+            KL_logps = None
+
+        return completion_logps, KL_logps
+
+    @staticmethod
+    def get_batch_logps(
+        logits: torch.FloatTensor,
+        labels: torch.LongTensor,
+        average_log_prob: bool = False,
+        label_pad_token_id: int = -100,
+        is_encoder_decoder: bool = False,
+    ) -> torch.FloatTensor:
+        """Compute the log probabilities of the given labels under the given logits.
+
+        Args:
+            logits:
+                Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
+            labels:
+                Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are
+                ignored. Shape: (batch_size, sequence_length)
+            average_log_prob:
+                If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the
+                log probabilities of the (non-masked) tokens.
+            label_pad_token_id:
+                The label value to ignore when computing log probabilities.
+            is_encoder_decoder:
+                Whether the model is an encoder-decoder model. If True, the labels are not shifted and the logits are
+                assumed to already be aligned with the labels. If False, the labels are shifted to the right by one
+                position, and the logits are assumed to be aligned with the shifted labels.
+
+        Returns:
+            A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the
+            given logits.
+        """
+        if logits.shape[:-1] != labels.shape:
+            raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
+
+        if not is_encoder_decoder:
+            labels = labels[:, 1:].clone()
+            logits = logits[:, :-1, :]
+        else:
+            # Fixes enc-dec RuntimeError
+            labels = labels.clone()
+
+        loss_mask = labels != label_pad_token_id
+
+        # dummy token; we'll ignore the losses on these tokens later
+        labels[labels == label_pad_token_id] = 0
+
+        per_token_logps = selective_log_softmax(logits, labels)
+
+        if average_log_prob:
+            return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
+        else:
+            return (per_token_logps * loss_mask).sum(-1)
+
+    def forward(
+        self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
+    ) -> tuple[torch.FloatTensor, ...]:
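+        # Single policy forward over the padded completions; log-probs/logits are then split
+        # into chosen vs. rejected via batch["label"], returning
+        # (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps[, aux_loss]).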
+        KL_logps = self._compute_kl_logps(model, batch)
+
+        model_kwargs = (
+            {
+                "labels": batch["completion_labels"],
+                "decoder_input_ids": batch.get("completion_decoder_input_ids"),
+            }
+            if self.is_encoder_decoder
+            else {}
+        )
+        if self.aux_loss_enabled:
+            model_kwargs["output_router_logits"] = True
+
+        outputs = model(
+            batch["completion_input_ids"],
+            attention_mask=batch["completion_attention_mask"],
+            **model_kwargs,
+        )
+        completion_logits = outputs.logits
+
+        completion_logps = self.get_batch_logps(
+            completion_logits,
+            batch["completion_labels"],
+            average_log_prob=False,
+            is_encoder_decoder=self.is_encoder_decoder,
+            label_pad_token_id=self.label_pad_token_id,
+        )
+
+        if completion_logps.shape[0] != len(batch["label"]):
+            raise ValueError(
+                "There is a mismatch between the number of examples in this batch and the number of "
+                "examples for which an output sequence was predicted."
+            )
+
+        chosen_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is True]
+        rejected_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is False]
+
+        chosen_logps = completion_logps[chosen_idx, ...]
+        rejected_logps = completion_logps[rejected_idx, ...]
+
+        chosen_logits = completion_logits[chosen_idx, ...]
+        rejected_logits = completion_logits[rejected_idx, ...]
+
+        if self.aux_loss_enabled:
+            return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps, outputs.aux_loss)
+        else:
+            return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps)
+
+    def kto_loss(
+        self,
+        policy_chosen_logps: torch.FloatTensor,
+        policy_rejected_logps: torch.FloatTensor,
+        policy_KL_logps: torch.FloatTensor,
+        reference_chosen_logps: torch.FloatTensor,
+        reference_rejected_logps: torch.FloatTensor,
+        reference_KL_logps: torch.FloatTensor,
+    ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
+        """Compute the KTO loss for a batch of policy and reference model log probabilities.
+
+        Args:
+            policy_chosen_logps:
+                Log probabilities of the policy model for the chosen responses. Shape: (num(chosen) in batch_size,)
+            policy_rejected_logps:
+                Log probabilities of the policy model for the rejected responses. Shape: (num(rejected) in batch_size,)
+            policy_KL_logps: Log probabilities of the policy model for the KL responses. Shape: (batch_size,)
+            reference_chosen_logps:
+                Log probabilities of the reference model for the chosen responses. Shape: (num(chosen) in batch_size,)
+            reference_rejected_logps:
+                Log probabilities of the reference model for the rejected responses. Shape: (num(rejected) in
+                batch_size,)
+            reference_KL_logps: Log probabilities of the reference model for the KL responses. Shape: (batch_size,)
+
+        Returns:
+            A tuple of four tensors: (losses, chosen_rewards, rejected_rewards, KL). The losses tensor contains the KTO
+            loss for each example in the batch. The chosen_rewards and rejected_rewards tensors contain the rewards for
+            the chosen and rejected responses, respectively. The KL tensor contains the detached KL divergence estimate
+            between the policy and reference models.
+        """
+        if self.calculate_KL:
+            kl = (policy_KL_logps - reference_KL_logps).mean().detach()
+            kl = self.accelerator.gather_for_metrics(kl).mean().clamp(min=0)
+        else:
+            kl = torch.zeros(1).to(policy_chosen_logps.device)
+
+        # Chosen losses
+        if policy_chosen_logps.shape[0] != 0 or reference_chosen_logps.shape[0] != 0:
+            chosen_logratios = policy_chosen_logps - reference_chosen_logps
+
+            if self.loss_type == "kto":
+                # Eqn (7) of the KTO paper (https://huggingface.co/papers/2402.01306)
+                chosen_losses = 1 - F.sigmoid(self.beta * (chosen_logratios - kl))
+            elif self.loss_type == "apo_zero_unpaired":
+                # Unpaired variant of Eqn (7) of the APO paper (https://huggingface.co/papers/2408.06266)
+                # Use this loss when you believe the chosen outputs are better than your model's default output
+                chosen_losses = 1 - F.sigmoid(self.beta * chosen_logratios)
+
+            chosen_rewards = self.beta * chosen_logratios.detach()
+
+        else:
+            # lists can't be empty -- if they are, then accelerate.gather will hang
+            chosen_losses = torch.Tensor([]).to(self.accelerator.device)
+            chosen_rewards = torch.Tensor([]).to(self.accelerator.device)
+
+        # Rejected losses
+        if policy_rejected_logps.shape[0] != 0 or reference_rejected_logps.shape[0] != 0:
+            rejected_logratios = policy_rejected_logps - reference_rejected_logps
+
+            if self.loss_type == "kto":
+                rejected_losses = 1 - F.sigmoid(self.beta * (kl - rejected_logratios))
+            elif self.loss_type == "apo_zero_unpaired":
+                rejected_losses = F.sigmoid(self.beta * rejected_logratios)
+
+            rejected_rewards = self.beta * rejected_logratios.detach()
+        else:
+            # lists can't be empty -- if they are, then accelerate.gather will hang
+            rejected_losses = torch.Tensor([]).to(self.accelerator.device)
+            rejected_rewards = torch.Tensor([]).to(self.accelerator.device)
+
+        losses = torch.cat(
+            (self.desirable_weight * chosen_losses, self.undesirable_weight * rejected_losses),
+            0,
+        )
+
+        return losses, chosen_rewards, rejected_rewards, kl
+
+    def _compute_kl_logps(self, model, batch):
+        """Compute KL log probabilities for a given batch."""
+        KL_logps = None
+        if self.calculate_KL:
+            if self.is_encoder_decoder:
+                KL_model_kwargs = {
+                    "input_ids": batch["KL_prompt_input_ids"],
+                    "attention_mask": batch["KL_prompt_attention_mask"],
+                    "labels": batch["KL_completion_labels"],
+                    "decoder_input_ids": batch.get("KL_completion_decoder_input_ids"),
+                }
+            else:
+                KL_model_kwargs = {
+                    "input_ids": batch["KL_completion_input_ids"],
+                    "attention_mask": batch["KL_completion_attention_mask"],
+                }
+
+            with torch.no_grad():
+                KL_logits = model(**KL_model_kwargs).logits
+
+            KL_logps = self.get_batch_logps(
+                KL_logits,
+                batch["KL_completion_labels"],
+                average_log_prob=False,
+                is_encoder_decoder=self.is_encoder_decoder,
+                label_pad_token_id=self.label_pad_token_id,
+            )
+        return KL_logps
+
+    def _compute_loss_liger(self, model, batch):
+        """
+        Compute the KTO loss using the Liger-Kernel's LigerFusedLinearKTOLoss.
+
+        Args:
+            model:
+                The policy model used for generating log probabilities and outputs. It could be an encoder-decoder
+                model or a regular language model.
+            batch: A dictionary containing the input data and labels for the batch.
+
+        Returns:
+            A dictionary containing the following keys:
+                - "loss": The computed KTO loss for the batch.
+                - "chosen_logits_sum": Sum of the logits for the chosen responses from the policy model.
+                - "rejected_logits_sum": Sum of the logits for the rejected responses from the policy model.
+                - "chosen_logps_sum": Log probabilities of the chosen responses from the policy model.
+                - "rejected_logps_sum": Log probabilities of the rejected responses from the policy model.
+                - "chosen_rewards_sum": Rewards for the chosen responses.
+                - "rejected_rewards_sum": Rewards for the rejected responses.
+                - "kl": The KL divergence between the policy and reference models (detached).
+
+            If auxiliary loss is enabled, the dictionary will also include:
+                - "aux_loss": The auxiliary loss from the model outputs.
+        """
+        policy_KL_logps = self._compute_kl_logps(model, batch)
+        reference_KL_logps = self._compute_kl_logps(self.ref_model, batch)
+        if self.calculate_KL:
+            kl = (policy_KL_logps - reference_KL_logps).mean().detach()
+            kl = self.accelerator.gather_for_metrics(kl).mean().clamp(min=0)
+        else:
+            kl = torch.zeros(1).to(self.accelerator.device)
+
+        model_kwargs = (
+            {
+                "labels": batch["completion_labels"],
+                "decoder_input_ids": batch.get("completion_decoder_input_ids"),
+            }
+            if self.is_encoder_decoder
+            else {}
+        )
+        if self.aux_loss_enabled:
+            model_kwargs["output_router_logits"] = True
+
+        if self.is_encoder_decoder:
+            # 1. Get encoder outputs
+            encoder_outputs = model.get_encoder()(
+                batch["completion_input_ids"],
+                attention_mask=batch["completion_attention_mask"],
+                return_dict=True,
+                **model_kwargs,
+            )
+            # 2. Get decoder outputs
+            outputs = model.get_decoder()(
+                input_ids=model_kwargs["decoder_input_ids"],
+                encoder_hidden_states=encoder_outputs.last_hidden_state,
+                use_cache=False,
+                **model_kwargs,
+            )
+            # 1. Get reference encoder outputs
+            ref_encoder_outputs = self.ref_model.get_encoder()(
+                batch["completion_input_ids"],
+                attention_mask=batch["completion_attention_mask"],
+                return_dict=True,
+                **model_kwargs,
+            )
+            # 2. Get reference decoder outputs
+            ref_outputs = self.ref_model.get_decoder()(
+                input_ids=model_kwargs["decoder_input_ids"],
+                encoder_hidden_states=ref_encoder_outputs.last_hidden_state,
+                use_cache=False,
+                **model_kwargs,
+            )
+        else:
+            # skip the lm head and get the last hidden state
+            if hasattr(model, "get_decoder") and model.get_decoder() is not None:
+                base_model = model.get_decoder()
+            else:
+                base_attr = getattr(model, "base_model_prefix", self.args.base_model_attribute_name)
+                base_model = getattr(model, base_attr, model)
+            outputs = base_model(
+                batch["completion_input_ids"],
+                attention_mask=batch["completion_attention_mask"],
+                use_cache=False,
+                **model_kwargs,
+            )
+
+            # reference model
+            if hasattr(self.ref_model, "get_decoder") and self.ref_model.get_decoder() is not None:
+                ref_base_model = self.ref_model.get_decoder()
+            else:
+                ref_attr = getattr(self.ref_model, "base_model_prefix", self.args.base_model_attribute_name)
+                ref_base_model = getattr(self.ref_model, ref_attr, self.ref_model)
+            ref_outputs = ref_base_model(
+                batch["completion_input_ids"],
+                attention_mask=batch["completion_attention_mask"],
+                use_cache=False,
+                **model_kwargs,
+            )
+        lm_head = model.get_output_embeddings()
+        ref_lm_head = self.ref_model.get_output_embeddings()
+
+        (
+            loss,
+            (
+                chosen_logps_sum,
+                rejected_logps_sum,
+                chosen_logits_sum,
+                rejected_logits_sum,
+                chosen_rewards_sum,
+                rejected_rewards_sum,
+            ),
+        ) = self.kto_loss_fn(
+            _input=outputs.last_hidden_state[:, :-1] if not self.is_encoder_decoder else outputs.last_hidden_state,
+            lin_weight=lm_head.weight,
+            target=batch["completion_labels"][:, 1:],
+            bias=lm_head.bias if hasattr(lm_head, "bias") else None,
+            preference_labels=torch.tensor(batch["label"], dtype=torch.bool).to(self.accelerator.device),
+            ref_input=ref_outputs.last_hidden_state[:, :-1]
+            if not self.is_encoder_decoder
+            else ref_outputs.last_hidden_state,
+            ref_weight=ref_lm_head.weight,
+            ref_bias=ref_lm_head.bias if hasattr(ref_lm_head, "bias") else None,
+            kl=kl,
+        )
+
+        output = {
+            "loss": loss,
+            "chosen_logits_sum": chosen_logits_sum,
+            "rejected_logits_sum": rejected_logits_sum,
+            "chosen_logps_sum": chosen_logps_sum,
+            "rejected_logps_sum": rejected_logps_sum,
+            "chosen_rewards_sum": chosen_rewards_sum,
+            "rejected_rewards_sum": rejected_rewards_sum,
+            "kl": kl,
+        }
+        if self.aux_loss_enabled:
+            output["aux_loss"] = outputs.aux_loss
+
+        return output
+
+    def get_batch_loss_metrics(
+        self,
+        model,
+        batch: dict[str, Union[list, torch.LongTensor]],
+    ):
+        """Compute the KTO loss and other metrics for the given batch of inputs for train or test."""
+        metrics = {}
+        batch = {k: (v.to(self.accelerator.device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
+
+        labels = torch.tensor(batch["label"])
+        num_chosen = labels.sum().to(self.accelerator.device)
+        num_rejected = (len(labels) - num_chosen).to(self.accelerator.device)
+
+        if self.args.use_liger_loss:
+            model_output = self._compute_loss_liger(model, batch)
+            losses = model_output["loss"]
+            policy_chosen_logits = model_output["chosen_logits_sum"]
+            policy_rejected_logits = model_output["rejected_logits_sum"]
+            policy_chosen_logps = model_output["chosen_logps_sum"]
+            policy_rejected_logps = model_output["rejected_logps_sum"]
+            chosen_rewards = model_output["chosen_rewards_sum"]
+            rejected_rewards = model_output["rejected_rewards_sum"]
+            kl = model_output["kl"]
+            if self.aux_loss_enabled:
+                aux_loss = model_output["aux_loss"]
+        else:
+            forward_output = self.forward(model, batch)
+            (
+                policy_chosen_logps,
+                policy_rejected_logps,
+                policy_chosen_logits,
+                policy_rejected_logits,
+                policy_KL_logps,
+            ) = forward_output[:5]
+            if self.aux_loss_enabled:
+                aux_loss = forward_output[5]
+
+            # if reference_logps in batch use them, otherwise use the reference model
+            if "reference_logps" in batch:
+                chosen_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is True]
+                rejected_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is False]
+
+                reference_chosen_logps = batch["reference_logps"][chosen_idx, ...]
+                reference_rejected_logps = batch["reference_logps"][rejected_idx, ...]
+                if self.calculate_KL:
+                    reference_KL_logps = batch["reference_KL_logps"]
+                else:
+                    reference_KL_logps = None
+            else:
+                with torch.no_grad():
+                    if self.ref_model is None:
+                        with self.null_ref_context():
+                            (
+                                reference_chosen_logps,
+                                reference_rejected_logps,
+                                _,
+                                _,
+                                reference_KL_logps,
+                            ) = self.forward(self.model, batch)[:5]
+                    else:
+                        (
+                            reference_chosen_logps,
+                            reference_rejected_logps,
+                            _,
+                            _,
+                            reference_KL_logps,
+                        ) = self.forward(self.ref_model, batch)[:5]
+
+            losses, chosen_rewards, rejected_rewards, kl = self.kto_loss(
+                policy_chosen_logps,
+                policy_rejected_logps,
+                policy_KL_logps,
+                reference_chosen_logps,
+                reference_rejected_logps,
+                reference_KL_logps,
+            )
+
+        metrics["kl"] = kl.item()
+
+        all_num_chosen = self.accelerator.gather_for_metrics(num_chosen).sum().item()
+        all_num_rejected = self.accelerator.gather_for_metrics(num_rejected).sum().item()
+
+        if all_num_chosen > 0:
+            metrics["rewards/chosen_sum"] = (
+                self.accelerator.gather_for_metrics(chosen_rewards.nansum()).nansum().item()
+            )
+            metrics["logps/chosen_sum"] = (
+                self.accelerator.gather_for_metrics(policy_chosen_logps.nansum()).nansum().item()
+            )
+            metrics["logits/chosen_sum"] = (
+                self.accelerator.gather_for_metrics(policy_chosen_logits.nansum()).nansum().item()
+            )
+            metrics["count/chosen"] = all_num_chosen
+
+        if all_num_rejected > 0:
+            metrics["rewards/rejected_sum"] = (
+                self.accelerator.gather_for_metrics(rejected_rewards.nansum()).nansum().item()
+            )
+            metrics["logps/rejected_sum"] = (
+                self.accelerator.gather_for_metrics(policy_rejected_logps.nansum()).nansum().item()
+            )
+            metrics["logits/rejected_sum"] = (
+                self.accelerator.gather_for_metrics(policy_rejected_logits.nansum()).nansum().item()
+            )
+            metrics["count/rejected"] = all_num_rejected
+
+        loss = losses.nanmean()
+        if self.aux_loss_enabled:
+            loss += self.aux_loss_coef * aux_loss
+
+        return loss, metrics
+
+    def compute_loss(
+        self,
+        model: Union[PreTrainedModel, nn.Module],
+        inputs: dict[str, Union[torch.Tensor, Any]],
+        return_outputs=False,
+        num_items_in_batch=None,
+    ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
+        compute_loss_context_manager = (
+            autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
+        )
+
+        with compute_loss_context_manager:
+            loss, metrics = self.get_batch_loss_metrics(model, inputs)
+
+        # Make sure to move the loss to the device the original accumulating loss is at back in the `Trainer` class:
+        loss = loss.to(self.args.device)
+        # force log the metrics
+        if self.accelerator.is_main_process:
+            self.store_metrics(metrics, train_eval="train")
+
+        if return_outputs:
+            return (loss, metrics)
+        return loss
+
+    def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
+        for key, value in metrics.items():
+            self._stored_metrics[train_eval][key].append(value)
+
+    def _get_train_sampler(self, dataset: Optional[Dataset] = None) -> Optional[torch.utils.data.Sampler]:
+        if dataset is None:
+            dataset = self.train_dataset
+        if dataset is None or not has_length(dataset):
+            return None
+        return SequentialSampler(dataset)
+
+    def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[str, str]:
+        """Generate samples from the model and reference model for the given batch of inputs."""
+
+        # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
+        # the torch amp context manager as some hidden states are silently cast to full precision.
+        generate_context_manager = (
+            autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
+        )
+
+        with generate_context_manager:
+            policy_output = model.generate(
+                input_ids=batch["prompt_input_ids"],
+                attention_mask=batch["prompt_attention_mask"],
+                max_length=self.max_length,
+                do_sample=True,
+                pad_token_id=self.processing_class.pad_token_id,
+            )
+
+            # if reference_output in batch use that otherwise use the reference model
+            if "reference_output" in batch:
+                reference_output = batch["reference_output"]
+            else:
+                if self.ref_model is None:
+                    with self.null_ref_context():
+                        reference_output = self.model.generate(
+                            input_ids=batch["prompt_input_ids"],
+                            attention_mask=batch["prompt_attention_mask"],
+                            max_length=self.max_length,
+                            do_sample=True,
+                            pad_token_id=self.processing_class.pad_token_id,
+                        )
+                else:
+                    reference_output = self.ref_model.generate(
+                        input_ids=batch["prompt_input_ids"],
+                        attention_mask=batch["prompt_attention_mask"],
+                        max_length=self.max_length,
+                        do_sample=True,
+                        pad_token_id=self.processing_class.pad_token_id,
+                    )
+
+        policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
+        policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)
+
+        reference_output = pad_to_length(reference_output, self.max_length, self.processing_class.pad_token_id)
+        reference_output_decoded = self.processing_class.batch_decode(reference_output, skip_special_tokens=True)
+
+        return policy_output_decoded, reference_output_decoded
+
+    def prediction_step(
+        self,
+        model: Union[PreTrainedModel, nn.Module],
+        inputs: dict[str, Union[torch.Tensor, Any]],
+        prediction_loss_only: bool,
+        ignore_keys: Optional[list[str]] = None,
+    ):
+        if ignore_keys is None:
+            if hasattr(model, "config"):
+                ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
+            else:
+                ignore_keys = []
+
+        prediction_context_manager = (
+            autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
+        )
+        with torch.no_grad(), prediction_context_manager:
+            loss, metrics = self.get_batch_loss_metrics(model, inputs)
+
+        # force log the metrics
+        if self.accelerator.is_main_process:
+            self.store_metrics(metrics, train_eval="eval")
+
+        if prediction_loss_only:
+            return (loss.detach(), None, None)
+
+        # logits for the chosen and rejected samples from model
+        logits_dict = {}
+        if "logits/chosen_sum" in metrics:
+            logits_dict["eval_logits/chosen"] = metrics["logits/chosen_sum"]
+        if "logits/rejected_sum" in metrics:
+            logits_dict["eval_logits/rejected"] = metrics["logits/rejected_sum"]
+        logits = [v for k, v in logits_dict.items() if k not in ignore_keys]
+        logits = torch.tensor(logits, device=self.accelerator.device)
+        labels = torch.zeros(logits.shape[0], device=self.accelerator.device)
+
+        return (loss.detach(), logits, labels)
+
+    def evaluation_loop(
+        self,
+        dataloader: DataLoader,
+        description: str,
+        prediction_loss_only: Optional[bool] = None,
+        ignore_keys: Optional[list[str]] = None,
+        metric_key_prefix: str = "eval",
+    ) -> EvalLoopOutput:
+        """
+        Overriding built-in evaluation loop to store metrics for each batch. Prediction/evaluation loop, shared by
+        `Trainer.evaluate()` and `Trainer.predict()`.
+
+        Works both with or without labels.
+        """
+
+        # Sample and save to game log if requested (for one batch to save time)
+        if self.generate_during_eval:
+            # Generate random indices within the range of the total number of samples
+            num_samples = len(dataloader.dataset)
+            random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)
+
+            # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
+            random_batch_dataset = dataloader.dataset.select(random_indices)
+            random_batch = self.data_collator(random_batch_dataset)
+            random_batch = self._prepare_inputs(random_batch)
+
+            target_labels = torch.tensor(random_batch["label"], dtype=torch.bool, device=self.accelerator.device)
+            target_indices = torch.where(~target_labels)[0]
+            target_batch = {
+                "prompt_input_ids": random_batch["prompt_input_ids"][target_indices],
+                "prompt_attention_mask": random_batch["prompt_attention_mask"][target_indices],
+                "prompt": itemgetter(*target_indices)(random_batch["prompt"]),
+            }
+            policy_output_decoded, ref_output_decoded = self.generate_from_model_and_ref(self.model, target_batch)
+
+            table = pd.DataFrame(
+                columns=["Prompt", "Policy", "Ref Model"],
+                data=[
+                    [prompt, pol[len(prompt) :], ref[len(prompt) :]]
+                    for prompt, pol, ref in zip(target_batch["prompt"], policy_output_decoded, ref_output_decoded)
+                ],
+            )
+            if "wandb" in self.args.report_to:
+                wandb.log({"game_log": wandb.Table(data=table)})
+
+            if "comet_ml" in self.args.report_to:
+                log_table_to_comet_experiment(
+                    name="game_log.csv",
+                    table=table,
+                )
+
+        # Base evaluation
+        initial_output = super().evaluation_loop(
+            dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
+        )
+
+        return initial_output
+
+    def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
+        """
+        Log `logs` on the various objects watching training, including stored metrics.
+
+        Args:
+            logs (`dict[str, float]`):
+                The values to log.
+            start_time (`float`, *optional*):
+                Start time of the training.
+        """
+        # logs either has 'loss' or 'eval_loss'
+        train_eval = "train" if "loss" in logs else "eval"
+        # train metrics should have no prefix, eval should have 'eval_'
+        prefix = "eval_" if train_eval == "eval" else ""
+        # accumulate average metrics from sums and lengths
+        for split in ["chosen", "rejected"]:
+            if f"count/{split}" in self._stored_metrics[train_eval]:
+                count_sum = torch.Tensor(self._stored_metrics[train_eval][f"count/{split}"]).sum().item()
+                for metric in ["rewards", "logps", "logits"]:
+                    logs[f"{prefix}{metric}/{split}"] = (
+                        torch.Tensor(self._stored_metrics[train_eval][f"{metric}/{split}_sum"]).sum().item()
+                        / count_sum
+                    )
+                    # delete obsolete metric
+                    del self._stored_metrics[train_eval][f"{metric}/{split}_sum"]
+                del self._stored_metrics[train_eval][f"count/{split}"]
+        # calculate reward margin
+        if f"{prefix}rewards/chosen" in logs and f"{prefix}rewards/rejected" in logs:
+            logs[f"{prefix}rewards/margins"] = logs[f"{prefix}rewards/chosen"] - logs[f"{prefix}rewards/rejected"]
+        # Add averaged stored metrics to logs
+        for key, metrics in self._stored_metrics[train_eval].items():
+            logs[f"{prefix}{key}"] = torch.Tensor(metrics).mean().item()
+        del self._stored_metrics[train_eval]
+        return super().log(logs, start_time)
+
+    # Ensure the model card is saved along with the checkpoint
+    def _save_checkpoint(self, model, trial):
+        if self.args.hub_model_id is None:
+            model_name = Path(self.args.output_dir).name
+        else:
+            model_name = self.args.hub_model_id.split("/")[-1]
+        self.create_model_card(model_name=model_name)
+        super()._save_checkpoint(model, trial)
+class UnslothKTOTrainer(_UnslothKTOTrainer):
+    """
+
+    Initialize KTOTrainer.
+
+    Args:
+        model ([`~transformers.PreTrainedModel`]):
+            The model to train, preferably an [`~transformers.AutoModelForCausalLM`].
+        ref_model ([`PreTrainedModelWrapper`]):
+            Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation
+            and loss. If no reference model is provided, the trainer will create a reference model with the same
+            architecture as the model to be optimized.
+        args ([`KTOConfig`]):
+            The arguments to use for training.
+        train_dataset ([`~datasets.Dataset`]):
+            The dataset to use for training.
+        eval_dataset ([`~datasets.Dataset`]):
+            The dataset to use for evaluation.
+        processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*):
+            Processing class used to process the data. If provided, will be used to automatically process the inputs
+            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
+            reuse the fine-tuned model.
+        data_collator ([`~transformers.DataCollator`], *optional*):
+            The data collator to use for training. If None is specified, the default data collator
+            ([`DPODataCollatorWithPadding`]) will be used which will pad the sequences to the maximum length of the
+            sequences in the batch, given a dataset of paired sequences.
+        model_init (`Callable[[], transformers.PreTrainedModel]`):
+            The model initializer to use for training. If None is specified, the default model initializer will be
+            used.
+        callbacks (`list[transformers.TrainerCallback]`):
+            The callbacks to use for training.
+        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
+            The optimizer and scheduler to use for training.
+        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
+            The function to use to preprocess the logits before computing the metrics.
+        peft_config (`dict`, defaults to `None`):
+            The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in
+            a PEFT model.
+        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
+            The function to use to compute the metrics. Must take an `EvalPrediction` and return a dictionary mapping
+            strings to metric values.
+        model_adapter_name (`str`, defaults to `None`):
+            Name of the train target PEFT adapter, when using LoRA with multiple adapters.
+        ref_adapter_name (`str`, defaults to `None`):
+            Name of the reference PEFT adapter, when using LoRA with multiple adapters.
+
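+    Example (a minimal sketch; `model`, `tokenizer`, `train_dataset`, and the config values
+    are placeholders):
+
+    ```python
+    trainer = UnslothKTOTrainer(
+        model=model,
+        ref_model=None,  # PEFT/Unsloth runs can go reference-free
+        args=UnslothKTOConfig(output_dir="kto_output"),
+        train_dataset=train_dataset,  # rows with "prompt", "completion", boolean "label"
+        processing_class=tokenizer,
+    )
+    trainer.train()
+    ```
+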
+    """
+    def __init__(
+        self,
+        model = None,
+        ref_model = None,
+        args = None,
+        train_dataset = None,
+        eval_dataset = None,
+        processing_class = None,
+        data_collator = None,
+        model_init = None,
+        callbacks = None,
+        preprocess_logits_for_metrics = None,
+        peft_config = None,
+        compute_metrics = None,
+        model_adapter_name = None,
+        ref_adapter_name = None,
+        **kwargs
+    ):
+        if args is None: args = UnslothKTOConfig()
+        use_bf16 = getattr(args, 'bf16', False)
+        if type(use_bf16) is not bool: use_bf16 = False
+        use_fp16 = getattr(args, 'fp16', False)
+        if type(use_fp16) is not bool: use_fp16 = False
+        force_float32 = False
+        full_finetuning = os.environ.get('UNSLOTH_ENABLE_FULL_FINETUNING', '0') == '1'
+        if not full_finetuning and (os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1'):
+            print('Unsloth: Switching to float32 training since model cannot work with float16')
+            force_float32 = True
+        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
+        dtype = getattr(model.config, 'dtype', None) or getattr(model.config, 'torch_dtype', None)
+        if dtype is None: dtype = model.get_input_embeddings().weight.dtype
+        from unsloth_zoo.utils import _get_dtype
+        dtype = _get_dtype(dtype)
+        float16 = dtype == torch.float16
+        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
+        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
+        if force_float32:
+            # Forced float32 training
+            args.fp16 = False
+            args.bf16 = False
+            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
+            if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'
+            # args.mixed_precision is a new argument which needs to be set now
+        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
+            # Mixed precision training
+            args.fp16 = float16
+            args.bf16 = not float16
+            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
+            if hasattr(args, 'mixed_precision'): args.mixed_precision = 'fp16' if float16 else 'bf16'
+            # args.mixed_precision is a new argument which needs to be set now
+        elif mixed_precision_dtype == 'bfloat16':
+            # Both False since bfloat16 full finetuning doesn't do any autocasting.
+            args.fp16 = False
+            args.bf16 = False
+            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
+            if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'
+            # args.mixed_precision is a new argument which needs to be set now
+
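+        # Net effect of the block above: forced float32 and pure-bfloat16 full finetuning
+        # both disable accelerate mixed precision; otherwise autocast follows the model's
+        # weight dtype (fp16 for float16 weights, bf16 for bfloat16 weights).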
2083
+ if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
2084
+ args.eval_strategy = 'steps'
2085
+ if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
2086
+ ga_steps = getattr(args, 'gradient_accumulation_steps', None)
2087
+ if ga_steps is not None and ga_steps > 1:
2088
+ from transformers import __version__ as transformers_version
2089
+ if Version(transformers_version) <= Version('4.45.2'):
2090
+ print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
2091
+ '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
2092
+ if getattr(args, 'eval_strategy', 'no') != 'no':
2093
+ eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
2094
+ if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
2095
+ if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
2096
+ fp16_full_eval = getattr(args, 'fp16_full_eval', False)
2097
+ if type(fp16_full_eval) is not bool: fp16_full_eval = False
2098
+ bf16_full_eval = getattr(args, 'bf16_full_eval', False)
2099
+ if type(bf16_full_eval) is not bool: bf16_full_eval = False
2100
+ if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
2101
+ if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
2102
+ if force_float32:
2103
+ args.bf16_full_eval = False
2104
+ args.fp16_full_eval = False
2105
+ elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
2106
+ args.bf16_full_eval = True
2107
+ args.fp16_full_eval = False
2108
+ elif not bf16_full_eval and not fp16_full_eval:
2109
+ args.bf16_full_eval = args.bf16
2110
+ args.fp16_full_eval = args.fp16
2111
+ _output_logits = False
2112
+ if locals().get('compute_metrics', None) is not None: _output_logits = True
2113
+ if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
2114
+ if _output_logits:
2115
+ os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
2116
+ if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
2117
+ pass
2118
+ else:
2119
+ model_max_seq_length = getattr(model, 'max_seq_length', None)
2120
+ args_max_seq_length = getattr(args, 'max_seq_length', None)
2121
+ if args_max_seq_length is None and model_max_seq_length is not None:
2122
+ max_seq_length = model.max_seq_length
2123
+ if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
2124
+ if model is not None and hasattr(model, 'for_training'):
2125
+ model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
2126
+ if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
2127
+ if 'processing_class' in locals():
2128
+ if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
2129
+ if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
2130
+ __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
2131
+ from unsloth_zoo.vision_utils import UnslothVisionDataCollator
2132
+ if not isinstance(data_collator, UnslothVisionDataCollator):
2133
+ if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
2134
+ data_collator = TransformersDataCollatorForLanguageModeling(
2135
+ __tokenizer,
2136
+ mlm = False,
2137
+ mlm_probability = 0.0,
2138
+ pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
2139
+ )
2140
+ elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
2141
+ data_collator = DataCollatorForSeq2Seq(
2142
+ __tokenizer,
2143
+ pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
2144
+ )
2145
+ else:
2146
+ if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
2147
+ if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
2148
+ if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
2149
+ if not isinstance(data_collator, UnslothVisionDataCollator):
2150
+ if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
2151
+ if isinstance(data_collator, DataCollatorForSeq2Seq):
2152
+ data_collator = DataCollatorForSeq2Seq(
2153
+ __tokenizer.tokenizer,
2154
+ pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
2155
+ )
2156
+ else:
2157
+ data_collator = TransformersDataCollatorForLanguageModeling(
2158
+ __tokenizer.tokenizer,
2159
+ mlm = False,
2160
+ mlm_probability = 0.0,
2161
+ pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
2162
+ )
2163
+ other_metrics = []
2164
+
2165
+ from unsloth_zoo.logging_utils import PatchRLStatistics
2166
+ PatchRLStatistics('kto_trainer', other_metrics)
2167
+
2168
+ # [TODO] Fix up DataParallel multiplying batch sizes
2169
+ # [TODO] DDP works, but DP seems to not work? [TODO]
2170
+ if getattr(args, "parallel_mode", None) == ParallelMode.NOT_DISTRIBUTED and args.n_gpu > 1:
2171
+ if getattr(args, "_n_gpu", 1) != 1:
2172
+ args._n_gpu = 1
2173
+ if "model" in locals() and hasattr(model, "for_training"):
2174
+ model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
2175
+ super().__init__(
2176
+ model = model,
2177
+ ref_model = ref_model,
2178
+ args = args,
2179
+ train_dataset = train_dataset,
2180
+ eval_dataset = eval_dataset,
2181
+ processing_class = processing_class,
2182
+ data_collator = data_collator,
2183
+ model_init = model_init,
2184
+ callbacks = callbacks,
2185
+ preprocess_logits_for_metrics = preprocess_logits_for_metrics,
2186
+ peft_config = peft_config,
2187
+ compute_metrics = compute_metrics,
2188
+ model_adapter_name = model_adapter_name,
2189
+ ref_adapter_name = ref_adapter_name,**kwargs)
2190
+ if "model" in locals() and hasattr(model, "for_inference"):
2191
+ model.for_inference()
2192
+ if hasattr(self, 'neftune_hook_handle'):
2193
+ self.neftune_hook_handle.remove()
2194
+ if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
2195
+ if getattr(args, 'neftune_noise_alpha', None) is not None:
2196
+ model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
2197
+ pass
2198
+ if hasattr(self, 'accelerator'):
2199
+ scaler = self.accelerator.scaler
2200
+ current_model = model
2201
+ while hasattr(current_model, 'model'):
2202
+ current_model.accelerator_scaler = scaler
2203
+ current_model = current_model.model
2204
+ current_model.accelerator_scaler = scaler
2205
+ pass
2206
+ if hasattr(self, 'train'):
2207
+ self.train = MethodType(prepare_for_training_mode(self.__class__.train), self)
2208
+ pass
2209
+
2210
+ pass
2211
+
2212
+
2213
+ if hasattr(logger, "addFilter"):
2214
+ import logging
2215
+ class HideLoggingMessage(logging.Filter):
2216
+ def __init__(self, text): self.text = text
2217
+ def filter(self, x): return not (self.text in x.getMessage())
2218
+ pass
2219
+ logger.addFilter(HideLoggingMessage("`use_cache=True`"))
2220
+