liger-kernel-nightly 0.0.1.dev20240819184814__py3-none-any.whl → 0.6.4.dev20251212103629__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126)
  1. liger_kernel/__init__.py +0 -0
  2. liger_kernel/chunked_loss/README.md +25 -0
  3. liger_kernel/chunked_loss/__init__.py +8 -0
  4. liger_kernel/chunked_loss/cosine_similarity_loss.py +136 -0
  5. liger_kernel/chunked_loss/cpo_loss.py +157 -0
  6. liger_kernel/chunked_loss/dpo_loss.py +229 -0
  7. liger_kernel/chunked_loss/functional.py +17 -0
  8. liger_kernel/chunked_loss/fused_linear_distillation.py +292 -0
  9. liger_kernel/chunked_loss/fused_linear_ppo.py +366 -0
  10. liger_kernel/chunked_loss/fused_linear_preference.py +433 -0
  11. liger_kernel/chunked_loss/fused_linear_unpaired_preference.py +341 -0
  12. liger_kernel/chunked_loss/grpo_loss.py +307 -0
  13. liger_kernel/chunked_loss/jsd_loss.py +200 -0
  14. liger_kernel/chunked_loss/kto_loss.py +210 -0
  15. liger_kernel/chunked_loss/orpo_loss.py +144 -0
  16. liger_kernel/chunked_loss/simpo_loss.py +165 -0
  17. liger_kernel/env_report.py +63 -0
  18. liger_kernel/ops/__init__.py +141 -0
  19. liger_kernel/ops/backends/README.md +151 -0
  20. liger_kernel/ops/backends/__init__.py +13 -0
  21. liger_kernel/ops/backends/_ascend/__init__.py +5 -0
  22. liger_kernel/ops/backends/_ascend/ops/__init__.py +15 -0
  23. liger_kernel/ops/backends/registry.py +61 -0
  24. liger_kernel/ops/cross_entropy.py +383 -114
  25. liger_kernel/ops/dyt.py +160 -0
  26. liger_kernel/ops/experimental/embedding.py +141 -0
  27. liger_kernel/ops/experimental/mm_int8int2.py +349 -0
  28. liger_kernel/ops/fused_add_rms_norm.py +416 -0
  29. liger_kernel/ops/fused_linear_cross_entropy.py +346 -132
  30. liger_kernel/ops/fused_linear_jsd.py +228 -0
  31. liger_kernel/ops/fused_neighborhood_attention.py +1022 -0
  32. liger_kernel/ops/geglu.py +66 -64
  33. liger_kernel/ops/group_norm.py +306 -0
  34. liger_kernel/ops/grpo_loss.py +312 -0
  35. liger_kernel/ops/jsd.py +201 -0
  36. liger_kernel/ops/kl_div.py +262 -0
  37. liger_kernel/ops/layer_norm.py +320 -0
  38. liger_kernel/ops/llama4_rope.py +225 -0
  39. liger_kernel/ops/multi_token_attention.py +207 -0
  40. liger_kernel/ops/poly_norm.py +390 -0
  41. liger_kernel/ops/qwen2vl_mrope.py +222 -0
  42. liger_kernel/ops/rms_norm.py +484 -88
  43. liger_kernel/ops/rope.py +122 -117
  44. liger_kernel/ops/softmax.py +201 -0
  45. liger_kernel/ops/sparsemax.py +179 -0
  46. liger_kernel/ops/swiglu.py +68 -65
  47. liger_kernel/ops/tiled_mlp.py +136 -0
  48. liger_kernel/ops/tvd.py +207 -0
  49. liger_kernel/ops/utils.py +82 -3
  50. liger_kernel/transformers/__init__.py +218 -6
  51. liger_kernel/transformers/auto_model.py +38 -0
  52. liger_kernel/transformers/cross_entropy.py +52 -7
  53. liger_kernel/transformers/dyt.py +22 -0
  54. liger_kernel/transformers/experimental/__init__.py +5 -0
  55. liger_kernel/transformers/experimental/embedding.py +26 -0
  56. liger_kernel/transformers/fsdp.py +55 -0
  57. liger_kernel/transformers/functional.py +301 -0
  58. liger_kernel/transformers/fused_add_rms_norm.py +39 -0
  59. liger_kernel/transformers/fused_linear_cross_entropy.py +59 -10
  60. liger_kernel/transformers/fused_linear_jsd.py +95 -0
  61. liger_kernel/transformers/fused_neighborhood_attention.py +234 -0
  62. liger_kernel/transformers/geglu.py +6 -7
  63. liger_kernel/transformers/group_norm.py +50 -0
  64. liger_kernel/transformers/grpo_loss.py +153 -0
  65. liger_kernel/transformers/jsd.py +70 -0
  66. liger_kernel/transformers/kl_div.py +12 -0
  67. liger_kernel/transformers/layer_norm.py +24 -0
  68. liger_kernel/transformers/llama4_rope.py +93 -0
  69. liger_kernel/transformers/model/falcon_h1.py +122 -0
  70. liger_kernel/transformers/model/gemma.py +261 -0
  71. liger_kernel/transformers/model/gemma2.py +283 -0
  72. liger_kernel/transformers/model/gemma3.py +332 -0
  73. liger_kernel/transformers/model/glm4.py +141 -0
  74. liger_kernel/transformers/model/glm4v.py +163 -0
  75. liger_kernel/transformers/model/glm4v_moe.py +172 -0
  76. liger_kernel/transformers/model/gpt_oss.py +211 -0
  77. liger_kernel/transformers/model/hunyuan_v1.py +134 -0
  78. liger_kernel/transformers/model/internvl.py +157 -0
  79. liger_kernel/transformers/model/llama.py +221 -41
  80. liger_kernel/transformers/model/llama4.py +121 -0
  81. liger_kernel/transformers/model/llava.py +344 -0
  82. liger_kernel/transformers/model/loss_utils.py +95 -0
  83. liger_kernel/transformers/model/mistral.py +145 -0
  84. liger_kernel/transformers/model/mixtral.py +293 -0
  85. liger_kernel/transformers/model/mllama.py +269 -0
  86. liger_kernel/transformers/model/olmo2.py +141 -0
  87. liger_kernel/transformers/model/olmo3.py +142 -0
  88. liger_kernel/transformers/model/output_classes.py +147 -0
  89. liger_kernel/transformers/model/paligemma.py +433 -0
  90. liger_kernel/transformers/model/phi3.py +120 -0
  91. liger_kernel/transformers/model/qwen2.py +259 -0
  92. liger_kernel/transformers/model/qwen2_5_vl.py +163 -0
  93. liger_kernel/transformers/model/qwen2_vl.py +159 -0
  94. liger_kernel/transformers/model/qwen3.py +136 -0
  95. liger_kernel/transformers/model/qwen3_moe.py +152 -0
  96. liger_kernel/transformers/model/qwen3_next.py +146 -0
  97. liger_kernel/transformers/model/qwen3_vl.py +150 -0
  98. liger_kernel/transformers/model/qwen3_vl_moe.py +126 -0
  99. liger_kernel/transformers/model/smollm3.py +199 -0
  100. liger_kernel/transformers/model/smolvlm.py +158 -0
  101. liger_kernel/transformers/monkey_patch.py +2816 -21
  102. liger_kernel/transformers/multi_token_attention.py +64 -0
  103. liger_kernel/transformers/poly_norm.py +42 -0
  104. liger_kernel/transformers/qwen2vl_mrope.py +20 -0
  105. liger_kernel/transformers/rms_norm.py +75 -5
  106. liger_kernel/transformers/rope.py +47 -3
  107. liger_kernel/transformers/softmax.py +12 -0
  108. liger_kernel/transformers/sparsemax.py +16 -0
  109. liger_kernel/transformers/swiglu.py +62 -6
  110. liger_kernel/transformers/tiled_mlp.py +133 -0
  111. liger_kernel/transformers/trainer/__init__.py +4 -0
  112. liger_kernel/transformers/trainer/orpo_trainer.py +130 -0
  113. liger_kernel/transformers/trainer_integration.py +2 -45
  114. liger_kernel/transformers/tvd.py +13 -0
  115. liger_kernel/triton/__init__.py +1 -3
  116. liger_kernel/triton/monkey_patch.py +1 -5
  117. liger_kernel/utils.py +96 -0
  118. liger_kernel_nightly-0.6.4.dev20251212103629.dist-info/METADATA +447 -0
  119. liger_kernel_nightly-0.6.4.dev20251212103629.dist-info/NOTICE +58 -0
  120. liger_kernel_nightly-0.6.4.dev20251212103629.dist-info/RECORD +124 -0
  121. {liger_kernel_nightly-0.0.1.dev20240819184814.dist-info → liger_kernel_nightly-0.6.4.dev20251212103629.dist-info}/WHEEL +1 -1
  122. liger_kernel_nightly-0.0.1.dev20240819184814.dist-info/METADATA +0 -21
  123. liger_kernel_nightly-0.0.1.dev20240819184814.dist-info/NOTICE +0 -4
  124. liger_kernel_nightly-0.0.1.dev20240819184814.dist-info/RECORD +0 -27
  125. {liger_kernel_nightly-0.0.1.dev20240819184814.dist-info → liger_kernel_nightly-0.6.4.dev20251212103629.dist-info}/LICENSE +0 -0
  126. {liger_kernel_nightly-0.0.1.dev20240819184814.dist-info → liger_kernel_nightly-0.6.4.dev20251212103629.dist-info}/top_level.txt +0 -0

liger_kernel/ops/fused_linear_jsd.py
@@ -0,0 +1,228 @@
+from typing import Optional
+
+import torch
+import triton
+
+from liger_kernel.ops.jsd import _jsd_kernel
+from liger_kernel.ops.utils import amp_custom_bwd
+from liger_kernel.ops.utils import amp_custom_fwd
+from liger_kernel.ops.utils import element_mul_kernel
+from liger_kernel.ops.utils import is_hip
+from liger_kernel.utils import infer_device
+
+# The hard limit of TRITON_MAX_TENSOR_NUMEL is 1048576 https://github.com/triton-lang/triton/blob/ba42a5c68fd0505f8c42f4202d53be0f8d9a5fe0/python/triton/language/core.py#L19
+# However, setting limit as 65536 as in LayerNorm tutorial is faster because of less register spilling
+# The optimal maximum block size depends on your hardware, your kernel, and your dtype
+MAX_FUSED_SIZE = 4096 if infer_device() == "xpu" else 65536 // 2
+
+
+def fused_linear_jsd_forward(
+    student_input,
+    student_weight,
+    teacher_input,
+    teacher_weight,
+    shift_labels,
+    jsd_beta,
+    ignore_index,
+    has_label,
+    temperature,
+):
+    device = student_input.device
+    dtype = student_input.dtype
+
+    # inputs have shape: BT x H
+    # materialized activations will have shape: BT x V
+    # the increase in memory = BT x V
+    # reduction can be achieved by partitioning the number of tokens BT into smaller chunks.
+    # for ex: if we were to achieve the same memory consumption as BT x H, then the chunk size should be:
+    # inc_factor = (V+H-1)//H, chunk_size = (BT + inc_factor - 1)//inc_factor
+    # for ex: BT = 4096*4, V = 32000, H = 4096 ==> inc_factor = 8, chunk_size = 2048
+    BT, H = student_input.shape
+    V = student_weight.shape[0]
+    BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(V))
+
+    inc_factor = triton.cdiv(V, H)  # (V + H - 1) // H
+    chunk_size = triton.next_power_of_2(triton.cdiv(BT, inc_factor))  # (BT + inc_factor - 1) // inc_factor
+    num_chunks = triton.cdiv(BT, chunk_size)  # (BT + chunk_size - 1) // chunk_size
+
+    grad_weight = torch.zeros_like(student_weight, device=device) if student_weight.requires_grad else None
+    grad_input = torch.zeros_like(student_input)
+    # we use fp32 for loss accumulator
+    loss_1d = torch.zeros((BT, V), dtype=torch.float32, device=device)
+
+    if has_label:
+        n_non_ignore = (shift_labels != ignore_index).sum().item()
+    else:
+        n_non_ignore = BT
+
+    for chunk_id in range(num_chunks):
+        start_idx = chunk_id * chunk_size
+        end_idx = min((chunk_id + 1) * chunk_size, BT)
+
+        # chunk both inputs, shape: chunk_size x H
+        student_input_chunk = student_input[start_idx:end_idx]
+        teacher_input_chunk = teacher_input[start_idx:end_idx]
+
+        # shape: chunk_size x V
+        # For anything starting from logits to the final JSD loss, we do computation
+        # in FP32 to avoid losing numerical stability.
+        student_logits_chunk = (student_input_chunk @ student_weight.t()).to(torch.float32)
+        teacher_logits_chunk = (teacher_input_chunk @ teacher_weight.t()).to(torch.float32)
+        chunk_n_rows = student_logits_chunk.shape[0]
+
+        # unreduced loss
+        loss_1d_slice = loss_1d[start_idx:end_idx]  # chunk_size
+        # log-softmax with temperature
+        student_logits_chunk = student_logits_chunk / temperature
+        teacher_logits_chunk = teacher_logits_chunk / temperature
+        student_prob_chunk = torch.log_softmax(student_logits_chunk, dim=-1)
+        teacher_prob_chunk = torch.log_softmax(teacher_logits_chunk, dim=-1)
+
+        # ensure _input and target are contiguous
+        student_prob_chunk = student_prob_chunk.contiguous()
+        teacher_prob_chunk = teacher_prob_chunk.contiguous()
+
+        # Here we calculate the gradient of prob_chunk in place so we can save memory.
+        _jsd_kernel[(chunk_n_rows,)](
+            X_ptr=student_prob_chunk,
+            X_stride=student_prob_chunk.stride(-2),
+            Y_ptr=teacher_prob_chunk,
+            Y_stride=teacher_prob_chunk.stride(-2),
+            loss_ptr=loss_1d_slice,
+            loss_stride=loss_1d_slice.stride(-2),
+            dX_ptr=student_prob_chunk,
+            dX_stride=student_prob_chunk.stride(-2),
+            label_ptr=(
+                shift_labels[start_idx:end_idx] if has_label else torch.empty(1, device=device)
+            ),  # dummy ptr if no label
+            beta=jsd_beta,
+            n_non_ignore=n_non_ignore,
+            ignore_index=ignore_index,
+            n_cols=V,
+            BLOCK_SIZE=BLOCK_SIZE,
+            HAS_LABEL=has_label,
+        )
+        loss_1d[start_idx:end_idx] = loss_1d_slice
+        # gradients of prob_chunk in place, shape: chunk_size x V
+        # gradients of logits_chunk in place, shape: chunk_size x V
+        student_logits_chunk = (
+            student_prob_chunk
+            - torch.softmax(student_logits_chunk, dim=-1)
+            * student_prob_chunk.sum(dim=-1, keepdim=True).broadcast_to(student_prob_chunk.shape)
+        ) / temperature
+        # now we traverse back to grad w.r.t. input to `lm_head` and grad
+        # w.r.t. `lm_head` which should be computed in original dtype
+        student_logits_chunk = student_logits_chunk.to(dtype)
+        grad_input[start_idx:end_idx] = student_logits_chunk @ student_weight
+
+        if grad_weight is not None:
+            grad_weight.add_(student_logits_chunk.t() @ student_input_chunk)
+
+    loss = torch.sum(loss_1d)
+    return loss, grad_input, grad_weight
+
+
+def fused_linear_jsd_backward(grad_output, grad_input, grad_weight):
+    # If JSD is the last layer, grad_output is 1.0. Skip the mul to save time
+    if torch.ne(grad_output, torch.tensor(1.0, device=grad_output.device)):
+        # We use a Triton kernel instead of a PyTorch operation because modifying inputs in-place
+        # for gradient storage and backward multiple times causes anomalies with PyTorch but not with Triton.
+        BT, H = grad_input.shape
+        n_rows = BT
+        BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(H))
+
+        element_mul_kernel[(n_rows,)](
+            grad_input,
+            grad_input.stride(-2),
+            grad_output,
+            H,
+            BLOCK_SIZE=BLOCK_SIZE,
+            num_warps=32 if not is_hip() else 16,
+        )
+
+        # handle grad_weight
+        if grad_weight is not None:
+            V, H = grad_weight.shape
+            n_rows = V
+
+            element_mul_kernel[(n_rows,)](
+                grad_weight,
+                grad_weight.stride(-2),
+                grad_output,
+                H,
+                BLOCK_SIZE=BLOCK_SIZE,
+                num_warps=32 if not is_hip() else 16,
+            )
+
+    return grad_input, grad_weight
+
+
+class LigerFusedLinearJSDFunction(torch.autograd.Function):
+    """
+    Fusing the last linear layer with generalized JSD
+
+    Handle the forward and backward pass of the final linear layer via JSD by avoiding
+    the materialization of the large logits tensor. Since JSD is the last layer, we can
+    compute the gradient at the forward pass.
+    """
+
+    @staticmethod
+    @amp_custom_fwd
+    def forward(
+        ctx,
+        student_input: torch.Tensor,
+        student_weight: torch.Tensor,
+        teacher_input: torch.Tensor,
+        teacher_weight: torch.Tensor,
+        shift_labels: Optional[torch.Tensor] = None,
+        jsd_beta: float = 0.5,
+        ignore_index: int = -100,
+        temperature: float = 1.0,
+    ):
+        """
+        Args:
+
+            student_input (torch.tensor): input of the last projection layer in student model, with shape (B*T, H), where B is batch size, T is sequence length, H is hidden dimension.
+            student_weight (torch.tensor): the last projection layer in student model, with shape (V, H), where V is vocab size
+            teacher_input (torch.tensor): input of the last projection layer in teacher model, with shape (B*T, H), where B is batch size, T is sequence length, H is hidden dimension.
+            teacher_weight (torch.tensor): the last projection layer in teacher model, with shape (V, H), where V is vocab size
+            shift_labels (Optional[torch.LongTensor]): indicator of next predicted vocab with shape (BT) where each value is in [0, V-1].
+            jsd_beta (float): coefficient beta of generalized JSD in the interval [0, 1]. It implements forward/reverse KL when beta equals 0 and 1 respectively. Default: `0.5`
+            ignore_index (int): the index to ignore. Default: -100
+            temperature (float): temperature in softmax function to control the output probability distribution. Default: `1.0`
+
+        Returns:
+            loss (torch.Tensor): generalized JSD
+        """
+        has_label = False
+        if shift_labels is not None:
+            assert shift_labels.shape == (teacher_input.shape[0],), (
+                f"the shape of shift_labels must be (BT,). Got: {shift_labels.shape}"
+            )
+            shift_labels = shift_labels.contiguous()
+            has_label = True
+
+        loss, grad_input, grad_weight = fused_linear_jsd_forward(
+            student_input,
+            student_weight,
+            teacher_input,
+            teacher_weight,
+            shift_labels,
+            jsd_beta,
+            ignore_index,
+            has_label,
+            temperature,
+        )
+        # downcast to dtype and store for backward
+        ctx.save_for_backward(
+            grad_input.detach(),
+            grad_weight.detach() if grad_weight is not None else None,
+        )
+        return loss
+
+    @staticmethod
+    @amp_custom_bwd
+    def backward(ctx, grad_output):
+        (grad_input, grad_weight) = ctx.saved_tensors
+        grad_input, grad_weight = fused_linear_jsd_backward(grad_output, grad_input, grad_weight)
+        return (grad_input, grad_weight, None, None, None, None, None, None)
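
For orientation, here is a minimal usage sketch of the LigerFusedLinearJSDFunction added above. It is not part of the packaged file: the tensor names and sizes are illustrative, and it assumes a Triton-capable device (e.g. a CUDA GPU), since both the forward and backward paths launch Triton kernels. Function.apply takes its arguments positionally, in the order declared by forward().

```python
import torch

from liger_kernel.ops.fused_linear_jsd import LigerFusedLinearJSDFunction

# Illustrative sizes: BT flattened tokens (B*T), hidden size H, vocab size V.
BT, H, V = 16, 64, 128
device = "cuda"  # the kernels above require a Triton-supported device

student_hidden = torch.randn(BT, H, device=device, requires_grad=True)
# grad_weight is only accumulated when the student projection weight requires grad
student_weight = torch.randn(V, H, device=device, requires_grad=True)
teacher_hidden = torch.randn(BT, H, device=device)
teacher_weight = torch.randn(V, H, device=device)
shift_labels = torch.randint(0, V, (BT,), device=device)

# Positional order matches forward(): student_input, student_weight,
# teacher_input, teacher_weight, shift_labels, jsd_beta, ignore_index, temperature.
loss = LigerFusedLinearJSDFunction.apply(
    student_hidden,
    student_weight,
    teacher_hidden,
    teacher_weight,
    shift_labels,
    0.5,   # jsd_beta
    -100,  # ignore_index
    1.0,   # temperature
)
loss.backward()
```

Because the input and weight gradients are already produced during the forward pass, backward only rescales the cached gradients by grad_output, and skips even that when grad_output is exactly 1.0.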