liger-kernel-nightly 0.6.3.dev20251121010234__py3-none-any.whl → 0.6.3.dev20251121195543__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -42,6 +42,8 @@ if TYPE_CHECKING:
  from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_glm4v # noqa: F401
  from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_glm4v_moe # noqa: F401
  from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_granite # noqa: F401
+ from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_hunyuan_v1_dense # noqa: F401
+ from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_hunyuan_v1_moe # noqa: F401
  from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_internvl # noqa: F401
  from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_llama # noqa: F401
  from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_llama4 # noqa: F401
@@ -128,6 +130,8 @@ def __getattr__(name: str):
  "apply_liger_kernel_to_qwen3_vl_moe",
  "apply_liger_kernel_to_smollm3",
  "apply_liger_kernel_to_smolvlm",
+ "apply_liger_kernel_to_hunyuan_v1_dense",
+ "apply_liger_kernel_to_hunyuan_v1_moe",
  }

  if name in monkey_patch_symbols:
@@ -202,5 +206,7 @@ if _TRANSFORMERS_AVAILABLE:
  "apply_liger_kernel_to_qwen3_vl_moe",
  "apply_liger_kernel_to_smollm3",
  "apply_liger_kernel_to_smolvlm",
+ "apply_liger_kernel_to_hunyuan_v1_dense",
+ "apply_liger_kernel_to_hunyuan_v1_moe",
  ]
  )
@@ -0,0 +1,134 @@
+ from typing import List
+ from typing import Optional
+ from typing import Union
+
+ import torch
+
+ from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+ from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+ from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
+
+
+ def lce_forward(
+     self,
+     input_ids: Optional[torch.LongTensor] = None,
+     attention_mask: Optional[torch.Tensor] = None,
+     position_ids: Optional[torch.LongTensor] = None,
+     past_key_values: Optional[List[torch.FloatTensor]] = None,
+     inputs_embeds: Optional[torch.FloatTensor] = None,
+     labels: Optional[torch.LongTensor] = None,
+     use_cache: Optional[bool] = None,
+     output_attentions: Optional[bool] = None,
+     output_hidden_states: Optional[bool] = None,
+     cache_position: Optional[torch.LongTensor] = None,
+     logits_to_keep: Union[int, torch.Tensor] = 0,
+     skip_logits: Optional[bool] = None,
+     return_dict: Optional[bool] = None,
+     **kwargs,
+ ) -> LigerCausalLMOutputWithPast:
+     r"""
+     labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+         Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+         config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+         (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+     logits_to_keep (`int` or `torch.Tensor`, *optional*):
+         If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+         `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+         token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+         If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+         This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+     Returns:
+
+     Example:
+
+     ```python
+     >>> from transformers import AutoTokenizer, HunYuanDenseV1ForCausalLM
+
+     >>> model = HunYuanDenseV1ForCausalLM.from_pretrained("meta-hunyuan_v1_dense/HunYuanDenseV1-2-7b-hf")
+     >>> tokenizer = AutoTokenizer.from_pretrained("meta-hunyuan_v1_dense/HunYuanDenseV1-2-7b-hf")
+
+     >>> prompt = "Hey, are you conscious? Can you talk to me?"
+     >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+     >>> # Generate
+     >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+     >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+     "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+     ```"""
+     output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+     output_hidden_states = (
+         output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+     )
+     return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+     # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+     outputs = self.model(
+         input_ids=input_ids,
+         attention_mask=attention_mask,
+         position_ids=position_ids,
+         past_key_values=past_key_values,
+         inputs_embeds=inputs_embeds,
+         use_cache=use_cache,
+         output_attentions=output_attentions,
+         output_hidden_states=output_hidden_states,
+         cache_position=cache_position,
+         **kwargs,
+     )
+
+     hidden_states = outputs[0]
+     # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+     slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+     kept_hidden_states = hidden_states[:, slice_indices, :]
+
+     shift_labels = kwargs.pop("shift_labels", None)
+     logits = None
+     loss = None
+     token_accuracy = None
+
+     if skip_logits and labels is None and shift_labels is None:
+         raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+     if skip_logits is None:
+         # By default, if in training mode, don't materialize logits
+         skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+     # Compute loss
+     if skip_logits:
+         result = LigerForCausalLMLoss(
+             hidden_states=kept_hidden_states,
+             lm_head_weight=self.lm_head.weight,
+             labels=labels,
+             shift_labels=shift_labels,
+             hidden_size=self.config.hidden_size,
+             **kwargs,
+         )
+         loss, _, token_accuracy = unpack_cross_entropy_result(result)
+
+     else:
+         logits = self.lm_head(kept_hidden_states)
+         if labels is not None or shift_labels is not None:
+             loss = self.loss_function(
+                 logits=logits,
+                 labels=labels,
+                 shift_labels=shift_labels,
+                 vocab_size=self.config.vocab_size,
+                 **kwargs,
+             )
+
+     if not return_dict:
+         output = (logits,) + outputs[1:]
+         output = ((loss,) + output) if loss is not None else output
+         output = output + (token_accuracy,) if token_accuracy is not None else output
+         return output
+
+     # Return custom output class with accuracy field
+     return LigerCausalLMOutputWithPast(
+         loss=loss,
+         logits=logits,
+         past_key_values=outputs.past_key_values,
+         hidden_states=outputs.hidden_states,
+         attentions=outputs.attentions,
+         token_accuracy=token_accuracy,
+     )
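Not part of the diff itself, but for orientation: a minimal sketch of what the `skip_logits` branching above means for a caller, assuming `model` is a HunYuan causal LM whose forward has been replaced by this `lce_forward` and `batch` is a tokenized training batch with `input_ids`, `attention_mask`, and `labels` (both names are hypothetical placeholders):

```python
# Hypothetical usage sketch; `model` and `batch` are assumptions, not objects defined in the diff.
model.train()
out = model(**batch)
# In training mode with labels present, skip_logits defaults to True: the fused
# LigerForCausalLMLoss path runs, so loss is populated and logits stay None.
assert out.loss is not None and out.logits is None

model.eval()
out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
# In eval mode without labels, skip_logits resolves to False: logits are materialized, loss stays None.
assert out.logits is not None and out.loss is None
```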
@@ -2558,6 +2558,123 @@ def apply_liger_kernel_to_qwen3_next(
  _patch_swiglu_module(expert, LigerQwen3MoeSwiGLUMLP)


+ def apply_liger_kernel_to_hunyuan_v1_dense(
+     rope: bool = True,
+     cross_entropy: bool = False,
+     fused_linear_cross_entropy: bool = True,
+     rms_norm: bool = True,
+     swiglu: bool = True,
+     model: PreTrainedModel = None,
+ ) -> None:
+     """
+     Apply Liger kernels to replace original implementation in HuggingFace Hunyuan v1 dense models.
+     """
+     assert not (cross_entropy and fused_linear_cross_entropy), (
+         "cross_entropy and fused_linear_cross_entropy cannot both be True."
+     )
+
+     from transformers.models.hunyuan_v1_dense import modeling_hunyuan_v1_dense
+     from transformers.models.hunyuan_v1_dense.modeling_hunyuan_v1_dense import HunYuanDenseV1Model
+
+     from liger_kernel.transformers.model.hunyuan_v1 import lce_forward as hunyuan_v1_lce_forward
+     from liger_kernel.transformers.swiglu import LigerHunyuanV1SwiGLUMLP
+
+     if rope:
+         modeling_hunyuan_v1_dense.apply_rotary_pos_emb = liger_rotary_pos_emb
+
+     if rms_norm:
+         modeling_hunyuan_v1_dense.HunYuanDenseV1RMSNorm = LigerRMSNorm
+
+     if cross_entropy:
+         from transformers.loss.loss_utils import nn
+
+         nn.functional.cross_entropy = liger_cross_entropy
+
+     if fused_linear_cross_entropy:
+         if model is not None:
+             model.forward = MethodType(hunyuan_v1_lce_forward, model)
+         else:
+             modeling_hunyuan_v1_dense.HunYuanDenseV1ForCausalLM.forward = hunyuan_v1_lce_forward
+
+     if swiglu:
+         modeling_hunyuan_v1_dense.HunYuanDenseV1MLP = LigerHunyuanV1SwiGLUMLP
+
+     if model is not None:
+         # The model instance already exists, so we need to additionally patch the
+         # instance variables that reference already-instantiated modules
+
+         # get the base model from the model instance
+         base_model: HunYuanDenseV1Model = getattr(model, model.base_model_prefix, model)
+
+         if rms_norm:
+             _patch_rms_norm_module(base_model.norm)
+         for decoder_layer in base_model.layers:
+             if swiglu:
+                 _patch_swiglu_module(decoder_layer.mlp, LigerHunyuanV1SwiGLUMLP)
+             if rms_norm:
+                 _patch_rms_norm_module(decoder_layer.input_layernorm)
+                 _patch_rms_norm_module(decoder_layer.post_attention_layernorm)
+
+
+ def apply_liger_kernel_to_hunyuan_v1_moe(
+     rope: bool = True,
+     cross_entropy: bool = False,
+     fused_linear_cross_entropy: bool = True,
+     rms_norm: bool = True,
+     swiglu: bool = True,
+     model: PreTrainedModel = None,
+ ) -> None:
+     """
+     Apply Liger kernels to replace original implementation in HuggingFace Hunyuan v1 MoE models.
+     """
+     assert not (cross_entropy and fused_linear_cross_entropy), (
+         "cross_entropy and fused_linear_cross_entropy cannot both be True."
+     )
+
+     from transformers.models.hunyuan_v1_moe import modeling_hunyuan_v1_moe
+     from transformers.models.hunyuan_v1_moe.modeling_hunyuan_v1_moe import HunYuanMoEV1Model
+
+     from liger_kernel.transformers.model.hunyuan_v1 import lce_forward as hunyuan_v1_moe_lce_forward
+     from liger_kernel.transformers.swiglu import LigerHunyuanV1SwiGLUMLP
+
+     if rope:
+         modeling_hunyuan_v1_moe.apply_rotary_pos_emb = liger_rotary_pos_emb
+
+     if rms_norm:
+         modeling_hunyuan_v1_moe.HunYuanMoEV1RMSNorm = LigerRMSNorm
+
+     if cross_entropy:
+         from transformers.loss.loss_utils import nn
+
+         nn.functional.cross_entropy = liger_cross_entropy
+
+     if fused_linear_cross_entropy:
+         if model is not None:
+             model.forward = MethodType(hunyuan_v1_moe_lce_forward, model)
+         else:
+             modeling_hunyuan_v1_moe.HunYuanMoEV1ForCausalLM.forward = hunyuan_v1_moe_lce_forward
+
+     if swiglu:
+         modeling_hunyuan_v1_moe.HunYuanMoEV1MLP = LigerHunyuanV1SwiGLUMLP
+
+     if model is not None:
+         # The model instance already exists, so we need to additionally patch the
+         # instance variables that reference already-instantiated modules
+
+         # get the base model from the model instance
+         base_model: HunYuanMoEV1Model = getattr(model, model.base_model_prefix, model)
+
+         if rms_norm:
+             _patch_rms_norm_module(base_model.norm)
+         for decoder_layer in base_model.layers:
+             if swiglu:
+                 for mlp_expert in decoder_layer.mlp.experts:
+                     _patch_swiglu_module(mlp_expert, LigerHunyuanV1SwiGLUMLP)
+             if rms_norm:
+                 _patch_rms_norm_module(decoder_layer.input_layernorm)
+                 _patch_rms_norm_module(decoder_layer.post_attention_layernorm)
+
+
  # Model type corresponds to the keys defined in transformers/models/auto/modeling_auto.py
  MODEL_TYPE_TO_APPLY_LIGER_FN = {
  "gemma": apply_liger_kernel_to_gemma,
@@ -2595,6 +2712,8 @@ MODEL_TYPE_TO_APPLY_LIGER_FN = {
  "paligemma": apply_liger_kernel_to_paligemma,
  "falcon_h1": apply_liger_kernel_to_falcon_h1,
  "smolvlm": apply_liger_kernel_to_smolvlm,
+ "hunyuan_v1_dense": apply_liger_kernel_to_hunyuan_v1_dense,
+ "hunyuan_v1_moe": apply_liger_kernel_to_hunyuan_v1_moe,
  }

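For orientation (not part of the diff): the new entry points follow the same two usage patterns as the existing `apply_liger_kernel_to_*` functions. A rough sketch of the instance-level pattern handled by the `model is not None` branch above; the checkpoint path is a placeholder, not a real Hub repo id:

```python
from transformers import AutoModelForCausalLM

from liger_kernel.transformers import apply_liger_kernel_to_hunyuan_v1_dense

# Load a HunYuan v1 dense checkpoint first (placeholder path), then patch the live instance:
# the model-aware branch swaps the forward method and rewrites the instance's RMSNorm and MLP
# modules in place, since class-level patching alone would miss already-built modules.
model = AutoModelForCausalLM.from_pretrained("path/to/hunyuan-v1-dense-checkpoint")
apply_liger_kernel_to_hunyuan_v1_dense(model=model)
```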
@@ -77,3 +77,20 @@ class LigerQwen3MoeSwiGLUMLP(nn.Module):

  def forward(self, x):
  return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
+
+
+ class LigerHunyuanV1SwiGLUMLP(nn.Module):
+     def __init__(self, config, layer_idx=None, is_shared_mlp=False):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.layer_idx = layer_idx
+         if config.hidden_act not in ["silu", "swish"]:
+             raise ValueError(f"Activation function {config.hidden_act} not supported.")
+
+     def forward(self, x):
+         return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: liger_kernel_nightly
- Version: 0.6.3.dev20251121010234
+ Version: 0.6.3.dev20251121195543
  Summary: Efficient Triton kernels for LLM Training
  License: BSD 2-CLAUSE LICENSE
  Copyright 2024 LinkedIn Corporation
@@ -312,6 +312,8 @@ loss.backward()
  | OLMo2 | `liger_kernel.transformers.apply_liger_kernel_to_olmo2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
  | GLM-4 | `liger_kernel.transformers.apply_liger_kernel_to_glm4` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
  | InternVL3 | `liger_kernel.transformers.apply_liger_kernel_to_internvl` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+ | HunyuanV1 | `liger_kernel.transformers.apply_liger_kernel_to_hunyuan_v1_dense` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+ | HunyuanV1 MoE | `liger_kernel.transformers.apply_liger_kernel_to_hunyuan_v1_moe` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |


  ## Low-level APIs
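A usage sketch to go with the new table rows (not part of the diff); it mirrors the pattern the README documents for the other entries, with a placeholder checkpoint path rather than a real Hub repo id:

```python
from liger_kernel.transformers import apply_liger_kernel_to_hunyuan_v1_moe

# Patch the transformers HunYuan v1 MoE modeling module before the model is created so the
# RoPE, RMSNorm, SwiGLU, and fused-linear-cross-entropy replacements take effect at load time.
apply_liger_kernel_to_hunyuan_v1_moe()

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("path/to/hunyuan-v1-moe-checkpoint")  # placeholder path
```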
@@ -43,7 +43,7 @@ liger_kernel/ops/tvd.py,sha256=FHJtLQI95ijqgg9UtaHpMAjSCiPxB6CduPwPMcGxelc,6405
  liger_kernel/ops/utils.py,sha256=uoFKQqo-34N2TWQNvXMFywqGiOMMXNEVBxVojzlUAa0,3836
  liger_kernel/ops/experimental/embedding.py,sha256=tolj3tItkzpSb30zWqDN2_yX4ectflaQ8HMyKyFIQc8,4172
  liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-OidjtbsW80oZ6IM,13314
- liger_kernel/transformers/__init__.py,sha256=XX1ySRgZXeQe0or-6GNclAsNQG_VkABQlkwqpB1Wn8A,10090
+ liger_kernel/transformers/__init__.py,sha256=zApQL1sGf2GOo7gWHfQxJ9X6D7QFwAuds4cPJJ-H81Y,10508
  liger_kernel/transformers/auto_model.py,sha256=0qCTRZt280Bj_LcFdzo9hlaR-BWNazawXOGgoCZjgEg,1545
  liger_kernel/transformers/cross_entropy.py,sha256=DMtHkKrVJDSsels7KgGQJqrXkEAd6Zopcdr-5oRmQgE,2010
  liger_kernel/transformers/dyt.py,sha256=i-4GPaMrl-jab9TVI5qN0-H9qycn_mCbV82ozU4nbmU,723
@@ -60,7 +60,7 @@ liger_kernel/transformers/jsd.py,sha256=DGqRnxIZxsvxo0_tbbxX3b-sDbDjC_yKufyRIHCc
  liger_kernel/transformers/kl_div.py,sha256=WLffFbh1EExD2Eb1F7lN11fo9JJC-0751WJjZAF1Fj8,409
  liger_kernel/transformers/layer_norm.py,sha256=c9pk3PEasOKYR0rhe5e5nNrnYKVCEW4VC8S6LpCq9EQ,906
  liger_kernel/transformers/llama4_rope.py,sha256=kS6PSHEwf3dS7hD7C7p8S0geugx2EMCiP0h0F7LsUoY,3639
- liger_kernel/transformers/monkey_patch.py,sha256=ZGnLygHuCiKGd6hT-C0pt1aY85f6GNFdV98oCDpxHHo,124742
+ liger_kernel/transformers/monkey_patch.py,sha256=Th4XiYT2fRRo1YNOCLkLLiTgjEpCdidWOT8-ozxgFsE,129377
  liger_kernel/transformers/multi_token_attention.py,sha256=K3NIY9_5TPgZ4_Rahn0xnkMXxD_fmlJHK4CWGYvGQp0,1752
  liger_kernel/transformers/poly_norm.py,sha256=g5tC75i3qy1_N26ZUP-jfpct7ivQAEdJfIfx8IXzeyE,1377
  liger_kernel/transformers/qwen2vl_mrope.py,sha256=5EwSqrMdsL9MYspeBMXBsNJKvH0MOmRrtJXAJlnnlOI,1047
@@ -68,7 +68,7 @@ liger_kernel/transformers/rms_norm.py,sha256=HwddVqrqS58jE-M2_4NkFGARtCDBhGnkKyj
  liger_kernel/transformers/rope.py,sha256=VMlDZI6zss9mLaLcN5XCE_ktmYRwAi_Eh4TIgO6NrIQ,2361
  liger_kernel/transformers/softmax.py,sha256=yadlAgE4V2JByMwrDDa2s5SUBp8Jgd57xwnVvAWoBaI,264
  liger_kernel/transformers/sparsemax.py,sha256=0lQA0UEOs4mu8CMruZ3VLhImxQVXJWhPsAKUsYA7vj8,403
- liger_kernel/transformers/swiglu.py,sha256=LZ8YeLIdv2k46JleZMjzubGk98smt6t780kSgcVLsQk,3454
+ liger_kernel/transformers/swiglu.py,sha256=FLvxamjGru9N-ZelsccTvNn0CjUnId9ldiBrOnH-8QQ,4290
  liger_kernel/transformers/tiled_mlp.py,sha256=J51-kpzwikDMMhT5bX-RZCKMaXBK6zZc1bhgRYTK5F0,4651
  liger_kernel/transformers/trainer_integration.py,sha256=W3ON51O5GkyzNJsItz0y5rKx-uy2f2cFfveZpqbUdhw,123
  liger_kernel/transformers/tvd.py,sha256=XrRfyJIqN6HFxXk8MYyFVZM1OLz3mtSbRZvWfZ_JerQ,450
@@ -82,6 +82,7 @@ liger_kernel/transformers/model/gemma3.py,sha256=mEV3Kuy-dqfTk_b899Vb-InuD4_DvwH
  liger_kernel/transformers/model/glm4.py,sha256=bSp22iPIjsli4-c_usUOsyh1Bs2gIK8X6ynS0azseUs,5900
  liger_kernel/transformers/model/glm4v.py,sha256=dd-BQpccDCp1SbIxcJ5rG8xcwYQK3KOv1Tgm9TGnZc4,6594
  liger_kernel/transformers/model/glm4v_moe.py,sha256=zKhMdOOrRhlrvCSFaeVYfddL1ubpY8edEO91TN81n98,7135
+ liger_kernel/transformers/model/hunyuan_v1.py,sha256=MJvP9xkUFePIV0HLETJM4YPbVCEPkAE1ZI5Jxyiebh0,5731
  liger_kernel/transformers/model/internvl.py,sha256=OOutracs9qrPHSU7FVYar08yinvGrHQVPvo39JEws6w,6473
  liger_kernel/transformers/model/llama.py,sha256=kqZeONzwTBzudoChlKMzq1w23BtYGbxWZC1l1V__JTw,13410
  liger_kernel/transformers/model/llama4.py,sha256=PfkynGVI0xxMs3EtyYpCgaALI6stu25OIrTIymE-pvg,4853
@@ -108,9 +109,9 @@ liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7H
  liger_kernel/transformers/trainer/orpo_trainer.py,sha256=tX0h63aOFe3rNqTmk6JpMf75UPo981yzEa6TghnjS0Q,5370
  liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
  liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
- liger_kernel_nightly-0.6.3.dev20251121010234.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
- liger_kernel_nightly-0.6.3.dev20251121010234.dist-info/METADATA,sha256=jKZfz6bFVCE0C1wJq6pLohUaG0jsIyDvpxq2XIpRTuM,24777
- liger_kernel_nightly-0.6.3.dev20251121010234.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
- liger_kernel_nightly-0.6.3.dev20251121010234.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
- liger_kernel_nightly-0.6.3.dev20251121010234.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
- liger_kernel_nightly-0.6.3.dev20251121010234.dist-info/RECORD,,
+ liger_kernel_nightly-0.6.3.dev20251121195543.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+ liger_kernel_nightly-0.6.3.dev20251121195543.dist-info/METADATA,sha256=3jCmn8uhrcmwtmASbQwo4NBudeZ9NJOfj5RN5Xlylr0,25097
+ liger_kernel_nightly-0.6.3.dev20251121195543.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+ liger_kernel_nightly-0.6.3.dev20251121195543.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+ liger_kernel_nightly-0.6.3.dev20251121195543.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+ liger_kernel_nightly-0.6.3.dev20251121195543.dist-info/RECORD,,