liger-kernel-nightly 0.5.9.dev20250508211521__py3-none-any.whl → 0.5.9.dev20250510102528__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those versions.
@@ -14,6 +14,7 @@ from liger_kernel.transformers.rms_norm import LigerRMSNorm # noqa: F401
  from liger_kernel.transformers.rope import liger_rotary_pos_emb # noqa: F401
  from liger_kernel.transformers.swiglu import LigerBlockSparseTop2MLP # noqa: F401
  from liger_kernel.transformers.swiglu import LigerPhi3SwiGLUMLP # noqa: F401
+ from liger_kernel.transformers.swiglu import LigerQwen3MoeSwiGLUMLP # noqa: F401
  from liger_kernel.transformers.swiglu import LigerSwiGLUMLP # noqa: F401
  from liger_kernel.transformers.tvd import LigerTVDLoss # noqa: F401
 
@@ -40,6 +41,7 @@ if TYPE_CHECKING:
      from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2_5_vl # noqa: F401
      from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2_vl # noqa: F401
      from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen3 # noqa: F401
+     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen3_moe # noqa: F401
 
 
  # Check if 'transformers' is installed
@@ -95,6 +97,7 @@ def __getattr__(name: str):
          "apply_liger_kernel_to_qwen2_5_vl",
          "apply_liger_kernel_to_qwen2_vl",
          "apply_liger_kernel_to_qwen3",
+         "apply_liger_kernel_to_qwen3_moe",
      }
 
      if name in monkey_patch_symbols:
@@ -118,6 +121,7 @@ __all__ = [
      "liger_rotary_pos_emb",
      "LigerBlockSparseTop2MLP",
      "LigerPhi3SwiGLUMLP",
+     "LigerQwen3MoeSwiGLUMLP",
      "LigerSwiGLUMLP",
      "LigerTVDLoss",
  ]
@@ -147,5 +151,6 @@ if _TRANSFORMERS_AVAILABLE:
              "apply_liger_kernel_to_qwen2_5_vl",
              "apply_liger_kernel_to_qwen2_vl",
              "apply_liger_kernel_to_qwen3",
+             "apply_liger_kernel_to_qwen3_moe",
          ]
      )
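
Taken together, the `__init__.py` hunks above only widen the public API surface; nothing else changes in that module. A minimal sketch of what becomes importable once this wheel is installed (import paths taken from the diff itself):

```python
# New top-level exports added in this release, per the __init__.py hunks above.
from liger_kernel.transformers import LigerQwen3MoeSwiGLUMLP  # SwiGLU MLP for Qwen3 MoE experts
from liger_kernel.transformers import apply_liger_kernel_to_qwen3_moe  # monkey-patch entry point
```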
@@ -0,0 +1,134 @@
+ from typing import List
+ from typing import Optional
+ from typing import Union
+
+ import torch
+
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast
+ from transformers.modeling_outputs import MoeModelOutputWithPast
+ from transformers.models.mixtral.modeling_mixtral import load_balancing_loss_func
+ from transformers.models.qwen3_moe.modeling_qwen3_moe import _CONFIG_FOR_DOC
+ from transformers.models.qwen3_moe.modeling_qwen3_moe import QWEN3_MOE_INPUTS_DOCSTRING
+ from transformers.utils import add_start_docstrings_to_model_forward
+ from transformers.utils import replace_return_docstrings
+
+ from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+
+
+ @add_start_docstrings_to_model_forward(QWEN3_MOE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def lce_forward(
+     self,
+     input_ids: Optional[torch.LongTensor] = None,
+     attention_mask: Optional[torch.Tensor] = None,
+     position_ids: Optional[torch.LongTensor] = None,
+     past_key_values: Optional[List[torch.FloatTensor]] = None,
+     inputs_embeds: Optional[torch.FloatTensor] = None,
+     labels: Optional[torch.LongTensor] = None,
+     use_cache: Optional[bool] = None,
+     output_attentions: Optional[bool] = None,
+     output_hidden_states: Optional[bool] = None,
+     output_router_logits: Optional[bool] = None,
+     cache_position: Optional[torch.LongTensor] = None,
+     logits_to_keep: Union[int, torch.Tensor] = 0,
+     **loss_kwargs,
+ ) -> MoeCausalLMOutputWithPast:
+     r"""
+     labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+         Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+         config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+         (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+     logits_to_keep (`int` or `torch.Tensor`, *optional*):
+         If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+         `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+         token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+         If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+         This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+     Returns:
+
+     Example:
+
+     ```python
+     >>> from transformers import AutoTokenizer, Qwen3MoeForCausalLM
+
+     >>> model = Qwen3MoeForCausalLM.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
+     >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
+
+     >>> prompt = "Hey, are you conscious? Can you talk to me?"
+     >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+     >>> # Generate
+     >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+     >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+     "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+     ```"""
+
+     output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+     output_router_logits = (
+         output_router_logits if output_router_logits is not None else self.config.output_router_logits
+     )
+
+     output_hidden_states = (
+         output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+     )
+
+     # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+     outputs: MoeModelOutputWithPast = self.model(
+         input_ids=input_ids,
+         attention_mask=attention_mask,
+         position_ids=position_ids,
+         past_key_values=past_key_values,
+         inputs_embeds=inputs_embeds,
+         use_cache=use_cache,
+         output_attentions=output_attentions,
+         output_hidden_states=output_hidden_states,
+         output_router_logits=output_router_logits,
+         cache_position=cache_position,
+     )
+
+     hidden_states = outputs.last_hidden_state
+     # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+     slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+     kept_hidden_states = hidden_states[:, slice_indices, :]
+
+     shift_labels = loss_kwargs.pop("shift_labels", None)
+     logits = None
+     loss = None
+
+     # if in training mode, do not materialize logits
+     if self.training and (labels is not None or shift_labels is not None):
+         loss = LigerForCausalLMLoss(
+             hidden_states=kept_hidden_states,
+             lm_head_weight=self.lm_head.weight,
+             labels=labels,
+             shift_labels=shift_labels,
+             hidden_size=self.config.hidden_size,
+             **loss_kwargs,
+         )
+     else:  # if in inference mode, materialize logits
+         logits = self.lm_head(kept_hidden_states)
+         if labels is not None:
+             loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)
+
+     aux_loss = None
+     if output_router_logits:
+         aux_loss = load_balancing_loss_func(
+             outputs.router_logits,
+             self.num_experts,
+             self.num_experts_per_tok,
+             attention_mask,
+         )
+         if labels is not None:
+             loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device
+
+     return MoeCausalLMOutputWithPast(
+         loss=loss,
+         aux_loss=aux_loss,
+         logits=logits,
+         past_key_values=outputs.past_key_values,
+         hidden_states=outputs.hidden_states,
+         attentions=outputs.attentions,
+         router_logits=outputs.router_logits,
+     )
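
The training branch of `lce_forward` above never calls `self.lm_head`; it hands the kept hidden states and the head weight to `LigerForCausalLMLoss`, which fuses the output projection with the cross entropy. As a rough intuition aid only (not the Liger kernel, and the helper name here is hypothetical), an unfused PyTorch equivalent of what that call computes looks like this:

```python
import torch.nn.functional as F


def unfused_causal_lm_loss(kept_hidden_states, lm_head_weight, labels, ignore_index=-100):
    """Memory-hungry reference for the loss lce_forward computes in its fused path (sketch, not Liger's API)."""
    # Materialize the full (batch, seq, vocab) logits tensor -- exactly what the fused path avoids.
    logits = kept_hidden_states @ lm_head_weight.T
    # Shift so that position t predicts token t + 1, as in standard causal LM training.
    shift_logits = logits[:, :-1, :].contiguous()
    shift_labels = labels[:, 1:].contiguous()
    return F.cross_entropy(
        shift_logits.view(-1, shift_logits.size(-1)).float(),
        shift_labels.view(-1),
        ignore_index=ignore_index,
    )
```

Avoiding that `(batch, seq, vocab)` logits tensor is the point of the fused path; for MoE models with large vocabularies it is usually the single largest activation.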
@@ -1102,6 +1102,61 @@ def apply_liger_kernel_to_qwen3(
                  _patch_rms_norm_module(decoder_layer.post_attention_layernorm)
 
 
+ def apply_liger_kernel_to_qwen3_moe(
+     rope: bool = True,
+     cross_entropy: bool = False,
+     fused_linear_cross_entropy: bool = True,
+     rms_norm: bool = True,
+     swiglu: bool = True,
+     model: PreTrainedModel = None,
+ ) -> None:
+     """
+     Apply Liger kernels to replace original implementation in HuggingFace Qwen3 MoE models.
+     """
+     assert not (cross_entropy and fused_linear_cross_entropy), (
+         "cross_entropy and fused_linear_cross_entropy cannot both be True."
+     )
+
+     from transformers.models.qwen3_moe import modeling_qwen3_moe
+     from transformers.models.qwen3_moe.modeling_qwen3_moe import Qwen3MoeModel
+
+     from liger_kernel.transformers.model.qwen3_moe import lce_forward as qwen3_lce_forward
+     from liger_kernel.transformers.swiglu import LigerQwen3MoeSwiGLUMLP
+
+     if rope:
+         modeling_qwen3_moe.apply_rotary_pos_emb = liger_rotary_pos_emb
+
+     if rms_norm:
+         modeling_qwen3_moe.Qwen3MoeRMSNorm = LigerRMSNorm
+
+     if cross_entropy:
+         from transformers.loss.loss_utils import nn
+
+         nn.functional.cross_entropy = liger_cross_entropy
+
+     if fused_linear_cross_entropy:
+         modeling_qwen3_moe.Qwen3MoeForCausalLM.forward = qwen3_lce_forward
+
+     if swiglu:
+         modeling_qwen3_moe.Qwen3MoeMLP = LigerQwen3MoeSwiGLUMLP
+
+     if model is not None:
+         # The model instance already exists, so we need to additionally patch the
+         # instance variables that reference already-instantiated modules
+
+         # get the base model from the model instance
+         base_model: Qwen3MoeModel = getattr(model, model.base_model_prefix, model)
+
+         if rms_norm:
+             _patch_rms_norm_module(base_model.norm)
+         for decoder_layer in base_model.layers:
+             if swiglu:
+                 _patch_swiglu_module(decoder_layer.mlp, LigerQwen3MoeSwiGLUMLP)
+             if rms_norm:
+                 _patch_rms_norm_module(decoder_layer.input_layernorm)
+                 _patch_rms_norm_module(decoder_layer.post_attention_layernorm)
+
+
  def apply_liger_kernel_to_qwen2_vl(
      rope: bool = True,
      cross_entropy: bool = False,
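
The new entry point follows the same pattern as the other `apply_liger_kernel_to_*` functions: call it before constructing the model so the class-level patches above take effect. A minimal sketch (the checkpoint ID is simply the one quoted in the docstring earlier in this diff; substitute whatever Qwen3 MoE checkpoint you actually use):

```python
from transformers import AutoModelForCausalLM

from liger_kernel.transformers import apply_liger_kernel_to_qwen3_moe

# Swap in Liger RoPE, RMSNorm, SwiGLU and the fused linear-cross-entropy forward
# at the class level, before any Qwen3MoeForCausalLM instance is created.
apply_liger_kernel_to_qwen3_moe()

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
```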
@@ -1455,6 +1510,7 @@ MODEL_TYPE_TO_APPLY_LIGER_FN = {
      "olmo2": apply_liger_kernel_to_olmo2,
      "qwen2": apply_liger_kernel_to_qwen2,
      "qwen3": apply_liger_kernel_to_qwen3,
+     "qwen3_moe": apply_liger_kernel_to_qwen3_moe,
      "qwen2_vl": apply_liger_kernel_to_qwen2_vl,
      "qwen2_5_vl": apply_liger_kernel_to_qwen2_5_vl,
      "phi3": apply_liger_kernel_to_phi3,
@@ -56,3 +56,24 @@ class LigerPhi3SwiGLUMLP(nn.Module):
          up_states = self.gate_up_proj(x)
          gate, up_states = up_states.chunk(2, dim=-1)
          return self.down_proj(LigerSiLUMulFunction.apply(gate, up_states))
+
+
+ class LigerQwen3MoeSwiGLUMLP(nn.Module):
+     """
+     Patch Qwen3MoeMLP to use LigerSiLUMulFunction.
+     https://github.com/huggingface/transformers/blob/v4.51.3/src/transformers/models/qwen3_moe/modular_qwen3_moe.py#L57
+     """
+
+     def __init__(self, config, intermediate_size=None):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         if config.hidden_act not in ["silu", "swish"]:
+             raise ValueError(f"Activation function {config.hidden_act} not supported.")
+
+     def forward(self, x):
+         return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
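
`LigerSiLUMulFunction.apply(a, b)` computes the same elementwise `silu(a) * b` as the stock `Qwen3MoeMLP`, just as a fused Triton kernel with its own backward. For reference, a plain PyTorch sketch of the forward pass that `LigerQwen3MoeSwiGLUMLP` replaces (the function name is illustrative, not part of either library):

```python
import torch.nn.functional as F


def reference_swiglu_forward(self, x):
    # Unfused SwiGLU: down_proj( silu(gate_proj(x)) * up_proj(x) )
    return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x))
```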
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: liger_kernel_nightly
- Version: 0.5.9.dev20250508211521
+ Version: 0.5.9.dev20250510102528
  Summary: Efficient Triton kernels for LLM Training
  License: BSD 2-CLAUSE LICENSE
  Copyright 2024 LinkedIn Corporation
@@ -300,6 +300,7 @@ loss.backward()
  | Qwen2-VL, & QVQ | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
  | Qwen2.5-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_5_vl` | RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
  | Qwen3 | `liger_kernel.transformers.apply_liger_kernel_to_qwen3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+ | Qwen3 MoE | `liger_kernel.transformers.apply_liger_kernel_to_qwen3_moe` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
  | Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
  | Granite 3.0 & 3.1 | `liger_kernel.transformers.apply_liger_kernel_to_granite` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss |
  | OLMo2 | `liger_kernel.transformers.apply_liger_kernel_to_olmo2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
@@ -33,7 +33,7 @@ liger_kernel/ops/tvd.py,sha256=FHJtLQI95ijqgg9UtaHpMAjSCiPxB6CduPwPMcGxelc,6405
  liger_kernel/ops/utils.py,sha256=uoFKQqo-34N2TWQNvXMFywqGiOMMXNEVBxVojzlUAa0,3836
  liger_kernel/ops/experimental/embedding.py,sha256=tolj3tItkzpSb30zWqDN2_yX4ectflaQ8HMyKyFIQc8,4172
  liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-OidjtbsW80oZ6IM,13314
- liger_kernel/transformers/__init__.py,sha256=x_3CYHJt-xj4va3N32kfwf000F-DNBtj-YE6OylDAW8,6774
+ liger_kernel/transformers/__init__.py,sha256=0KX0rxyy0E_uNWVE0PSTzEVzKqc5KdFHtvdHhJm23Kk,7077
  liger_kernel/transformers/auto_model.py,sha256=0qCTRZt280Bj_LcFdzo9hlaR-BWNazawXOGgoCZjgEg,1545
  liger_kernel/transformers/cross_entropy.py,sha256=z3KTWQnFxr_IZaVjtYt0ZNEWQdDdYThN35xWkHlDGH0,1683
  liger_kernel/transformers/dyt.py,sha256=QMqqc14pkE0WhpRZvapfnNAun-6C0C_tHExL2ZJuCUA,648
@@ -46,11 +46,11 @@ liger_kernel/transformers/group_norm.py,sha256=6qMAWOprr4SzP0YhNVNGQIBpM5aUHplUD
  liger_kernel/transformers/jsd.py,sha256=DGqRnxIZxsvxo0_tbbxX3b-sDbDjC_yKufyRIHCcScY,2979
  liger_kernel/transformers/kl_div.py,sha256=WLffFbh1EExD2Eb1F7lN11fo9JJC-0751WJjZAF1Fj8,409
  liger_kernel/transformers/layer_norm.py,sha256=c9pk3PEasOKYR0rhe5e5nNrnYKVCEW4VC8S6LpCq9EQ,906
- liger_kernel/transformers/monkey_patch.py,sha256=8Q84xxWA7ltgqgGRBxKxPPNeG7k5HYQfgaw1-HFnKGM,69287
+ liger_kernel/transformers/monkey_patch.py,sha256=k8WIkx_f3ObG6TjhIiN_4KeOABurB2W7xy7td0ie-W8,71339
  liger_kernel/transformers/qwen2vl_mrope.py,sha256=5EwSqrMdsL9MYspeBMXBsNJKvH0MOmRrtJXAJlnnlOI,1047
  liger_kernel/transformers/rms_norm.py,sha256=GqCEJuGt0YdqqlMcToE0Wp4A8YFquDa4UUSyH2uFW2A,1191
  liger_kernel/transformers/rope.py,sha256=ZTrTORSAyfcFIKjk6XEeYmk4ROH7xXED9L4g2NFntlE,999
- liger_kernel/transformers/swiglu.py,sha256=i9WTqcNRqReU4XJs391IPbl-I5X0wG4T72D4pqGFfJg,2422
+ liger_kernel/transformers/swiglu.py,sha256=LZ8YeLIdv2k46JleZMjzubGk98smt6t780kSgcVLsQk,3454
  liger_kernel/transformers/trainer_integration.py,sha256=W3ON51O5GkyzNJsItz0y5rKx-uy2f2cFfveZpqbUdhw,123
  liger_kernel/transformers/tvd.py,sha256=XrRfyJIqN6HFxXk8MYyFVZM1OLz3mtSbRZvWfZ_JerQ,450
  liger_kernel/transformers/experimental/embedding.py,sha256=2P0QYdlFyFrG5OqTzTa1wcRgDSyjBMv5i1a7BrDPDQw,881
@@ -72,13 +72,14 @@ liger_kernel/transformers/model/qwen2.py,sha256=bEusb6vrVbagtSUHyntpi9j0x79IrZ1N
  liger_kernel/transformers/model/qwen2_5_vl.py,sha256=oACIsTpg9_GdoSvekCyXLhJkuCpQEiFOTzKj7cjgi2E,9413
  liger_kernel/transformers/model/qwen2_vl.py,sha256=F6DeQ65wPtcpeQJZ9a3SJZKkQ-e24SRLdYUgC-_jT-k,9809
  liger_kernel/transformers/model/qwen3.py,sha256=JdIeh0fvDLdGs8nk4_eHrovHCNa09VG15D4aa0X0mwI,5084
+ liger_kernel/transformers/model/qwen3_moe.py,sha256=2EFIltbaQ6y8ksYDTk0NC0b2Zdbir7eW15avY4XisLQ,5917
  liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7HHWHwku25A-GYL0WU,193
  liger_kernel/transformers/trainer/orpo_trainer.py,sha256=pdekW7l6Qg_aqa5SYKYlSWUF8m3lkOFvFLcIMEHrz9s,8338
  liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
  liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
- liger_kernel_nightly-0.5.9.dev20250508211521.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
- liger_kernel_nightly-0.5.9.dev20250508211521.dist-info/METADATA,sha256=M-Zb60ynDAUMnErHxaEYXf2BuOnvSywE5Xu8lzOFeqg,23725
- liger_kernel_nightly-0.5.9.dev20250508211521.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
- liger_kernel_nightly-0.5.9.dev20250508211521.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
- liger_kernel_nightly-0.5.9.dev20250508211521.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
- liger_kernel_nightly-0.5.9.dev20250508211521.dist-info/RECORD,,
+ liger_kernel_nightly-0.5.9.dev20250510102528.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+ liger_kernel_nightly-0.5.9.dev20250510102528.dist-info/METADATA,sha256=QJdzn7ZNK-ENSx22-UoYoFvvkiB8CyI0NR08gdgb0fY,23874
+ liger_kernel_nightly-0.5.9.dev20250510102528.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+ liger_kernel_nightly-0.5.9.dev20250510102528.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+ liger_kernel_nightly-0.5.9.dev20250510102528.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+ liger_kernel_nightly-0.5.9.dev20250510102528.dist-info/RECORD,,