liger-kernel-nightly 0.6.1.dev20250819172918__py3-none-any.whl → 0.6.1.dev20250819173444__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- liger_kernel/transformers/__init__.py +3 -0
- liger_kernel/transformers/model/glm4v.py +150 -0
- liger_kernel/transformers/monkey_patch.py +90 -0
- {liger_kernel_nightly-0.6.1.dev20250819172918.dist-info → liger_kernel_nightly-0.6.1.dev20250819173444.dist-info}/METADATA +1 -1
- {liger_kernel_nightly-0.6.1.dev20250819172918.dist-info → liger_kernel_nightly-0.6.1.dev20250819173444.dist-info}/RECORD +9 -8
- {liger_kernel_nightly-0.6.1.dev20250819172918.dist-info → liger_kernel_nightly-0.6.1.dev20250819173444.dist-info}/LICENSE +0 -0
- {liger_kernel_nightly-0.6.1.dev20250819172918.dist-info → liger_kernel_nightly-0.6.1.dev20250819173444.dist-info}/NOTICE +0 -0
- {liger_kernel_nightly-0.6.1.dev20250819172918.dist-info → liger_kernel_nightly-0.6.1.dev20250819173444.dist-info}/WHEEL +0 -0
- {liger_kernel_nightly-0.6.1.dev20250819172918.dist-info → liger_kernel_nightly-0.6.1.dev20250819173444.dist-info}/top_level.txt +0 -0
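In short, this release adds GLM-4V support: a new `lce_forward` for `Glm4vForConditionalGeneration`, a matching `apply_liger_kernel_to_glm4v` patch function, and the public re-export shown in the `__init__.py` hunks below. A minimal sketch of the new surface area (the zero-argument call relies on the defaults visible in the function signature further down):

```python
# Minimal sketch of the new public API added by this release.
from liger_kernel.transformers import apply_liger_kernel_to_glm4v

# Patch the GLM-4V classes in transformers before instantiating a model;
# the defaults enable fused linear cross entropy, RMSNorm, and SwiGLU kernels.
apply_liger_kernel_to_glm4v()
```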
```diff
--- a/liger_kernel/transformers/__init__.py
+++ b/liger_kernel/transformers/__init__.py
@@ -35,6 +35,7 @@ if TYPE_CHECKING:
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_gemma3  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_gemma3_text  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_glm4  # noqa: F401
+    from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_glm4v  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_granite  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_llama  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_llama4  # noqa: F401
@@ -93,6 +94,7 @@ def __getattr__(name: str):
         "apply_liger_kernel_to_gemma3",
         "apply_liger_kernel_to_gemma3_text",
         "apply_liger_kernel_to_glm4",
+        "apply_liger_kernel_to_glm4v",
         "apply_liger_kernel_to_granite",
         "apply_liger_kernel_to_llama",
         "apply_liger_kernel_to_llava",
@@ -156,6 +158,7 @@ if _TRANSFORMERS_AVAILABLE:
             "apply_liger_kernel_to_gemma3",
             "apply_liger_kernel_to_gemma3_text",
             "apply_liger_kernel_to_glm4",
+            "apply_liger_kernel_to_glm4v",
             "apply_liger_kernel_to_granite",
             "apply_liger_kernel_to_llama",
             "apply_liger_kernel_to_llava",
```
```diff
--- /dev/null
+++ b/liger_kernel/transformers/model/glm4v.py
@@ -0,0 +1,150 @@
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import torch
+
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.utils.deprecation import deprecate_kwarg
+
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+
+
+@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+def lce_forward(
+    self,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[List[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    skip_logits: Optional[bool] = None,
+    **kwargs,
+) -> Union[Tuple, CausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Returns:
+
+    Example:
+
+    ```python
+    >>> from PIL import Image
+    >>> from transformers import AutoTokenizer, Glm4vForConditionalGeneration
+
+    >>> MODEL_PATH = "THUDM/GLM-4.1V-9B-Thinking"
+    >>> messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "image",
+                    "url": "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"
+                },
+                {
+                    "type": "text",
+                    "text": "describe this image"
+                }
+            ],
+        }
+    ]
+    >>> processor = AutoProcessor.from_pretrained(MODEL_PATH, use_fast=True)
+    >>> model = Glm4vForConditionalGeneration.from_pretrained(
+        pretrained_model_name_or_path=MODEL_PATH,
+        torch_dtype=torch.bfloat16,
+        device_map="auto",
+    )
+    >>> inputs = processor.apply_chat_template(
+        messages,
+        tokenize=True,
+        add_generation_prompt=True,
+        return_dict=True,
+        return_tensors="pt"
+    ).to(model.device)
+    >>> generated_ids = model.generate(**inputs, max_new_tokens=8192)
+    output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
+    <think>Got it, let's describe the image. First, there's a vintage car, specifically a Volkswagen Beetle
+    ```"""
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+    )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        return_dict=return_dict,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
+
+    shift_labels = kwargs.pop("shift_labels", None)
+    logits = None
+    loss = None
+
+    if skip_logits and labels is None and shift_labels is None:
+        raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+    if skip_logits:
+        loss = LigerForCausalLMLoss(
+            hidden_states=kept_hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            shift_labels=shift_labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+
+    else:
+        logits = self.lm_head(kept_hidden_states)
+        if labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+    return CausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+    )
```
```diff
--- a/liger_kernel/transformers/monkey_patch.py
+++ b/liger_kernel/transformers/monkey_patch.py
@@ -1839,6 +1839,95 @@ def apply_liger_kernel_to_glm4(
                 _patch_rms_norm_module(decoder_layer.post_mlp_layernorm, in_place=False)
 
 
+def apply_liger_kernel_to_glm4v(
+    rope: bool = False,
+    cross_entropy: bool = False,
+    fused_linear_cross_entropy: bool = True,
+    rms_norm: bool = True,
+    swiglu: bool = True,
+    model: PreTrainedModel = None,
+) -> None:
+    """
+    Apply Liger kernels to replace original implementation in HuggingFace GLM-4v models.
+
+    Args:
+        rope (bool): Whether to apply Liger's rotary position embedding. Default is False.
+        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False.
+        fused_linear_cross_entropy (bool):
+            Whether to apply Liger's fused linear cross entropy loss. Default is True.
+            `cross_entropy` and `fused_linear_cross_entropy` cannot both be True.
+            If `fused_linear_cross_entropy` is True, the logits will not be materialized but more memory efficient.
+        rms_norm (bool): Whether to apply Liger's RMSNorm. Default is True.
+        swiglu (bool): Whether to apply Liger's SwiGLU Glm4MLP. Default is True.
+        model (PreTrainedModel): The model instance to apply Liger kernels to, if the model has already been
+        loaded. Default is None.
+    """
+    assert not (cross_entropy and fused_linear_cross_entropy), (
+        "cross_entropy and fused_linear_cross_entropy cannot both be True."
+    )
+
+    from transformers.models.glm4v import modeling_glm4v
+    from transformers.models.glm4v.modeling_glm4v import Glm4vForConditionalGeneration
+    from transformers.models.glm4v.modeling_glm4v import Glm4vModel
+    from transformers.models.glm4v.modeling_glm4v import Glm4vTextModel
+    from transformers.models.glm4v.modeling_glm4v import Glm4vVisionModel
+
+    from liger_kernel.transformers.model.glm4v import lce_forward as glm4v_lce_forward
+    from liger_kernel.transformers.rms_norm import LigerRMSNormForGlm4
+
+    if rope:
+        raise NotImplementedError("liger_rotary_pos_emb is not available for Glm4 models.")
+    if rms_norm:
+        modeling_glm4v.Glm4vRMSNorm = LigerRMSNormForGlm4
+    if cross_entropy:
+        from transformers.loss.loss_utils import nn
+
+        nn.functional.cross_entropy = liger_cross_entropy
+    if fused_linear_cross_entropy:
+        if model is not None:
+            model.forward = MethodType(glm4v_lce_forward, model)
+        else:
+            modeling_glm4v.Glm4vForConditionalGeneration.forward = glm4v_lce_forward
+
+    if model is not None:
+        # The model instance already exists, so we need to additionally patch the
+        # instance variables that reference already-instantiated modules
+        if isinstance(model, (Glm4vForConditionalGeneration, Glm4vModel)):
+            # Note: language_model and visual properties can be accessed throught conditional class for BC.
+            # Not sure if it is subject to changes in the future.
+            # Reference: https://github.com/huggingface/transformers/blob/main/src/transformers/models/glm4v/modeling_glm4v.py#L1305
+            text_model: Glm4vTextModel = model.language_model
+            vision_model: Glm4vVisionModel = model.visual
+        elif isinstance(model, Glm4vTextModel):
+            text_model: Glm4vTextModel = model
+            vision_model = None
+        else:
+            # Note: Currently there's no support for patching vision model only. Feel free to raise an issue if needed.
+            raise TypeError(
+                f"Unsupported glm4.1v model type. `model` must be `Glm4VLForConditionalGeneration`, `Glm4vVisionModel` or `Glm4vTextModel`. Got: {type(model)}"
+            )
+
+        if vision_model is not None:
+            for vision_block in vision_model.blocks:
+                if rms_norm:
+                    _patch_rms_norm_module(vision_block.norm1)
+                    _patch_rms_norm_module(vision_block.norm2)
+                if swiglu:
+                    _patch_swiglu_module(vision_block.mlp, LigerSwiGLUMLP)
+
+        if text_model is not None:
+            if rms_norm:
+                _patch_rms_norm_module(text_model.norm)
+            for decoder_layer in text_model.layers:
+                if swiglu:
+                    _patch_swiglu_module(decoder_layer.mlp, LigerPhi3SwiGLUMLP)
+                if rms_norm:
+                    _patch_rms_norm_module(decoder_layer.input_layernorm)
+                    _patch_rms_norm_module(decoder_layer.post_attention_layernorm)
+                    _patch_rms_norm_module(decoder_layer.post_self_attn_layernorm)
+                    _patch_rms_norm_module(decoder_layer.post_mlp_layernorm)
+
+
 # Model type corresponds to the keys defined in transformers/models/auto/modeling_auto.py
 MODEL_TYPE_TO_APPLY_LIGER_FN = {
     "gemma": apply_liger_kernel_to_gemma,
```
```diff
@@ -1846,6 +1935,7 @@ MODEL_TYPE_TO_APPLY_LIGER_FN = {
     "gemma3_text": apply_liger_kernel_to_gemma3_text,
     "gemma3": apply_liger_kernel_to_gemma3,
     "glm4": apply_liger_kernel_to_glm4,
+    "glm4v": apply_liger_kernel_to_glm4v,
     "llama": apply_liger_kernel_to_llama,
     "llama4_text": apply_liger_kernel_to_llama4,
     "llama4": apply_liger_kernel_to_llama4,
```
```diff
--- a/liger_kernel_nightly-0.6.1.dev20250819172918.dist-info/RECORD
+++ b/liger_kernel_nightly-0.6.1.dev20250819173444.dist-info/RECORD
@@ -41,7 +41,7 @@ liger_kernel/ops/tvd.py,sha256=FHJtLQI95ijqgg9UtaHpMAjSCiPxB6CduPwPMcGxelc,6405
 liger_kernel/ops/utils.py,sha256=uoFKQqo-34N2TWQNvXMFywqGiOMMXNEVBxVojzlUAa0,3836
 liger_kernel/ops/experimental/embedding.py,sha256=tolj3tItkzpSb30zWqDN2_yX4ectflaQ8HMyKyFIQc8,4172
 liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-OidjtbsW80oZ6IM,13314
-liger_kernel/transformers/__init__.py,sha256=
+liger_kernel/transformers/__init__.py,sha256=jkokP69dbCzUDTz-H6QowB5xNEflmgQ7Zv-_4MVuxpY,8440
 liger_kernel/transformers/auto_model.py,sha256=0qCTRZt280Bj_LcFdzo9hlaR-BWNazawXOGgoCZjgEg,1545
 liger_kernel/transformers/cross_entropy.py,sha256=z3KTWQnFxr_IZaVjtYt0ZNEWQdDdYThN35xWkHlDGH0,1683
 liger_kernel/transformers/dyt.py,sha256=i-4GPaMrl-jab9TVI5qN0-H9qycn_mCbV82ozU4nbmU,723
@@ -58,7 +58,7 @@ liger_kernel/transformers/jsd.py,sha256=DGqRnxIZxsvxo0_tbbxX3b-sDbDjC_yKufyRIHCc
 liger_kernel/transformers/kl_div.py,sha256=WLffFbh1EExD2Eb1F7lN11fo9JJC-0751WJjZAF1Fj8,409
 liger_kernel/transformers/layer_norm.py,sha256=c9pk3PEasOKYR0rhe5e5nNrnYKVCEW4VC8S6LpCq9EQ,906
 liger_kernel/transformers/llama4_rope.py,sha256=kS6PSHEwf3dS7hD7C7p8S0geugx2EMCiP0h0F7LsUoY,3639
-liger_kernel/transformers/monkey_patch.py,sha256=
+liger_kernel/transformers/monkey_patch.py,sha256=pG3Yf0fMg4_0pAncc2wLtpdfXvmC5CROpNJ43-MmElM,93075
 liger_kernel/transformers/multi_token_attention.py,sha256=l9VDICK0dfmifUDW668hGscP8AHq2rYcM2oGUa3baRQ,1751
 liger_kernel/transformers/qwen2vl_mrope.py,sha256=5EwSqrMdsL9MYspeBMXBsNJKvH0MOmRrtJXAJlnnlOI,1047
 liger_kernel/transformers/rms_norm.py,sha256=vkekcvTeWY8vL4H6hg3t0XeY0Ew_3OFMPHuzqlxPPVw,2719
@@ -75,6 +75,7 @@ liger_kernel/transformers/model/gemma.py,sha256=mNX-mIwV6jI4zfbrUHp0C468pOmjzsL7
 liger_kernel/transformers/model/gemma2.py,sha256=R_JFPyWTk7RyA7D05ZiIaNO5pX8gWcvfWf-6rdCRMxs,11296
 liger_kernel/transformers/model/gemma3.py,sha256=FKO4j3t4W_5uECRA1lhVnXC-It2GhirHm4tpCf9ApAc,12785
 liger_kernel/transformers/model/glm4.py,sha256=GlnEhdGJuDIqp2R9qC54biY3HwV1tWmfpJm6ijoAsrM,5257
+liger_kernel/transformers/model/glm4v.py,sha256=zbV3agptEYpGAD0eeCRwIpJAhJUviTT5xQbbLlgpVnc,5957
 liger_kernel/transformers/model/llama.py,sha256=i8jJgyZsMKWQ-zKloETLugtwFpUOdaWxLDceciFXKd4,12832
 liger_kernel/transformers/model/llama4.py,sha256=IgbB8sTh3dlETQnaNNy1bZLuXy-Nt7qmeAjF27ydGpg,4210
 liger_kernel/transformers/model/llava.py,sha256=bLCioday_SOm69ogMDBhy_4UsVkH2-BSl93-EXY6-7I,15076
@@ -95,9 +96,9 @@ liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7H
 liger_kernel/transformers/trainer/orpo_trainer.py,sha256=tX0h63aOFe3rNqTmk6JpMf75UPo981yzEa6TghnjS0Q,5370
 liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
 liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
-liger_kernel_nightly-0.6.1.
-liger_kernel_nightly-0.6.1.
-liger_kernel_nightly-0.6.1.
-liger_kernel_nightly-0.6.1.
-liger_kernel_nightly-0.6.1.
-liger_kernel_nightly-0.6.1.
+liger_kernel_nightly-0.6.1.dev20250819173444.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel_nightly-0.6.1.dev20250819173444.dist-info/METADATA,sha256=OaVW-70Zf6I4qZbU4W9HcUlXza8L-zhHOmyViKLUftQ,24504
+liger_kernel_nightly-0.6.1.dev20250819173444.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel_nightly-0.6.1.dev20250819173444.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+liger_kernel_nightly-0.6.1.dev20250819173444.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel_nightly-0.6.1.dev20250819173444.dist-info/RECORD,,
```