liger-kernel-nightly 0.6.3.dev20251121195543__py3-none-any.whl → 0.6.3.dev20251121200119__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- liger_kernel/transformers/__init__.py +3 -0
- liger_kernel/transformers/model/olmo3.py +142 -0
- liger_kernel/transformers/monkey_patch.py +70 -1
- liger_kernel/transformers/swiglu.py +1 -1
- {liger_kernel_nightly-0.6.3.dev20251121195543.dist-info → liger_kernel_nightly-0.6.3.dev20251121200119.dist-info}/METADATA +2 -1
- {liger_kernel_nightly-0.6.3.dev20251121195543.dist-info → liger_kernel_nightly-0.6.3.dev20251121200119.dist-info}/RECORD +10 -9
- {liger_kernel_nightly-0.6.3.dev20251121195543.dist-info → liger_kernel_nightly-0.6.3.dev20251121200119.dist-info}/LICENSE +0 -0
- {liger_kernel_nightly-0.6.3.dev20251121195543.dist-info → liger_kernel_nightly-0.6.3.dev20251121200119.dist-info}/NOTICE +0 -0
- {liger_kernel_nightly-0.6.3.dev20251121195543.dist-info → liger_kernel_nightly-0.6.3.dev20251121200119.dist-info}/WHEEL +0 -0
- {liger_kernel_nightly-0.6.3.dev20251121195543.dist-info → liger_kernel_nightly-0.6.3.dev20251121200119.dist-info}/top_level.txt +0 -0
liger_kernel/transformers/__init__.py

@@ -52,6 +52,7 @@ if TYPE_CHECKING:
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_mixtral  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_mllama  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_olmo2  # noqa: F401
+    from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_olmo3  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_paligemma  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_phi3  # noqa: F401
     from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2  # noqa: F401

@@ -118,6 +119,7 @@ def __getattr__(name: str):
         "apply_liger_kernel_to_mixtral",
         "apply_liger_kernel_to_mllama",
         "apply_liger_kernel_to_olmo2",
+        "apply_liger_kernel_to_olmo3",
         "apply_liger_kernel_to_paligemma",
         "apply_liger_kernel_to_phi3",
         "apply_liger_kernel_to_qwen2",

@@ -194,6 +196,7 @@ if _TRANSFORMERS_AVAILABLE:
         "apply_liger_kernel_to_mixtral",
         "apply_liger_kernel_to_mllama",
         "apply_liger_kernel_to_olmo2",
+        "apply_liger_kernel_to_olmo3",
         "apply_liger_kernel_to_paligemma",
         "apply_liger_kernel_to_phi3",
         "apply_liger_kernel_to_qwen2",
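The three `__init__.py` hunks only re-export the new entry point. As a minimal sketch of what that enables (assuming a `transformers` build that ships Olmo3), the patcher can now be imported straight from the package root and called before the model is constructed:

```python
# Minimal sketch, not part of the diff: import the newly exported entry point and
# patch the module-level Olmo3 classes before any model is instantiated.
from liger_kernel.transformers import apply_liger_kernel_to_olmo3

apply_liger_kernel_to_olmo3()  # defaults: rope, rms_norm, swiglu, fused_linear_cross_entropy
```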
liger_kernel/transformers/model/olmo3.py (new file)

@@ -0,0 +1,142 @@
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import torch
+
+from transformers.modeling_outputs import BaseModelOutputWithPast
+from transformers.utils.deprecation import deprecate_kwarg
+
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
+
+
+@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+def lce_forward(
+    self,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[List[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    skip_logits: Optional[bool] = None,
+    **kwargs,
+) -> Union[Tuple, LigerCausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Returns:
+
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, Olmo3ForCausalLM
+
+    >>> model = Olmo3ForCausalLM.from_pretrained("allenai/Olmo-3-7B-Instruct")
+    >>> tokenizer = AutoTokenizer.from_pretrained("allenai/Olmo-3-7B-Instruct")
+
+    >>> prompt = "Hey, are you conscious? Can you talk to me?"
+    >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+    >>> # Generate
+    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    'Hey, are you conscious? Can you talk to me?\nI’m not sure if you’re conscious of this, but I’m'
+    ```
+    """
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+    )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs: BaseModelOutputWithPast = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        return_dict=return_dict,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
+
+    shift_labels = kwargs.pop("shift_labels", None)
+    logits = None
+    loss = None
+    token_accuracy = None
+
+    if skip_logits and labels is None and shift_labels is None:
+        raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+    # Compute loss
+    if skip_logits:
+        result = LigerForCausalLMLoss(
+            hidden_states=kept_hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            shift_labels=shift_labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
+
+    else:
+        logits = self.lm_head(kept_hidden_states)
+        if labels is not None or shift_labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                shift_labels=shift_labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        output = ((loss,) + output) if loss is not None else output
+        output = output + (token_accuracy,) if token_accuracy is not None else output
+        return output
+
+    # Return custom output class with token_accuracy field
+    return LigerCausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
+    )
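The new `lce_forward` mirrors the Olmo2 implementation: when labels (or `shift_labels`) are provided in training mode, `skip_logits` defaults to `True` and `LigerForCausalLMLoss` consumes the hidden states directly, so full-vocabulary logits are never materialized. A rough training-side sketch of that behaviour, assuming the `apply_liger_kernel_to_olmo3` patch from the next hunk has been applied and using the checkpoint named in the docstring:

```python
# Rough sketch, not part of the diff: training-time behaviour of the patched forward.
# Assumes a transformers build with Olmo3 support and enough memory for the 7B
# checkpoint referenced in the docstring above.
from transformers import AutoTokenizer, Olmo3ForCausalLM

from liger_kernel.transformers import apply_liger_kernel_to_olmo3

apply_liger_kernel_to_olmo3()  # patch module-level Olmo3 classes before loading
model = Olmo3ForCausalLM.from_pretrained("allenai/Olmo-3-7B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("allenai/Olmo-3-7B-Instruct")

inputs = tokenizer("Hey, are you conscious?", return_tensors="pt")
labels = inputs["input_ids"].clone()

model.train()
out = model(**inputs, labels=labels)
# skip_logits defaults to True here, so the fused loss is computed from the hidden
# states and out.logits stays None.
assert out.loss is not None and out.logits is None
out.loss.backward()
```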
liger_kernel/transformers/monkey_patch.py

@@ -1928,6 +1928,74 @@ def apply_liger_kernel_to_olmo2(
                 _patch_rms_norm_module(decoder_layer.post_feedforward_layernorm, in_place=False)
 
 
+def apply_liger_kernel_to_olmo3(
+    rope: bool = True,
+    cross_entropy: bool = False,
+    fused_linear_cross_entropy: bool = True,
+    rms_norm: bool = True,
+    swiglu: bool = True,
+    model: PreTrainedModel = None,
+) -> None:
+    """
+    Apply Liger kernels to replace original implementation in HuggingFace Olmo3 models.
+
+    Args:
+        rope (bool): Whether to apply Liger's rotary position embedding. Default is True.
+        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False.
+        fused_linear_cross_entropy (bool):
+            Whether to apply Liger's fused linear cross entropy loss. Default is True.
+            `cross_entropy` and `fused_linear_cross_entropy` cannot both be True.
+            If `fused_linear_cross_entropy` is True, the logits will not be materialized but more memory efficient.
+        rms_norm (bool): Whether to apply Liger's RMSNorm. Default is True.
+        swiglu (bool): Whether to apply Liger's SwiGLU to Olmo3MLP. Default is True.
+        model (PreTrainedModel): The model instance to apply Liger kernels to, if the model has already been
+        loaded. Default is None.
+    """
+    assert not (cross_entropy and fused_linear_cross_entropy), (
+        "cross_entropy and fused_linear_cross_entropy cannot both be True."
+    )
+
+    from transformers.models.olmo3 import modeling_olmo3
+    from transformers.models.olmo3.modeling_olmo3 import Olmo3Model
+
+    from liger_kernel.transformers.model.olmo3 import lce_forward as olmo3_lce_forward
+    from liger_kernel.transformers.rms_norm import LigerRMSNormForOlmo2
+
+    # Olmo3 arch is very similar to Olmo2, so we can reuse all these components in the same way.
+    if rope:
+        modeling_olmo3.apply_rotary_pos_emb = liger_rotary_pos_emb
+    if rms_norm:
+        modeling_olmo3.Olmo3RMSNorm = LigerRMSNormForOlmo2  # same as olmo2
+    if swiglu:
+        modeling_olmo3.Olmo3MLP = LigerSwiGLUMLP
+    if cross_entropy:
+        from transformers.loss.loss_utils import nn
+
+        nn.functional.cross_entropy = liger_cross_entropy
+    if fused_linear_cross_entropy:
+        if model is not None:
+            model.forward = MethodType(olmo3_lce_forward, model)
+        else:
+            modeling_olmo3.Olmo3ForCausalLM.forward = olmo3_lce_forward
+
+    if model is not None:
+        # The model instance already exists, so we need to additionally patch the
+        # instance variables that reference already-instantiated modules
+
+        # get the base model from the model instance
+        base_model: Olmo3Model = getattr(model, model.base_model_prefix, model)
+
+        if rms_norm:
+            _patch_rms_norm_module(base_model.norm)
+
+        for decoder_layer in base_model.layers:
+            if swiglu:
+                _patch_swiglu_module(decoder_layer.mlp, LigerSwiGLUMLP)
+            if rms_norm:
+                _patch_rms_norm_module(decoder_layer.post_attention_layernorm, in_place=False)
+                _patch_rms_norm_module(decoder_layer.post_feedforward_layernorm, in_place=False)
+
+
 def apply_liger_kernel_to_glm4(
     rope: bool = False,
     cross_entropy: bool = False,
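Like the other patchers, `apply_liger_kernel_to_olmo3` also covers models that are already instantiated: passing `model=` rebinds `forward` and patches the existing RMSNorm and MLP submodules in place instead of swapping the module-level classes. A brief sketch of that path, reusing the checkpoint name assumed above:

```python
# Sketch, not part of the diff: patching an already-loaded Olmo3 model instance.
from transformers import Olmo3ForCausalLM

from liger_kernel.transformers import apply_liger_kernel_to_olmo3

model = Olmo3ForCausalLM.from_pretrained("allenai/Olmo-3-7B-Instruct")
apply_liger_kernel_to_olmo3(model=model)  # rebinds forward and patches norms/MLPs in place
```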
@@ -2589,7 +2657,7 @@ def apply_liger_kernel_to_hunyuan_v1_dense(
         from transformers.loss.loss_utils import nn
 
         nn.functional.cross_entropy = liger_cross_entropy
-
+
     if fused_linear_cross_entropy:
         if model is not None:
             model.forward = MethodType(hunyuan_v1_lce_forward, model)
@@ -2695,6 +2763,7 @@ MODEL_TYPE_TO_APPLY_LIGER_FN = {
     "mistral": apply_liger_kernel_to_mistral,
     "mixtral": apply_liger_kernel_to_mixtral,
     "olmo2": apply_liger_kernel_to_olmo2,
+    "olmo3": apply_liger_kernel_to_olmo3,
    "qwen2": apply_liger_kernel_to_qwen2,
     "qwen3": apply_liger_kernel_to_qwen3,
     "qwen3_moe": apply_liger_kernel_to_qwen3_moe,
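Registering `"olmo3"` in `MODEL_TYPE_TO_APPLY_LIGER_FN` lets the config-driven dispatch find the patcher by `model_type`, so auto-loading helpers such as `AutoLigerKernelForCausalLM` (not touched by this diff, and assumed to dispatch through this table as for the other entries) can apply the Olmo3 kernels without an explicit call:

```python
# Sketch, not part of the diff: model_type-based dispatch via the registry above.
from liger_kernel.transformers import AutoLigerKernelForCausalLM

# For a checkpoint whose config reports model_type == "olmo3", this is assumed to
# look up apply_liger_kernel_to_olmo3 in MODEL_TYPE_TO_APPLY_LIGER_FN and apply it
# before instantiating the model.
model = AutoLigerKernelForCausalLM.from_pretrained("allenai/Olmo-3-7B-Instruct")
```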
liger_kernel/transformers/swiglu.py

@@ -93,4 +93,4 @@ class LigerHunyuanV1SwiGLUMLP(nn.Module):
             raise ValueError(f"Activation function {config.hidden_act} not supported.")
 
     def forward(self, x):
-        return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
+        return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
{liger_kernel_nightly-0.6.3.dev20251121195543.dist-info → liger_kernel_nightly-0.6.3.dev20251121200119.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: liger_kernel_nightly
-Version: 0.6.3.dev20251121195543
+Version: 0.6.3.dev20251121200119
 Summary: Efficient Triton kernels for LLM Training
 License: BSD 2-CLAUSE LICENSE
 Copyright 2024 LinkedIn Corporation
@@ -310,6 +310,7 @@ loss.backward()
 | Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Granite 3.0 & 3.1 | `liger_kernel.transformers.apply_liger_kernel_to_granite` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss |
 | OLMo2 | `liger_kernel.transformers.apply_liger_kernel_to_olmo2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| Olmo3 | `liger_kernel.transformers.apply_liger_kernel_to_olmo3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | GLM-4 | `liger_kernel.transformers.apply_liger_kernel_to_glm4` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | InternVL3 | `liger_kernel.transformers.apply_liger_kernel_to_internvl` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | HunyuanV1 | `liger_kernel.transformers.apply_liger_kernel_to_hunyuan_v1_dense` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
{liger_kernel_nightly-0.6.3.dev20251121195543.dist-info → liger_kernel_nightly-0.6.3.dev20251121200119.dist-info}/RECORD

@@ -43,7 +43,7 @@ liger_kernel/ops/tvd.py,sha256=FHJtLQI95ijqgg9UtaHpMAjSCiPxB6CduPwPMcGxelc,6405
 liger_kernel/ops/utils.py,sha256=uoFKQqo-34N2TWQNvXMFywqGiOMMXNEVBxVojzlUAa0,3836
 liger_kernel/ops/experimental/embedding.py,sha256=tolj3tItkzpSb30zWqDN2_yX4ectflaQ8HMyKyFIQc8,4172
 liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-OidjtbsW80oZ6IM,13314
-liger_kernel/transformers/__init__.py,sha256=
+liger_kernel/transformers/__init__.py,sha256=CgwhrY5cdx6OcRgR2ZZJbOIkLswQWPTr-BAaoxDNNOY,10687
 liger_kernel/transformers/auto_model.py,sha256=0qCTRZt280Bj_LcFdzo9hlaR-BWNazawXOGgoCZjgEg,1545
 liger_kernel/transformers/cross_entropy.py,sha256=DMtHkKrVJDSsels7KgGQJqrXkEAd6Zopcdr-5oRmQgE,2010
 liger_kernel/transformers/dyt.py,sha256=i-4GPaMrl-jab9TVI5qN0-H9qycn_mCbV82ozU4nbmU,723

@@ -60,7 +60,7 @@ liger_kernel/transformers/jsd.py,sha256=DGqRnxIZxsvxo0_tbbxX3b-sDbDjC_yKufyRIHCc
 liger_kernel/transformers/kl_div.py,sha256=WLffFbh1EExD2Eb1F7lN11fo9JJC-0751WJjZAF1Fj8,409
 liger_kernel/transformers/layer_norm.py,sha256=c9pk3PEasOKYR0rhe5e5nNrnYKVCEW4VC8S6LpCq9EQ,906
 liger_kernel/transformers/llama4_rope.py,sha256=kS6PSHEwf3dS7hD7C7p8S0geugx2EMCiP0h0F7LsUoY,3639
-liger_kernel/transformers/monkey_patch.py,sha256=
+liger_kernel/transformers/monkey_patch.py,sha256=4LV6LSz_AAop6HWk1spZm1QigPN9nUDPJu9tK21-jIo,132446
 liger_kernel/transformers/multi_token_attention.py,sha256=K3NIY9_5TPgZ4_Rahn0xnkMXxD_fmlJHK4CWGYvGQp0,1752
 liger_kernel/transformers/poly_norm.py,sha256=g5tC75i3qy1_N26ZUP-jfpct7ivQAEdJfIfx8IXzeyE,1377
 liger_kernel/transformers/qwen2vl_mrope.py,sha256=5EwSqrMdsL9MYspeBMXBsNJKvH0MOmRrtJXAJlnnlOI,1047

@@ -68,7 +68,7 @@ liger_kernel/transformers/rms_norm.py,sha256=HwddVqrqS58jE-M2_4NkFGARtCDBhGnkKyj
 liger_kernel/transformers/rope.py,sha256=VMlDZI6zss9mLaLcN5XCE_ktmYRwAi_Eh4TIgO6NrIQ,2361
 liger_kernel/transformers/softmax.py,sha256=yadlAgE4V2JByMwrDDa2s5SUBp8Jgd57xwnVvAWoBaI,264
 liger_kernel/transformers/sparsemax.py,sha256=0lQA0UEOs4mu8CMruZ3VLhImxQVXJWhPsAKUsYA7vj8,403
-liger_kernel/transformers/swiglu.py,sha256=
+liger_kernel/transformers/swiglu.py,sha256=dRR69wDWSWfdjtnsTECyxQqWVo5QkdXdXm9SpSQ4Jvw,4291
 liger_kernel/transformers/tiled_mlp.py,sha256=J51-kpzwikDMMhT5bX-RZCKMaXBK6zZc1bhgRYTK5F0,4651
 liger_kernel/transformers/trainer_integration.py,sha256=W3ON51O5GkyzNJsItz0y5rKx-uy2f2cFfveZpqbUdhw,123
 liger_kernel/transformers/tvd.py,sha256=XrRfyJIqN6HFxXk8MYyFVZM1OLz3mtSbRZvWfZ_JerQ,450

@@ -92,6 +92,7 @@ liger_kernel/transformers/model/mistral.py,sha256=OcwOzVDMwwDbVccVPv-AaocznzWwzL
 liger_kernel/transformers/model/mixtral.py,sha256=YcBDoTEJDgLFJ_RTo180DYGxR8D5Ad9-idumif7kCPE,12130
 liger_kernel/transformers/model/mllama.py,sha256=vAHwCm63sn4kpAY0rDGf_N0HR7KRTBVpBYDVTPOaZTg,12079
 liger_kernel/transformers/model/olmo2.py,sha256=-h2bUOeuPfY1MdShdRvq5_wFDHKP4PEimgIl0fL-BT4,5902
+liger_kernel/transformers/model/olmo3.py,sha256=k2zYOlS8U_b5MwjdToB3tDRQ0bH_mWapVQqJcH8-qAo,6007
 liger_kernel/transformers/model/output_classes.py,sha256=0BGXVR4dYQpSHLkSqpRoXuHMryrceGSlTYRu6pvd8ZY,4542
 liger_kernel/transformers/model/paligemma.py,sha256=r0smHLADkEwfLS6d6ArWoSWEeLt2d_8pmgOO5F04b1o,20793
 liger_kernel/transformers/model/phi3.py,sha256=PT7Kw6yySg-7TsssWfi82eVMN3SWujCqzCqHigAdfeQ,4574

@@ -109,9 +110,9 @@ liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7H
 liger_kernel/transformers/trainer/orpo_trainer.py,sha256=tX0h63aOFe3rNqTmk6JpMf75UPo981yzEa6TghnjS0Q,5370
 liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
 liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
-liger_kernel_nightly-0.6.3.
-liger_kernel_nightly-0.6.3.
-liger_kernel_nightly-0.6.3.
-liger_kernel_nightly-0.6.3.
-liger_kernel_nightly-0.6.3.
-liger_kernel_nightly-0.6.3.
+liger_kernel_nightly-0.6.3.dev20251121200119.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel_nightly-0.6.3.dev20251121200119.dist-info/METADATA,sha256=dTCc8yabO75aXtlWdPFHw23yAhHuEr5K06YDaMH4OHU,25238
+liger_kernel_nightly-0.6.3.dev20251121200119.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel_nightly-0.6.3.dev20251121200119.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+liger_kernel_nightly-0.6.3.dev20251121200119.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel_nightly-0.6.3.dev20251121200119.dist-info/RECORD,,