liger-kernel-nightly 0.5.6.dev20250407214804__py3-none-any.whl → 0.5.6.dev20250408194537__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- liger_kernel/ops/layer_norm.py +14 -1
- liger_kernel/ops/rms_norm.py +12 -1
- liger_kernel/transformers/__init__.py +107 -19
- {liger_kernel_nightly-0.5.6.dev20250407214804.dist-info → liger_kernel_nightly-0.5.6.dev20250408194537.dist-info}/METADATA +1 -1
- {liger_kernel_nightly-0.5.6.dev20250407214804.dist-info → liger_kernel_nightly-0.5.6.dev20250408194537.dist-info}/RECORD +9 -9
- {liger_kernel_nightly-0.5.6.dev20250407214804.dist-info → liger_kernel_nightly-0.5.6.dev20250408194537.dist-info}/LICENSE +0 -0
- {liger_kernel_nightly-0.5.6.dev20250407214804.dist-info → liger_kernel_nightly-0.5.6.dev20250408194537.dist-info}/NOTICE +0 -0
- {liger_kernel_nightly-0.5.6.dev20250407214804.dist-info → liger_kernel_nightly-0.5.6.dev20250408194537.dist-info}/WHEEL +0 -0
- {liger_kernel_nightly-0.5.6.dev20250407214804.dist-info → liger_kernel_nightly-0.5.6.dev20250408194537.dist-info}/top_level.txt +0 -0
liger_kernel/ops/layer_norm.py
CHANGED
@@ -154,6 +154,11 @@ def layer_norm_forward(X, W, B, eps):
             f"must match weight size (W.shape[0]={W.shape[0]})"
         )
 
+    # XPU-specific optimization
+    kernel_args = {}
+    if X.device.type == "xpu":
+        kernel_args["grf_mode"] = "large"
+
     _layer_norm_forward_kernel[(n_rows,)](
         Y,
         Y.stride(0),
@@ -171,6 +176,7 @@ def layer_norm_forward(X, W, B, eps):
         eps,
         BLOCK_SIZE=BLOCK_SIZE,
         num_warps=num_warps,
+        **kernel_args,  # XPU-specific optimization
     )
     return Y.view(*shape), X, Mean, RSTD, BLOCK_SIZE, num_warps
 
@@ -185,7 +191,7 @@ def layer_norm_backward(dY, X, W, B, Mean, RSTD):
     if X.device.type == "cuda":
         sm_count = torch.cuda.get_device_properties(X.device).multi_processor_count
     elif X.device.type == "xpu":
-        sm_count = torch.xpu.get_device_properties(X.device).
+        sm_count = torch.xpu.get_device_properties(X.device).gpu_eu_count
 
     DX = torch.empty((n_rows, n_cols), dtype=X.dtype, device=X.device)
     _DW = torch.empty((sm_count, n_cols), dtype=W.dtype, device=W.device)
@@ -208,6 +214,12 @@ def layer_norm_backward(dY, X, W, B, Mean, RSTD):
         if X.dtype == torch.float16
         else tl.float32  # fallback to float32 for other types
     )
+
+    # XPU-specific optimization
+    kernel_args = {}
+    if X.device.type == "xpu":
+        kernel_args.update({"grf_mode": "large", "num_warps": 32, "num_stages": 4})
+
     _layer_norm_backward_kernel[grid](
         X,
         W,
@@ -227,6 +239,7 @@ def layer_norm_backward(dY, X, W, B, Mean, RSTD):
         rows_per_program,
         BLOCK_SIZE=BLOCK_SIZE,
         dtype=triton_dtype,
+        **kernel_args,  # XPU-specific optimization
     )
 
     DW = _DW.sum(dim=0).to(W.dtype)
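The forward-path change above follows a simple pattern: collect any device-specific launch options in a dict and unpack them into the Triton kernel call, so non-XPU launches are left untouched. Below is a minimal sketch of that pattern using a toy kernel rather than liger-kernel's real _layer_norm_forward_kernel; only the grf_mode option and the XPU device check come from the diff, everything else is illustrative.

# Illustrative only: a toy Triton kernel standing in for _layer_norm_forward_kernel,
# showing how the diff threads optional XPU launch options through **kernel_args.
import torch
import triton
import triton.language as tl


@triton.jit
def _double_kernel(X_ptr, Y_ptr, n_cols, BLOCK_SIZE: tl.constexpr):
    # One program per row: load a row, double it, store it back.
    row = tl.program_id(0)
    cols = tl.arange(0, BLOCK_SIZE)
    mask = cols < n_cols
    x = tl.load(X_ptr + row * n_cols + cols, mask=mask)
    tl.store(Y_ptr + row * n_cols + cols, x * 2.0, mask=mask)


def launch_with_device_args(X: torch.Tensor) -> torch.Tensor:
    # Assumes X is a contiguous 2-D tensor.
    n_rows, n_cols = X.shape
    Y = torch.empty_like(X)
    BLOCK_SIZE = triton.next_power_of_2(n_cols)

    # Same shape as the diff: extra options are added only on XPU, so CUDA/ROCm
    # launches are exactly what they were before the change.
    kernel_args = {}
    if X.device.type == "xpu":
        kernel_args["grf_mode"] = "large"  # request the large register file from Intel's Triton backend

    _double_kernel[(n_rows,)](X, Y, n_cols, BLOCK_SIZE=BLOCK_SIZE, **kernel_args)
    return Y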
liger_kernel/ops/rms_norm.py
CHANGED
@@ -223,6 +223,10 @@ def rms_norm_forward(X, W, eps, offset, casting_mode):
     # Check constraints.
     assert X.shape[1] == W.shape[0], "Incompatible hidden size dimension between tensor1.shape[1] and tensor2.shape[0]"
 
+    # XPU-specific optimization
+    kernel_args = {}
+    if X.device.type == "xpu":
+        kernel_args["grf_mode"] = "large"
     _rms_norm_forward_kernel[(n_rows,)](
         Y,
         Y.stride(0),
@@ -238,6 +242,7 @@ def rms_norm_forward(X, W, eps, offset, casting_mode):
         casting_mode,
         BLOCK_SIZE=BLOCK_SIZE,
         num_warps=num_warps,
+        **kernel_args,  # XPU-specific optimization
     )
     return Y.view(*shape), X, RSTD, BLOCK_SIZE, num_warps, casting_mode
 
@@ -252,7 +257,7 @@ def rms_norm_backward(dY, X, W, RSTD, offset, casting_mode, BLOCK_SIZE, num_warp
     if X.device.type == "cuda":
         sm_count = torch.cuda.get_device_properties(X.device).multi_processor_count
     elif X.device.type == "xpu":
-        sm_count = torch.xpu.get_device_properties(X.device).
+        sm_count = torch.xpu.get_device_properties(X.device).gpu_eu_count
 
     # fp32 for numerical stability especially.
     _dW = torch.empty((sm_count, n_cols), dtype=torch.float32, device=W.device)
@@ -267,6 +272,11 @@ def rms_norm_backward(dY, X, W, RSTD, offset, casting_mode, BLOCK_SIZE, num_warp
     else:
         dX = torch.zeros_like(dY)
 
+    # XPU-specific optimization
+    kernel_args = {}
+    if X.device.type == "xpu":
+        kernel_args["grf_mode"] = "large"
+
     _rms_norm_backward_kernel[grid](
         dY,
         dY.stride(0),
@@ -288,6 +298,7 @@ def rms_norm_backward(dY, X, W, RSTD, offset, casting_mode, BLOCK_SIZE, num_warp
         casting_mode,
         BLOCK_SIZE=BLOCK_SIZE,
         num_warps=num_warps,
+        **kernel_args,  # XPU-specific optimization
     )
     dX = dX.view(*shape)
     dW = _dW.sum(dim=0).to(W.dtype)
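Both backward passes also size their partial weight-gradient buffer from a device property: one row per streaming multiprocessor on CUDA, one per execution unit on XPU, summed into the final gradient afterwards. A hedged sketch of that lookup, assuming only the attribute names visible in the diff:

import torch


def hardware_unit_count(device: torch.device) -> int:
    # Mirrors the lookup in layer_norm_backward / rms_norm_backward: one partial
    # dW row is allocated per streaming multiprocessor (CUDA) or per execution
    # unit (XPU), and the rows are summed into the final weight gradient.
    if device.type == "cuda":
        return torch.cuda.get_device_properties(device).multi_processor_count
    if device.type == "xpu":
        # Attribute name as it appears in the diff; needs a PyTorch build with XPU support.
        return torch.xpu.get_device_properties(device).gpu_eu_count
    raise ValueError(f"unsupported device type: {device.type}")


# Hypothetical usage, following the allocation pattern in the diff:
# sm_count = hardware_unit_count(X.device)
# _dW = torch.empty((sm_count, n_cols), dtype=torch.float32, device=W.device)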
liger_kernel/transformers/__init__.py
CHANGED
@@ -1,4 +1,6 @@
-
+import importlib
+
+# Always-safe imports (independent of 'transformers')
 from liger_kernel.transformers.cross_entropy import LigerCrossEntropyLoss  # noqa: F401
 from liger_kernel.transformers.dyt import LigerDyT  # noqa: F401
 from liger_kernel.transformers.fused_linear_cross_entropy import LigerFusedLinearCrossEntropyLoss  # noqa: F401
@@ -6,27 +8,113 @@ from liger_kernel.transformers.fused_linear_jsd import LigerFusedLinearJSD  # no
 from liger_kernel.transformers.geglu import LigerGEGLUMLP  # noqa: F401
 from liger_kernel.transformers.jsd import LigerJSD  # noqa: F401
 from liger_kernel.transformers.layer_norm import LigerLayerNorm  # noqa: F401
-from liger_kernel.transformers.monkey_patch import _apply_liger_kernel  # noqa: F401
-from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_gemma  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_gemma2  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_gemma3  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_gemma3_text  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_granite  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_llama  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_llava  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_mistral  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_mixtral  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_mllama  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_olmo2  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_paligemma  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_phi3  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2_5_vl  # noqa: F401
-from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2_vl  # noqa: F401
 from liger_kernel.transformers.rms_norm import LigerRMSNorm  # noqa: F401
 from liger_kernel.transformers.rope import liger_rotary_pos_emb  # noqa: F401
 from liger_kernel.transformers.swiglu import LigerBlockSparseTop2MLP  # noqa: F401
 from liger_kernel.transformers.swiglu import LigerPhi3SwiGLUMLP  # noqa: F401
 from liger_kernel.transformers.swiglu import LigerSwiGLUMLP  # noqa: F401
 from liger_kernel.transformers.tvd import LigerTVDLoss  # noqa: F401
+
+# Check if 'transformers' is installed
+try:
+    import transformers  # noqa: F401
+
+    _TRANSFORMERS_AVAILABLE = True
+except ImportError:
+    _TRANSFORMERS_AVAILABLE = False
+
+
+def is_transformers_available() -> bool:
+    """
+    Returns True if the 'transformers' package is available.
+    Useful for conditional logic in downstream code.
+    """
+    return _TRANSFORMERS_AVAILABLE
+
+
+def __getattr__(name: str):
+    """
+    Handles lazy access to transformer-dependent attributes.
+    If 'transformers' is not installed, raises a user-friendly ImportError.
+    """
+    if not _TRANSFORMERS_AVAILABLE:
+        raise ImportError(
+            f"The attribute '{name}' requires the 'transformers' library, which is not installed.\n"
+            f"Please install it with `pip install transformers` to use this functionality."
+        )
+
+    if name == "AutoLigerKernelForCausalLM":
+        module = importlib.import_module("liger_kernel.transformers.auto_model")
+        return getattr(module, name)
+
+    monkey_patch_symbols = {
+        "_apply_liger_kernel",
+        "_apply_liger_kernel_to_instance",
+        "apply_liger_kernel_to_gemma",
+        "apply_liger_kernel_to_gemma2",
+        "apply_liger_kernel_to_gemma3",
+        "apply_liger_kernel_to_gemma3_text",
+        "apply_liger_kernel_to_granite",
+        "apply_liger_kernel_to_llama",
+        "apply_liger_kernel_to_llava",
+        "apply_liger_kernel_to_mistral",
+        "apply_liger_kernel_to_mixtral",
+        "apply_liger_kernel_to_mllama",
+        "apply_liger_kernel_to_olmo2",
+        "apply_liger_kernel_to_paligemma",
+        "apply_liger_kernel_to_phi3",
+        "apply_liger_kernel_to_qwen2",
+        "apply_liger_kernel_to_qwen2_5_vl",
+        "apply_liger_kernel_to_qwen2_vl",
+    }
+
+    if name in monkey_patch_symbols:
+        module = importlib.import_module("liger_kernel.transformers.monkey_patch")
+        return getattr(module, name)
+
+    raise AttributeError(f"module {__name__} has no attribute {name}")
+
+
+# Shared symbols in all environments
+__all__ = [
+    "is_transformers_available",
+    "LigerCrossEntropyLoss",
+    "LigerDyT",
+    "LigerFusedLinearCrossEntropyLoss",
+    "LigerFusedLinearJSD",
+    "LigerGEGLUMLP",
+    "LigerJSD",
+    "LigerLayerNorm",
+    "LigerRMSNorm",
+    "liger_rotary_pos_emb",
+    "LigerBlockSparseTop2MLP",
+    "LigerPhi3SwiGLUMLP",
+    "LigerSwiGLUMLP",
+    "LigerTVDLoss",
+]
+
+# Add transformer-dependent symbols only if available
+if _TRANSFORMERS_AVAILABLE:
+    __all__.extend(
+        [
+            "AutoLigerKernelForCausalLM",
+            "_apply_liger_kernel",
+            "_apply_liger_kernel_to_instance",
+            "apply_liger_kernel_to_gemma",
+            "apply_liger_kernel_to_gemma2",
+            "apply_liger_kernel_to_gemma3",
+            "apply_liger_kernel_to_gemma3_text",
+            "apply_liger_kernel_to_granite",
+            "apply_liger_kernel_to_llama",
+            "apply_liger_kernel_to_llava",
+            "apply_liger_kernel_to_mistral",
+            "apply_liger_kernel_to_mixtral",
+            "apply_liger_kernel_to_mllama",
+            "apply_liger_kernel_to_olmo2",
+            "apply_liger_kernel_to_paligemma",
+            "apply_liger_kernel_to_phi3",
+            "apply_liger_kernel_to_qwen2",
+            "apply_liger_kernel_to_qwen2_5_vl",
+            "apply_liger_kernel_to_qwen2_vl",
+        ]
+    )
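The rewritten __init__.py leans on PEP 562 module-level __getattr__: symbols that depend on transformers are no longer imported eagerly but resolved on first attribute access, with a clear ImportError when transformers is missing. A standalone sketch of the same pattern follows; the package and symbol mapping here are illustrative, not a copy of liger-kernel's actual table.

# mypkg/__init__.py — illustrative package, not liger-kernel's actual mapping
import importlib

try:
    import transformers  # noqa: F401

    _TRANSFORMERS_AVAILABLE = True
except ImportError:
    _TRANSFORMERS_AVAILABLE = False

# symbol name -> submodule that actually defines it (hypothetical names)
_LAZY_SYMBOLS = {
    "AutoLigerKernelForCausalLM": "mypkg.auto_model",
    "apply_liger_kernel_to_llama": "mypkg.monkey_patch",
}


def __getattr__(name: str):
    # Invoked by Python only when 'name' is not already in the module namespace
    # (PEP 562), so the heavy submodules are imported on first use.
    if name in _LAZY_SYMBOLS:
        if not _TRANSFORMERS_AVAILABLE:
            raise ImportError(f"'{name}' requires the 'transformers' package.")
        module = importlib.import_module(_LAZY_SYMBOLS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Callers are unaffected when transformers is installed: from liger_kernel.transformers import apply_liger_kernel_to_llama resolves through __getattr__ on first access, while a bare import liger_kernel.transformers no longer pulls in transformers or the monkey-patch module.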
{liger_kernel_nightly-0.5.6.dev20250407214804.dist-info → liger_kernel_nightly-0.5.6.dev20250408194537.dist-info}/RECORD
CHANGED
@@ -24,16 +24,16 @@ liger_kernel/ops/geglu.py,sha256=axGvCIvlBzuluoAIrWTsp2iZM4BFKNInkPov8YVvH9E,412
 liger_kernel/ops/group_norm.py,sha256=qD4D4lSjSgVtO52EBNLC2iTseALRgPgqXE50U2woggk,10837
 liger_kernel/ops/jsd.py,sha256=onHp5T3MbvJaVz5Vup7Ww6EQp_HTaZeayTjJk6FgQMY,7042
 liger_kernel/ops/kl_div.py,sha256=NkG7D6_DnPBzr-ohhYiQbRBnq_fbGmpn5UU7y0UBKQo,8420
-liger_kernel/ops/layer_norm.py,sha256=
+liger_kernel/ops/layer_norm.py,sha256=vWCyOm-F2GMAilB-ozJcFeUQQLCJoTE_uiXq-_0uYuI,8356
 liger_kernel/ops/qwen2vl_mrope.py,sha256=3GExhYpLgB4VUtyZyjRk8XjEur3W4EWF6HQ67ML5vBU,8481
-liger_kernel/ops/rms_norm.py,sha256=
+liger_kernel/ops/rms_norm.py,sha256=PP27OIBmV9By63i13jot9ylDowW0nuxY_JFIkaPLgL4,12078
 liger_kernel/ops/rope.py,sha256=ofmBOkUpZZO-Q8Z5B_LOFYYLD-YT-8WnJ4vGOrDYouI,8943
 liger_kernel/ops/swiglu.py,sha256=KmgMjaJQnbLLgZn2nEpbwHU_xpnYRweCyrLQSVvM1vA,3015
 liger_kernel/ops/tvd.py,sha256=FHJtLQI95ijqgg9UtaHpMAjSCiPxB6CduPwPMcGxelc,6405
 liger_kernel/ops/utils.py,sha256=uoFKQqo-34N2TWQNvXMFywqGiOMMXNEVBxVojzlUAa0,3836
 liger_kernel/ops/experimental/embedding.py,sha256=tolj3tItkzpSb30zWqDN2_yX4ectflaQ8HMyKyFIQc8,4172
 liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-OidjtbsW80oZ6IM,13314
-liger_kernel/transformers/__init__.py,sha256=
+liger_kernel/transformers/__init__.py,sha256=d02IvvSSxSzLenLtexzi7pHUCfl0BGGV0TxOErzb1VE,4454
 liger_kernel/transformers/auto_model.py,sha256=0qCTRZt280Bj_LcFdzo9hlaR-BWNazawXOGgoCZjgEg,1545
 liger_kernel/transformers/cross_entropy.py,sha256=z3KTWQnFxr_IZaVjtYt0ZNEWQdDdYThN35xWkHlDGH0,1683
 liger_kernel/transformers/dyt.py,sha256=QMqqc14pkE0WhpRZvapfnNAun-6C0C_tHExL2ZJuCUA,648
@@ -74,9 +74,9 @@ liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7H
 liger_kernel/transformers/trainer/orpo_trainer.py,sha256=pdekW7l6Qg_aqa5SYKYlSWUF8m3lkOFvFLcIMEHrz9s,8338
 liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
 liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
-liger_kernel_nightly-0.5.6.
-liger_kernel_nightly-0.5.6.
-liger_kernel_nightly-0.5.6.
-liger_kernel_nightly-0.5.6.
-liger_kernel_nightly-0.5.6.
-liger_kernel_nightly-0.5.6.
+liger_kernel_nightly-0.5.6.dev20250408194537.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel_nightly-0.5.6.dev20250408194537.dist-info/METADATA,sha256=QqNEMLqg80zH5iTfcmAvYvM3ukixN6OSByeATy0BImM,23297
+liger_kernel_nightly-0.5.6.dev20250408194537.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel_nightly-0.5.6.dev20250408194537.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+liger_kernel_nightly-0.5.6.dev20250408194537.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel_nightly-0.5.6.dev20250408194537.dist-info/RECORD,,