liger-kernel-nightly 0.6.4.dev20251201192513__py3-none-any.whl → 0.6.4.dev20251202094519__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
liger_kernel/ops/cross_entropy.py CHANGED
@@ -10,8 +10,9 @@ from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import element_mul_kernel
 from liger_kernel.ops.utils import is_hip
 from liger_kernel.utils import infer_device
+from liger_kernel.utils import is_npu_available
 
-if compare_version("triton", operator.ge, "3.0.0"):
+if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
     try:
         # typical import path with dispatch available
         from triton.language.extra.libdevice import tanh
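
The same guard change repeats across the ops files below: presumably the Ascend Triton port lacks triton.language.extra.libdevice, so the added "not is_npu_available()" clause forces NPU builds onto the fallback import path. A minimal sketch of the full resolution pattern this guard short-circuits; the guard condition is verbatim from the diff, but both fallback branches are assumptions, since they lie outside the hunk:

import operator

from liger_kernel.ops.utils import compare_version
from liger_kernel.utils import is_npu_available

if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
    try:
        # typical import path with dispatch available
        from triton.language.extra.libdevice import tanh
    except ModuleNotFoundError:
        # assumed fallback when the dispatch shim is missing
        from triton.language.extra.cuda.libdevice import tanh
else:
    # assumed portable path for pre-3.0 Triton and for NPU builds
    from triton.language.math import tanh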
liger_kernel/ops/dyt.py CHANGED
@@ -7,8 +7,10 @@ import triton.language as tl
 from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import ensure_contiguous
 from liger_kernel.ops.utils import infer_device
+from liger_kernel.utils import get_npu_multi_processor_count
+from liger_kernel.utils import is_npu_available
 
-if compare_version("triton", operator.ge, "3.0.0"):
+if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
     try:
         # typical import path with dispatch available
         from triton.language.extra.libdevice import tanh
@@ -125,7 +127,8 @@ def liger_dyt_bwd(dy, x, alpha, gamma, beta):
         NUM_SMS = torch.cuda.get_device_properties(x.device).multi_processor_count
     elif device == "xpu":
         NUM_SMS = torch.xpu.get_device_properties(x.device).gpu_subslice_count
-
+    elif device == "npu":
+        NUM_SMS = get_npu_multi_processor_count()
     da = torch.zeros(NUM_SMS, triton.cdiv(N, 512), dtype=torch.float32, device=x.device)
     dg = torch.empty(NUM_SMS, N, dtype=torch.float32, device=x.device)
     db = torch.empty(NUM_SMS, N, dtype=torch.float32, device=x.device) if HAVE_BETA else None
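
The backward pass sizes its partial-gradient buffers by the number of SM-like compute units, one row per program, and the new branch supplies that count on NPU. A minimal standalone sketch of the dispatch; the helper name get_sm_count is hypothetical, while the branch bodies mirror the diff:

import torch

from liger_kernel.utils import get_npu_multi_processor_count, infer_device


def get_sm_count(device: torch.device) -> int:
    # Hypothetical helper mirroring the per-device branch inside liger_dyt_bwd.
    dev = infer_device()
    if dev == "cuda":
        return torch.cuda.get_device_properties(device).multi_processor_count
    if dev == "xpu":
        return torch.xpu.get_device_properties(device).gpu_subslice_count
    if dev == "npu":
        return get_npu_multi_processor_count()
    return 1  # conservative fallback for CPU or unrecognized backends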
liger_kernel/ops/fused_add_rms_norm.py CHANGED
@@ -9,8 +9,10 @@ from liger_kernel.ops.utils import calculate_settings
 from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import ensure_contiguous
 from liger_kernel.ops.utils import torch_to_triton_dtype
+from liger_kernel.utils import get_npu_multi_processor_count
+from liger_kernel.utils import is_npu_available
 
-if compare_version("triton", operator.ge, "3.0.0"):
+if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
     try:
         # typical import path with dispatch available
         from triton.language.extra.libdevice import rsqrt
@@ -293,6 +295,8 @@ def fused_add_rms_norm_backward(dY, dS_out, S, W, RSTD, offset, casting_mode, BL
         sm_count = torch.cuda.get_device_properties(S.device).multi_processor_count
     elif S.device.type == "xpu":
         sm_count = torch.xpu.get_device_properties(S.device).gpu_eu_count
+    elif S.device.type == "npu":
+        sm_count = get_npu_multi_processor_count()
 
     # fp32 for numerical stability especially.
     _dW = torch.empty((sm_count, n_cols), dtype=torch.float32, device=W.device)
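
These sm_count-sized buffers implement a split-style accumulation: each program writes its partial weight gradient into its own row, and the rows are reduced afterwards. A sketch of the presumed completion of the pattern; the reduction itself is outside the hunk, and the sizes below are illustrative:

import torch

sm_count, n_cols = 48, 4096  # illustrative sizes, CPU tensors
_dW = torch.zeros((sm_count, n_cols), dtype=torch.float32)
# ... the Triton kernel would accumulate one partial gradient per row ...
dW = _dW.sum(dim=0)  # reduce the per-program partials into the final gradient
assert dW.shape == (n_cols,)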
liger_kernel/ops/geglu.py CHANGED
@@ -7,8 +7,9 @@ import triton.language as tl
 from liger_kernel.ops.utils import calculate_settings
 from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import ensure_contiguous
+from liger_kernel.utils import is_npu_available
 
-if compare_version("triton", operator.ge, "3.0.0"):
+if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
     try:
         # typical import path with dispatch available
         from triton.language.extra.libdevice import tanh
liger_kernel/ops/group_norm.py CHANGED
@@ -6,8 +6,9 @@ import triton.language as tl
 
 from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import ensure_contiguous
+from liger_kernel.utils import is_npu_available
 
-if compare_version("triton", operator.ge, "3.0.0"):
+if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
     try:
         # typical import path with dispatch available
         from triton.language.extra.libdevice import rsqrt
liger_kernel/ops/layer_norm.py CHANGED
@@ -8,8 +8,9 @@ import triton.language as tl
 from liger_kernel.ops.utils import calculate_settings
 from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import ensure_contiguous
+from liger_kernel.utils import is_npu_available
 
-if compare_version("triton", operator.ge, "3.0.0"):
+if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
     try:
         # typical import path with dispatch available
         from triton.language.extra.libdevice import rsqrt
liger_kernel/ops/poly_norm.py CHANGED
@@ -7,8 +7,10 @@ import triton.language as tl
 from liger_kernel.ops.utils import calculate_settings
 from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import ensure_contiguous
+from liger_kernel.utils import get_npu_multi_processor_count
+from liger_kernel.utils import is_npu_available
 
-if compare_version("triton", operator.ge, "3.0.0"):
+if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
     try:
         from triton.language.extra.libdevice import rsqrt
     except ModuleNotFoundError:
@@ -290,6 +292,8 @@ def poly_norm_backward(dY, X, W, RSTD, BLOCK_SIZE, num_warps, in_place):
         sm_count = torch.cuda.get_device_properties(X.device).multi_processor_count
     elif X.device.type == "xpu":
         sm_count = torch.xpu.get_device_properties(X.device).gpu_eu_count
+    elif X.device.type == "npu":
+        sm_count = get_npu_multi_processor_count()
 
     # Allocate or reuse gradients
     if in_place is True:
liger_kernel/ops/rms_norm.py CHANGED
@@ -21,8 +21,10 @@ from liger_kernel.ops.utils import calculate_settings
 from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import ensure_contiguous
 from liger_kernel.ops.utils import torch_to_triton_dtype
+from liger_kernel.utils import get_npu_multi_processor_count
+from liger_kernel.utils import is_npu_available
 
-if compare_version("triton", operator.ge, "3.0.0"):
+if compare_version("triton", operator.ge, "3.0.0") and not is_npu_available():
     try:
         # typical import path with dispatch available
         from triton.language.extra.libdevice import rsqrt
@@ -450,6 +452,8 @@ def rms_norm_backward(dY, X, W, RSTD, offset, casting_mode, BLOCK_SIZE, num_warp
         sm_count = torch.cuda.get_device_properties(X.device).multi_processor_count
     elif X.device.type == "xpu":
         sm_count = torch.xpu.get_device_properties(X.device).gpu_eu_count
+    elif X.device.type == "npu":
+        sm_count = get_npu_multi_processor_count()
 
     # fp32 for numerical stability especially.
     _dW = torch.empty((sm_count, n_cols), dtype=torch.float32, device=W.device)
liger_kernel/ops/utils.py CHANGED
@@ -78,6 +78,8 @@ def get_amp_custom_fwd_bwd() -> Callable:
             functools.partial(torch.amp.custom_fwd, device_type=device),
             functools.partial(torch.amp.custom_bwd, device_type=device),
         )
+    if hasattr(torch, "npu") and getattr(torch.npu, "amp", None) is not None:
+        return torch.npu.amp.custom_fwd, torch.npu.amp.custom_bwd
     return torch.cuda.amp.custom_fwd, torch.cuda.amp.custom_bwd
 
 
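get_amp_custom_fwd_bwd returns a (forward, backward) decorator pair; on the fallback path for older torch (the first return already handles torch >= 2.4.0 via torch.amp), it now prefers torch.npu.amp when torch_npu is loaded, ahead of the legacy torch.cuda.amp default. A sketch of how such a pair is typically applied to an autograd Function; MyScale is hypothetical:

import torch

from liger_kernel.ops.utils import get_amp_custom_fwd_bwd

amp_custom_fwd, amp_custom_bwd = get_amp_custom_fwd_bwd()


class MyScale(torch.autograd.Function):
    # Hypothetical Function showing where the returned decorators attach.
    @staticmethod
    @amp_custom_fwd
    def forward(ctx, x):
        # runs under the backend-appropriate autocast casting rules
        return x * 2.0

    @staticmethod
    @amp_custom_bwd
    def backward(ctx, grad_out):
        return grad_out * 2.0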
liger_kernel/transformers/model/gemma3.py CHANGED
@@ -235,6 +235,7 @@ def multimodal_forward(
         **lm_kwargs,
     )
 
+    shift_labels = lm_kwargs.pop("shift_labels", None)
     hidden_states = outputs[0]
 
     slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
liger_kernel/transformers/model/paligemma.py CHANGED
@@ -330,6 +330,7 @@ def lce_forward(
         **lm_kwargs,
    )
 
+    shift_labels = lm_kwargs.pop("shift_labels", None)
     hidden_states = outputs[0]
 
     loss = None
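
Both model forwards gain the same line: shift_labels is removed from lm_kwargs right after the backbone call, so a caller-supplied, already-shifted label tensor (as recent transformers loss utilities support) can be routed to the loss explicitly instead of leaking through later **lm_kwargs expansions. A minimal sketch of the pattern; the downstream loss call is an assumption:

import torch

# The dict below stands in for the forward's keyword arguments.
lm_kwargs = {"shift_labels": torch.tensor([[2, 3, -100]])}

shift_labels = lm_kwargs.pop("shift_labels", None)  # verbatim from the diff
assert "shift_labels" not in lm_kwargs  # later **lm_kwargs expansions stay clean
# ... presumably forwarded explicitly, e.g. loss_fn(..., shift_labels=shift_labels)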
liger_kernel/utils.py CHANGED
@@ -18,12 +18,37 @@ def infer_device():
     """
     if torch.cuda.is_available():  # Works for both Nvidia and AMD
         return "cuda"
+    # Use Ascend NPU if available (torch.npu)
+    elif is_npu_available():
+        return "npu"
+    # XPU (Intel) if available
     elif torch.xpu.is_available():
         return "xpu"
     else:
         return "cpu"
 
 
+def is_npu_available() -> bool:
+    """Detect Ascend NPU availability."""
+    try:
+        from transformers.utils import is_torch_npu_available
+
+        return is_torch_npu_available()
+    except Exception:
+        return False
+
+
+def get_npu_multi_processor_count() -> int:
+    """Return a heuristic multi-processor count for NPU."""
+    if is_npu_available():
+        NPU_MULTI_PROCESSOR_COUNT = 48
+        dev_props = torch.npu.get_device_properties()
+        # The vector_core_num attribute is supported in the torch.npu v7.2.0 release version.
+        return dev_props.vector_core_num if hasattr(dev_props, "vector_core_num") else NPU_MULTI_PROCESSOR_COUNT
+    # Reasonable default to avoid division by zero
+    return 1
+
+
 def transformers_version_dispatch(
     required_version: str,
     before_fn,
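
Taken together, the new helpers give the rest of the codebase a uniform device story: infer_device now reports "npu" ahead of "xpu", and get_npu_multi_processor_count prefers the device-reported vector_core_num (per the in-code comment, available from the torch.npu v7.2.0 release) with a 48-core heuristic fallback. A minimal usage sketch; the printed values depend on the machine, and the NPU branch requires torch_npu plus transformers underneath:

from liger_kernel.utils import get_npu_multi_processor_count, infer_device, is_npu_available

device = infer_device()  # "cuda" | "npu" | "xpu" | "cpu"
if device == "npu":
    sm_count = get_npu_multi_processor_count()  # vector_core_num, or 48 if unreported
else:
    sm_count = 1  # the helper itself also returns 1 when no NPU is present
print(device, is_npu_available(), sm_count)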
liger_kernel_nightly-0.6.4.dev20251202094519.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: liger_kernel_nightly
-Version: 0.6.4.dev20251201192513
+Version: 0.6.4.dev20251202094519
 Summary: Efficient Triton kernels for LLM Training
 License: BSD 2-CLAUSE LICENSE
 Copyright 2024 LinkedIn Corporation
liger_kernel_nightly-0.6.4.dev20251202094519.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
 liger_kernel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 liger_kernel/env_report.py,sha256=uhdEC8OydxoZlb7B6YYcAaBF3crGFdIck-4cxaW4NJY,1728
-liger_kernel/utils.py,sha256=BQleeZWHSZPNuPcYcoZTOp1kcNEZONZilPP5-AmjgWI,2024
+liger_kernel/utils.py,sha256=TW-OSkuSLrMAPPMZtOsRKBqZ7MCiSrkATB1z_p81Ets,2879
 liger_kernel/chunked_loss/README.md,sha256=0FmkFC3hKBqyoDT5uTlIYmrvRkF-EOCR1y-EBU1LpWU,2248
 liger_kernel/chunked_loss/__init__.py,sha256=J5_jNnzZ4gZmA38W5f_4oab7xMoNk1Xy-yh3X_Xlf-s,714
 liger_kernel/chunked_loss/cosine_similarity_loss.py,sha256=x2nprTHPraU8Ya2NMZtaDk9r-s-1NKJwCTrzQIdmg-8,4680
@@ -17,30 +17,30 @@ liger_kernel/chunked_loss/kto_loss.py,sha256=llVCe6DkcpCo57seGWoMikaQVFApx764jsm
 liger_kernel/chunked_loss/orpo_loss.py,sha256=nu9UYG16dcMw93lvHi4_hYs3Q0FK1KnlmMRj7OpYU8s,4872
 liger_kernel/chunked_loss/simpo_loss.py,sha256=fy2w8KbhMrBv7b1jdIeH3bBFxY52bPQPZb3KwBvmurM,5385
 liger_kernel/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-liger_kernel/ops/cross_entropy.py,sha256=-fd8qVxn_66MGSLs-Gs8yGmWlkET5YAoyb__Bolfz4c,22617
-liger_kernel/ops/dyt.py,sha256=gCLz4S8aul8SY9nvIGaoK67aGb7U9MJRQdo3ONqmQYs,5417
-liger_kernel/ops/fused_add_rms_norm.py,sha256=UBqmlqFCmhSAIpkNKd8rrfXatX7Z4J9bp2dX9A0lrJQ,14017
+liger_kernel/ops/cross_entropy.py,sha256=J0OaI9b3l1H5FPeLft84XGz4g-WhMbrKXGo3wqlWwd0,22692
+liger_kernel/ops/dyt.py,sha256=4XmkCCZaPPM8Tl4QHo6vSF2m68jrwsnjucrbyOJvZpM,5628
+liger_kernel/ops/fused_add_rms_norm.py,sha256=lvwrLsKvoAQqS9KatgBkAyy0Xdecado-g0rvXYXaBak,14237
 liger_kernel/ops/fused_linear_cross_entropy.py,sha256=YepeWqX37gKc1-FUrzkDTzXYdOvmBmfv4KgL__KN_UI,16158
 liger_kernel/ops/fused_linear_jsd.py,sha256=CSoprxb-YcJy-YUKiTcYkxN8sb9h2kdk_iHuncvSV5c,9683
 liger_kernel/ops/fused_neighborhood_attention.py,sha256=vPi5xbnh6wxyZehaqo6Tuilqo2fN5SGDiONjnNmIKqs,35556
-liger_kernel/ops/geglu.py,sha256=r0WSq9E93zzynL44Wh8femzOWK07_SseBM_pJUyxT3s,4144
-liger_kernel/ops/group_norm.py,sha256=qD4D4lSjSgVtO52EBNLC2iTseALRgPgqXE50U2woggk,10837
+liger_kernel/ops/geglu.py,sha256=z-t9OMk3SDL5sJenJjCzhGEeGusL22j3nDjTDEUDAz8,4219
+liger_kernel/ops/group_norm.py,sha256=zoy-TcNkYtKGmGhTFJmnyiG_4Es4ZphpqP8jtUSI6-I,10912
 liger_kernel/ops/grpo_loss.py,sha256=2SyOujtF9I3xiNo4wFf4s6MeiDotE_qeYfRWgj_bOBE,9573
 liger_kernel/ops/jsd.py,sha256=onHp5T3MbvJaVz5Vup7Ww6EQp_HTaZeayTjJk6FgQMY,7042
 liger_kernel/ops/kl_div.py,sha256=ZjGdDLKWksHT9dZ0xF_TDgAkj5cuMTwwT5tr9E-_24o,8734
-liger_kernel/ops/layer_norm.py,sha256=OMaex1MDsM9kaFs0-q5Pnx3DrMVjongQoZ5-iFIOy00,10523
+liger_kernel/ops/layer_norm.py,sha256=-4UEyko9eKgBi5LNmfdEU2hTpJOWVnEy5iYjJkMvHmk,10598
 liger_kernel/ops/llama4_rope.py,sha256=-aqdZzllklTN8b9--e-TsWY_ntGCN8-tyseT4x0bd8s,8223
 liger_kernel/ops/multi_token_attention.py,sha256=Oz_RXDp-OSS_R_HuGmaETHdAJ7Toda_70OfE7TXMUlY,7645
-liger_kernel/ops/poly_norm.py,sha256=MLgI8Ea93fugKibHCUauQ2ASYVXCvpPZe5v3kQZU6po,11152
+liger_kernel/ops/poly_norm.py,sha256=5IdJEZnbbhblkL_X8UhSD4A2CooQbOAZJw8nAekWNs4,11372
 liger_kernel/ops/qwen2vl_mrope.py,sha256=3GExhYpLgB4VUtyZyjRk8XjEur3W4EWF6HQ67ML5vBU,8481
-liger_kernel/ops/rms_norm.py,sha256=kKncS1OCBgZE4jQnBRNpPhL2PjYaVN3rY9V9jEhzHoM,18991
+liger_kernel/ops/rms_norm.py,sha256=2V8qheEvidBm0VxwfOoAnV837F6blmtTgP91VYdGs7c,19211
 liger_kernel/ops/rope.py,sha256=v-7JHRrv-5ImoROkpKfl30WwWI4qTa2tAl7zQeB4ml4,8956
 liger_kernel/ops/softmax.py,sha256=tgORx6MK1IDDtZKqGarj0IPIVjqAIEUXXYPiinhRdtI,5864
 liger_kernel/ops/sparsemax.py,sha256=AeWe1xgkHJFEKWTj2vu_0hj7LztGvjqXAps-QTpCY0U,5087
 liger_kernel/ops/swiglu.py,sha256=D7nd4u_LInwsIRNCDdY77lqnTz8-W5dJrpEAt8zEO_A,3033
 liger_kernel/ops/tiled_mlp.py,sha256=eyMFsFFgHch8a_6R6IYRG24_jqKg5GF_BQUoQuAG8SY,4529
 liger_kernel/ops/tvd.py,sha256=FHJtLQI95ijqgg9UtaHpMAjSCiPxB6CduPwPMcGxelc,6405
-liger_kernel/ops/utils.py,sha256=uoFKQqo-34N2TWQNvXMFywqGiOMMXNEVBxVojzlUAa0,3836
+liger_kernel/ops/utils.py,sha256=kYp84AOA7D9PYrvBUSrNsfQIt8elr_uA9OxCkbfiUFA,3980
 liger_kernel/ops/experimental/embedding.py,sha256=tolj3tItkzpSb30zWqDN2_yX4ectflaQ8HMyKyFIQc8,4172
 liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-OidjtbsW80oZ6IM,13314
 liger_kernel/transformers/__init__.py,sha256=CgwhrY5cdx6OcRgR2ZZJbOIkLswQWPTr-BAaoxDNNOY,10687
@@ -78,7 +78,7 @@ liger_kernel/transformers/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
 liger_kernel/transformers/model/falcon_h1.py,sha256=heUZ4wUt2ATmtBtmv8Rcro3pQl6fV9T0pburjTTW7os,5004
 liger_kernel/transformers/model/gemma.py,sha256=pAri4PYpknsFfkvyo8Ez2NNlqrUDW-KkExUXTGZAcH4,10621
 liger_kernel/transformers/model/gemma2.py,sha256=qa9Ok42vFojVGNmASTH3Ek566Vu507kjd--ZpZDKX9M,12024
-liger_kernel/transformers/model/gemma3.py,sha256=mEV3Kuy-dqfTk_b899Vb-InuD4_DvwH0nm5xgbG-0MM,14911
+liger_kernel/transformers/model/gemma3.py,sha256=ZUrFCc-pfF8jYHV0HsptBr98hx6p2q9ea0kSzVAoFPo,14966
 liger_kernel/transformers/model/glm4.py,sha256=bSp22iPIjsli4-c_usUOsyh1Bs2gIK8X6ynS0azseUs,5900
 liger_kernel/transformers/model/glm4v.py,sha256=dd-BQpccDCp1SbIxcJ5rG8xcwYQK3KOv1Tgm9TGnZc4,6594
 liger_kernel/transformers/model/glm4v_moe.py,sha256=zKhMdOOrRhlrvCSFaeVYfddL1ubpY8edEO91TN81n98,7135
@@ -94,7 +94,7 @@ liger_kernel/transformers/model/mllama.py,sha256=vAHwCm63sn4kpAY0rDGf_N0HR7KRTBV
 liger_kernel/transformers/model/olmo2.py,sha256=-h2bUOeuPfY1MdShdRvq5_wFDHKP4PEimgIl0fL-BT4,5902
 liger_kernel/transformers/model/olmo3.py,sha256=k2zYOlS8U_b5MwjdToB3tDRQ0bH_mWapVQqJcH8-qAo,6007
 liger_kernel/transformers/model/output_classes.py,sha256=0BGXVR4dYQpSHLkSqpRoXuHMryrceGSlTYRu6pvd8ZY,4542
-liger_kernel/transformers/model/paligemma.py,sha256=r0smHLADkEwfLS6d6ArWoSWEeLt2d_8pmgOO5F04b1o,20793
+liger_kernel/transformers/model/paligemma.py,sha256=UAYoKkIMvvix7GG3cSdWaDxVjMp26YsvthJuE7wFf6Y,20848
 liger_kernel/transformers/model/phi3.py,sha256=PT7Kw6yySg-7TsssWfi82eVMN3SWujCqzCqHigAdfeQ,4574
 liger_kernel/transformers/model/qwen2.py,sha256=ojqdJpD3A9A5uCS0N_rSq8gyNYWSsHfuvx3Z3ObC7ss,10686
 liger_kernel/transformers/model/qwen2_5_vl.py,sha256=FbIZDcg9cOr4PtBLNN8yVubN-gu2clndjSIzfi8NMos,6894
@@ -110,9 +110,9 @@ liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7H
 liger_kernel/transformers/trainer/orpo_trainer.py,sha256=tX0h63aOFe3rNqTmk6JpMf75UPo981yzEa6TghnjS0Q,5370
 liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
 liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
-liger_kernel_nightly-0.6.4.dev20251201192513.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
-liger_kernel_nightly-0.6.4.dev20251201192513.dist-info/METADATA,sha256=4uWQ3Lbvwt1esBU_QdhJUSgaDg9JMHMwdvtyLrorz8I,25238
-liger_kernel_nightly-0.6.4.dev20251201192513.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
-liger_kernel_nightly-0.6.4.dev20251201192513.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-liger_kernel_nightly-0.6.4.dev20251201192513.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
-liger_kernel_nightly-0.6.4.dev20251201192513.dist-info/RECORD,,
+liger_kernel_nightly-0.6.4.dev20251202094519.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel_nightly-0.6.4.dev20251202094519.dist-info/METADATA,sha256=ZgwmbyNXCX9-W_MkkjoTyoMWce_cteqdiqlMCeQ_Tt8,25238
+liger_kernel_nightly-0.6.4.dev20251202094519.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel_nightly-0.6.4.dev20251202094519.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+liger_kernel_nightly-0.6.4.dev20251202094519.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel_nightly-0.6.4.dev20251202094519.dist-info/RECORD,,