liger-kernel 0.0.0__py3-none-any.whl → 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- liger_kernel/ops/cross_entropy.py +4 -33
- liger_kernel/ops/fused_linear_cross_entropy.py +6 -6
- liger_kernel/ops/geglu.py +14 -3
- liger_kernel/ops/rms_norm.py +2 -2
- liger_kernel/ops/utils.py +12 -0
- liger_kernel/transformers/model/llama.py +3 -0
- liger_kernel/transformers/monkey_patch.py +5 -8
- liger_kernel-0.0.1.dist-info/LICENSE +23 -0
- {liger_kernel-0.0.0.dist-info → liger_kernel-0.0.1.dist-info}/METADATA +3 -1
- liger_kernel-0.0.1.dist-info/NOTICE +4 -0
- {liger_kernel-0.0.0.dist-info → liger_kernel-0.0.1.dist-info}/RECORD +13 -11
- {liger_kernel-0.0.0.dist-info → liger_kernel-0.0.1.dist-info}/WHEEL +0 -0
- {liger_kernel-0.0.0.dist-info → liger_kernel-0.0.1.dist-info}/top_level.txt +0 -0
liger_kernel/ops/cross_entropy.py
CHANGED

@@ -17,7 +17,7 @@ def liger_cross_entropy_kernel(
     BLOCK_SIZE: tl.constexpr,
 ):
     """
-    This kernel computes both cross entropy loss and the gradient of the
+    This kernel computes both cross entropy loss and the gradient of the input.
     We only consider hard label + mean reduction for now. Please refer to https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html for the math.

     Parameters:
@@ -34,7 +34,7 @@ def liger_cross_entropy_kernel(
     """

     # https://github.com/triton-lang/triton/issues/1058
-    #
+    # If B*T*V is too large, program_id * stride will overflow out of int32, so we convert to int64
     program_id = tl.program_id(0).to(tl.int64)

     # 1. Load Y_ptr first because if the target is ignore_index, we can return right away
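The int64 cast matters because each Triton program indexes one row of the flattened (B*T, V) logits view. A back-of-the-envelope check with hypothetical Llama-like sizes (the numbers below are illustrative, not from the package) shows the byte/element offset overrunning int32:

```python
# Rough illustration (hypothetical sizes): why program_id * stride needs int64.
INT32_MAX = 2**31 - 1

B, T, V = 8, 4096, 128256      # batch, sequence length, vocab size (illustrative)
last_row = B * T - 1           # program_id handled by the last Triton program
offset = last_row * V          # element offset of that row in the flattened logits

print(offset > INT32_MAX)      # True (~4.2e9) -> would wrap around without .to(tl.int64)
```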
@@ -90,13 +90,7 @@ def liger_cross_entropy_kernel(
     tl.debug_barrier()

     # 5. Calculate the loss
-    # Old Approach: Problematic LogSoftmax
-    # min of bfloat16 and float32 is 1e-38, so we set a value larger than that but small enough
-    # This will overflow if X_y * n_non_ignore is too small. Even if we add a tiny epsilon, it will still overflow
-    # loss = -tl.log(X_y * n_non_ignore)

-    # New Approach: Safe LogSoftmax
-    # Therefore, we propose to use safe logsoftmax by reordering the formula.
     # loss = log (softmax(X_y)) = log ((e ^ (X_y - max(X)) / sum(e ^ (X - max(X))))
     #      = (X_y - max(X)) - log(sum(e ^ (X - max(X))))
     # sum(e ^ (X - max(X))) must >= 1 because the max term is e ^ 0 = 1
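The comment that remains is the standard max-subtraction trick. A quick PyTorch sanity check (values chosen arbitrarily, not library code) confirms the rearranged formula matches the naive log-softmax while keeping exp() in a safe range:

```python
import torch

# Check of the rearrangement kept in the comment above:
# log(softmax(x)_y) = (x_y - max(x)) - log(sum(exp(x - max(x))))
x = torch.tensor([10.0, 2.0, -3.0, 4.0])
y = 0  # target class index (arbitrary)

m = x.max()
safe = (x[y] - m) - torch.log(torch.exp(x - m).sum())
naive = torch.log(torch.softmax(x, dim=0))[y]

print(torch.allclose(safe, naive))  # True; the safe form also avoids exp() overflow
```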
@@ -114,7 +108,7 @@
 # The hard limit of TRITON_MAX_TENSOR_NUMEL is 1048576 https://github.com/triton-lang/triton/blob/ba42a5c68fd0505f8c42f4202d53be0f8d9a5fe0/python/triton/language/core.py#L19
 # However, setting limit as 65536 as in LayerNorm tutorial is faster because of less register spilling
 # The optimal maximum block size depends on your hardware, your kernel, and your dtype
-MAX_FUSED_SIZE = 65536 // 2  #
+MAX_FUSED_SIZE = 65536 // 2  # the best size we found by manually tuning


 @triton.jit
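A cap like this is typically combined with a power-of-two round-up of the vocabulary size when choosing the per-row block size. The sketch below is an assumption about that usage, not the library's exact code:

```python
import triton

# Minimal sketch (assumed, not the package's exact code): choose a power-of-two
# block size for the vocab-sized reduction, clamped to the tuned cap above.
MAX_FUSED_SIZE = 65536 // 2

def pick_block_size(n_cols: int) -> int:
    return min(MAX_FUSED_SIZE, triton.next_power_of_2(n_cols))

print(pick_block_size(32000))   # 32768 for a Llama-2-sized vocab
print(pick_block_size(128256))  # 32768; larger rows are then processed in chunks
```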
@@ -184,28 +178,6 @@ class LigerCrossEntropyFunction(torch.autograd.Function):
         n_non_ignore = (target != ignore_index).sum().item()

         # ensure _input and target are contiguous in the last dimension
-        # there are examples that are NOT contiguous overall but contiguous in the last dimension
-        ####################################################################
-        # tensor = torch.arange(1, 21).reshape(5, -1)
-        # print(tensor)
-        # tensor([[ 1,  2,  3,  4],
-        #         [ 5,  6,  7,  8],
-        #         [ 9, 10, 11, 12],
-        #         [13, 14, 15, 16],
-        #         [17, 18, 19, 20]])
-        # print(tensor.is_contiguous())
-        # True
-        # slice = tensor[::2, :]
-        # print(slice)
-        # tensor([[ 1,  2,  3,  4],
-        #         [ 9, 10, 11, 12],
-        #         [17, 18, 19, 20]])
-        # print(slice.is_contiguous())
-        # False
-        # print(slice.stride())
-        # (8, 1)
-        # slice is NOT a contiguous tensor but is contiguous in the last dimension, CE kernel can execute because the stride is 8, and each triton program will jump by 8
-        ####################################################################
         if _input.stride(-1) != 1:
             _input = _input.contiguous()
         if target.stride(-1) != 1:
@@ -252,10 +224,9 @@ class LigerCrossEntropyFunction(torch.autograd.Function):
         # If cross entropy is the last layer, grad_output is 1.0. Skip the mul to save time
         if torch.equal(grad_output, torch.tensor(1.0, device=grad_output.device)):
             pass
+
         # We use a Triton kernel instead of a PyTorch operation because modifying inputs in-place
         # for gradient storage and backward multiple times causes anomalies with PyTorch but not with Triton.
-        # Although the Brew trainer should only perform backward once, it encounters this issue.
-        # https://github.com/triton-lang/triton/issues/4004
         else:
             BT, V = _input.shape
             n_rows = BT
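The fast path works because calling .backward() on a scalar loss seeds the chain rule with exactly 1.0; only a rescaled loss delivers a non-trivial grad_output to the cross entropy node. A brief illustration in plain PyTorch (shapes are illustrative, not library code):

```python
import torch

# When the scalar loss is the end of the graph, backward() is seeded with 1.0,
# so scaling the stored gradient by grad_output can be skipped.
logits = torch.randn(4, 10, requires_grad=True)
target = torch.randint(0, 10, (4,))

loss = torch.nn.functional.cross_entropy(logits, target)
loss.backward()   # equivalent to loss.backward(torch.tensor(1.0))

# Rescaling the loss (e.g. for gradient accumulation) changes that: the cross
# entropy node now receives grad_output == 0.5, so the multiply is required.
(0.5 * torch.nn.functional.cross_entropy(logits, target)).backward()
```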
liger_kernel/ops/fused_linear_cross_entropy.py
CHANGED

@@ -1,8 +1,3 @@
-"""Fusing the last linear layer with cross-entropy loss
-
-Reference: https://github.com/mgmalek/efficient_cross_entropy
-"""
-
 import torch
 import triton

@@ -11,13 +6,16 @@ from liger_kernel.ops.cross_entropy import element_mul, liger_cross_entropy_kernel
 # The hard limit of TRITON_MAX_TENSOR_NUMEL is 1048576 https://github.com/triton-lang/triton/blob/ba42a5c68fd0505f8c42f4202d53be0f8d9a5fe0/python/triton/language/core.py#L19
 # However, setting limit as 65536 as in LayerNorm tutorial is faster because of less register spilling
 # The optimal maximum block size depends on your hardware, your kernel, and your dtype
-MAX_FUSED_SIZE = 65536 // 2
+MAX_FUSED_SIZE = 65536 // 2


 class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
     @staticmethod
     def forward(ctx, _input, linear, target, ignore_index):
         """
+        Fusing the last linear layer with cross-entropy loss
+        Reference: https://github.com/mgmalek/efficient_cross_entropy
+
         Handle the forward and backward pass of the final linear layer via cross-entropy loss by avoiding
         the materialization of the large logits tensor. Since Cross Entropy Loss is the last layer, we can
         compute the gradient at the forward pass. By doing so, we don't have to store the _input and target
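A minimal plain-PyTorch sketch of the idea this docstring describes: compute the logits one chunk of rows at a time, take the loss and the input gradient during the "forward", and never hold the full (BT, V) logits tensor. Function name, chunk size, and shapes are illustrative; the real Triton implementation also accumulates the weight gradient and handles ignore_index.

```python
import torch
import torch.nn.functional as F

# Sketch only (assumed simplification, not the Triton implementation).
def chunked_linear_cross_entropy(x, weight, target, chunk_size=1024):
    BT, H = x.shape
    grad_x = torch.zeros_like(x)
    total_loss = 0.0
    for start in range(0, BT, chunk_size):
        xc = x[start:start + chunk_size].detach().requires_grad_(True)
        logits = xc @ weight.T                     # only a (chunk, V) slice exists at a time
        loss = F.cross_entropy(logits, target[start:start + chunk_size], reduction="sum")
        loss.backward()                            # gradient for this chunk, computed in the forward
        grad_x[start:start + chunk_size] = xc.grad
        total_loss += loss.item()
    return total_loss / BT, grad_x

x = torch.randn(4096, 512)
weight = torch.randn(32000, 512)                   # vocab x hidden (illustrative)
target = torch.randint(0, 32000, (4096,))
loss, grad_x = chunked_linear_cross_entropy(x, weight, target)
```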
@@ -54,6 +52,8 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):

         grad_linear = torch.zeros_like(linear, device=device)
         grad_input = torch.zeros_like(_input, device=device)
+
+        # we use fp32 for loss accumulator
         loss_1d = torch.zeros(BT, dtype=torch.float32, device=device)

         total_n_non_ignore = (target != ignore_index).sum().item()
liger_kernel/ops/geglu.py
CHANGED

@@ -1,8 +1,19 @@
+import operator
+
 import torch
 import triton
 import triton.language as tl

-from liger_kernel.ops.utils import
+from liger_kernel.ops.utils import (
+    calculate_settings,
+    compare_version,
+    ensure_contiguous,
+)
+
+if compare_version("triton", operator.ge, "3.0.0"):
+    from triton.language.extra.libdevice import tanh
+else:
+    from triton.language.math import tanh


 @triton.jit
@@ -26,7 +37,7 @@ def _geglu_tanh_forward_kernel(
     sqrt_2_over_pi = 0.7978845608028654  # sqrt(2 / pi)
     a_cubed = a_row * a_row * a_row
     tanh_arg = sqrt_2_over_pi * (a_row + 0.044715 * a_cubed)
-    tanh_result =
+    tanh_result = tanh(tanh_arg)
     geglu_a = 0.5 * a_row * (1 + tanh_result)
     c_row = geglu_a * b_row
     tl.store(c + col_offsets, c_row, mask=mask)
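For reference, the kernel's arithmetic is the standard tanh approximation of GELU applied to the first projection and gated by the second. A plain-PyTorch check (illustrative names and shapes, not library code) against torch's own tanh-approximated GELU:

```python
import torch

# gelu(a) ~= 0.5 * a * (1 + tanh(sqrt(2/pi) * (a + 0.044715 * a**3)));
# GEGLU multiplies it elementwise with the gate projection b.
def geglu_tanh_reference(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    sqrt_2_over_pi = 0.7978845608028654
    gelu_a = 0.5 * a * (1 + torch.tanh(sqrt_2_over_pi * (a + 0.044715 * a**3)))
    return gelu_a * b

a, b = torch.randn(8, 16), torch.randn(8, 16)
expected = torch.nn.functional.gelu(a, approximate="tanh") * b
print(torch.allclose(geglu_tanh_reference(a, b), expected, atol=1e-6))  # True
```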
@@ -54,7 +65,7 @@ def _geglu_tanh_backward_kernel(
     sqrt_2_over_pi = 0.7978845608028654  # sqrt(2 / pi)
     a_cubed = a_row * a_row * a_row
     tanh_arg = sqrt_2_over_pi * (a_row + 0.044715 * a_cubed)
-    tanh_result =
+    tanh_result = tanh(tanh_arg)
     geglu_a = 0.5 * a_row * (1 + tanh_result)

     db_row = dc_row * geglu_a
liger_kernel/ops/rms_norm.py
CHANGED

@@ -107,8 +107,8 @@ class LigerRMSNormFunction(torch.autograd.Function):
         n_rows, n_cols = X.shape
         BLOCK_SIZE, num_warps = calculate_settings(n_cols)

-        Y = torch.empty((n_rows, n_cols), dtype=X.dtype, device=
-        r = torch.empty(n_rows, dtype=X.dtype, device=
+        Y = torch.empty((n_rows, n_cols), dtype=X.dtype, device=X.device)
+        r = torch.empty(n_rows, dtype=X.dtype, device=X.device)

         # Check constraints.
         assert (
liger_kernel/ops/utils.py
CHANGED

@@ -1,7 +1,10 @@
 import functools
+import importlib
+from typing import Callable

 import torch
 import triton
+from packaging.version import Version


 def ensure_contiguous(fn):
@@ -36,3 +39,12 @@ def calculate_settings(n):
     elif BLOCK_SIZE >= 2048:
         num_warps = 8
     return BLOCK_SIZE, num_warps
+
+
+def compare_version(package: str, operator: Callable, target: str):
+    try:
+        pkg = importlib.import_module(package)
+    except ImportError:
+        return False
+    pkg_version = Version(pkg.__version__)
+    return operator(pkg_version, Version(target))
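The new helper is what the geglu.py change above relies on; its usage mirrors that import switch, and it simply returns False for packages that are not installed:

```python
import operator

from liger_kernel.ops.utils import compare_version

# Same pattern as the geglu.py change above: pick the tanh import by Triton version.
if compare_version("triton", operator.ge, "3.0.0"):
    from triton.language.extra.libdevice import tanh
else:
    from triton.language.math import tanh

print(compare_version("not_a_real_package", operator.ge, "1.0.0"))  # False
```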
liger_kernel/transformers/model/llama.py
CHANGED

@@ -37,6 +37,9 @@ def lce_forward(
     cache_position: Optional[torch.LongTensor] = None,
 ) -> Union[Tuple, CausalLMOutputWithPast]:
     r"""
+    Copy paste llama forward but replace torch cross entropy with liger fused linear cross entropy
+
+
     Args:
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
liger_kernel/transformers/monkey_patch.py
CHANGED

@@ -5,23 +5,21 @@ from liger_kernel.transformers.rope import liger_rotary_pos_emb
 from liger_kernel.transformers.swiglu import LigerBlockSparseTop2MLP, LigerSwiGLUMLP


-# TODO: probably rename utils.py as hf_patcher.py to be more descriptive
 def apply_liger_kernel_to_llama(
     rope: bool = True,
-    cross_entropy: bool =
-    fused_linear_cross_entropy: bool =
+    cross_entropy: bool = False,
+    fused_linear_cross_entropy: bool = True,
     rms_norm: bool = True,
     swiglu: bool = True,
 ) -> None:
     """
     Apply Liger kernels to replace original implementation in HuggingFace Llama models (2 and 3)
-    to make GPU go burrr.

     Args:
         rope (bool): Whether to apply Liger's rotary position embedding. Default is True.
-        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is
+        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False.
         fused_linear_cross_entropy (bool):
-            Whether to apply Liger's fused lienar cross entropy loss. Default is
+            Whether to apply Liger's fused lienar cross entropy loss. Default is True.
             `cross_entropy` and `fused_linear_cross_entropy` cannot both be True.
             If `fused_linear_cross_entropy` is True, the logits will not be materialized but more memory efficient.
         rms_norm (bool): Whether to apply Liger's RMSNorm. Default is True.
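With the new defaults, a no-argument call enables RoPE, RMSNorm, SwiGLU, and the fused linear cross entropy while leaving plain cross entropy off. A minimal usage sketch (the checkpoint name is illustrative, and the patch is assumed to be applied before the model is instantiated):

```python
import torch
from transformers import AutoModelForCausalLM

from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_llama

# Patch the HuggingFace Llama modeling code in place, then load the model as usual.
apply_liger_kernel_to_llama()  # cross_entropy=False, fused_linear_cross_entropy=True by default

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B",   # illustrative checkpoint
    torch_dtype=torch.bfloat16,
)
```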
@@ -54,7 +52,6 @@ def apply_liger_kernel_to_mistral(
 ) -> None:
     """
     Apply Liger kernels to replace original implementation in HuggingFace Mistral models
-    to make GPU go burrr.

     Args:
         rope (bool): Whether to apply Liger's rotary position embedding. Default is True.
@@ -83,12 +80,12 @@ def apply_liger_kernel_to_mixtral(
 ) -> None:
     """
     Apply Liger kernels to replace original implementation in HuggingFace Mixtral models
-    to make GPU go burrr.

     Args:
         rope (bool): Whether to apply Liger's rotary position embedding. Default is True.
         cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is True.
         rms_norm (bool): Whether to apply Liger's RMSNorm. Default is True.
+        swiglu (bool): Whether to apply Liger's SwiGLU MLP. Default is True.
     """

     from transformers.models.mixtral import modeling_mixtral
liger_kernel-0.0.1.dist-info/LICENSE
ADDED

@@ -0,0 +1,23 @@
+BSD 2-CLAUSE LICENSE
+Copyright 2024 LinkedIn Corporation
+All Rights Reserved.
+Redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following
+conditions are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following
+disclaimer in the documentation and/or other materials provided
+with the distribution.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{liger_kernel-0.0.0.dist-info → liger_kernel-0.0.1.dist-info}/RECORD
CHANGED

@@ -1,24 +1,26 @@
 liger_kernel/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-liger_kernel/ops/cross_entropy.py,sha256=
-liger_kernel/ops/fused_linear_cross_entropy.py,sha256
-liger_kernel/ops/geglu.py,sha256=
-liger_kernel/ops/rms_norm.py,sha256=
+liger_kernel/ops/cross_entropy.py,sha256=YTHKVyPW748EWtbWJeKdIe9S1dEq6i90_PbBuCD-9s0,9178
+liger_kernel/ops/fused_linear_cross_entropy.py,sha256=58MmDhLJGR5b8ixztkhR707yp0VY28oBRASFVwGbeV8,7346
+liger_kernel/ops/geglu.py,sha256=5tGinryOOYRpGtKwJ4B1ertwtzd81xdjevD3Ha7H1AY,3849
+liger_kernel/ops/rms_norm.py,sha256=Tyz5Ea7U8dNtNUpuRmT6qsV7PmDe0FuUFjaEPTsFu1E,4303
 liger_kernel/ops/rope.py,sha256=fYBct8gDQfKPZdMWlzkZZ8kBzh6nQ7DIpDsc7lZwM8c,8584
 liger_kernel/ops/swiglu.py,sha256=__QsfYxKyZHtRScm31zL3sAOVEblQFqKj2ll8I4Odqg,2835
-liger_kernel/ops/utils.py,sha256=
+liger_kernel/ops/utils.py,sha256=vsFIywd8LQlVPRA3RPZOm5HyN8c0cS4NFEEnwjNw-MI,1427
 liger_kernel/transformers/__init__.py,sha256=7rOw9yZ8kNXO483Colx-EUq8GcTCvCZxrxF-S7pmkkU,172
 liger_kernel/transformers/cross_entropy.py,sha256=G-L4EaUYVc25NKZ2jrlaG-d5YUvDqJdUlawPN7K1d1g,389
 liger_kernel/transformers/fused_linear_cross_entropy.py,sha256=h0AW9ubFGfz4DBwgh2CLW8rpKo9PvxYpB6AUzjx-1b0,501
 liger_kernel/transformers/geglu.py,sha256=FrLBHZRdI68jw9RR6MSTE59-xCzueOwSRp9jL8y-j98,896
-liger_kernel/transformers/monkey_patch.py,sha256=
+liger_kernel/transformers/monkey_patch.py,sha256=9CilRC9pBBbQ8R1_4HLsZq2xfmxVC4xGx345vfejX6I,3914
 liger_kernel/transformers/rms_norm.py,sha256=2LHfEctSpzuNRaoZ9uUECSFK8fZeIxIsHm9QbEHZvDQ,452
 liger_kernel/transformers/rope.py,sha256=m-ah8vZBYW8tfplTXCiAPMHJWlB1tdp_JPXJeWE-Boo,943
 liger_kernel/transformers/swiglu.py,sha256=8kt4MffEZT5vx3k0WA-GO-WPLv5kGdnu_nAwlJyMI2U,1516
 liger_kernel/transformers/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-liger_kernel/transformers/model/llama.py,sha256=
+liger_kernel/transformers/model/llama.py,sha256=4mfVTMrY7T-xiJeQJe02hBVnAwNCKlvLGp49gj6TWiU,5298
 liger_kernel/triton/__init__.py,sha256=yfRe0zMb47QnqjecZWG7LnanfCTzeku7SgWRAwNVmzU,101
 liger_kernel/triton/monkey_patch.py,sha256=yRNaGdyG5PrwX5ed_MQdqtqvvpVvQ7ZD2FQ_9W1q9u8,1629
-liger_kernel-0.0.
-liger_kernel-0.0.
-liger_kernel-0.0.
-liger_kernel-0.0.
+liger_kernel-0.0.1.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel-0.0.1.dist-info/METADATA,sha256=2PhmP9NVtu0CsGG2_jnxCukPTMZx6vnzLpTQlJDrqq4,504
+liger_kernel-0.0.1.dist-info/NOTICE,sha256=BXkXY9aWvEy_7MAB57zDu1z8uMYT1i1l9B6EpHuBa8s,173
+liger_kernel-0.0.1.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+liger_kernel-0.0.1.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel-0.0.1.dist-info/RECORD,,
{liger_kernel-0.0.0.dist-info → liger_kernel-0.0.1.dist-info}/WHEEL
File without changes

{liger_kernel-0.0.0.dist-info → liger_kernel-0.0.1.dist-info}/top_level.txt
File without changes