liger-kernel-nightly 0.4.2.dev20241119223206-py3-none-any.whl → 0.4.2.dev20241121224158-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
liger_kernel/chunked_loss/__init__.py CHANGED
@@ -0,0 +1,4 @@
+ from liger_kernel.chunked_loss.cpo_loss import LigerFusedLinearCPOLoss # noqa: F401
+ from liger_kernel.chunked_loss.dpo_loss import LigerFusedLinearDPOLoss # noqa: F401
+ from liger_kernel.chunked_loss.orpo_loss import LigerFusedLinearORPOLoss # noqa: F401
+ from liger_kernel.chunked_loss.simpo_loss import LigerFusedLinearSimPOLoss # noqa: F401
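These re-exports make the four chunked-loss modules importable directly from `liger_kernel.chunked_loss`. A minimal sketch of the new surface (the equivalence check is illustrative, not from the diff):

```python
# Both import paths now resolve to the same class.
from liger_kernel.chunked_loss import LigerFusedLinearORPOLoss
from liger_kernel.chunked_loss.orpo_loss import LigerFusedLinearORPOLoss as FromSubmodule

assert LigerFusedLinearORPOLoss is FromSubmodule
```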
liger_kernel/chunked_loss/cpo_loss.py CHANGED
@@ -1,3 +1,4 @@
+ import torch
  import torch.nn.functional as F

  from liger_kernel.chunked_loss.fused_linear_preference import (
@@ -46,10 +47,10 @@ class LigerFusedLinearCPOFunction(LigerFusedLinearPreferenceBase):
              target,
              bias,
              loss_fn=LigerFusedLinearCPOFunction.preference_loss_fn,
-             compute_nll_loss=compute_nll_loss,
              ignore_index=ignore_index,
              alpha=alpha,
              beta=beta,
+             compute_nll_loss=compute_nll_loss,
              compiled=compiled,
          )

@@ -59,3 +60,42 @@ class LigerFusedLinearCPOFunction(LigerFusedLinearPreferenceBase):
          grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
          # Return these gradients, followed by None for the remaining inputs
          return *grads, None, None, None, None, None
+
+
+ class LigerFusedLinearCPOLoss(torch.nn.Module):
+     """
+     Fused linear layer with CPO loss.
+     """
+
+     def __init__(
+         self,
+         ignore_index: int = -100,
+         beta: float = 0.1,
+         alpha: float = 1.0,
+         compute_nll_loss: bool = True,
+         compiled: bool = True,
+     ):
+         """
+         Args:
+             ignore_index (int): Index to ignore in the loss.
+             beta (float): Weight for the odds ratio loss.
+         """
+         super().__init__()
+         self.ignore_index = ignore_index
+         self.beta = beta
+         self.alpha = alpha
+         self.compute_nll_loss = compute_nll_loss
+         self.compiled = compiled
+
+     def forward(self, lin_weight, _input, target, bias=None):
+         return LigerFusedLinearCPOFunction.apply(
+             _input,
+             lin_weight,
+             target,
+             bias,
+             self.ignore_index,
+             self.beta,
+             self.alpha,
+             self.compute_nll_loss,
+             self.compiled,
+         )
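The new `LigerFusedLinearCPOLoss` module fuses the final linear projection with the CPO loss so the full logits tensor is never materialized. A hedged usage sketch; the sizes, the chosen-before-rejected batch layout, and the CUDA placement are my assumptions (the underlying kernels are Triton), not guarantees from this diff:

```python
import torch

from liger_kernel.chunked_loss import LigerFusedLinearCPOLoss

B, T, H, V = 2, 128, 512, 1000  # illustrative sizes only
device = "cuda"  # assumption: the fused Triton kernels run on GPU

# Note the argument order in forward: the head weight comes first, then hidden states.
loss_fn = LigerFusedLinearCPOLoss(beta=0.1, alpha=1.0)

lin_weight = torch.randn(V, H, device=device, requires_grad=True)
hidden = torch.randn(2 * B, T, H, device=device)  # assumed: chosen rows stacked before rejected rows
target = torch.randint(0, V, (2 * B, T), device=device)

loss = loss_fn(lin_weight, hidden, target)
loss.backward()
```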
liger_kernel/chunked_loss/dpo_loss.py CHANGED
@@ -1,3 +1,4 @@
+ import torch
  import torch.nn.functional as F

  from liger_kernel.chunked_loss.fused_linear_preference import (
@@ -43,9 +44,9 @@ class LigerFusedLinearDPOFunction(LigerFusedLinearPreferenceBase):
              target=target,
              bias=bias,
              loss_fn=LigerFusedLinearDPOFunction.preference_loss_fn,
-             compute_nll_loss=compute_nll_loss,
              ignore_index=ignore_index,
              beta=beta,
+             compute_nll_loss=compute_nll_loss,
              compiled=compiled,
          )

@@ -55,3 +56,39 @@ class LigerFusedLinearDPOFunction(LigerFusedLinearPreferenceBase):
          grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
          # Return these gradients, followed by None for the remaining inputs
          return *grads, None, None, None, None
+
+
+ class LigerFusedLinearDPOLoss(torch.nn.Module):
+     """
+     Fused linear layer with DPO loss.
+     """
+
+     def __init__(
+         self,
+         ignore_index: int = -100,
+         beta: float = 0.1,
+         compute_nll_loss: bool = True,
+         compiled: bool = True,
+     ):
+         """
+         Args:
+             ignore_index (int): Index to ignore in the loss.
+             beta (float): Weight for the odds ratio loss.
+         """
+         super().__init__()
+         self.ignore_index = ignore_index
+         self.beta = beta
+         self.compute_nll_loss = compute_nll_loss
+         self.compiled = compiled
+
+     def forward(self, lin_weight, _input, target, bias=None):
+         return LigerFusedLinearDPOFunction.apply(
+             _input,
+             lin_weight,
+             target,
+             bias,
+             self.ignore_index,
+             self.beta,
+             self.compute_nll_loss,
+             self.compiled,
+         )
liger_kernel/chunked_loss/functional.py ADDED
@@ -0,0 +1,9 @@
+ from liger_kernel.chunked_loss.cpo_loss import LigerFusedLinearCPOFunction
+ from liger_kernel.chunked_loss.dpo_loss import LigerFusedLinearDPOFunction
+ from liger_kernel.chunked_loss.orpo_loss import LigerFusedLinearORPOFunction
+ from liger_kernel.chunked_loss.simpo_loss import LigerFusedLinearSimPOFunction
+
+ liger_fused_linear_orpo = LigerFusedLinearORPOFunction.apply
+ liger_fused_linear_dpo = LigerFusedLinearDPOFunction.apply
+ liger_fused_linear_cpo = LigerFusedLinearCPOFunction.apply
+ liger_fused_linear_simpo = LigerFusedLinearSimPOFunction.apply
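The new `functional.py` exposes the raw `autograd.Function` entry points. Since `torch.autograd.Function.apply` takes positional arguments only, callers must match the exact order of each `forward`; the order below is read off the `LigerFusedLinearDPOFunction.apply` call in the module class above, with illustrative tensors:

```python
import torch

from liger_kernel.chunked_loss.functional import liger_fused_linear_dpo

H, V = 512, 1000  # illustrative sizes only
_input = torch.randn(4, 16, H, device="cuda")  # assumed chosen/rejected batch layout
lin_weight = torch.randn(V, H, device="cuda", requires_grad=True)
target = torch.randint(0, V, (4, 16), device="cuda")

# Positional order: _input, weight, target, bias, ignore_index, beta,
# compute_nll_loss, compiled (matching LigerFusedLinearDPOFunction.forward).
loss = liger_fused_linear_dpo(_input, lin_weight, target, None, -100, 0.1, True, True)
```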
liger_kernel/chunked_loss/fused_linear_preference.py CHANGED
@@ -27,10 +27,10 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
          bias=None,
          loss_fn=None,
          chunk_size=1,
-         compute_nll_loss=True,
          ignore_index=-100,
          alpha=1.0,
          beta=0.1,
+         compute_nll_loss=True,
          compiled=True,
          **loss_kwargs,
      ):
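This reorder of the base signature is safe for the per-loss wrappers, which (as the CPO, DPO, and ORPO hunks show) pass `compute_nll_loss` by keyword; it would only affect a caller passing these trailing options positionally.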
liger_kernel/chunked_loss/orpo_loss.py CHANGED
@@ -34,7 +34,7 @@ class LigerFusedLinearORPOFunction(LigerFusedLinearPreferenceBase):
          ignore_index=-100,
          beta=0.1,
          compute_nll_loss=True,
-         compiled=False,
+         compiled=True,
      ):
          """
          Fused linear layer with ORPO (Odds-Ratio Preference Optimization) loss.
@@ -49,9 +49,9 @@ class LigerFusedLinearORPOFunction(LigerFusedLinearPreferenceBase):
              target=target,
              bias=bias,
              loss_fn=LigerFusedLinearORPOFunction.preference_loss_fn,
-             compute_nll_loss=compute_nll_loss,
              ignore_index=ignore_index,
              beta=beta,
+             compute_nll_loss=compute_nll_loss,
              compiled=compiled,
          )

@@ -61,3 +61,39 @@ class LigerFusedLinearORPOFunction(LigerFusedLinearPreferenceBase):
          grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
          # Return these gradients, followed by None for the remaining inputs
          return *grads, None, None, None, None
+
+
+ class LigerFusedLinearORPOLoss(torch.nn.Module):
+     """
+     Fused linear layer with ORPO (Odds-Ratio Preference Optimization) loss.
+     """
+
+     def __init__(
+         self,
+         ignore_index: int = -100,
+         beta: float = 0.1,
+         compute_nll_loss: bool = True,
+         compiled: bool = True,
+     ):
+         """
+         Args:
+             ignore_index (int): Index to ignore in the loss.
+             beta (float): Weight for the odds ratio loss.
+         """
+         super().__init__()
+         self.ignore_index = ignore_index
+         self.beta = beta
+         self.compute_nll_loss = compute_nll_loss
+         self.compiled = compiled
+
+     def forward(self, lin_weight, _input, target, bias=None):
+         return LigerFusedLinearORPOFunction.apply(
+             _input,
+             lin_weight,
+             target,
+             bias,
+             self.ignore_index,
+             self.beta,
+             self.compute_nll_loss,
+             self.compiled,
+         )
liger_kernel/chunked_loss/simpo_loss.py CHANGED
@@ -1,3 +1,4 @@
+ import torch
  import torch.nn.functional as F

  from liger_kernel.chunked_loss.fused_linear_preference import (
@@ -62,3 +63,45 @@ class LigerFusedLinearSimPOFunction(LigerFusedLinearPreferenceBase):
          grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
          # Return these gradients, followed by None for the remaining inputs
          return *grads, None, None, None, None, None, None
+
+
+ class LigerFusedLinearSimPOLoss(torch.nn.Module):
+     """
+     Fused linear layer with SimPO loss.
+     """
+
+     def __init__(
+         self,
+         ignore_index: int = -100,
+         beta: float = 0.1,
+         alpha: float = 1.0,
+         compute_nll_loss: bool = True,
+         compiled: bool = True,
+         gamma: float = 0.5,
+     ):
+         """
+         Args:
+             ignore_index (int): Index to ignore in the loss.
+             beta (float): Weight for the odds ratio loss.
+         """
+         super().__init__()
+         self.ignore_index = ignore_index
+         self.beta = beta
+         self.alpha = alpha
+         self.compute_nll_loss = compute_nll_loss
+         self.compiled = compiled
+         self.gamma = gamma
+
+     def forward(self, lin_weight, _input, target, bias=None):
+         return LigerFusedLinearSimPOFunction.apply(
+             _input,
+             lin_weight,
+             target,
+             bias,
+             self.ignore_index,
+             self.beta,
+             self.alpha,
+             self.compute_nll_loss,
+             self.compiled,
+             self.gamma,
+         )
liger_kernel/ops/fused_linear_jsd.py CHANGED
@@ -202,7 +202,7 @@ class LigerFusedLinearJSDFunction(torch.autograd.Function):
          teacher_input (torch.tensor): input of the last projection layer in teacher model, with shape (B*T, H), where B is batch size, T is sequence length, H is hidden dimension.
          teacher_weight (torch.tensor): the last projection layer in teacher model, with shape (V, H), where V is vocab size
          shift_labels (Optional[torch.LongTensor]): indicator of next predicted vocab with shape (BT) where each value is in [0, V-1].
-         jsd_beta (float): coefficient beta of generalized JSD in the open interval (0, 1). Default: `0.5`
+         jsd_beta (float): coefficient beta of generalized JSD in the interval [0, 1]. It implements forward/reverse KL when beta equals 0 and 1 respectively. Default: `0.5`
          ignore_index (int): the index to ignore. Default: -100
          temperature (float): temperature in softmax function to control the output probability distribution. Default: `1.0`

liger_kernel/ops/jsd.py CHANGED
@@ -18,7 +18,7 @@ def _jsd_kernel(
      dX_ptr,
      dX_stride,
      label_ptr,
-     beta,
+     beta: tl.constexpr,
      n_non_ignore: int,
      ignore_index: tl.constexpr,
      n_cols,
@@ -50,17 +50,26 @@ def _jsd_kernel(
      X = tl.load(X_ptr + offsets, mask=mask, other=float("-inf")).to(tl.float32)
      Y = tl.load(Y_ptr + offsets, mask=mask, other=float("-inf")).to(tl.float32)

-     Q = tl.exp(X)
-     P = tl.exp(Y)
-     M = beta * P + (1 - beta) * Q
-     log_M = tl.log(M)
+     if beta == 0.0: # forward KL
+         Y_prob = tl.exp(Y)
+         loss = Y_prob * (Y - X)
+         dX = -Y_prob
+     elif beta == 1.0:
+         X_prob = tl.exp(X)
+         loss = X_prob * (X - Y)
+         dX = loss + X_prob
+     else:
+         Q = tl.exp(X)
+         P = tl.exp(Y)
+         M = beta * P + (1 - beta) * Q
+         log_M = tl.log(M)
+
+         loss = beta * P * Y + (1 - beta) * Q * X - M * log_M
+         dX = (1 - beta) * Q * (X - log_M)

-     loss = beta * P * Y + (1 - beta) * Q * X - M * log_M
-     # reduction == "batchmean"
      loss = loss / n_non_ignore
+     dX = dX / n_non_ignore
      tl.store(loss_ptr + offsets, loss, mask=mask)
-
-     dX = (1 - beta) * Q * (X - log_M) / n_non_ignore
      tl.store(dX_ptr + offsets, dX, mask=mask)


@@ -142,7 +151,7 @@ class LigerJSDFunction(torch.autograd.Function):
          _input (torch.Tensor): predict values with shape (BT, V) in logspace
          target (torch.Tensor): ground truth values with shape (BT, V) in logspace
          shift_labels (Optional[torch.LongTensor]): indicator of next predicted vocab with shape (BT) where each value is in [0, V-1].
-         beta (float): coefficient beta of generalized JSD in the open interval (0, 1)
+         beta (float): coefficient beta of generalized JSD in the interval [0, 1]. It implements forward/reverse KL when beta equals 0 and 1 respectively. Default: `0.5`
          ignore_index (int): the index to ignore. Default: -100

      Returns:
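The kernel now special-cases the endpoints: beta = 0 reduces to forward KL, beta = 1 to reverse KL, and anything in between keeps the generalized-JSD mixture. A plain-PyTorch sketch of the same per-element math (my own reference for checking the branches, not code from the package), where `X` and `Y` are student and teacher log-probabilities:

```python
import torch

def jsd_elementwise(X: torch.Tensor, Y: torch.Tensor, beta: float):
    """Per-element loss and d(loss)/dX, mirroring the three kernel branches."""
    if beta == 0.0:  # forward KL: KL(P || Q) with P = exp(Y), Q = exp(X)
        loss = Y.exp() * (Y - X)
        dX = -Y.exp()
    elif beta == 1.0:  # reverse KL: KL(Q || P)
        loss = X.exp() * (X - Y)
        dX = loss + X.exp()
    else:  # generalized JSD against the mixture M = beta*P + (1-beta)*Q
        Q, P = X.exp(), Y.exp()
        M = beta * P + (1 - beta) * Q
        log_M = M.log()
        loss = beta * P * Y + (1 - beta) * Q * X - M * log_M
        dX = (1 - beta) * Q * (X - log_M)
    return loss, dX  # the kernel then divides both by n_non_ignore ("batchmean")
```

The mixture branch is beta * KL(P‖M) + (1 - beta) * KL(Q‖M) expanded in log space; at beta = 0 that expression collapses to zero (M = Q), which is presumably why the endpoints are special-cased to plain KL instead.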
liger_kernel/transformers/fused_linear_jsd.py CHANGED
@@ -12,7 +12,7 @@ class LigerFusedLinearJSD(torch.nn.Module):
      the materialization of the large logits tensor.

      Args:
-         jsd_beta (float): coefficient beta of generalized JSD in the open interval (0, 1). Default: `0.5`
+         jsd_beta (float): coefficient beta of generalized JSD in the interval [0, 1]. It implements forward/reverse KL when beta equals 0 and 1 respectively. Default: `0.5`
          ignore_index (int): The index to ignore in the target. Default: `-100`
          temperature (float): temperature in softmax function to control the output probability distribution. Default: `1.0`

@@ -70,9 +70,6 @@ class LigerFusedLinearJSD(torch.nn.Module):

      def __init__(self, jsd_beta=0.5, ignore_index=-100, temperature=1.0):
          super().__init__()
-         assert (
-             jsd_beta > 0 and jsd_beta < 1
-         ), f"beta must be greater than 0 and less than 1. Got: {jsd_beta}"
          assert temperature != 0, "temperature cannot be 0."
          self.jsd_beta = jsd_beta
          self.temperature = temperature
liger_kernel/transformers/jsd.py CHANGED
@@ -18,7 +18,7 @@ class LigerJSD(torch.nn.Module):
      :math:`P` denotes the teacher model and :math:`Q` denotes the student model.

      Args:
-         beta (float): coefficient beta of generalized JSD in the open interval (0, 1). Default: `0.5`
+         beta (float): coefficient beta of generalized JSD in the interval [0, 1]. It implements forward/reverse KL when beta equals 0 and 1 respectively. Default: `0.5`
          ignore_index (int): The index to ignore in the target. Default: `-100`

      Shape:
@@ -58,9 +58,6 @@ class LigerJSD(torch.nn.Module):

      def __init__(self, beta: float = 0.5, ignore_index: int = -100):
          super().__init__()
-         assert (
-             beta > 0 and beta < 1
-         ), f"beta must be greater than 0 and less than 1. Got: {beta}"
          self.beta = beta
          self.ignore_index = ignore_index

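With the asserts gone, `LigerJSD` (and likewise `LigerFusedLinearJSD`) accepts the endpoint values. A hedged sketch; tensor sizes and the CUDA placement are my assumptions:

```python
import torch

from liger_kernel.transformers.jsd import LigerJSD

jsd = LigerJSD(beta=0.0)  # now allowed: beta=0.0 selects forward KL, beta=1.0 reverse KL

# Per the docstring above: predictions first, ground truth second, both in logspace.
student_logp = torch.randn(8, 1000, device="cuda").log_softmax(dim=-1)
teacher_logp = torch.randn(8, 1000, device="cuda").log_softmax(dim=-1)
loss = jsd(student_logp, teacher_logp)
```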
liger_kernel_nightly-0.4.2.dev20241121224158.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: liger_kernel_nightly
- Version: 0.4.2.dev20241119223206
+ Version: 0.4.2.dev20241121224158
  Summary: Efficient Triton kernels for LLM Training
  License: BSD 2-CLAUSE LICENSE
  Copyright 2024 LinkedIn Corporation
@@ -303,8 +303,8 @@ $$\text{GeGLU}(x)=\text{GELU}(xW+b)\otimes(xV+c)$$
  <!-- TODO: verify vocab sizes are accurate -->
  - **FusedLinearCrossEntropy**: Peak memory usage of cross entropy loss is further improved by fusing the model head with the CE loss and chunking the input for block-wise loss and gradient calculation, a technique inspired by [Efficient Cross Entropy](https://github.com/mgmalek/efficient_cross_entropy). It achieves >4X memory reduction for 128k vocab size. **This is highly effective for large batch size, large sequence length, and large vocabulary sizes.** Please refer to the [Medusa example](https://github.com/linkedin/Liger-Kernel/tree/main/examples/medusa) for individual kernel usage.
  - **KLDivergence**: [KL Divergence](https://pytorch.org/docs/stable/generated/torch.nn.KLDivLoss.html) is implemented by fusing the forward into a single triton kernel, with reduction done outside the kernel. It achieves ~1.5X speed and ~15% memory reduction for 128K vocab size.
- - **JSD**: [Generalized JSD](https://arxiv.org/pdf/2306.13649) (Jensen-Shannon divergence), is implemented by computing both the loss and gradient in the forward pass. It achieves ~1.5X speed and ~54% memory reduction for 128k vocab size.
- - **FusedLinearJSD**: Peak memory usage of JSD loss is further improved by fusing the model head with the JSD and chunking the input for block-wise loss and gradient calculation. It achieves ~85% memory reduction for 128k vocab size where batch size $\times$ sequence length is 8192.
+ - **JSD**: [Generalized JSD](https://arxiv.org/pdf/2306.13649) (Jensen-Shannon divergence), is implemented by computing both the loss and gradient in the forward pass. It achieves ~1.5X speed and ~54% memory reduction for 128k vocab size. **NOTE**: It implements forward/reverse KL when `beta` equals 0 and 1 respectively.
+ - **FusedLinearJSD**: Peak memory usage of JSD loss is further improved by fusing the model head with the JSD and chunking the input for block-wise loss and gradient calculation. It achieves ~85% memory reduction for 128k vocab size where batch size $\times$ sequence length is 8192. **NOTE**: It implements forward/reverse KL when `beta` equals 0 and 1 respectively.


  ### Experimental Kernels
liger_kernel_nightly-0.4.2.dev20241121224158.dist-info/RECORD CHANGED
@@ -1,17 +1,18 @@
  liger_kernel/env_report.py,sha256=jye8RvUkmhqaIshdeIpoUABoAu7FPKJUib4FnAfvkpw,1132
- liger_kernel/chunked_loss/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- liger_kernel/chunked_loss/cpo_loss.py,sha256=ty3nAlpxGqH6HMvTDzNOVulwvs-j6k26FIgEK0nl9Rc,2059
- liger_kernel/chunked_loss/dpo_loss.py,sha256=_sftycUsxypLiQaCIoqMEwtc425Kxiq97YI6DvFvscc,1943
- liger_kernel/chunked_loss/fused_linear_preference.py,sha256=gtsWG3rpTlWpiiom_oMPeS-w-lofBVrguN0KglAXTGk,8727
- liger_kernel/chunked_loss/orpo_loss.py,sha256=QtHPDQwZdU7QFgu9tPg81vQfF_Dm3zQcsmhp9SdKKvA,2180
- liger_kernel/chunked_loss/simpo_loss.py,sha256=lmPopkHcqfglEnXv28FcQQjIkpNg8CEhn0Wt19xcoE4,2223
+ liger_kernel/chunked_loss/__init__.py,sha256=R2wCcz4Y0kTAve926DH3k182XKezpXeACMHj05g9Mm8,346
+ liger_kernel/chunked_loss/cpo_loss.py,sha256=H2L6mNtU8RMJ17u4aMZ9FHEfBvg1Z_hliY5-jZxiDBM,3079
+ liger_kernel/chunked_loss/dpo_loss.py,sha256=Y6NIU93D_QrhcxXU8Z8zNliqCFAIeQARnNBhUdbX8_w,2884
+ liger_kernel/chunked_loss/functional.py,sha256=9Gr-YXIuEzEJkBUhDx3G2fuQayckLor7cC7svhmPML4,549
+ liger_kernel/chunked_loss/fused_linear_preference.py,sha256=AXr7oHCoca2feYSCmlSAJq8mndm09ky0-kcQOAdcc6k,8727
+ liger_kernel/chunked_loss/orpo_loss.py,sha256=DZ-_hm1twllBWujEV4M4-VDBkxMDBvoGqMGe-aGP1hA,3147
+ liger_kernel/chunked_loss/simpo_loss.py,sha256=Jpl_U6DfxlzyHnlKN2i05K0vwz-ouiTmxlLGb439FwY,3328
  liger_kernel/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  liger_kernel/ops/cross_entropy.py,sha256=sfUb7-jIZp0EKXjg1DYy2Wdzw_Mg-mHmGoR5bpdm4tw,15526
  liger_kernel/ops/fused_linear_cross_entropy.py,sha256=ib7M3AjJE164yMfuS9R39k-5qnDgYOXptIT146lqYbg,9964
- liger_kernel/ops/fused_linear_jsd.py,sha256=5D_obamh08lGGTMyh85kBJD_aNjPhOYf4-TmCZ6m4s4,9626
+ liger_kernel/ops/fused_linear_jsd.py,sha256=nOv4zwfxHqqepKEmMsQuz-B3H-gRjyo8uClpmqSGLYA,9693
  liger_kernel/ops/geglu.py,sha256=MQL4zyzneZqZYUGPvb1QjI_EYT9_pKfSDgR25WD9jrI,4127
  liger_kernel/ops/group_norm.py,sha256=VaRErVJGR4JqgXXvuIjNGTn3E2egjLtU1y3ymwIf4d8,10961
- liger_kernel/ops/jsd.py,sha256=anWfdioucxZy4JQfTvbHBR-IQrZKeH-gBF1MHwwTuTQ,5781
+ liger_kernel/ops/jsd.py,sha256=Ap2b0_geCl6fqBXLI1IS6Yn6GlO-8LgPmnOW3y47dus,6151
  liger_kernel/ops/kl_div.py,sha256=03FNXfvCb6M-56hhFepAFV9p6brArPR6KOKkdGD34mw,8374
  liger_kernel/ops/layer_norm.py,sha256=unGMYMOPqtkM9aTrokhcqgPmsV2AUN7Yzv86isVB9OI,7422
  liger_kernel/ops/qwen2vl_mrope.py,sha256=xZvQnhkSTjU-k6KiiRn9e0SYO1ESs1jmuZFMICduLpc,8552
@@ -26,10 +27,10 @@ liger_kernel/transformers/auto_model.py,sha256=RMIwQHSiXoksXFTIqFZ4PLBgoqkxJJAT3
  liger_kernel/transformers/cross_entropy.py,sha256=yEm_YQ7oa3_BzT3hdW6KrAslduhSqWcJQVNZZDcWCg4,1758
  liger_kernel/transformers/functional.py,sha256=jwTHmyjOVC1_I-6ztY1EbbRqPIfFHojcHrP2c4P6U4I,2123
  liger_kernel/transformers/fused_linear_cross_entropy.py,sha256=_i0PXSp5iZ9pKXdEeZ4lvHCENJYjV4y74yz3ZRG5XQg,1484
- liger_kernel/transformers/fused_linear_jsd.py,sha256=MJ-KjmLZnakuoVpnbDGkd95DQgvESniyrRWYzollVZM,4066
+ liger_kernel/transformers/fused_linear_jsd.py,sha256=bZ4otCvWBuOnA5XdQL-FzZVItJlDt-ht9e_pG7PG93E,3999
  liger_kernel/transformers/geglu.py,sha256=QcrME_8ooIn0xa59LaC0aoOdRrBIFd11Y0bAyF0NfCw,1130
  liger_kernel/transformers/group_norm.py,sha256=FJ9R7mS9G1wO-GRIQ6QKSmIhnZ6nQ6GIkE4NnX_hnn0,2241
- liger_kernel/transformers/jsd.py,sha256=W-5CypO2mx4-bUWOxq1KScfCdoXlLoYbtt5xBnRzMs4,3056
+ liger_kernel/transformers/jsd.py,sha256=sbr8DnKSYZJH9pv2rpmboNijYGpZKbhb2-WSGp5_v6g,3001
  liger_kernel/transformers/kl_div.py,sha256=qVhjBg6tjRyue5iZ3NFxo8uySY4JuIFJyv0IM_50F24,431
  liger_kernel/transformers/layer_norm.py,sha256=fd6o4kSHJWolQMWxh-l1qObfgL08ruNbUoBiANKX1ow,972
  liger_kernel/transformers/monkey_patch.py,sha256=Fk2v4GZQDJzfh3Cpc6BHNJbs_tungDyWmqS9nuG9Lc4,38406
@@ -51,9 +52,9 @@ liger_kernel/transformers/model/qwen2.py,sha256=EyhSSzQOskGjSnCsKMZpd1s5IAIlHd5P
  liger_kernel/transformers/model/qwen2_vl.py,sha256=bIQe2bWiY--G84FhCD29Gdi64_qHP6vbcGsK6vKysQE,8547
  liger_kernel/triton/__init__.py,sha256=yfRe0zMb47QnqjecZWG7LnanfCTzeku7SgWRAwNVmzU,101
  liger_kernel/triton/monkey_patch.py,sha256=5BcGKTtdqeYchypBIBopGIWPx1-cFALz7sOKoEsqXJ0,1584
- liger_kernel_nightly-0.4.2.dev20241119223206.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
- liger_kernel_nightly-0.4.2.dev20241119223206.dist-info/METADATA,sha256=3S156bmOl95ATcsu-0-5JsPlbtUHzzI3tycudgujFc8,21723
- liger_kernel_nightly-0.4.2.dev20241119223206.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
- liger_kernel_nightly-0.4.2.dev20241119223206.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
- liger_kernel_nightly-0.4.2.dev20241119223206.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
- liger_kernel_nightly-0.4.2.dev20241119223206.dist-info/RECORD,,
+ liger_kernel_nightly-0.4.2.dev20241121224158.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+ liger_kernel_nightly-0.4.2.dev20241121224158.dist-info/METADATA,sha256=3HyUur6qJmSMTQaxiLaiDaGUrvU3_ILHlvWdobywuso,21891
+ liger_kernel_nightly-0.4.2.dev20241121224158.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+ liger_kernel_nightly-0.4.2.dev20241121224158.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ liger_kernel_nightly-0.4.2.dev20241121224158.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+ liger_kernel_nightly-0.4.2.dev20241121224158.dist-info/RECORD,,