liger-kernel 0.6.3__py3-none-any.whl → 0.6.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. liger_kernel/chunked_loss/cosine_similarity_loss.py +20 -5
  2. liger_kernel/chunked_loss/fused_linear_distillation.py +23 -5
  3. liger_kernel/chunked_loss/fused_linear_ppo.py +21 -5
  4. liger_kernel/chunked_loss/grpo_loss.py +8 -5
  5. liger_kernel/chunked_loss/jsd_loss.py +39 -11
  6. liger_kernel/ops/__init__.py +141 -0
  7. liger_kernel/ops/backends/README.md +151 -0
  8. liger_kernel/ops/backends/__init__.py +13 -0
  9. liger_kernel/ops/backends/_ascend/__init__.py +5 -0
  10. liger_kernel/ops/backends/_ascend/ascend-ub-manager-design.md +492 -0
  11. liger_kernel/ops/backends/_ascend/ops/__init__.py +61 -0
  12. liger_kernel/ops/backends/_ascend/ops/embedding.py +214 -0
  13. liger_kernel/ops/backends/_ascend/ops/geglu.py +191 -0
  14. liger_kernel/ops/backends/_ascend/ops/llama4_rope.py +298 -0
  15. liger_kernel/ops/backends/_ascend/ops/qwen2vl_mrope.py +275 -0
  16. liger_kernel/ops/backends/_ascend/ops/rope.py +265 -0
  17. liger_kernel/ops/backends/_ascend/ops/swiglu.py +142 -0
  18. liger_kernel/ops/backends/_ascend/ops/tvd.py +223 -0
  19. liger_kernel/ops/backends/_ascend/ub_manager.py +367 -0
  20. liger_kernel/ops/backends/registry.py +61 -0
  21. liger_kernel/ops/cross_entropy.py +71 -11
  22. liger_kernel/ops/dyt.py +5 -2
  23. liger_kernel/ops/fused_add_rms_norm.py +21 -23
  24. liger_kernel/ops/fused_linear_cross_entropy.py +32 -5
  25. liger_kernel/ops/geglu.py +5 -3
  26. liger_kernel/ops/group_norm.py +12 -8
  27. liger_kernel/ops/grpo_loss.py +3 -1
  28. liger_kernel/ops/kl_div.py +8 -11
  29. liger_kernel/ops/layer_norm.py +89 -69
  30. liger_kernel/ops/poly_norm.py +19 -21
  31. liger_kernel/ops/rms_norm.py +149 -71
  32. liger_kernel/ops/tiled_mlp.py +136 -0
  33. liger_kernel/ops/utils.py +25 -0
  34. liger_kernel/transformers/__init__.py +25 -0
  35. liger_kernel/transformers/auto_model.py +21 -0
  36. liger_kernel/transformers/cross_entropy.py +9 -4
  37. liger_kernel/transformers/dyt.py +1 -1
  38. liger_kernel/transformers/experimental/embedding.py +1 -1
  39. liger_kernel/transformers/functional.py +44 -26
  40. liger_kernel/transformers/fused_add_rms_norm.py +1 -1
  41. liger_kernel/transformers/fused_linear_cross_entropy.py +9 -4
  42. liger_kernel/transformers/fused_linear_jsd.py +1 -1
  43. liger_kernel/transformers/fused_neighborhood_attention.py +1 -1
  44. liger_kernel/transformers/geglu.py +1 -1
  45. liger_kernel/transformers/group_norm.py +1 -1
  46. liger_kernel/transformers/grpo_loss.py +57 -2
  47. liger_kernel/transformers/jsd.py +1 -1
  48. liger_kernel/transformers/kl_div.py +1 -1
  49. liger_kernel/transformers/layer_norm.py +1 -1
  50. liger_kernel/transformers/llama4_rope.py +1 -1
  51. liger_kernel/transformers/model/exaone4.py +136 -0
  52. liger_kernel/transformers/model/falcon_h1.py +19 -5
  53. liger_kernel/transformers/model/gemma.py +17 -6
  54. liger_kernel/transformers/model/gemma2.py +17 -8
  55. liger_kernel/transformers/model/gemma3.py +35 -16
  56. liger_kernel/transformers/model/glm4.py +16 -4
  57. liger_kernel/transformers/model/glm4v.py +16 -4
  58. liger_kernel/transformers/model/glm4v_moe.py +23 -4
  59. liger_kernel/transformers/model/gpt_oss.py +211 -0
  60. liger_kernel/transformers/model/hunyuan_v1.py +134 -0
  61. liger_kernel/transformers/model/internvl.py +12 -5
  62. liger_kernel/transformers/model/llama.py +14 -5
  63. liger_kernel/transformers/model/llama4.py +16 -4
  64. liger_kernel/transformers/model/llava.py +12 -4
  65. liger_kernel/transformers/model/loss_utils.py +37 -3
  66. liger_kernel/transformers/model/mistral.py +15 -6
  67. liger_kernel/transformers/model/mixtral.py +16 -7
  68. liger_kernel/transformers/model/mllama.py +12 -4
  69. liger_kernel/transformers/model/olmo2.py +16 -4
  70. liger_kernel/transformers/model/olmo3.py +142 -0
  71. liger_kernel/transformers/model/output_classes.py +147 -0
  72. liger_kernel/transformers/model/paligemma.py +23 -5
  73. liger_kernel/transformers/model/phi3.py +14 -7
  74. liger_kernel/transformers/model/qwen2.py +16 -3
  75. liger_kernel/transformers/model/qwen2_5_vl.py +14 -6
  76. liger_kernel/transformers/model/qwen2_vl.py +16 -4
  77. liger_kernel/transformers/model/qwen3.py +20 -5
  78. liger_kernel/transformers/model/qwen3_moe.py +19 -5
  79. liger_kernel/transformers/model/qwen3_next.py +17 -5
  80. liger_kernel/transformers/model/qwen3_vl.py +150 -0
  81. liger_kernel/transformers/model/qwen3_vl_moe.py +126 -0
  82. liger_kernel/transformers/model/smollm3.py +15 -6
  83. liger_kernel/transformers/monkey_patch.py +584 -49
  84. liger_kernel/transformers/multi_token_attention.py +1 -1
  85. liger_kernel/transformers/poly_norm.py +1 -1
  86. liger_kernel/transformers/qwen2vl_mrope.py +1 -1
  87. liger_kernel/transformers/rms_norm.py +8 -3
  88. liger_kernel/transformers/rope.py +45 -1
  89. liger_kernel/transformers/softmax.py +1 -1
  90. liger_kernel/transformers/sparsemax.py +1 -1
  91. liger_kernel/transformers/swiglu.py +18 -1
  92. liger_kernel/transformers/tiled_mlp.py +125 -0
  93. liger_kernel/transformers/tvd.py +1 -1
  94. liger_kernel/utils.py +54 -0
  95. {liger_kernel-0.6.3.dist-info → liger_kernel-0.6.5.dist-info}/METADATA +14 -4
  96. liger_kernel-0.6.5.dist-info/RECORD +134 -0
  97. {liger_kernel-0.6.3.dist-info → liger_kernel-0.6.5.dist-info}/WHEEL +1 -1
  98. liger_kernel-0.6.3.dist-info/RECORD +0 -111
  99. {liger_kernel-0.6.3.dist-info → liger_kernel-0.6.5.dist-info}/licenses/LICENSE +0 -0
  100. {liger_kernel-0.6.3.dist-info → liger_kernel-0.6.5.dist-info}/licenses/NOTICE +0 -0
  101. {liger_kernel-0.6.3.dist-info → liger_kernel-0.6.5.dist-info}/top_level.txt +0 -0
liger_kernel/transformers/multi_token_attention.py CHANGED
@@ -5,7 +5,7 @@ import torch.nn as nn
 
 from torch.nn.modules.utils import _pair
 
-from liger_kernel.ops.multi_token_attention import LigerMultiTokenAttentionFunction
+from liger_kernel.ops import LigerMultiTokenAttentionFunction
 
 
 class LigerMultiTokenAttention(nn.Module):
liger_kernel/transformers/poly_norm.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 
-from liger_kernel.ops.poly_norm import LigerPolyNormFunction
+from liger_kernel.ops import LigerPolyNormFunction
 
 
 class LigerPolyNorm(nn.Module):
liger_kernel/transformers/qwen2vl_mrope.py CHANGED
@@ -1,4 +1,4 @@
-from liger_kernel.ops.qwen2vl_mrope import LigerQwen2VLMRopeFunction
+from liger_kernel.ops import LigerQwen2VLMRopeFunction
 
 
 def liger_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
liger_kernel/transformers/rms_norm.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 
-from liger_kernel.ops.rms_norm import LigerRMSNormFunction
+from liger_kernel.ops import LigerRMSNormFunction
 
 
 class LigerRMSNorm(nn.Module):
@@ -14,13 +14,18 @@ class LigerRMSNorm(nn.Module):
         init_fn="ones",
         in_place=True,
         row_mode=None,
+        elementwise_affine=True,
     ):
         super().__init__()
         assert init_fn in [
             "ones",
             "zeros",
         ], f"init_fn must be either 'ones' or 'zeros', got {init_fn}"
-        self.weight = nn.Parameter(torch.ones(hidden_size) if init_fn == "ones" else torch.zeros(hidden_size))
+        self.elementwise_affine = elementwise_affine
+        if self.elementwise_affine:
+            self.weight = nn.Parameter(torch.ones(hidden_size) if init_fn == "ones" else torch.zeros(hidden_size))
+        else:
+            self.register_parameter("weight", None)
         self.variance_epsilon, self.offset, self.casting_mode, self.in_place, self.row_mode = (
             eps,
             offset,
@@ -41,7 +46,7 @@ class LigerRMSNorm(nn.Module):
         )
 
     def extra_repr(self):
-        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}, offset={self.offset}, in_place={self.in_place}, row_mode={self.row_mode}"
+        return f"weight_shape={tuple(self.weight.shape) if self.weight is not None else None}, eps={self.variance_epsilon}, offset={self.offset}, in_place={self.in_place}, row_mode={self.row_mode}"
 
 
 class LigerRMSNormForGemma(LigerRMSNorm):
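
The new `elementwise_affine` flag mirrors the switch of the same name on `torch.nn.LayerNorm`: when it is off, the module registers `weight` as `None` instead of allocating a learnable scale. A minimal sketch of the constructor-level behavior shown in the hunk above (sizes are illustrative):

```python
from liger_kernel.transformers.rms_norm import LigerRMSNorm

# Default: learnable per-channel scale (init_fn="ones"), as in 0.6.3.
norm = LigerRMSNorm(hidden_size=64)
assert norm.weight.shape == (64,)

# New in 0.6.5: no learnable scale; `weight` is registered as None,
# and extra_repr() now prints weight_shape=None instead of raising.
norm_plain = LigerRMSNorm(hidden_size=64, elementwise_affine=False)
assert norm_plain.weight is None
```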
liger_kernel/transformers/rope.py CHANGED
@@ -1,4 +1,8 @@
-from liger_kernel.ops.rope import LigerRopeFunction
+from typing import Tuple
+
+import torch
+
+from liger_kernel.ops import LigerRopeFunction
 
 
 def liger_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
@@ -18,3 +22,43 @@ def liger_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
     """
 
     return LigerRopeFunction.apply(q, k, cos, sin, position_ids, unsqueeze_dim)
+
+
+def liger_rotary_pos_emb_vision(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    cos: torch.Tensor,
+    sin: torch.Tensor,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    """
+    Modified version of liger_rotary_pos_emb for qwen3_vl's apply_rotary_pos_emb_vision function.
+    Manually transposes the input and output to match the expected shape for liger_rotary_pos_emb.
+    Reference: https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py#L116
+
+    Args:
+        q (torch.Tensor): The query tensor of shape (seq_length, num_heads, head_dim),
+            with stride (num_heads * head_dim, head_dim, 1).
+        k (torch.Tensor): The key tensor of shape (seq_length, num_heads, head_dim),
+            with stride (num_heads * head_dim, head_dim, 1). Same as q.
+        cos (torch.Tensor): The cosine tensor of shape (seq_length, head_dim).
+        sin (torch.Tensor): The sine tensor of shape (seq_length, head_dim).
+
+    Returns:
+        Tuple[torch.Tensor, torch.Tensor]: The query and key tensors with the same shape and stride as inputs.
+    """
+    orig_q_dtype, orig_k_dtype = q.dtype, k.dtype
+
+    # transpose to (1, num_heads, seq_length, head_dim) and cast to float32 to match liger_rotary_pos_emb input shape
+    # also unsqueeze for batch dim
+    q32 = q.to(torch.float32).unsqueeze(0).transpose(1, 2)
+    k32 = k.to(torch.float32).unsqueeze(0).transpose(1, 2)
+    cos32 = cos.to(torch.float32)
+    sin32 = sin.to(torch.float32)
+
+    q_out, k_out = liger_rotary_pos_emb(q32, k32, cos32, sin32)
+
+    # transpose back to (seq_length, num_heads, head_dim) and cast back to original dtype
+    # also squeeze out batch dim
+    q_out = q_out.transpose(1, 2).squeeze(0).to(orig_q_dtype)
+    k_out = k_out.transpose(1, 2).squeeze(0).to(orig_k_dtype)
+    return q_out, k_out
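
A hedged usage sketch of the new vision helper: per the docstring above, it takes unbatched (seq_length, num_heads, head_dim) tensors plus full-head-dim cos/sin tables and round-trips shape and dtype. The rotary-table construction below is illustrative, not part of the package, and a CUDA device is assumed:

```python
import torch

from liger_kernel.transformers.rope import liger_rotary_pos_emb_vision

seq_len, num_heads, head_dim = 128, 16, 64
q = torch.randn(seq_len, num_heads, head_dim, dtype=torch.bfloat16, device="cuda")
k = torch.randn_like(q)

# Illustrative (seq_length, head_dim) rotary tables; real models precompute these.
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2, device="cuda").float() / head_dim))
freqs = torch.outer(torch.arange(seq_len, device="cuda").float(), inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)  # (seq_len, head_dim)

q_rot, k_rot = liger_rotary_pos_emb_vision(q, k, emb.cos(), emb.sin())
assert q_rot.shape == q.shape and q_rot.dtype == q.dtype  # shape/dtype round-trip
```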
liger_kernel/transformers/softmax.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 
-from liger_kernel.ops.softmax import LigerSoftmaxFunction
+from liger_kernel.ops import LigerSoftmaxFunction
 
 
 class LigerSoftmax(nn.Module):
liger_kernel/transformers/sparsemax.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 
-from liger_kernel.ops.sparsemax import LigerSparsemaxFunction
+from liger_kernel.ops import LigerSparsemaxFunction
 
 
 class LigerSparsemax(nn.Module):
liger_kernel/transformers/swiglu.py CHANGED
@@ -1,6 +1,6 @@
 import torch.nn as nn
 
-from liger_kernel.ops.swiglu import LigerSiLUMulFunction
+from liger_kernel.ops import LigerSiLUMulFunction
 
 
 class LigerSwiGLUMLP(nn.Module):
@@ -77,3 +77,20 @@ class LigerQwen3MoeSwiGLUMLP(nn.Module):
 
     def forward(self, x):
         return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
+
+
+class LigerHunyuanV1SwiGLUMLP(nn.Module):
+    def __init__(self, config, layer_idx=None, is_shared_mlp=False):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+        self.layer_idx = layer_idx
+        if config.hidden_act not in ["silu", "swish"]:
+            raise ValueError(f"Activation function {config.hidden_act} not supported.")
+
+    def forward(self, x):
+        return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
liger_kernel/transformers/tiled_mlp.py ADDED
@@ -0,0 +1,125 @@
+from typing import Optional
+
+import torch.nn as nn
+
+from liger_kernel.ops import LigerGELUMulFunction
+from liger_kernel.ops import LigerSiLUMulFunction
+from liger_kernel.ops import apply_tiled_mlp
+
+
+class LigerTiledGEGLUMLP(nn.Module):
+    """
+    Memory-efficient GEGLU MLP using tiled computation.
+
+    This module combines GEGLU activation with tiled processing to handle
+    very long sequences efficiently. The forward pass is recomputed during
+    backward to save memory.
+
+    Args:
+        config: Model configuration with hidden_size and intermediate_size attributes
+        num_shards: Number of shards to split the sequence. If None, automatically
+            calculated as ceil(seqlen / hidden_size)
+    """
+
+    def __init__(self, config, num_shards: Optional[int] = None):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        self.num_shards = num_shards
+
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+
+        # Validate activation function
+        if hasattr(config, "hidden_act") and config.hidden_act not in [
+            "gelu",
+            "gelu_new",
+            "gelu_pytorch_tanh",
+        ]:
+            raise ValueError(f"LigerTiledGEGLUMLP requires GELU activation, got {config.hidden_act}")
+
+    def _mlp_forward(self, module, x):
+        """Internal MLP forward function for tiled computation."""
+        gate = module.gate_proj(x)
+        up = module.up_proj(x)
+        return module.down_proj(LigerGELUMulFunction.apply(gate, up))
+
+    def forward(self, x):
+        """
+        Forward pass with tiled computation.
+
+        Args:
+            x: Input tensor of shape [batch_size, seq_len, hidden_size]
+                or [seq_len, hidden_size]
+
+        Returns:
+            Output tensor of the same shape as input
+        """
+        compute_params = [p for p in self.parameters() if p.requires_grad]
+
+        return apply_tiled_mlp(
+            fn=self._mlp_forward,
+            mlp_module=self,
+            x=x,
+            num_shards=self.num_shards,
+            compute_params=compute_params,
+        )
+
+
+class LigerTiledSwiGLUMLP(nn.Module):
+    """
+    Memory-efficient SwiGLU MLP using tiled computation.
+
+    This module combines SwiGLU activation with tiled processing to handle
+    very long sequences efficiently. The forward pass is recomputed during
+    backward to save memory.
+
+    Args:
+        config: Model configuration with hidden_size and intermediate_size attributes
+        num_shards: Number of shards to split the sequence. If None, automatically
+            calculated as ceil(seqlen / hidden_size)
+    """
+
+    def __init__(self, config, num_shards: Optional[int] = None):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        self.num_shards = num_shards
+
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+
+        # Validate activation function
+        if hasattr(config, "hidden_act") and config.hidden_act not in ["silu", "swish"]:
+            raise ValueError(f"LigerTiledSwiGLUMLP requires SiLU/Swish activation, got {config.hidden_act}")
+
+    def _mlp_forward(self, module, x):
+        """Internal MLP forward function for tiled computation."""
+        gate = module.gate_proj(x)
+        up = module.up_proj(x)
+        return module.down_proj(LigerSiLUMulFunction.apply(gate, up))
+
+    def forward(self, x):
+        """
+        Forward pass with tiled computation.
+
+        Args:
+            x: Input tensor of shape [batch_size, seq_len, hidden_size]
+                or [seq_len, hidden_size]
+
+        Returns:
+            Output tensor of the same shape as input
+        """
+        compute_params = [p for p in self.parameters() if p.requires_grad]
+
+        return apply_tiled_mlp(
+            fn=self._mlp_forward,
+            mlp_module=self,
+            x=x,
+            num_shards=self.num_shards,
+            compute_params=compute_params,
+        )
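
A hedged usage sketch of the new tiled MLP modules. `TinyConfig` is a hypothetical stand-in for a real model config carrying the three attributes the module reads; a CUDA device is assumed:

```python
import torch

from liger_kernel.transformers.tiled_mlp import LigerTiledSwiGLUMLP


class TinyConfig:
    # Hypothetical config; real HF configs expose the same attributes.
    hidden_size = 256
    intermediate_size = 1024
    hidden_act = "silu"


mlp = LigerTiledSwiGLUMLP(TinyConfig(), num_shards=4).to("cuda")
x = torch.randn(2, 8192, 256, device="cuda", requires_grad=True)

out = mlp(x)          # computed shard-by-shard along the sequence
out.sum().backward()  # shards are recomputed in backward to save activation memory
```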
liger_kernel/transformers/tvd.py CHANGED
@@ -1,6 +1,6 @@
 import torch.nn as nn
 
-from liger_kernel.ops.tvd import LigerTVDLossFunction
+from liger_kernel.ops import LigerTVDLossFunction
 
 
 class LigerTVDLoss(nn.Module):
liger_kernel/utils.py CHANGED
@@ -12,18 +12,59 @@ def is_peft_available():
     return PEFT_AVAILABLE
 
 
+def infer_comm_backend():
+    """
+    Get communication backend name based on the environment.
+    """
+    if torch.distributed.is_nccl_available():
+        # Works for Nvidia
+        # TODO: nccl may not work for AMD devices, which may require rccl.
+        return "nccl"
+    elif is_npu_available():
+        # Use Ascend NPU if available (torch.npu)
+        # Ascend is not a standard torch backend and requires an extension.
+        # Assume that it is installed if NPUs are being used in a
+        # multi-device environment.
+        return "ascend"
+    # XPU (Intel) if available
+    elif torch.distributed.distributed_c10d.is_xccl_available():
+        return "xccl"
+    elif torch.distributed.is_mpi_available():
+        # CPU backend, first option
+        return "mpi"
+    elif torch.distributed.is_gloo_available():
+        # CPU backend, backup option
+        return "gloo"
+    else:
+        raise RuntimeError("There is no distributed backend available.")
+
+
 def infer_device():
     """
     Get current device name based on available devices
     """
     if torch.cuda.is_available():  # Works for both Nvidia and AMD
         return "cuda"
+    # Use Ascend NPU if available (torch.npu)
+    elif is_npu_available():
+        return "npu"
+    # XPU (Intel) if available
     elif torch.xpu.is_available():
         return "xpu"
     else:
         return "cpu"
 
 
+def is_npu_available() -> bool:
+    """Detect Ascend NPU availability."""
+    try:
+        from transformers.utils import is_torch_npu_available
+
+        return is_torch_npu_available()
+    except Exception:
+        return False
+
+
 def transformers_version_dispatch(
     required_version: str,
     before_fn,
@@ -69,3 +110,16 @@ def transformers_version_dispatch(
         return before_fn(*before_args, **before_kwargs)
     else:
        return after_fn(*after_args, **after_kwargs)
+
+
+def get_total_gpu_memory() -> int:
+    """Returns total GPU memory in GBs."""
+    device = infer_device()
+    if device == "cuda":
+        return torch.cuda.get_device_properties(0).total_memory // (1024**3)
+    elif device == "xpu":
+        return torch.xpu.get_device_properties(0).total_memory // (1024**3)
+    elif device == "npu":
+        return torch.npu.get_device_properties(0).total_memory // (1024**3)
+    else:
+        raise RuntimeError(f"Unsupported device: {device}")
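
A quick sketch of how the new helpers compose; output values depend on the host, and `infer_comm_backend` raises only when no distributed backend is compiled into the torch build:

```python
from liger_kernel.utils import get_total_gpu_memory
from liger_kernel.utils import infer_comm_backend
from liger_kernel.utils import infer_device

device = infer_device()          # "cuda", "npu", "xpu", or "cpu"
if device != "cpu":
    # total_memory // 1024**3, i.e. whole GiB of device 0
    print(f"{device}: {get_total_gpu_memory()} GiB total")
print(infer_comm_backend())      # e.g. "nccl" on an NVIDIA machine, "gloo" on CPU-only
```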
{liger_kernel-0.6.3.dist-info → liger_kernel-0.6.5.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: liger_kernel
-Version: 0.6.3
+Version: 0.6.5
 Summary: Efficient Triton kernels for LLM Training
 License: BSD 2-CLAUSE LICENSE
         Copyright 2024 LinkedIn Corporation
@@ -33,7 +33,7 @@ License-File: NOTICE
 Requires-Dist: torch>=2.1.2
 Requires-Dist: triton>=2.3.1
 Provides-Extra: dev
-Requires-Dist: transformers>=4.49.0; extra == "dev"
+Requires-Dist: transformers<5.0.0,>=4.49.0; extra == "dev"
 Requires-Dist: matplotlib>=3.7.2; extra == "dev"
 Requires-Dist: ruff>=0.12.0; extra == "dev"
 Requires-Dist: pytest>=7.1.2; extra == "dev"
@@ -45,6 +45,7 @@ Requires-Dist: datasets>=2.19.2; extra == "dev"
 Requires-Dist: seaborn; extra == "dev"
 Requires-Dist: mkdocs-material; extra == "dev"
 Requires-Dist: torchvision>=0.20; extra == "dev"
+Requires-Dist: prek>=0.2.28; extra == "dev"
 Dynamic: license-file
 Dynamic: provides-extra
 Dynamic: requires-dist
@@ -82,8 +83,8 @@ Dynamic: requires-dist
     </a>
   </td>
   <td style="padding: 10px;">
-    <a href="https://discord.gg/gpumode">
-      <img src="https://dcbadge.limes.pink/api/server/gpumode?style=flat" alt="Join Our Discord">
+    <a href="https://discord.gg/X4MaxPgA">
+      <img src="https://dcbadge.limes.pink/api/server/X4MaxPgA?style=flat" alt="Join Our Discord">
     </a>
   </td>
 </tr>
@@ -98,6 +99,7 @@ Dynamic: requires-dist
 <details>
 <summary>Latest News 🔥</summary>
 
+- [2025/12/19] We announced a Liger Kernel Discord channel at https://discord.gg/X4MaxPgA; we will be hosting a Liger Kernel x Triton China Meetup in mid-January 2026
 - [2025/03/06] We release a joint blog post on TorchTune × Liger - [Peak Performance, Minimized Memory: Optimizing torchtune’s performance with torch.compile & Liger Kernel](https://pytorch.org/blog/peak-performance-minimized-memory/)
 - [2024/12/11] We release [v0.5.0](https://github.com/linkedin/Liger-Kernel/releases/tag/v0.5.0): 80% more memory efficient post training losses (DPO, ORPO, CPO, etc)!
 - [2024/12/5] We release LinkedIn Engineering Blog - [Liger-Kernel: Empowering an open source ecosystem of Triton Kernels for Efficient LLM Training](https://www.linkedin.com/blog/engineering/open-source/liger-kernel-open-source-ecosystem-for-efficient-llm-training)
@@ -116,6 +118,8 @@ We've also added optimized Post-Training kernels that deliver **up to 80% memory
 
 You can view the documentation site for additional installation, usage examples, and API references: https://linkedin.github.io/Liger-Kernel/
 
+You can view the Liger Kernel Technical Report: https://openreview.net/forum?id=36SjAIT42G
+
 ## Supercharge Your Model with Liger Kernel
 
 ![Banner](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/banner.GIF)
@@ -313,8 +317,12 @@ loss.backward()
 | Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Granite 3.0 & 3.1 | `liger_kernel.transformers.apply_liger_kernel_to_granite` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss |
 | OLMo2 | `liger_kernel.transformers.apply_liger_kernel_to_olmo2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| OLMo3 | `liger_kernel.transformers.apply_liger_kernel_to_olmo3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | GLM-4 | `liger_kernel.transformers.apply_liger_kernel_to_glm4` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| GPT-OSS | `liger_kernel.transformers.apply_liger_kernel_to_gpt_oss` | RoPE, RMSNorm, CrossEntropyLoss, FusedLinearCrossEntropy |
 | InternVL3 | `liger_kernel.transformers.apply_liger_kernel_to_internvl` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| HunyuanV1 | `liger_kernel.transformers.apply_liger_kernel_to_hunyuan_v1_dense` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| HunyuanV1 MoE | `liger_kernel.transformers.apply_liger_kernel_to_hunyuan_v1_moe` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 
 
 ## Low-level APIs
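
The new patch functions follow the same usage pattern as the existing table entries: call the patch before instantiating the model. A hedged sketch for the OLMo3 row (the checkpoint path is a placeholder):

```python
import transformers

from liger_kernel.transformers import apply_liger_kernel_to_olmo3

# Patch the HF modeling code before the model is instantiated.
apply_liger_kernel_to_olmo3()

model = transformers.AutoModelForCausalLM.from_pretrained("path/to/an-olmo3-checkpoint")
```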
@@ -441,3 +449,5 @@ url={https://openreview.net/forum?id=36SjAIT42G}
         ↑ Back to Top ↑
       </a>
 </p>
+
+
liger_kernel-0.6.5.dist-info/RECORD ADDED
@@ -0,0 +1,134 @@
+liger_kernel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+liger_kernel/env_report.py,sha256=uhdEC8OydxoZlb7B6YYcAaBF3crGFdIck-4cxaW4NJY,1728
+liger_kernel/utils.py,sha256=NQIT6TTbVrpSUQBu6E1yj-Ucje56LJDxpee1W6s8uis,3832
+liger_kernel/chunked_loss/README.md,sha256=0FmkFC3hKBqyoDT5uTlIYmrvRkF-EOCR1y-EBU1LpWU,2248
+liger_kernel/chunked_loss/__init__.py,sha256=J5_jNnzZ4gZmA38W5f_4oab7xMoNk1Xy-yh3X_Xlf-s,714
+liger_kernel/chunked_loss/cosine_similarity_loss.py,sha256=h8lPAkw8oYPUUBZ5YEG2tMMmQ7XkWnOo7r29A5vx-Eg,4759
+liger_kernel/chunked_loss/cpo_loss.py,sha256=Gzz1eU4kgcbdubFVRy55e8A1Cr-r45UgNicXwZIjmBU,5454
+liger_kernel/chunked_loss/dpo_loss.py,sha256=I83khNs3QQjuhr8U3NIOAACkbse6DNiBV-TulPZ0lXw,9006
+liger_kernel/chunked_loss/functional.py,sha256=-XPDbLml9dHmvoSU2VNTUrBDFehuzvuAGPikVetBMtI,1132
+liger_kernel/chunked_loss/fused_linear_distillation.py,sha256=Bjdxnjfg-GwYvMJ102juP06gFMlbkxYPpvv7cV_mZcg,12912
+liger_kernel/chunked_loss/fused_linear_ppo.py,sha256=baU19PwqO1FTVxwlB-eyJv6gOLtL7baXGzSncYQ8Ktc,14296
+liger_kernel/chunked_loss/fused_linear_preference.py,sha256=FIH85uUXAOgYx5Ax8MjFhJHVu-2pKtY7wSegd0zSyyY,18336
+liger_kernel/chunked_loss/fused_linear_unpaired_preference.py,sha256=RiuK3UtRwH9T6jZ36sA8Urj-TVuOLOO2syLg_JOQapY,13437
+liger_kernel/chunked_loss/grpo_loss.py,sha256=bmuZaNgqNbJ5pJGFDXWE-B4BGYF7xWVSN15UyCfuq_s,13079
+liger_kernel/chunked_loss/jsd_loss.py,sha256=NJKmJ76_-kI875ZkC4hQfC4nAvPNCj4ZsNyDNsfD74k,8761
+liger_kernel/chunked_loss/kto_loss.py,sha256=llVCe6DkcpCo57seGWoMikaQVFApx764jsmSbQyqwQY,7529
+liger_kernel/chunked_loss/orpo_loss.py,sha256=nu9UYG16dcMw93lvHi4_hYs3Q0FK1KnlmMRj7OpYU8s,4872
+liger_kernel/chunked_loss/simpo_loss.py,sha256=fy2w8KbhMrBv7b1jdIeH3bBFxY52bPQPZb3KwBvmurM,5385
+liger_kernel/ops/__init__.py,sha256=F3m9qlXbgttykKEBsrMFf1WyK_0H8CKqLuDnFRR-cvc,7237
+liger_kernel/ops/cross_entropy.py,sha256=DnXFRZ9TGN1SnEo8xGBFFPLNQaen8aLVNPJ1em-LbK4,22910
+liger_kernel/ops/dyt.py,sha256=OWjY6J6dHFdjm2JZFuvkHSOljZq6mL3XhrdZSQ2CULw,5610
+liger_kernel/ops/fused_add_rms_norm.py,sha256=YLYwHS56NNxhjNe305mylXtJuTn1DtNuUs4IuwklYLo,14156
+liger_kernel/ops/fused_linear_cross_entropy.py,sha256=1gx2qljre9PVc861iknFnNCGC-P35D2w1cc_yMDO9ow,16239
+liger_kernel/ops/fused_linear_jsd.py,sha256=CSoprxb-YcJy-YUKiTcYkxN8sb9h2kdk_iHuncvSV5c,9683
+liger_kernel/ops/fused_neighborhood_attention.py,sha256=vPi5xbnh6wxyZehaqo6Tuilqo2fN5SGDiONjnNmIKqs,35556
+liger_kernel/ops/geglu.py,sha256=-ruMACDsFH1YsAak6BGvZ0ktLGIrBE6yGF0dAyR82UU,4307
+liger_kernel/ops/group_norm.py,sha256=7BqYIP5-HQCdvHKMJlA6jCQoYKZjbtsoD9-eXld5qzk,11133
+liger_kernel/ops/grpo_loss.py,sha256=2SyOujtF9I3xiNo4wFf4s6MeiDotE_qeYfRWgj_bOBE,9573
+liger_kernel/ops/jsd.py,sha256=onHp5T3MbvJaVz5Vup7Ww6EQp_HTaZeayTjJk6FgQMY,7042
+liger_kernel/ops/kl_div.py,sha256=MZZb7eAPMXlydYVV4uL9aTytXFkdQdp-jmiDw9tC0pg,8652
+liger_kernel/ops/layer_norm.py,sha256=fx-Xq2Qd_bDjxd9aQwmAf8xxkpNhl6EO86twytbYV_0,10678
+liger_kernel/ops/llama4_rope.py,sha256=-aqdZzllklTN8b9--e-TsWY_ntGCN8-tyseT4x0bd8s,8223
+liger_kernel/ops/multi_token_attention.py,sha256=Oz_RXDp-OSS_R_HuGmaETHdAJ7Toda_70OfE7TXMUlY,7645
+liger_kernel/ops/poly_norm.py,sha256=lv3d1vu9Wr_LYOXOGEV2aXwAxP7nuDWZgJuALYlWLAw,11270
+liger_kernel/ops/qwen2vl_mrope.py,sha256=3GExhYpLgB4VUtyZyjRk8XjEur3W4EWF6HQ67ML5vBU,8481
+liger_kernel/ops/rms_norm.py,sha256=F1lgDzpKfxZMIz89CoPX6qrbRfnRaLDKooJAKVBzEeE,21856
+liger_kernel/ops/rope.py,sha256=v-7JHRrv-5ImoROkpKfl30WwWI4qTa2tAl7zQeB4ml4,8956
+liger_kernel/ops/softmax.py,sha256=tgORx6MK1IDDtZKqGarj0IPIVjqAIEUXXYPiinhRdtI,5864
+liger_kernel/ops/sparsemax.py,sha256=AeWe1xgkHJFEKWTj2vu_0hj7LztGvjqXAps-QTpCY0U,5087
+liger_kernel/ops/swiglu.py,sha256=D7nd4u_LInwsIRNCDdY77lqnTz8-W5dJrpEAt8zEO_A,3033
+liger_kernel/ops/tiled_mlp.py,sha256=eyMFsFFgHch8a_6R6IYRG24_jqKg5GF_BQUoQuAG8SY,4529
+liger_kernel/ops/tvd.py,sha256=FHJtLQI95ijqgg9UtaHpMAjSCiPxB6CduPwPMcGxelc,6405
+liger_kernel/ops/utils.py,sha256=90V8P0ElZeBathDhmIKm_506Nhrsr1ojO0qRl53_Tn0,4909
+liger_kernel/ops/backends/README.md,sha256=ZP59UUqD1WW8LwM5Y-cTpSM-Dtgdp8Wku2mE9kqAc2E,4185
+liger_kernel/ops/backends/__init__.py,sha256=-mgef3cHfDFeL5NbXbq1TI7ngCahE9qqL3aMaHnXvis,629
+liger_kernel/ops/backends/registry.py,sha256=yJa_Sh2FZ__iPCIU8h2nOQbnsFQh1I-_czROLtb1uQM,1637
+liger_kernel/ops/backends/_ascend/__init__.py,sha256=6n0keOX9H-kLadBdVZlx-Ce0ZLVJvLiEfR-9-uxmYUk,221
+liger_kernel/ops/backends/_ascend/ascend-ub-manager-design.md,sha256=1QClHtxq-hYoH0wqT456sFZRLDIMj8UyWEU-eXMeFLM,19934
+liger_kernel/ops/backends/_ascend/ub_manager.py,sha256=LP4xSkBXxq9ckjCDH7-11_eYzoPqLBGRGrd7wrev1K4,14097
+liger_kernel/ops/backends/_ascend/ops/__init__.py,sha256=zBlsxmbP6psQY_0_IgMwg3BYzWZ8Ip3OQ4-JLduazbo,2725
+liger_kernel/ops/backends/_ascend/ops/embedding.py,sha256=8FQjT68VTPs37_Ehukx8dQCXw2f38bLTdDjfzNq33z8,6885
+liger_kernel/ops/backends/_ascend/ops/geglu.py,sha256=TiIVdL-HTUaiqQFL8LxQBEMZip2Tn5ks9Mes4z-Q4V8,6249
+liger_kernel/ops/backends/_ascend/ops/llama4_rope.py,sha256=embRPABSaQW84hh6CqdYlwVyEPUWiLDXazIyNMwvjcQ,9198
+liger_kernel/ops/backends/_ascend/ops/qwen2vl_mrope.py,sha256=XbqL-hXqEjuu7vwGJ2-7tdSQaspg8QS7HAG4C4FeAZ4,9810
+liger_kernel/ops/backends/_ascend/ops/rope.py,sha256=8PUP0gjSg6zSfKeUMpOtgSYzvG0PC6gVFjOvjee9jxA,9458
+liger_kernel/ops/backends/_ascend/ops/swiglu.py,sha256=yrbEgIgeCZyayMYHCRNq7LntZE9cEemht39_TFPro0k,4682
+liger_kernel/ops/backends/_ascend/ops/tvd.py,sha256=4Tsteapds8e-RWymsP-7HMZs2x_vqypAw1fCpzbaDV8,7883
+liger_kernel/ops/experimental/embedding.py,sha256=tolj3tItkzpSb30zWqDN2_yX4ectflaQ8HMyKyFIQc8,4172
+liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-OidjtbsW80oZ6IM,13314
+liger_kernel/transformers/__init__.py,sha256=h7U1Vxrg5OoqOstBmZMd-0G0LROYleYt_fS-RpvEq84,11057
+liger_kernel/transformers/auto_model.py,sha256=RnJhK8xHamRnnswgRLG_muJE1i6T6LszjK8lC6vonhE,2410
+liger_kernel/transformers/cross_entropy.py,sha256=08H8RxSxGX_52UzrHNnSZ_wWH-uvU8KrRiDmVrkOw14,1996
+liger_kernel/transformers/dyt.py,sha256=Rng-MZQSprnGGWFtpmYKt7MIX26vFUYbq5ruM4MjH-U,719
+liger_kernel/transformers/fsdp.py,sha256=CUiyjTmjkjY7pLXQv8ly9rnzgXw6529csd9pvtJNMYc,3096
+liger_kernel/transformers/functional.py,sha256=f9sOWEfh5HZwOH5cVlcB_ts0MB_-fFFPki8PVZ5w__M,8352
+liger_kernel/transformers/fused_add_rms_norm.py,sha256=k98sfcZhsgtdVxChciHmv0WUizzn6f-Rn72JtGgmafI,1180
+liger_kernel/transformers/fused_linear_cross_entropy.py,sha256=WnGuR_rjIWO0XHUyVakz-qsIRm028OKzi1vayvmPfbg,2320
+liger_kernel/transformers/fused_linear_jsd.py,sha256=BW22DX3J6J8uZdoaU9JFUU5HnTrNYL63H9IQZzHkGu0,3982
+liger_kernel/transformers/fused_neighborhood_attention.py,sha256=21O9DSRXgMQst9Lc3b62CsOLkYn-hjuskj9Zi3mvG7Y,7928
+liger_kernel/transformers/geglu.py,sha256=esltAhNJZjWydvh07C6EaTdjA2aQzFPMNK92yR15SEI,1101
+liger_kernel/transformers/group_norm.py,sha256=k7LDIG8H5CA5kiNj2uOi8D_Z6FlZtQDLyzJQxK2E-gA,2162
+liger_kernel/transformers/grpo_loss.py,sha256=wNVz1o3q9XH17tDqaCZFEVXJhH9mQX44pWhQEwiRo_Q,6088
+liger_kernel/transformers/jsd.py,sha256=_KlOX8YcdONU0tq0bIRDQ5VDBwtywm3Ro-FmlmI01qk,2975
+liger_kernel/transformers/kl_div.py,sha256=94VR4uuj-2dZCTEnwFksvDi-LporrpB5HgmYtQCZnw0,402
+liger_kernel/transformers/layer_norm.py,sha256=l4nsT_Zj4CdVZOM7F0I0Ox-lmLHyIJzqQvVaF0o0HbI,895
+liger_kernel/transformers/llama4_rope.py,sha256=A_nxcS_KiUCyNeL2FAZX7yUhDsX7krrI9BG49OaN_nM,3627
+liger_kernel/transformers/monkey_patch.py,sha256=hCFLKviPteLyDTUxjehiUS6k4hEx2GHDEualDhKpEYs,138949
+liger_kernel/transformers/multi_token_attention.py,sha256=LtEjG7qy1-JK-HIPaz8zZ4P08aSZTnj5D635Pa04Onc,1730
+liger_kernel/transformers/poly_norm.py,sha256=T3VdLQHLcCY7KzNzrc6IJRs8SzO8Yc7a0BS_2p6d7Wo,1367
+liger_kernel/transformers/qwen2vl_mrope.py,sha256=0hOBR3j2Yd6xbT4z9BNRKEy1D0eyOUsIW6EmI_3PPNI,1033
+liger_kernel/transformers/rms_norm.py,sha256=dD_69_GA3GUdtvdYVxTLKGeg8QZinJpS3qfeV7WvOuA,3237
+liger_kernel/transformers/rope.py,sha256=-W9aYLa2hMOmmG5yeHcvPsOI5UTc95ylYxUddxkwmkA,2867
+liger_kernel/transformers/softmax.py,sha256=VI5QGHYpXSiXckgovEnDGcXwitimsxKB0GX-AT4dAC4,256
+liger_kernel/transformers/sparsemax.py,sha256=Os49bSpPX4pWymsasv_3j20m8GFaI54e03XFPkHiPE0,393
+liger_kernel/transformers/swiglu.py,sha256=LpgikAs9hibAL7G6itygBbOlW9tZe5s4D2IGAKGpbPw,4284
+liger_kernel/transformers/tiled_mlp.py,sha256=_Go2bN8huL4I0EHBPXNfpIRaEukl8hiQEEJIwpJST20,4498
+liger_kernel/transformers/trainer_integration.py,sha256=W3ON51O5GkyzNJsItz0y5rKx-uy2f2cFfveZpqbUdhw,123
+liger_kernel/transformers/tvd.py,sha256=GYjhtXgS3RTPveOTN2gyK4uBnjs6ii2vkSZRX21QpqA,446
+liger_kernel/transformers/experimental/__init__.py,sha256=oQqk-f32JYgWEP9DJCj6ty6bbJSGrdXsFDQFwGeX6vI,127
+liger_kernel/transformers/experimental/embedding.py,sha256=bjy9hHj--ivy6xEWdiE6qLy9uLyeS4PsBEgl_MdDrng,858
+liger_kernel/transformers/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+liger_kernel/transformers/model/exaone4.py,sha256=T5Ef2FnkJ-i8ktRWvBB5GXFOIyJmvMPyGsDFt5awpmE,5802
+liger_kernel/transformers/model/falcon_h1.py,sha256=heUZ4wUt2ATmtBtmv8Rcro3pQl6fV9T0pburjTTW7os,5004
+liger_kernel/transformers/model/gemma.py,sha256=pAri4PYpknsFfkvyo8Ez2NNlqrUDW-KkExUXTGZAcH4,10621
+liger_kernel/transformers/model/gemma2.py,sha256=KgSpXVi04c8hVFa7dqJtjzVobz6z7BNTvGc1WjoV4nk,12006
+liger_kernel/transformers/model/gemma3.py,sha256=2XPmtpZxR55wccKflIDqf2AwHJdxypUbd62fLuZ8two,15092
+liger_kernel/transformers/model/glm4.py,sha256=bSp22iPIjsli4-c_usUOsyh1Bs2gIK8X6ynS0azseUs,5900
+liger_kernel/transformers/model/glm4v.py,sha256=dd-BQpccDCp1SbIxcJ5rG8xcwYQK3KOv1Tgm9TGnZc4,6594
+liger_kernel/transformers/model/glm4v_moe.py,sha256=zKhMdOOrRhlrvCSFaeVYfddL1ubpY8edEO91TN81n98,7135
+liger_kernel/transformers/model/gpt_oss.py,sha256=8jEAQQNEXgVA-yuvEjKkBQvCvZy0E9ns-O9BPlajXXU,11197
+liger_kernel/transformers/model/hunyuan_v1.py,sha256=MJvP9xkUFePIV0HLETJM4YPbVCEPkAE1ZI5Jxyiebh0,5731
+liger_kernel/transformers/model/internvl.py,sha256=OOutracs9qrPHSU7FVYar08yinvGrHQVPvo39JEws6w,6473
+liger_kernel/transformers/model/llama.py,sha256=kqZeONzwTBzudoChlKMzq1w23BtYGbxWZC1l1V__JTw,13410
+liger_kernel/transformers/model/llama4.py,sha256=PfkynGVI0xxMs3EtyYpCgaALI6stu25OIrTIymE-pvg,4853
+liger_kernel/transformers/model/llava.py,sha256=yoADM_BuIEummtTDiwWqjfUjXUMZD78VJzS0TRj5GJ4,15687
+liger_kernel/transformers/model/loss_utils.py,sha256=tNbC94Z4Ca2mlv3MRhnqfpJ7sBc5MZJtt1-mzMMJT1M,3088
+liger_kernel/transformers/model/mistral.py,sha256=OcwOzVDMwwDbVccVPv-AaocznzWwzLT3aRaKK5SMaAg,6030
+liger_kernel/transformers/model/mixtral.py,sha256=YcBDoTEJDgLFJ_RTo180DYGxR8D5Ad9-idumif7kCPE,12130
+liger_kernel/transformers/model/mllama.py,sha256=vAHwCm63sn4kpAY0rDGf_N0HR7KRTBVpBYDVTPOaZTg,12079
+liger_kernel/transformers/model/olmo2.py,sha256=-h2bUOeuPfY1MdShdRvq5_wFDHKP4PEimgIl0fL-BT4,5902
+liger_kernel/transformers/model/olmo3.py,sha256=k2zYOlS8U_b5MwjdToB3tDRQ0bH_mWapVQqJcH8-qAo,6007
+liger_kernel/transformers/model/output_classes.py,sha256=0BGXVR4dYQpSHLkSqpRoXuHMryrceGSlTYRu6pvd8ZY,4542
+liger_kernel/transformers/model/paligemma.py,sha256=UAYoKkIMvvix7GG3cSdWaDxVjMp26YsvthJuE7wFf6Y,20848
+liger_kernel/transformers/model/phi3.py,sha256=PT7Kw6yySg-7TsssWfi82eVMN3SWujCqzCqHigAdfeQ,4574
+liger_kernel/transformers/model/qwen2.py,sha256=ojqdJpD3A9A5uCS0N_rSq8gyNYWSsHfuvx3Z3ObC7ss,10686
+liger_kernel/transformers/model/qwen2_5_vl.py,sha256=FbIZDcg9cOr4PtBLNN8yVubN-gu2clndjSIzfi8NMos,6894
+liger_kernel/transformers/model/qwen2_vl.py,sha256=967Ex4Scm0ehhiVxOtjwfj396nD9xkAwFwHcoURH6-o,6578
+liger_kernel/transformers/model/qwen3.py,sha256=1fvioVmq5CRZSIuTd7uuLet-fti9ee3r8eLibvfNTcQ,5769
+liger_kernel/transformers/model/qwen3_moe.py,sha256=yljJO4kyeM5U2Q4pXH3Mmq71ZFEC_Z73qgBx1-an-o8,6457
+liger_kernel/transformers/model/qwen3_next.py,sha256=TayfD91GVLA1-fJwtVl6vMZgkUTYLQYURMRGBdCtnFc,6331
+liger_kernel/transformers/model/qwen3_vl.py,sha256=sUIdJ-32IlFm_4pHv6PpLgVafqBS0QeJm_91tY67NdY,6646
+liger_kernel/transformers/model/qwen3_vl_moe.py,sha256=CJEFcwBqItSEw9NA0mhEozlDTgIuJQ6VTjgkh5iLZ78,4856
+liger_kernel/transformers/model/smollm3.py,sha256=1ewDY-99UAFJEfoeqfZxDcxjkqKYUSr5b7X-E_2BLLs,8126
+liger_kernel/transformers/model/smolvlm.py,sha256=yFpPKawLVo3zXzLjM7Y_T8FyRrPxVyp-YPFMM8m3k0c,6734
+liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7HHWHwku25A-GYL0WU,193
+liger_kernel/transformers/trainer/orpo_trainer.py,sha256=tX0h63aOFe3rNqTmk6JpMf75UPo981yzEa6TghnjS0Q,5370
+liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
+liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
+liger_kernel-0.6.5.dist-info/licenses/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel-0.6.5.dist-info/licenses/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel-0.6.5.dist-info/METADATA,sha256=w8cyQQnr9OCbw58ZKaap1xxjOwExgCuLe01jgDZyKdk,25754
+liger_kernel-0.6.5.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+liger_kernel-0.6.5.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel-0.6.5.dist-info/RECORD,,
{liger_kernel-0.6.3.dist-info → liger_kernel-0.6.5.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.9.0)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any