liger-kernel-nightly 0.6.2.dev20251011154427__py3-none-any.whl → 0.6.4.dev20260107111351__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of liger-kernel-nightly might be problematic.
Files changed (97)
  1. liger_kernel/chunked_loss/cosine_similarity_loss.py +20 -5
  2. liger_kernel/chunked_loss/fused_linear_distillation.py +23 -5
  3. liger_kernel/chunked_loss/fused_linear_ppo.py +21 -5
  4. liger_kernel/chunked_loss/grpo_loss.py +8 -5
  5. liger_kernel/chunked_loss/jsd_loss.py +39 -11
  6. liger_kernel/ops/__init__.py +141 -0
  7. liger_kernel/ops/backends/README.md +151 -0
  8. liger_kernel/ops/backends/__init__.py +13 -0
  9. liger_kernel/ops/backends/_ascend/__init__.py +5 -0
  10. liger_kernel/ops/backends/_ascend/ascend-ub-manager-design.md +485 -0
  11. liger_kernel/ops/backends/_ascend/ops/__init__.py +43 -0
  12. liger_kernel/ops/backends/_ascend/ops/geglu.py +244 -0
  13. liger_kernel/ops/backends/_ascend/ops/qwen2vl_mrope.py +285 -0
  14. liger_kernel/ops/backends/_ascend/ops/rope.py +290 -0
  15. liger_kernel/ops/backends/_ascend/ops/swiglu.py +142 -0
  16. liger_kernel/ops/backends/_ascend/ub_manager.py +349 -0
  17. liger_kernel/ops/backends/registry.py +61 -0
  18. liger_kernel/ops/cross_entropy.py +75 -12
  19. liger_kernel/ops/dyt.py +5 -2
  20. liger_kernel/ops/fused_add_rms_norm.py +5 -1
  21. liger_kernel/ops/fused_linear_cross_entropy.py +45 -14
  22. liger_kernel/ops/geglu.py +5 -3
  23. liger_kernel/ops/group_norm.py +2 -1
  24. liger_kernel/ops/grpo_loss.py +3 -1
  25. liger_kernel/ops/layer_norm.py +86 -66
  26. liger_kernel/ops/poly_norm.py +390 -0
  27. liger_kernel/ops/rms_norm.py +131 -49
  28. liger_kernel/ops/tiled_mlp.py +136 -0
  29. liger_kernel/ops/utils.py +14 -0
  30. liger_kernel/transformers/__init__.py +30 -0
  31. liger_kernel/transformers/auto_model.py +21 -0
  32. liger_kernel/transformers/cross_entropy.py +9 -4
  33. liger_kernel/transformers/dyt.py +1 -1
  34. liger_kernel/transformers/experimental/embedding.py +1 -1
  35. liger_kernel/transformers/functional.py +48 -25
  36. liger_kernel/transformers/fused_add_rms_norm.py +1 -1
  37. liger_kernel/transformers/fused_linear_cross_entropy.py +9 -4
  38. liger_kernel/transformers/fused_linear_jsd.py +1 -1
  39. liger_kernel/transformers/fused_neighborhood_attention.py +1 -1
  40. liger_kernel/transformers/geglu.py +1 -1
  41. liger_kernel/transformers/group_norm.py +1 -1
  42. liger_kernel/transformers/grpo_loss.py +57 -2
  43. liger_kernel/transformers/jsd.py +1 -1
  44. liger_kernel/transformers/kl_div.py +1 -1
  45. liger_kernel/transformers/layer_norm.py +1 -1
  46. liger_kernel/transformers/llama4_rope.py +1 -1
  47. liger_kernel/transformers/model/falcon_h1.py +19 -5
  48. liger_kernel/transformers/model/gemma.py +17 -6
  49. liger_kernel/transformers/model/gemma2.py +14 -5
  50. liger_kernel/transformers/model/gemma3.py +26 -12
  51. liger_kernel/transformers/model/glm4.py +16 -4
  52. liger_kernel/transformers/model/glm4v.py +16 -4
  53. liger_kernel/transformers/model/glm4v_moe.py +23 -4
  54. liger_kernel/transformers/model/gpt_oss.py +211 -0
  55. liger_kernel/transformers/model/hunyuan_v1.py +134 -0
  56. liger_kernel/transformers/model/internvl.py +12 -5
  57. liger_kernel/transformers/model/llama.py +14 -5
  58. liger_kernel/transformers/model/llama4.py +16 -4
  59. liger_kernel/transformers/model/llava.py +12 -4
  60. liger_kernel/transformers/model/loss_utils.py +31 -3
  61. liger_kernel/transformers/model/mistral.py +15 -6
  62. liger_kernel/transformers/model/mixtral.py +16 -7
  63. liger_kernel/transformers/model/mllama.py +12 -4
  64. liger_kernel/transformers/model/olmo2.py +16 -4
  65. liger_kernel/transformers/model/olmo3.py +142 -0
  66. liger_kernel/transformers/model/output_classes.py +147 -0
  67. liger_kernel/transformers/model/paligemma.py +23 -5
  68. liger_kernel/transformers/model/phi3.py +14 -7
  69. liger_kernel/transformers/model/qwen2.py +16 -3
  70. liger_kernel/transformers/model/qwen2_5_vl.py +14 -6
  71. liger_kernel/transformers/model/qwen2_vl.py +16 -4
  72. liger_kernel/transformers/model/qwen3.py +20 -5
  73. liger_kernel/transformers/model/qwen3_moe.py +19 -5
  74. liger_kernel/transformers/model/qwen3_next.py +146 -0
  75. liger_kernel/transformers/model/qwen3_vl.py +150 -0
  76. liger_kernel/transformers/model/qwen3_vl_moe.py +126 -0
  77. liger_kernel/transformers/model/smollm3.py +15 -6
  78. liger_kernel/transformers/model/smolvlm.py +158 -0
  79. liger_kernel/transformers/monkey_patch.py +702 -48
  80. liger_kernel/transformers/multi_token_attention.py +1 -1
  81. liger_kernel/transformers/poly_norm.py +42 -0
  82. liger_kernel/transformers/qwen2vl_mrope.py +1 -1
  83. liger_kernel/transformers/rms_norm.py +15 -3
  84. liger_kernel/transformers/rope.py +45 -1
  85. liger_kernel/transformers/softmax.py +1 -1
  86. liger_kernel/transformers/sparsemax.py +1 -1
  87. liger_kernel/transformers/swiglu.py +18 -1
  88. liger_kernel/transformers/tiled_mlp.py +133 -0
  89. liger_kernel/transformers/tvd.py +1 -1
  90. liger_kernel/utils.py +52 -0
  91. {liger_kernel_nightly-0.6.2.dev20251011154427.dist-info → liger_kernel_nightly-0.6.4.dev20260107111351.dist-info}/METADATA +12 -3
  92. liger_kernel_nightly-0.6.4.dev20260107111351.dist-info/RECORD +130 -0
  93. liger_kernel_nightly-0.6.2.dev20251011154427.dist-info/RECORD +0 -107
  94. {liger_kernel_nightly-0.6.2.dev20251011154427.dist-info → liger_kernel_nightly-0.6.4.dev20260107111351.dist-info}/LICENSE +0 -0
  95. {liger_kernel_nightly-0.6.2.dev20251011154427.dist-info → liger_kernel_nightly-0.6.4.dev20260107111351.dist-info}/NOTICE +0 -0
  96. {liger_kernel_nightly-0.6.2.dev20251011154427.dist-info → liger_kernel_nightly-0.6.4.dev20260107111351.dist-info}/WHEEL +0 -0
  97. {liger_kernel_nightly-0.6.2.dev20251011154427.dist-info → liger_kernel_nightly-0.6.4.dev20260107111351.dist-info}/top_level.txt +0 -0
liger_kernel/transformers/multi_token_attention.py CHANGED
@@ -5,7 +5,7 @@ import torch.nn as nn
 
 from torch.nn.modules.utils import _pair
 
-from liger_kernel.ops.multi_token_attention import LigerMultiTokenAttentionFunction
+from liger_kernel.ops import LigerMultiTokenAttentionFunction
 
 
 class LigerMultiTokenAttention(nn.Module):
liger_kernel/transformers/poly_norm.py ADDED
@@ -0,0 +1,42 @@
+import torch
+import torch.nn as nn
+
+from liger_kernel.ops import LigerPolyNormFunction
+
+
+class LigerPolyNorm(nn.Module):
+    """
+    PolyNorm layer wrapper for Liger kernel.
+
+    PolyNorm formula:
+        y = w₀·norm(x³) + w₁·norm(x²) + w₂·norm(x) + b
+        where norm(u) = u / sqrt(mean(u²) + ε)
+
+    Reference:
+        https://github.com/BryceZhuo/PolyCom/
+
+    Args:
+        eps: epsilon for numerical stability (default: 1e-6)
+        in_place: whether to modify grad_output in place during backward to save memory (default: True).
+            Set to True to save memory if grad_output is not needed elsewhere.
+    """
+
+    def __init__(self, eps=1e-6, in_place=True):
+        super().__init__()
+        # Align with PolyCom reference: initialize weights to (1/3, 1/3, 1/3) and bias to 1.0
+        self.weight = nn.Parameter(torch.full((3,), 1.0 / 3.0))
+        self.bias = nn.Parameter(torch.tensor(1.0))
+        self.variance_epsilon = eps
+        self.in_place = in_place
+
+    def forward(self, hidden_states):
+        return LigerPolyNormFunction.apply(
+            hidden_states,
+            self.weight,
+            self.bias,
+            self.variance_epsilon,
+            self.in_place,
+        )
+
+    def extra_repr(self):
+        return f"weight_shape={tuple(self.weight.shape)}, eps={self.variance_epsilon}, in_place={self.in_place}"
liger_kernel/transformers/qwen2vl_mrope.py CHANGED
@@ -1,4 +1,4 @@
-from liger_kernel.ops.qwen2vl_mrope import LigerQwen2VLMRopeFunction
+from liger_kernel.ops import LigerQwen2VLMRopeFunction
 
 
 def liger_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
liger_kernel/transformers/rms_norm.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 
-from liger_kernel.ops.rms_norm import LigerRMSNormFunction
+from liger_kernel.ops import LigerRMSNormFunction
 
 
 class LigerRMSNorm(nn.Module):
@@ -14,13 +14,18 @@ class LigerRMSNorm(nn.Module):
         init_fn="ones",
         in_place=True,
         row_mode=None,
+        elementwise_affine=True,
     ):
         super().__init__()
         assert init_fn in [
             "ones",
             "zeros",
         ], f"init_fn must be either 'ones' or 'zeros', got {init_fn}"
-        self.weight = nn.Parameter(torch.ones(hidden_size) if init_fn == "ones" else torch.zeros(hidden_size))
+        self.elementwise_affine = elementwise_affine
+        if self.elementwise_affine:
+            self.weight = nn.Parameter(torch.ones(hidden_size) if init_fn == "ones" else torch.zeros(hidden_size))
+        else:
+            self.register_parameter("weight", None)
         self.variance_epsilon, self.offset, self.casting_mode, self.in_place, self.row_mode = (
             eps,
             offset,
@@ -41,7 +46,7 @@ class LigerRMSNorm(nn.Module):
         )
 
     def extra_repr(self):
-        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}, offset={self.offset}, in_place={self.in_place}, row_mode={self.row_mode}"
+        return f"weight_shape={tuple(self.weight.shape) if self.weight is not None else None}, eps={self.variance_epsilon}, offset={self.offset}, in_place={self.in_place}, row_mode={self.row_mode}"
 
 
 class LigerRMSNormForGemma(LigerRMSNorm):
@@ -77,3 +82,10 @@ class LigerRMSNormForGlm4(LigerRMSNorm):
         self, hidden_size, eps=1e-6, offset=0.0, casting_mode="llama", init_fn="ones", in_place=False, row_mode=None
     ):
         super().__init__(hidden_size, eps, offset, casting_mode, init_fn, in_place, row_mode)
+
+
+class LigerRMSNormForQwen3Next(LigerRMSNorm):
+    def __init__(
+        self, hidden_size, eps=1e-6, offset=1.0, casting_mode="gemma", init_fn="zeros", in_place=False, row_mode=None
+    ):
+        super().__init__(hidden_size, eps, offset, casting_mode, init_fn, in_place, row_mode)
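The new `elementwise_affine` flag mirrors the convention of the `torch.nn` normalization modules: when it is `False`, no learnable weight is registered. A small sketch of what the change above implies (sizes are illustrative):

```python
from liger_kernel.transformers.rms_norm import LigerRMSNorm

affine = LigerRMSNorm(hidden_size=4096)  # registers a learnable (4096,) weight
plain = LigerRMSNorm(hidden_size=4096, elementwise_affine=False)  # no weight parameter

assert affine.weight.shape == (4096,)
assert plain.weight is None  # extra_repr now prints weight_shape=None in this case
```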
liger_kernel/transformers/rope.py CHANGED
@@ -1,4 +1,8 @@
-from liger_kernel.ops.rope import LigerRopeFunction
+from typing import Tuple
+
+import torch
+
+from liger_kernel.ops import LigerRopeFunction
 
 
 def liger_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
@@ -18,3 +22,43 @@ def liger_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
     """
 
     return LigerRopeFunction.apply(q, k, cos, sin, position_ids, unsqueeze_dim)
+
+
+def liger_rotary_pos_emb_vision(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    cos: torch.Tensor,
+    sin: torch.Tensor,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    """
+    Modified version of liger_rotary_pos_emb for qwen3_vl's apply_rotary_pos_emb_vision function.
+    Manually transposes the input and output to match the expected shape for liger_rotary_pos_emb.
+    Reference: https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py#L116
+
+    Args:
+        q (torch.Tensor): The query tensor of shape (seq_length, num_heads, head_dim),
+            with stride (num_heads * head_dim, head_dim, 1).
+        k (torch.Tensor): The key tensor of shape (seq_length, num_heads, head_dim),
+            with stride (num_heads * head_dim, head_dim, 1). Same as q.
+        cos (torch.Tensor): The cosine tensor of shape (seq_length, head_dim).
+        sin (torch.Tensor): The sine tensor of shape (seq_length, head_dim).
+
+    Returns:
+        Tuple[torch.Tensor, torch.Tensor]: The query and key tensors with the same shape and stride as the inputs.
+    """
+    orig_q_dtype, orig_k_dtype = q.dtype, k.dtype
+
+    # transpose to (1, num_heads, seq_length, head_dim) and cast to float32 to match
+    # the liger_rotary_pos_emb input shape; also unsqueeze to add a batch dim
+    q32 = q.to(torch.float32).unsqueeze(0).transpose(1, 2)
+    k32 = k.to(torch.float32).unsqueeze(0).transpose(1, 2)
+    cos32 = cos.to(torch.float32)
+    sin32 = sin.to(torch.float32)
+
+    q_out, k_out = liger_rotary_pos_emb(q32, k32, cos32, sin32)
+
+    # transpose back to (seq_length, num_heads, head_dim) and cast back to the original
+    # dtype; also squeeze out the batch dim
+    q_out = q_out.transpose(1, 2).squeeze(0).to(orig_q_dtype)
+    k_out = k_out.transpose(1, 2).squeeze(0).to(orig_k_dtype)
+    return q_out, k_out
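A quick shape check of the new wrapper (a hedged sketch; the tensor sizes are arbitrary, and a CUDA device is assumed since the underlying RoPE kernel is a Triton kernel):

```python
import torch

from liger_kernel.transformers.rope import liger_rotary_pos_emb_vision

seq_len, num_heads, head_dim = 1024, 16, 64
q = torch.randn(seq_len, num_heads, head_dim, device="cuda", dtype=torch.bfloat16)
k = torch.randn_like(q)
cos = torch.randn(seq_len, head_dim, device="cuda")
sin = torch.randn(seq_len, head_dim, device="cuda")

q_rot, k_rot = liger_rotary_pos_emb_vision(q, k, cos, sin)
# The wrapper round-trips through (1, num_heads, seq_len, head_dim) in float32
# and returns tensors with the original shape and dtype.
assert q_rot.shape == q.shape and q_rot.dtype == q.dtype
```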
liger_kernel/transformers/softmax.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 
-from liger_kernel.ops.softmax import LigerSoftmaxFunction
+from liger_kernel.ops import LigerSoftmaxFunction
 
 
 class LigerSoftmax(nn.Module):
liger_kernel/transformers/sparsemax.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 
-from liger_kernel.ops.sparsemax import LigerSparsemaxFunction
+from liger_kernel.ops import LigerSparsemaxFunction
 
 
 class LigerSparsemax(nn.Module):
liger_kernel/transformers/swiglu.py CHANGED
@@ -1,6 +1,6 @@
 import torch.nn as nn
 
-from liger_kernel.ops.swiglu import LigerSiLUMulFunction
+from liger_kernel.ops import LigerSiLUMulFunction
 
 
 class LigerSwiGLUMLP(nn.Module):
@@ -77,3 +77,20 @@ class LigerQwen3MoeSwiGLUMLP(nn.Module):
 
     def forward(self, x):
         return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
+
+
+class LigerHunyuanV1SwiGLUMLP(nn.Module):
+    def __init__(self, config, layer_idx=None, is_shared_mlp=False):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+        self.layer_idx = layer_idx
+        if config.hidden_act not in ["silu", "swish"]:
+            raise ValueError(f"Activation function {config.hidden_act} not supported.")
+
+    def forward(self, x):
+        return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
liger_kernel/transformers/tiled_mlp.py ADDED
@@ -0,0 +1,133 @@
+from typing import Optional
+
+import torch.nn as nn
+
+from liger_kernel.ops import LigerGELUMulFunction
+from liger_kernel.ops import LigerSiLUMulFunction
+from liger_kernel.ops import apply_tiled_mlp
+
+
+class LigerTiledGEGLUMLP(nn.Module):
+    """
+    Memory-efficient GEGLU MLP using tiled computation.
+
+    This module combines GEGLU activation with tiled processing to handle
+    very long sequences efficiently. The forward pass is recomputed during
+    backward to save memory.
+
+    Args:
+        config: Model configuration with hidden_size and intermediate_size attributes
+        num_shards: Number of shards to split the sequence. If None, automatically
+            calculated as ceil(seqlen / hidden_size)
+    """
+
+    def __init__(self, config, num_shards: Optional[int] = None):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        self.num_shards = num_shards
+
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+
+        # Validate activation function
+        if hasattr(config, "hidden_act") and config.hidden_act not in [
+            "gelu",
+            "gelu_new",
+            "gelu_pytorch_tanh",
+        ]:
+            raise ValueError(f"LigerTiledGEGLUMLP requires GELU activation, got {config.hidden_act}")
+
+    def _mlp_forward(self, module, x):
+        """Internal MLP forward function for tiled computation."""
+        gate = module.gate_proj(x)
+        up = module.up_proj(x)
+        return module.down_proj(LigerGELUMulFunction.apply(gate, up))
+
+    def forward(self, x):
+        """
+        Forward pass with tiled computation.
+
+        Args:
+            x: Input tensor of shape [batch_size, seq_len, hidden_size]
+                or [seq_len, hidden_size]
+
+        Returns:
+            Output tensor of the same shape as input
+        """
+        compute_params = [
+            self.gate_proj.weight,
+            self.up_proj.weight,
+            self.down_proj.weight,
+        ]
+
+        return apply_tiled_mlp(
+            fn=self._mlp_forward,
+            mlp_module=self,
+            x=x,
+            num_shards=self.num_shards,
+            compute_params=compute_params,
+        )
+
+
+class LigerTiledSwiGLUMLP(nn.Module):
+    """
+    Memory-efficient SwiGLU MLP using tiled computation.
+
+    This module combines SwiGLU activation with tiled processing to handle
+    very long sequences efficiently. The forward pass is recomputed during
+    backward to save memory.
+
+    Args:
+        config: Model configuration with hidden_size and intermediate_size attributes
+        num_shards: Number of shards to split the sequence. If None, automatically
+            calculated as ceil(seqlen / hidden_size)
+    """
+
+    def __init__(self, config, num_shards: Optional[int] = None):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        self.num_shards = num_shards
+
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+
+        # Validate activation function
+        if hasattr(config, "hidden_act") and config.hidden_act not in ["silu", "swish"]:
+            raise ValueError(f"LigerTiledSwiGLUMLP requires SiLU/Swish activation, got {config.hidden_act}")
+
+    def _mlp_forward(self, module, x):
+        """Internal MLP forward function for tiled computation."""
+        gate = module.gate_proj(x)
+        up = module.up_proj(x)
+        return module.down_proj(LigerSiLUMulFunction.apply(gate, up))
+
+    def forward(self, x):
+        """
+        Forward pass with tiled computation.
+
+        Args:
+            x: Input tensor of shape [batch_size, seq_len, hidden_size]
+                or [seq_len, hidden_size]
+
+        Returns:
+            Output tensor of the same shape as input
+        """
+        compute_params = [
+            self.gate_proj.weight,
+            self.up_proj.weight,
+            self.down_proj.weight,
+        ]
+
+        return apply_tiled_mlp(
+            fn=self._mlp_forward,
+            mlp_module=self,
+            x=x,
+            num_shards=self.num_shards,
+            compute_params=compute_params,
+        )
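To make the sharding behavior concrete, here is a hedged usage sketch (the `SimpleNamespace` config and all sizes are illustrative; any config object exposing `hidden_size`, `intermediate_size`, and `hidden_act` works):

```python
from types import SimpleNamespace

import torch

from liger_kernel.transformers.tiled_mlp import LigerTiledSwiGLUMLP

config = SimpleNamespace(hidden_size=2048, intermediate_size=5632, hidden_act="silu")
mlp = LigerTiledSwiGLUMLP(config).to(device="cuda", dtype=torch.bfloat16)

# With num_shards=None, the sequence is split into ceil(seq_len / hidden_size)
# shards: a 16384-token input at hidden_size=2048 is processed in 8 tiles.
x = torch.randn(1, 16384, 2048, device="cuda", dtype=torch.bfloat16, requires_grad=True)
y = mlp(x)           # same shape as x
y.mean().backward()  # activations are recomputed shard by shard during backward
```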
liger_kernel/transformers/tvd.py CHANGED
@@ -1,6 +1,6 @@
 import torch.nn as nn
 
-from liger_kernel.ops.tvd import LigerTVDLossFunction
+from liger_kernel.ops import LigerTVDLossFunction
 
 
 class LigerTVDLoss(nn.Module):
liger_kernel/utils.py CHANGED
@@ -12,18 +12,70 @@ def is_peft_available():
     return PEFT_AVAILABLE
 
 
+def infer_comm_backend():
+    """
+    Get communication backend name based on the environment.
+    """
+    if torch.distributed.is_nccl_available():
+        # Works for Nvidia
+        # TODO: nccl may not work for AMD devices, which may require rccl.
+        return "nccl"
+    elif is_npu_available():
+        # Use Ascend NPU if available (torch.npu)
+        # Ascend is not a standard torch backend and requires an extension.
+        # Assume that it is installed if NPUs are being used in a
+        # multi-device environment.
+        return "ascend"
+    # XPU (Intel) if available
+    elif torch.distributed.distributed_c10d.is_xccl_available():
+        return "xccl"
+    elif torch.distributed.is_mpi_available():
+        # CPU backend, first option
+        return "mpi"
+    elif torch.distributed.is_gloo_available():
+        # CPU backend, backup option
+        return "gloo"
+    else:
+        raise RuntimeError("There is no distributed backend available.")
+
+
 def infer_device():
     """
     Get current device name based on available devices
     """
     if torch.cuda.is_available():  # Works for both Nvidia and AMD
         return "cuda"
+    # Use Ascend NPU if available (torch.npu)
+    elif is_npu_available():
+        return "npu"
+    # XPU (Intel) if available
     elif torch.xpu.is_available():
         return "xpu"
     else:
         return "cpu"
 
 
+def is_npu_available() -> bool:
+    """Detect Ascend NPU availability."""
+    try:
+        from transformers.utils import is_torch_npu_available
+
+        return is_torch_npu_available()
+    except Exception:
+        return False
+
+
+def get_npu_multi_processor_count() -> int:
+    """Return a heuristic multi-processor count for NPU."""
+    if is_npu_available():
+        NPU_MULTI_PROCESSOR_COUNT = 48
+        dev_props = torch.npu.get_device_properties()
+        # The vector_core_num attribute is supported in the torch.npu v7.2.0 release.
+        return dev_props.vector_core_num if hasattr(dev_props, "vector_core_num") else NPU_MULTI_PROCESSOR_COUNT
+    # Reasonable default to avoid division by zero
+    return 1
+
+
 def transformers_version_dispatch(
     required_version: str,
     before_fn,
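A hedged sketch of how the two new helpers compose when bootstrapping distributed training (assumes the usual launcher-provided environment variables; not part of the package):

```python
import torch.distributed as dist

from liger_kernel.utils import infer_comm_backend, infer_device

device = infer_device()         # "cuda", "npu", "xpu", or "cpu"
backend = infer_comm_backend()  # "nccl", "ascend", "xccl", "mpi", or "gloo"

# MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE are assumed to be set by torchrun
# or an equivalent launcher.
if not dist.is_initialized():
    dist.init_process_group(backend=backend)
print(f"running on {device} with {backend} collectives")
```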
{liger_kernel_nightly-0.6.2.dev20251011154427.dist-info → liger_kernel_nightly-0.6.4.dev20260107111351.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: liger_kernel_nightly
-Version: 0.6.2.dev20251011154427
+Version: 0.6.4.dev20260107111351
 Summary: Efficient Triton kernels for LLM Training
 License: BSD 2-CLAUSE LICENSE
 Copyright 2024 LinkedIn Corporation
@@ -79,8 +79,8 @@ Requires-Dist: torchvision>=0.20; extra == "dev"
       </a>
     </td>
     <td style="padding: 10px;">
-      <a href="https://discord.gg/gpumode">
-        <img src="https://dcbadge.limes.pink/api/server/gpumode?style=flat" alt="Join Our Discord">
+      <a href="https://discord.gg/X4MaxPgA">
+        <img src="https://dcbadge.limes.pink/api/server/https://discord.gg/X4MaxPgA?style=flat" alt="Join Our Discord">
       </a>
     </td>
   </tr>
@@ -95,6 +95,7 @@ Requires-Dist: torchvision>=0.20; extra == "dev"
 <details>
 <summary>Latest News 🔥</summary>
 
+- [2025/12/19] We announced a Liger Kernel Discord channel at https://discord.gg/X4MaxPgA; we will be hosting a Liger Kernel x Triton China Meetup in mid-January 2026
 - [2025/03/06] We release a joint blog post on TorchTune × Liger - [Peak Performance, Minimized Memory: Optimizing torchtune’s performance with torch.compile & Liger Kernel](https://pytorch.org/blog/peak-performance-minimized-memory/)
 - [2024/12/11] We release [v0.5.0](https://github.com/linkedin/Liger-Kernel/releases/tag/v0.5.0): 80% more memory efficient post training losses (DPO, ORPO, CPO, etc)!
 - [2024/12/5] We release LinkedIn Engineering Blog - [Liger-Kernel: Empowering an open source ecosystem of Triton Kernels for Efficient LLM Training](https://www.linkedin.com/blog/engineering/open-source/liger-kernel-open-source-ecosystem-for-efficient-llm-training)
@@ -113,6 +114,8 @@ We've also added optimized Post-Training kernels that deliver **up to 80% memory
 
 You can view the documentation site for additional installation, usage examples, and API references: https://linkedin.github.io/Liger-Kernel/
 
+You can view the Liger Kernel Technical Report: https://openreview.net/forum?id=36SjAIT42G
+
 ## Supercharge Your Model with Liger Kernel
 
 ![Banner](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/banner.GIF)
@@ -310,8 +313,12 @@ loss.backward()
 | Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Granite 3.0 & 3.1 | `liger_kernel.transformers.apply_liger_kernel_to_granite` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss |
 | OLMo2 | `liger_kernel.transformers.apply_liger_kernel_to_olmo2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| Olmo3 | `liger_kernel.transformers.apply_liger_kernel_to_olmo3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | GLM-4 | `liger_kernel.transformers.apply_liger_kernel_to_glm4` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| GPT-OSS | `liger_kernel.transformers.apply_liger_kernel_to_gpt_oss` | RoPE, RMSNorm, CrossEntropyLoss, FusedLinearCrossEntropy |
 | InternVL3 | `liger_kernel.transformers.apply_liger_kernel_to_internvl` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| HunyuanV1 | `liger_kernel.transformers.apply_liger_kernel_to_hunyuan_v1_dense` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| HunyuanV1 MoE | `liger_kernel.transformers.apply_liger_kernel_to_hunyuan_v1_moe` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 
 
 ## Low-level APIs
@@ -438,3 +445,5 @@ url={https://openreview.net/forum?id=36SjAIT42G}
 ↑ Back to Top ↑
 </a>
 </p>
+
+
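The `apply_liger_kernel_to_*` functions added to the table above follow the library's patch-before-load pattern; a hedged sketch for the new Olmo3 entry (the checkpoint name is a placeholder, not a real model ID):

```python
import transformers

from liger_kernel.transformers import apply_liger_kernel_to_olmo3

# Patch the model classes before instantiation so RoPE, RMSNorm, SwiGLU, and the
# fused linear cross entropy replace the stock Hugging Face implementations.
apply_liger_kernel_to_olmo3()

model = transformers.AutoModelForCausalLM.from_pretrained("your/olmo3-checkpoint")
```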
liger_kernel_nightly-0.6.4.dev20260107111351.dist-info/RECORD ADDED
@@ -0,0 +1,130 @@
+liger_kernel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+liger_kernel/env_report.py,sha256=uhdEC8OydxoZlb7B6YYcAaBF3crGFdIck-4cxaW4NJY,1728
+liger_kernel/utils.py,sha256=1SXJmyVtn-aoyUkA-Acsur_hdHqtxoGqN4v4Vk820bE,3845
+liger_kernel/chunked_loss/README.md,sha256=0FmkFC3hKBqyoDT5uTlIYmrvRkF-EOCR1y-EBU1LpWU,2248
+liger_kernel/chunked_loss/__init__.py,sha256=J5_jNnzZ4gZmA38W5f_4oab7xMoNk1Xy-yh3X_Xlf-s,714
+liger_kernel/chunked_loss/cosine_similarity_loss.py,sha256=h8lPAkw8oYPUUBZ5YEG2tMMmQ7XkWnOo7r29A5vx-Eg,4759
+liger_kernel/chunked_loss/cpo_loss.py,sha256=Gzz1eU4kgcbdubFVRy55e8A1Cr-r45UgNicXwZIjmBU,5454
+liger_kernel/chunked_loss/dpo_loss.py,sha256=I83khNs3QQjuhr8U3NIOAACkbse6DNiBV-TulPZ0lXw,9006
+liger_kernel/chunked_loss/functional.py,sha256=-XPDbLml9dHmvoSU2VNTUrBDFehuzvuAGPikVetBMtI,1132
+liger_kernel/chunked_loss/fused_linear_distillation.py,sha256=Bjdxnjfg-GwYvMJ102juP06gFMlbkxYPpvv7cV_mZcg,12912
+liger_kernel/chunked_loss/fused_linear_ppo.py,sha256=baU19PwqO1FTVxwlB-eyJv6gOLtL7baXGzSncYQ8Ktc,14296
+liger_kernel/chunked_loss/fused_linear_preference.py,sha256=FIH85uUXAOgYx5Ax8MjFhJHVu-2pKtY7wSegd0zSyyY,18336
+liger_kernel/chunked_loss/fused_linear_unpaired_preference.py,sha256=RiuK3UtRwH9T6jZ36sA8Urj-TVuOLOO2syLg_JOQapY,13437
+liger_kernel/chunked_loss/grpo_loss.py,sha256=bmuZaNgqNbJ5pJGFDXWE-B4BGYF7xWVSN15UyCfuq_s,13079
+liger_kernel/chunked_loss/jsd_loss.py,sha256=NJKmJ76_-kI875ZkC4hQfC4nAvPNCj4ZsNyDNsfD74k,8761
+liger_kernel/chunked_loss/kto_loss.py,sha256=llVCe6DkcpCo57seGWoMikaQVFApx764jsmSbQyqwQY,7529
+liger_kernel/chunked_loss/orpo_loss.py,sha256=nu9UYG16dcMw93lvHi4_hYs3Q0FK1KnlmMRj7OpYU8s,4872
+liger_kernel/chunked_loss/simpo_loss.py,sha256=fy2w8KbhMrBv7b1jdIeH3bBFxY52bPQPZb3KwBvmurM,5385
+liger_kernel/ops/__init__.py,sha256=F3m9qlXbgttykKEBsrMFf1WyK_0H8CKqLuDnFRR-cvc,7237
+liger_kernel/ops/cross_entropy.py,sha256=DnXFRZ9TGN1SnEo8xGBFFPLNQaen8aLVNPJ1em-LbK4,22910
+liger_kernel/ops/dyt.py,sha256=4XmkCCZaPPM8Tl4QHo6vSF2m68jrwsnjucrbyOJvZpM,5628
+liger_kernel/ops/fused_add_rms_norm.py,sha256=lvwrLsKvoAQqS9KatgBkAyy0Xdecado-g0rvXYXaBak,14237
+liger_kernel/ops/fused_linear_cross_entropy.py,sha256=1gx2qljre9PVc861iknFnNCGC-P35D2w1cc_yMDO9ow,16239
+liger_kernel/ops/fused_linear_jsd.py,sha256=CSoprxb-YcJy-YUKiTcYkxN8sb9h2kdk_iHuncvSV5c,9683
+liger_kernel/ops/fused_neighborhood_attention.py,sha256=vPi5xbnh6wxyZehaqo6Tuilqo2fN5SGDiONjnNmIKqs,35556
+liger_kernel/ops/geglu.py,sha256=-ruMACDsFH1YsAak6BGvZ0ktLGIrBE6yGF0dAyR82UU,4307
+liger_kernel/ops/group_norm.py,sha256=zoy-TcNkYtKGmGhTFJmnyiG_4Es4ZphpqP8jtUSI6-I,10912
+liger_kernel/ops/grpo_loss.py,sha256=2SyOujtF9I3xiNo4wFf4s6MeiDotE_qeYfRWgj_bOBE,9573
+liger_kernel/ops/jsd.py,sha256=onHp5T3MbvJaVz5Vup7Ww6EQp_HTaZeayTjJk6FgQMY,7042
+liger_kernel/ops/kl_div.py,sha256=ZjGdDLKWksHT9dZ0xF_TDgAkj5cuMTwwT5tr9E-_24o,8734
+liger_kernel/ops/layer_norm.py,sha256=-4UEyko9eKgBi5LNmfdEU2hTpJOWVnEy5iYjJkMvHmk,10598
+liger_kernel/ops/llama4_rope.py,sha256=-aqdZzllklTN8b9--e-TsWY_ntGCN8-tyseT4x0bd8s,8223
+liger_kernel/ops/multi_token_attention.py,sha256=Oz_RXDp-OSS_R_HuGmaETHdAJ7Toda_70OfE7TXMUlY,7645
+liger_kernel/ops/poly_norm.py,sha256=5IdJEZnbbhblkL_X8UhSD4A2CooQbOAZJw8nAekWNs4,11372
+liger_kernel/ops/qwen2vl_mrope.py,sha256=3GExhYpLgB4VUtyZyjRk8XjEur3W4EWF6HQ67ML5vBU,8481
+liger_kernel/ops/rms_norm.py,sha256=r97gpPmhbKz9qrBjxUEX0XP04aYu4psJeLe3KnhPZyo,21852
+liger_kernel/ops/rope.py,sha256=v-7JHRrv-5ImoROkpKfl30WwWI4qTa2tAl7zQeB4ml4,8956
+liger_kernel/ops/softmax.py,sha256=tgORx6MK1IDDtZKqGarj0IPIVjqAIEUXXYPiinhRdtI,5864
+liger_kernel/ops/sparsemax.py,sha256=AeWe1xgkHJFEKWTj2vu_0hj7LztGvjqXAps-QTpCY0U,5087
+liger_kernel/ops/swiglu.py,sha256=D7nd4u_LInwsIRNCDdY77lqnTz8-W5dJrpEAt8zEO_A,3033
+liger_kernel/ops/tiled_mlp.py,sha256=eyMFsFFgHch8a_6R6IYRG24_jqKg5GF_BQUoQuAG8SY,4529
+liger_kernel/ops/tvd.py,sha256=FHJtLQI95ijqgg9UtaHpMAjSCiPxB6CduPwPMcGxelc,6405
+liger_kernel/ops/utils.py,sha256=Xu6MJ2-lbp4hSmI0JGImKguKU0KqWnFQDgQwOxSieyc,4360
+liger_kernel/ops/backends/README.md,sha256=ZP59UUqD1WW8LwM5Y-cTpSM-Dtgdp8Wku2mE9kqAc2E,4185
+liger_kernel/ops/backends/__init__.py,sha256=-mgef3cHfDFeL5NbXbq1TI7ngCahE9qqL3aMaHnXvis,629
+liger_kernel/ops/backends/registry.py,sha256=yJa_Sh2FZ__iPCIU8h2nOQbnsFQh1I-_czROLtb1uQM,1637
+liger_kernel/ops/backends/_ascend/__init__.py,sha256=6n0keOX9H-kLadBdVZlx-Ce0ZLVJvLiEfR-9-uxmYUk,221
+liger_kernel/ops/backends/_ascend/ascend-ub-manager-design.md,sha256=FVXHSO1KY4ZFxCAE5r4hOYB2Q8ANyrJZ7WnFJ_GeQOA,19605
+liger_kernel/ops/backends/_ascend/ub_manager.py,sha256=3h7sncZk00veBJS37a01YPt1SLeAxJj5N3lPdv1wXAk,13174
+liger_kernel/ops/backends/_ascend/ops/__init__.py,sha256=R1iS9R0EtmGbrN0cSkIiRtZouVl7ndiPVZJIoEALb7s,1748
+liger_kernel/ops/backends/_ascend/ops/geglu.py,sha256=hs1Cdhw0pbgZFiK1srLuo8DCe8jtnmhjm5SS2vw8-0M,8421
+liger_kernel/ops/backends/_ascend/ops/qwen2vl_mrope.py,sha256=pUYcstJ4FuzDTkuhmQaO3U9gcVQoNCpzuwwUdtES5hM,11015
+liger_kernel/ops/backends/_ascend/ops/rope.py,sha256=nOwtm6_eSnzDjl2S-jvGpwHrumAOgWfr5pNg6SL3R2k,10842
+liger_kernel/ops/backends/_ascend/ops/swiglu.py,sha256=yrbEgIgeCZyayMYHCRNq7LntZE9cEemht39_TFPro0k,4682
+liger_kernel/ops/experimental/embedding.py,sha256=tolj3tItkzpSb30zWqDN2_yX4ectflaQ8HMyKyFIQc8,4172
+liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-OidjtbsW80oZ6IM,13314
+liger_kernel/transformers/__init__.py,sha256=4sqcDbOZ_JtS9Ag-7oyuhq5jN298GyzjJFu9J-DyyZQ,10872
+liger_kernel/transformers/auto_model.py,sha256=RnJhK8xHamRnnswgRLG_muJE1i6T6LszjK8lC6vonhE,2410
+liger_kernel/transformers/cross_entropy.py,sha256=08H8RxSxGX_52UzrHNnSZ_wWH-uvU8KrRiDmVrkOw14,1996
+liger_kernel/transformers/dyt.py,sha256=Rng-MZQSprnGGWFtpmYKt7MIX26vFUYbq5ruM4MjH-U,719
+liger_kernel/transformers/fsdp.py,sha256=CUiyjTmjkjY7pLXQv8ly9rnzgXw6529csd9pvtJNMYc,3096
+liger_kernel/transformers/functional.py,sha256=f9sOWEfh5HZwOH5cVlcB_ts0MB_-fFFPki8PVZ5w__M,8352
+liger_kernel/transformers/fused_add_rms_norm.py,sha256=k98sfcZhsgtdVxChciHmv0WUizzn6f-Rn72JtGgmafI,1180
+liger_kernel/transformers/fused_linear_cross_entropy.py,sha256=WnGuR_rjIWO0XHUyVakz-qsIRm028OKzi1vayvmPfbg,2320
+liger_kernel/transformers/fused_linear_jsd.py,sha256=BW22DX3J6J8uZdoaU9JFUU5HnTrNYL63H9IQZzHkGu0,3982
+liger_kernel/transformers/fused_neighborhood_attention.py,sha256=21O9DSRXgMQst9Lc3b62CsOLkYn-hjuskj9Zi3mvG7Y,7928
+liger_kernel/transformers/geglu.py,sha256=esltAhNJZjWydvh07C6EaTdjA2aQzFPMNK92yR15SEI,1101
+liger_kernel/transformers/group_norm.py,sha256=k7LDIG8H5CA5kiNj2uOi8D_Z6FlZtQDLyzJQxK2E-gA,2162
+liger_kernel/transformers/grpo_loss.py,sha256=wNVz1o3q9XH17tDqaCZFEVXJhH9mQX44pWhQEwiRo_Q,6088
+liger_kernel/transformers/jsd.py,sha256=_KlOX8YcdONU0tq0bIRDQ5VDBwtywm3Ro-FmlmI01qk,2975
+liger_kernel/transformers/kl_div.py,sha256=94VR4uuj-2dZCTEnwFksvDi-LporrpB5HgmYtQCZnw0,402
+liger_kernel/transformers/layer_norm.py,sha256=l4nsT_Zj4CdVZOM7F0I0Ox-lmLHyIJzqQvVaF0o0HbI,895
+liger_kernel/transformers/llama4_rope.py,sha256=A_nxcS_KiUCyNeL2FAZX7yUhDsX7krrI9BG49OaN_nM,3627
+liger_kernel/transformers/monkey_patch.py,sha256=ESFIi_7hQMcnUtRLjAMJ9kbzSbwToDhpOfFa6aQ-SrY,135534
+liger_kernel/transformers/multi_token_attention.py,sha256=LtEjG7qy1-JK-HIPaz8zZ4P08aSZTnj5D635Pa04Onc,1730
+liger_kernel/transformers/poly_norm.py,sha256=T3VdLQHLcCY7KzNzrc6IJRs8SzO8Yc7a0BS_2p6d7Wo,1367
+liger_kernel/transformers/qwen2vl_mrope.py,sha256=0hOBR3j2Yd6xbT4z9BNRKEy1D0eyOUsIW6EmI_3PPNI,1033
+liger_kernel/transformers/rms_norm.py,sha256=dD_69_GA3GUdtvdYVxTLKGeg8QZinJpS3qfeV7WvOuA,3237
+liger_kernel/transformers/rope.py,sha256=-W9aYLa2hMOmmG5yeHcvPsOI5UTc95ylYxUddxkwmkA,2867
+liger_kernel/transformers/softmax.py,sha256=VI5QGHYpXSiXckgovEnDGcXwitimsxKB0GX-AT4dAC4,256
+liger_kernel/transformers/sparsemax.py,sha256=Os49bSpPX4pWymsasv_3j20m8GFaI54e03XFPkHiPE0,393
+liger_kernel/transformers/swiglu.py,sha256=LpgikAs9hibAL7G6itygBbOlW9tZe5s4D2IGAKGpbPw,4284
+liger_kernel/transformers/tiled_mlp.py,sha256=gPsz7b0kxpk3mre7o1uGBt-XdNvMUN7IIqnUYIur-T0,4628
+liger_kernel/transformers/trainer_integration.py,sha256=W3ON51O5GkyzNJsItz0y5rKx-uy2f2cFfveZpqbUdhw,123
+liger_kernel/transformers/tvd.py,sha256=GYjhtXgS3RTPveOTN2gyK4uBnjs6ii2vkSZRX21QpqA,446
+liger_kernel/transformers/experimental/__init__.py,sha256=oQqk-f32JYgWEP9DJCj6ty6bbJSGrdXsFDQFwGeX6vI,127
+liger_kernel/transformers/experimental/embedding.py,sha256=bjy9hHj--ivy6xEWdiE6qLy9uLyeS4PsBEgl_MdDrng,858
+liger_kernel/transformers/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+liger_kernel/transformers/model/falcon_h1.py,sha256=heUZ4wUt2ATmtBtmv8Rcro3pQl6fV9T0pburjTTW7os,5004
+liger_kernel/transformers/model/gemma.py,sha256=pAri4PYpknsFfkvyo8Ez2NNlqrUDW-KkExUXTGZAcH4,10621
+liger_kernel/transformers/model/gemma2.py,sha256=qa9Ok42vFojVGNmASTH3Ek566Vu507kjd--ZpZDKX9M,12024
+liger_kernel/transformers/model/gemma3.py,sha256=ZUrFCc-pfF8jYHV0HsptBr98hx6p2q9ea0kSzVAoFPo,14966
+liger_kernel/transformers/model/glm4.py,sha256=bSp22iPIjsli4-c_usUOsyh1Bs2gIK8X6ynS0azseUs,5900
+liger_kernel/transformers/model/glm4v.py,sha256=dd-BQpccDCp1SbIxcJ5rG8xcwYQK3KOv1Tgm9TGnZc4,6594
+liger_kernel/transformers/model/glm4v_moe.py,sha256=zKhMdOOrRhlrvCSFaeVYfddL1ubpY8edEO91TN81n98,7135
+liger_kernel/transformers/model/gpt_oss.py,sha256=8jEAQQNEXgVA-yuvEjKkBQvCvZy0E9ns-O9BPlajXXU,11197
+liger_kernel/transformers/model/hunyuan_v1.py,sha256=MJvP9xkUFePIV0HLETJM4YPbVCEPkAE1ZI5Jxyiebh0,5731
+liger_kernel/transformers/model/internvl.py,sha256=OOutracs9qrPHSU7FVYar08yinvGrHQVPvo39JEws6w,6473
+liger_kernel/transformers/model/llama.py,sha256=kqZeONzwTBzudoChlKMzq1w23BtYGbxWZC1l1V__JTw,13410
+liger_kernel/transformers/model/llama4.py,sha256=PfkynGVI0xxMs3EtyYpCgaALI6stu25OIrTIymE-pvg,4853
+liger_kernel/transformers/model/llava.py,sha256=yoADM_BuIEummtTDiwWqjfUjXUMZD78VJzS0TRj5GJ4,15687
+liger_kernel/transformers/model/loss_utils.py,sha256=mAV6NsE1xR2smQMlr_n9afh4ek3BhIfieZdTn1Z-9Fw,2836
+liger_kernel/transformers/model/mistral.py,sha256=OcwOzVDMwwDbVccVPv-AaocznzWwzLT3aRaKK5SMaAg,6030
+liger_kernel/transformers/model/mixtral.py,sha256=YcBDoTEJDgLFJ_RTo180DYGxR8D5Ad9-idumif7kCPE,12130
+liger_kernel/transformers/model/mllama.py,sha256=vAHwCm63sn4kpAY0rDGf_N0HR7KRTBVpBYDVTPOaZTg,12079
+liger_kernel/transformers/model/olmo2.py,sha256=-h2bUOeuPfY1MdShdRvq5_wFDHKP4PEimgIl0fL-BT4,5902
+liger_kernel/transformers/model/olmo3.py,sha256=k2zYOlS8U_b5MwjdToB3tDRQ0bH_mWapVQqJcH8-qAo,6007
+liger_kernel/transformers/model/output_classes.py,sha256=0BGXVR4dYQpSHLkSqpRoXuHMryrceGSlTYRu6pvd8ZY,4542
+liger_kernel/transformers/model/paligemma.py,sha256=UAYoKkIMvvix7GG3cSdWaDxVjMp26YsvthJuE7wFf6Y,20848
+liger_kernel/transformers/model/phi3.py,sha256=PT7Kw6yySg-7TsssWfi82eVMN3SWujCqzCqHigAdfeQ,4574
+liger_kernel/transformers/model/qwen2.py,sha256=ojqdJpD3A9A5uCS0N_rSq8gyNYWSsHfuvx3Z3ObC7ss,10686
+liger_kernel/transformers/model/qwen2_5_vl.py,sha256=FbIZDcg9cOr4PtBLNN8yVubN-gu2clndjSIzfi8NMos,6894
+liger_kernel/transformers/model/qwen2_vl.py,sha256=967Ex4Scm0ehhiVxOtjwfj396nD9xkAwFwHcoURH6-o,6578
+liger_kernel/transformers/model/qwen3.py,sha256=1fvioVmq5CRZSIuTd7uuLet-fti9ee3r8eLibvfNTcQ,5769
+liger_kernel/transformers/model/qwen3_moe.py,sha256=yljJO4kyeM5U2Q4pXH3Mmq71ZFEC_Z73qgBx1-an-o8,6457
+liger_kernel/transformers/model/qwen3_next.py,sha256=TayfD91GVLA1-fJwtVl6vMZgkUTYLQYURMRGBdCtnFc,6331
+liger_kernel/transformers/model/qwen3_vl.py,sha256=sUIdJ-32IlFm_4pHv6PpLgVafqBS0QeJm_91tY67NdY,6646
+liger_kernel/transformers/model/qwen3_vl_moe.py,sha256=CJEFcwBqItSEw9NA0mhEozlDTgIuJQ6VTjgkh5iLZ78,4856
+liger_kernel/transformers/model/smollm3.py,sha256=1ewDY-99UAFJEfoeqfZxDcxjkqKYUSr5b7X-E_2BLLs,8126
+liger_kernel/transformers/model/smolvlm.py,sha256=yFpPKawLVo3zXzLjM7Y_T8FyRrPxVyp-YPFMM8m3k0c,6734
+liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7HHWHwku25A-GYL0WU,193
+liger_kernel/transformers/trainer/orpo_trainer.py,sha256=tX0h63aOFe3rNqTmk6JpMf75UPo981yzEa6TghnjS0Q,5370
+liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
+liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
+liger_kernel_nightly-0.6.4.dev20260107111351.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel_nightly-0.6.4.dev20260107111351.dist-info/METADATA,sha256=Mzy4eM7hocfOx4KYOI_qKR056hH-RyAOcd99Ju-qY5k,25660
+liger_kernel_nightly-0.6.4.dev20260107111351.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel_nightly-0.6.4.dev20260107111351.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+liger_kernel_nightly-0.6.4.dev20260107111351.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel_nightly-0.6.4.dev20260107111351.dist-info/RECORD,,