liger-kernel-nightly 0.5.9.dev20250517045825__py3-none-any.whl → 0.5.9.dev20250519011716__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- liger_kernel/ops/dyt.py +113 -179
- liger_kernel/transformers/dyt.py +5 -3
- {liger_kernel_nightly-0.5.9.dev20250517045825.dist-info → liger_kernel_nightly-0.5.9.dev20250519011716.dist-info}/METADATA +1 -1
- {liger_kernel_nightly-0.5.9.dev20250517045825.dist-info → liger_kernel_nightly-0.5.9.dev20250519011716.dist-info}/RECORD +8 -8
- {liger_kernel_nightly-0.5.9.dev20250517045825.dist-info → liger_kernel_nightly-0.5.9.dev20250519011716.dist-info}/LICENSE +0 -0
- {liger_kernel_nightly-0.5.9.dev20250517045825.dist-info → liger_kernel_nightly-0.5.9.dev20250519011716.dist-info}/NOTICE +0 -0
- {liger_kernel_nightly-0.5.9.dev20250517045825.dist-info → liger_kernel_nightly-0.5.9.dev20250519011716.dist-info}/WHEEL +0 -0
- {liger_kernel_nightly-0.5.9.dev20250517045825.dist-info → liger_kernel_nightly-0.5.9.dev20250519011716.dist-info}/top_level.txt +0 -0
liger_kernel/ops/dyt.py
CHANGED
@@ -4,7 +4,8 @@ import torch
 import triton
 import triton.language as tl
 
-from liger_kernel.ops.utils import calculate_settings
+from triton.language.extra.libdevice import tanh
+
 from liger_kernel.ops.utils import compare_version
 from liger_kernel.ops.utils import ensure_contiguous
 from liger_kernel.ops.utils import infer_device
@@ -20,187 +21,126 @@ else:
     from triton.language.math import tanh
 
 
+# @triton.autotune([triton.Config({"BLOCK_N":bn}, num_stages=ns, num_warps=nw)
+#                   for bn in [1024, 2048, 4096]
+#                   for ns in [1,2,4]
+#                   for nw in [4, 8, 16, 32]
+#                   ],
+#                  key=['N'])
 @triton.jit
-def _dyt_fwd_kernel(
-    x_ptr,
-    x_row_stride,
-    y_ptr,
-    y_row_stride,
-    alpha_ptr,
-    gamma_ptr,
-    beta_ptr,
-    n_cols,
-    BLOCK_SIZE: tl.constexpr,
-):
-    """
-    Reference:
-    https://arxiv.org/abs/2503.10622
-
-    Shapes:
-        - x: (BT, C)
-        - alpha: (1)
-        - gamma: (C)
-        - beta: (C)
-    """
-    row_idx = tl.program_id(0)
-    offsets = tl.arange(0, BLOCK_SIZE)
-    mask = offsets < n_cols
-
-    x_ptr += row_idx * x_row_stride
-    y_ptr += row_idx * y_row_stride
-
-    alpha = tl.load(alpha_ptr)
-    gamma = tl.load(gamma_ptr + offsets, mask=mask)
-    beta = tl.load(beta_ptr + offsets, mask=mask)
-    x = tl.load(x_ptr + offsets, mask=mask)
-    y = gamma * tanh((alpha * x).cast(tl.float32)) + beta
-    tl.store(y_ptr + offsets, y, mask=mask)
+def _dyt_fwd_kernel(X, Y, Alpha, Gamma, Beta, HAVE_BETA: tl.constexpr, N: tl.constexpr, BLOCK_N: tl.constexpr = 1024):
+    col = tl.cast(tl.program_id(0), tl.int64) * BLOCK_N + tl.arange(0, BLOCK_N)
+    mask = col < N
+    row_id = tl.cast(tl.program_id(1), tl.int64)
+
+    X += row_id * N
+    Y += row_id * N
+    alpha = tl.load(Alpha).to(tl.float32)
+
+    gamma = tl.load(Gamma + col, mask=mask, other=0.0).to(tl.float32)
 
+    x = tl.load(X + col, mask=mask, other=0.0).to(tl.float32)
 
+    tanh_x = tanh(alpha * x)
+    y = tanh_x * gamma
+    if HAVE_BETA:
+        beta = tl.load(Beta + col, mask=mask, other=0.0).to(tl.float32)
+        y += beta
+    tl.store(Y + col, y, mask=mask)
+
+
+# @triton.autotune([triton.Config({"BLOCK_N":bn}, num_stages=ns, num_warps=nw)
+#                   for bn in [1024, 2048, 4096]
+#                   for ns in [1,2,4]
+#                   for nw in [4, 8, 16]
+#                   ],
+#                  key=['N'])
 @triton.jit
 def _dyt_bwd_kernel(
-    x_ptr,
-    x_row_stride,
-    dy_ptr,
-    dy_row_stride,
-    dx_ptr,
-    dx_row_stride,
-    alpha_ptr,
-    dalpha_ptr,
-    gamma_ptr,
-    dgamma_ptr,
-    dgamma_row_stride,
-    n_cols,
-    n_rows,
-    ROWS_PER_PROGRAM: tl.constexpr,
-    BLOCK_SIZE: tl.constexpr,
+    DY, DX, DA, DG, DB, X, Alpha, Gamma, HAVE_BETA: tl.constexpr, M, N: tl.constexpr, BLOCK_N: tl.constexpr = 1024
 ):
-    (… 27 lines of docstring and setup truncated in this diff view …)
-    dalpha = 0.0
-    dgamma = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
-
-    x_ptr += row_start * x_row_stride
-    dx_ptr += row_start * dx_row_stride
-    dy_ptr += row_start * dy_row_stride
-    alpha = tl.load(alpha_ptr)
-    gamma = tl.load(gamma_ptr + offsets, mask=mask, other=0.0)
-
-    for _ in tl.range(row_start, row_end):
-        dy = tl.load(dy_ptr + offsets, mask=mask, other=0.0)
-        x = tl.load(x_ptr + offsets, mask=mask, other=0.0)
-        tanh_ax = tanh((alpha * x).cast(tl.float32))
-        sech2_ax = 1 - tanh_ax * tanh_ax
-
-        dx = dy * gamma * sech2_ax * alpha
-        dalpha += tl.sum(dy * gamma * sech2_ax * x)
-        dgamma += dy * tanh_ax
-        tl.store(dx_ptr + offsets, dx, mask=mask)
-
-        dy_ptr += dy_row_stride
-        x_ptr += x_row_stride
-        dx_ptr += dx_row_stride
-
-    tl.store(dgamma_ptr + pid * dgamma_row_stride + offsets, dgamma, mask=mask)
-    tl.store(dalpha_ptr + pid, dalpha)
-
-    pass
+    col = tl.cast(tl.program_id(0), tl.int64) * BLOCK_N + tl.arange(0, BLOCK_N)
+    mask = col < N
+    start_row_id = tl.cast(tl.program_id(1), tl.int64)
+
+    alpha = tl.load(Alpha).to(tl.float32)
+    da = 0.0
+    gamma = tl.load(Gamma + col, mask=mask, other=0.0).to(tl.float32)
+    dg = tl.zeros((BLOCK_N,), dtype=tl.float32)
+    if HAVE_BETA:
+        db = tl.zeros((BLOCK_N,), dtype=tl.float32)
+    for row_id in range(start_row_id, M, tl.num_programs(1)):
+        x = tl.load(X + row_id * N + col, mask=mask, other=0.0).to(tl.float32)
+        dy = tl.load(DY + row_id * N + col, mask=mask, other=0.0).to(tl.float32)
+        tanh_x = tanh(alpha * x)
+        if HAVE_BETA:
+            db += dy
+        dg += dy * tanh_x
+        tmp = (1 - tanh_x * tanh_x) * dy * gamma
+        da += tl.sum(x * tmp, 0)
+        dx = alpha * tmp
+        tl.store(DX + row_id * N + col, dx, mask=mask)
+
+    tl.store(DG + start_row_id * N + col, dg, mask=mask)
+    if HAVE_BETA:
+        tl.store(DB + start_row_id * N + col, db, mask=mask)
+    tl.store(DA + start_row_id * tl.cdiv(N, 512) + tl.program_id(0), da)
 
 
 def liger_dyt_fwd(x, alpha, gamma, beta):
-    (… 4 lines truncated in this diff view …)
+    assert x.is_contiguous()
+    HAVE_BETA = True if beta is not None else False
+    input_shape = x.shape
+    x = x.view(-1, input_shape[-1])
+    M, N = x.shape
+
     y = torch.empty_like(x)
-    (… 12 lines truncated in this diff view …)
+
+    if N >= 4096:
+        kwargs = {"BLOCK_N": min(triton.next_power_of_2(N), 2048), "num_warps": 4, "num_stages": 1}
+    else:
+        kwargs = {"BLOCK_N": min(triton.next_power_of_2(N), 1024), "num_warps": 4, "num_stages": 1}
+
+    grid = lambda meta: (triton.cdiv(N, meta["BLOCK_N"]), M)
+    _dyt_fwd_kernel[(grid)](
+        x,
+        y,
+        alpha,
+        gamma,
+        beta,
+        HAVE_BETA,
+        N,
+        **kwargs,
     )
-    return y.view(
-
-
-def liger_dyt_bwd(dy, x, alpha, gamma):
-    (… 6 lines truncated in this diff view …)
-    BLOCK_SIZE, num_warps = calculate_settings(n_cols)
-    sm_count = 1
+    return y.view(input_shape)
+
+
+def liger_dyt_bwd(dy, x, alpha, gamma, beta):
+    assert dy.is_contiguous()
+    input_shape = x.shape
+    x = x.view(-1, input_shape[-1])
+    M, N = x.shape
+    HAVE_BETA = True if beta is not None else False
+
     device = infer_device()
     if device == "cuda":
-        sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count
+        NUM_SMS = torch.cuda.get_device_properties(x.device).multi_processor_count
     elif device == "xpu":
-        (… 15 lines truncated in this diff view …)
-        dy_ptr=dy,
-        dy_row_stride=dy.stride(0),
-        dx_ptr=dx,
-        dx_row_stride=dx.stride(0),
-        alpha_ptr=alpha,
-        dalpha_ptr=_dalpha,
-        gamma_ptr=gamma,
-        dgamma_ptr=_dgamma,
-        dgamma_row_stride=_dgamma.stride(0),
-        n_cols=n_cols,
-        n_rows=n_rows,
-        ROWS_PER_PROGRAM=rows_per_program,
-        BLOCK_SIZE=BLOCK_SIZE,
-        num_warps=num_warps,
-    )
-    dalpha = _dalpha.sum(dim=0, keepdim=True).to(dtype)
-    dgamma = _dgamma.sum(dim=0).to(dtype)
-    dbeta = dy.sum(dim=0).to(dtype)
-    return dx.view(*shape), dalpha, dgamma, dbeta
+        NUM_SMS = torch.xpu.get_device_properties(x.device).gpu_subslice_count
+
+    da = torch.zeros(NUM_SMS, triton.cdiv(N, 512), dtype=torch.float32, device=x.device)
+    dg = torch.empty(NUM_SMS, N, dtype=torch.float32, device=x.device)
+    db = torch.empty(NUM_SMS, N, dtype=torch.float32, device=x.device) if HAVE_BETA else None
+    dx = torch.empty_like(dy)
+
+    kwargs = {"BLOCK_N": min(triton.next_power_of_2(N), 1024), "num_warps": 8, "num_stages": 2}
+    grid = lambda meta: (triton.cdiv(N, meta["BLOCK_N"]), NUM_SMS)
+    _dyt_bwd_kernel[grid](dy, dx, da, dg, db, x, alpha, gamma, HAVE_BETA, M, N, **kwargs)
+    if HAVE_BETA:
+        db = db.sum(0).to(x.dtype)
+    dg = dg.sum(0).to(gamma.dtype)
+    da = da.sum().to(x.dtype).unsqueeze(0)
+    return dx.view(input_shape), da, dg, db
 
 
 class LigerDyTFunction(torch.autograd.Function):
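For reference, the gradients the rewritten `_dyt_bwd_kernel` accumulates follow directly from the forward definition y = gamma * tanh(alpha * x) + beta. Writing dy for the incoming gradient:

$$
\frac{\partial L}{\partial x} = \alpha\,\gamma\,\bigl(1-\tanh^2(\alpha x)\bigr)\,dy,
\qquad
\frac{\partial L}{\partial \alpha} = \sum x\,\gamma\,\bigl(1-\tanh^2(\alpha x)\bigr)\,dy,
$$
$$
\frac{\partial L}{\partial \gamma} = \sum \tanh(\alpha x)\,dy,
\qquad
\frac{\partial L}{\partial \beta} = \sum dy,
$$

with the sums taken over all rows. This is exactly the `tmp`/`da`/`dg`/`db` bookkeeping in the kernel: each program writes per-SM partial sums into `DA`, `DG`, and `DB`, which `liger_dyt_bwd` then reduces on the host (`da.sum()`, `dg.sum(0)`, `db.sum(0)`).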
@@ -208,18 +148,12 @@ class LigerDyTFunction(torch.autograd.Function):
     @ensure_contiguous
     def forward(ctx, x, alpha, gamma, beta):
         y = liger_dyt_fwd(x, alpha, gamma, beta)
-        ctx.save_for_backward(x, alpha, gamma)
+        ctx.save_for_backward(x, alpha, gamma, beta)
         return y
 
     @staticmethod
     @ensure_contiguous
-    def backward(ctx,
-        x, alpha, gamma = ctx.saved_tensors
-        dx, dalpha, dgamma, dbeta = liger_dyt_bwd(
-            (… 1 line truncated in this diff view …)
-            x,
-            alpha,
-            gamma,
-        )
-
-        return (dx, dalpha, dgamma, dbeta)
+    def backward(ctx, dy):
+        x, alpha, gamma, beta = ctx.saved_tensors
+        dx, dalpha, dgamma, dbeta = liger_dyt_bwd(dy, x, alpha, gamma, beta)
+        return dx, dalpha, dgamma, dbeta
liger_kernel/transformers/dyt.py
CHANGED
@@ -5,16 +5,18 @@ from liger_kernel.ops.dyt import LigerDyTFunction
 
 
 class LigerDyT(nn.Module):
-    def __init__(self, hidden_size, init_alpha=0.5):
+    def __init__(self, hidden_size, beta=True, init_alpha=0.5):
         super().__init__()
         self.hidden_size = hidden_size
         self.init_alpha = init_alpha
         self.alpha = nn.Parameter(torch.ones(1) * init_alpha)
         self.gamma = nn.Parameter(torch.ones(hidden_size))
-        self.beta = nn.Parameter(torch.zeros(hidden_size))
+        self.beta = None
+        if beta:
+            self.beta = nn.Parameter(torch.zeros(hidden_size))
 
     def forward(self, x):
         return LigerDyTFunction.apply(x, self.alpha, self.gamma, self.beta)
 
     def extra_repr(self):
-        return f"{self.hidden_size}, init_alpha={self.init_alpha}"
+        return f"{self.hidden_size}, init_alpha={self.init_alpha}, beta={self.beta}"
{liger_kernel_nightly-0.5.9.dev20250517045825.dist-info → liger_kernel_nightly-0.5.9.dev20250519011716.dist-info}/RECORD
CHANGED
@@ -17,7 +17,7 @@ liger_kernel/chunked_loss/orpo_loss.py,sha256=nu9UYG16dcMw93lvHi4_hYs3Q0FK1KnlmM
 liger_kernel/chunked_loss/simpo_loss.py,sha256=fy2w8KbhMrBv7b1jdIeH3bBFxY52bPQPZb3KwBvmurM,5385
 liger_kernel/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 liger_kernel/ops/cross_entropy.py,sha256=e8THGnhOcy_0SbOLABx67HEM7-B8a8pG7nDKbCRpQKM,19123
-liger_kernel/ops/dyt.py,sha256=
+liger_kernel/ops/dyt.py,sha256=Y180EIvtUc2z83mhyub0EVOCQHJmWX3JnscqkOJqswk,5467
 liger_kernel/ops/fused_linear_cross_entropy.py,sha256=5fbGhN85n3zf0uIdJ7PYHWIRzTf0VTFiS0ARtOmqIP0,11020
 liger_kernel/ops/fused_linear_jsd.py,sha256=CSoprxb-YcJy-YUKiTcYkxN8sb9h2kdk_iHuncvSV5c,9683
 liger_kernel/ops/geglu.py,sha256=axGvCIvlBzuluoAIrWTsp2iZM4BFKNInkPov8YVvH9E,4126
@@ -37,7 +37,7 @@ liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-
 liger_kernel/transformers/__init__.py,sha256=0KX0rxyy0E_uNWVE0PSTzEVzKqc5KdFHtvdHhJm23Kk,7077
 liger_kernel/transformers/auto_model.py,sha256=0qCTRZt280Bj_LcFdzo9hlaR-BWNazawXOGgoCZjgEg,1545
 liger_kernel/transformers/cross_entropy.py,sha256=z3KTWQnFxr_IZaVjtYt0ZNEWQdDdYThN35xWkHlDGH0,1683
-liger_kernel/transformers/dyt.py,sha256=
+liger_kernel/transformers/dyt.py,sha256=i-4GPaMrl-jab9TVI5qN0-H9qycn_mCbV82ozU4nbmU,723
 liger_kernel/transformers/functional.py,sha256=2YBfvtdU1GRZuRpJhHgJXeGYa1RvmO6-qQvrKQrLJK4,5259
 liger_kernel/transformers/fused_linear_cross_entropy.py,sha256=O8Sg5BT81nTaY9fSGoOY9dOD9ekibwwiuXhdUHaxntQ,1742
 liger_kernel/transformers/fused_linear_jsd.py,sha256=bZ4otCvWBuOnA5XdQL-FzZVItJlDt-ht9e_pG7PG93E,3999
@@ -79,9 +79,9 @@ liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7H
 liger_kernel/transformers/trainer/orpo_trainer.py,sha256=pdekW7l6Qg_aqa5SYKYlSWUF8m3lkOFvFLcIMEHrz9s,8338
 liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
 liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
-liger_kernel_nightly-0.5.9.
-liger_kernel_nightly-0.5.9.
-liger_kernel_nightly-0.5.9.
-liger_kernel_nightly-0.5.9.
-liger_kernel_nightly-0.5.9.
-liger_kernel_nightly-0.5.9.
+liger_kernel_nightly-0.5.9.dev20250519011716.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel_nightly-0.5.9.dev20250519011716.dist-info/METADATA,sha256=JJ5XcqsRjwW1nB2hH580FLzHY9i3mC_aEZj9mDNX6Gg,23970
+liger_kernel_nightly-0.5.9.dev20250519011716.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel_nightly-0.5.9.dev20250519011716.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+liger_kernel_nightly-0.5.9.dev20250519011716.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel_nightly-0.5.9.dev20250519011716.dist-info/RECORD,,