@lgrammel/ds4-provider 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +96 -0
- package/binding.gyp +75 -0
- package/dist/ds4-language-model.d.ts +71 -0
- package/dist/ds4-language-model.d.ts.map +1 -0
- package/dist/ds4-language-model.js +888 -0
- package/dist/ds4-language-model.js.map +1 -0
- package/dist/ds4-provider.d.ts +13 -0
- package/dist/ds4-provider.d.ts.map +1 -0
- package/dist/ds4-provider.js +20 -0
- package/dist/ds4-provider.js.map +1 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -0
- package/dist/native-binding.d.ts +42 -0
- package/dist/native-binding.d.ts.map +1 -0
- package/dist/native-binding.js +157 -0
- package/dist/native-binding.js.map +1 -0
- package/ds4/LICENSE +22 -0
- package/ds4/ds4.c +18268 -0
- package/ds4/ds4.h +196 -0
- package/ds4/ds4_gpu.h +804 -0
- package/ds4/ds4_metal.m +14657 -0
- package/ds4/metal/argsort.metal +266 -0
- package/ds4/metal/bin.metal +192 -0
- package/ds4/metal/concat.metal +62 -0
- package/ds4/metal/cpy.metal +57 -0
- package/ds4/metal/dense.metal +1121 -0
- package/ds4/metal/dsv4_hc.metal +861 -0
- package/ds4/metal/dsv4_kv.metal +227 -0
- package/ds4/metal/dsv4_misc.metal +1088 -0
- package/ds4/metal/dsv4_rope.metal +155 -0
- package/ds4/metal/flash_attn.metal +1426 -0
- package/ds4/metal/get_rows.metal +54 -0
- package/ds4/metal/glu.metal +36 -0
- package/ds4/metal/moe.metal +1737 -0
- package/ds4/metal/norm.metal +153 -0
- package/ds4/metal/repeat.metal +52 -0
- package/ds4/metal/set_rows.metal +55 -0
- package/ds4/metal/softmax.metal +241 -0
- package/ds4/metal/sum_rows.metal +102 -0
- package/ds4/metal/unary.metal +312 -0
- package/native/binding.cpp +621 -0
- package/package.json +66 -0
- package/scripts/postinstall.cjs +13 -0
- package/scripts/vendor-ds4.cjs +67 -0
package/ds4/ds4_gpu.h
ADDED
@@ -0,0 +1,804 @@
#ifndef DS4_GPU_H
#define DS4_GPU_H

#include <stdbool.h>
#include <stdint.h>

/* =========================================================================
 * GPU Tensor and Command Lifetime.
 * =========================================================================
 *
 * Opaque device tensor used by the DS4-specific GPU executor.
 *
 * The public GPU API is tensor-resident: activations, KV state, and scratch
 * buffers stay device-owned across the whole prefill/decode command sequence.
 */
typedef struct ds4_gpu_tensor ds4_gpu_tensor;

int ds4_gpu_init(void);
void ds4_gpu_cleanup(void);

ds4_gpu_tensor *ds4_gpu_tensor_alloc(uint64_t bytes);
ds4_gpu_tensor *ds4_gpu_tensor_alloc_managed(uint64_t bytes);
ds4_gpu_tensor *ds4_gpu_tensor_view(const ds4_gpu_tensor *base, uint64_t offset, uint64_t bytes);
void ds4_gpu_tensor_free(ds4_gpu_tensor *tensor);
uint64_t ds4_gpu_tensor_bytes(const ds4_gpu_tensor *tensor);
void *ds4_gpu_tensor_contents(ds4_gpu_tensor *tensor);
int ds4_gpu_tensor_fill_f32(ds4_gpu_tensor *tensor, float value, uint64_t count);
int ds4_gpu_tensor_write(ds4_gpu_tensor *tensor, uint64_t offset, const void *data, uint64_t bytes);
int ds4_gpu_tensor_read(const ds4_gpu_tensor *tensor, uint64_t offset, void *data, uint64_t bytes);
int ds4_gpu_tensor_copy(ds4_gpu_tensor *dst, uint64_t dst_offset,
                        const ds4_gpu_tensor *src, uint64_t src_offset,
                        uint64_t bytes);

int ds4_gpu_begin_commands(void);
int ds4_gpu_flush_commands(void);
int ds4_gpu_end_commands(void);
int ds4_gpu_synchronize(void);

int ds4_gpu_set_model_map(const void *model_map, uint64_t model_size);
int ds4_gpu_set_model_fd(int fd);
int ds4_gpu_set_model_map_range(const void *model_map, uint64_t model_size, uint64_t map_offset, uint64_t map_size);
int ds4_gpu_cache_model_range(const void *model_map, uint64_t model_size, uint64_t offset, uint64_t bytes, const char *label);
int ds4_gpu_cache_q8_f16_range(const void *model_map, uint64_t model_size, uint64_t offset, uint64_t bytes, uint64_t in_dim, uint64_t out_dim, const char *label);
int ds4_gpu_should_use_managed_kv_cache(uint64_t kv_cache_bytes, uint64_t context_bytes);
void ds4_gpu_set_quality(bool quality);
void ds4_gpu_print_memory_report(const char *label);
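The lifetime declarations above imply a bracketed call pattern: initialize once, allocate and fill device tensors, record a command sequence, then synchronize before reading results back. The following minimal usage sketch is not taken from the package sources; it assumes the usual 0-on-success convention for the int-returning functions and uses a placeholder buffer size.

/* Sketch only: assumes 0 == success for the int-returning lifetime calls. */
if (ds4_gpu_init() != 0) return 1;               /* no usable GPU device */

float host[4096] = {0};
ds4_gpu_tensor *x = ds4_gpu_tensor_alloc(sizeof(host));
ds4_gpu_tensor_write(x, 0, host, sizeof(host));  /* upload host data */

ds4_gpu_begin_commands();
/* ... enqueue DS4 kernels that read and write x here ... */
ds4_gpu_end_commands();
ds4_gpu_synchronize();                           /* wait for GPU completion */

ds4_gpu_tensor_read(x, 0, host, sizeof(host));   /* download the result */
ds4_gpu_tensor_free(x);
ds4_gpu_cleanup();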
/* =========================================================================
 * Embeddings and Indexer Helpers.
 * =========================================================================
 *
 * These kernels seed HC state from token embeddings and implement the ratio-4
 * compressed-attention indexer that chooses visible compressed rows.
 */

int ds4_gpu_embed_token_hc_tensor(
    ds4_gpu_tensor *out_hc,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint32_t n_vocab,
    uint32_t token,
    uint32_t n_embd,
    uint32_t n_hc);

int ds4_gpu_embed_tokens_hc_tensor(
    ds4_gpu_tensor *out_hc,
    const ds4_gpu_tensor *tokens,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint32_t n_vocab,
    uint32_t n_tokens,
    uint32_t n_embd,
    uint32_t n_hc);

int ds4_gpu_indexer_score_one_tensor(
    ds4_gpu_tensor *scores,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *weights,
    const ds4_gpu_tensor *index_comp,
    uint32_t n_comp,
    uint32_t n_head,
    uint32_t head_dim,
    float scale);

int ds4_gpu_indexer_scores_prefill_tensor(
    ds4_gpu_tensor *scores,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *weights,
    const ds4_gpu_tensor *index_comp,
    uint32_t n_comp,
    uint32_t n_tokens,
    uint32_t n_head,
    uint32_t head_dim,
    uint32_t ratio,
    float scale);

int ds4_gpu_indexer_scores_decode_batch_tensor(
    ds4_gpu_tensor *scores,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *weights,
    const ds4_gpu_tensor *index_comp,
    uint32_t n_comp,
    uint32_t n_tokens,
    uint32_t pos0,
    uint32_t n_head,
    uint32_t head_dim,
    uint32_t ratio,
    float scale);

int ds4_gpu_indexer_topk_tensor(
    ds4_gpu_tensor *selected,
    const ds4_gpu_tensor *scores,
    uint32_t n_comp,
    uint32_t n_tokens,
    uint32_t top_k);

int ds4_gpu_dsv4_topk_mask_tensor(
    ds4_gpu_tensor *mask,
    const ds4_gpu_tensor *topk,
    uint32_t n_comp,
    uint32_t n_tokens,
    uint32_t top_k);
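For a single decode step, the indexer helpers above compose naturally: score the compressed rows against the indexer query, keep the top_k rows, then turn the selection into a mask for the masked attention kernels. The sketch below is illustrative only; the tensors are assumed to be pre-allocated ds4_gpu_tensor handles and the scalar arguments are placeholders, not values taken from the package.

/* Sketch: rank compressed rows for one query token, then build a mask. */
ds4_gpu_indexer_score_one_tensor(scores, q, weights, index_comp,
                                 n_comp, n_head, head_dim, scale);
ds4_gpu_indexer_topk_tensor(selected, scores, n_comp, /* n_tokens */ 1, top_k);
ds4_gpu_dsv4_topk_mask_tensor(mask, selected, n_comp, /* n_tokens */ 1, top_k);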
/* =========================================================================
 * Dense Projections, Norms, RoPE, and KV Rounding.
 * =========================================================================
 *
 * The graph uses these primitives for Q/KV projections, HC/output projections,
 * attention output projections, and DS4's tail-only RoPE.
 */

int ds4_gpu_matmul_q8_0_tensor(
    ds4_gpu_tensor *out,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint64_t in_dim,
    uint64_t out_dim,
    const ds4_gpu_tensor *x,
    uint64_t n_tok);

int ds4_gpu_shared_gate_up_swiglu_q8_0_tensor(
    ds4_gpu_tensor *gate,
    ds4_gpu_tensor *up,
    ds4_gpu_tensor *mid,
    const void *model_map,
    uint64_t model_size,
    uint64_t gate_offset,
    uint64_t up_offset,
    uint64_t in_dim,
    uint64_t out_dim,
    const ds4_gpu_tensor *x);

int ds4_gpu_matmul_f16_tensor(
    ds4_gpu_tensor *out,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint64_t in_dim,
    uint64_t out_dim,
    const ds4_gpu_tensor *x,
    uint64_t n_tok);

int ds4_gpu_matmul_f16_pair_tensor(
    ds4_gpu_tensor *out_a,
    ds4_gpu_tensor *out_b,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_a_offset,
    uint64_t weight_b_offset,
    uint64_t in_dim,
    uint64_t out_dim,
    const ds4_gpu_tensor *x,
    uint64_t n_tok);

int ds4_gpu_matmul_f32_tensor(
    ds4_gpu_tensor *out,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint64_t in_dim,
    uint64_t out_dim,
    const ds4_gpu_tensor *x,
    uint64_t n_tok);

int ds4_gpu_repeat_hc_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *row,
    uint32_t n_embd,
    uint32_t n_hc);

int ds4_gpu_rms_norm_plain_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *x,
    uint32_t n,
    float eps);

int ds4_gpu_rms_norm_plain_rows_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *x,
    uint32_t n,
    uint32_t rows,
    float eps);

int ds4_gpu_rms_norm_weight_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *x,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint32_t n,
    float eps);

int ds4_gpu_rms_norm_weight_rows_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *x,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint32_t n,
    uint32_t rows,
    float eps);

int ds4_gpu_dsv4_qkv_rms_norm_rows_tensor(
    ds4_gpu_tensor *q_out,
    const ds4_gpu_tensor *q,
    const void *model_map,
    uint64_t model_size,
    uint64_t q_weight_offset,
    uint32_t q_n,
    ds4_gpu_tensor *kv_out,
    const ds4_gpu_tensor *kv,
    uint64_t kv_weight_offset,
    uint32_t kv_n,
    uint32_t rows,
    float eps);

int ds4_gpu_head_rms_norm_tensor(
    ds4_gpu_tensor *x,
    uint32_t n_tok,
    uint32_t n_head,
    uint32_t head_dim,
    float eps);

int ds4_gpu_dsv4_fp8_kv_quantize_tensor(
    ds4_gpu_tensor *x,
    uint32_t n_tok,
    uint32_t head_dim,
    uint32_t n_rot);

int ds4_gpu_rope_tail_tensor(
    ds4_gpu_tensor *x,
    uint32_t n_tok,
    uint32_t n_head,
    uint32_t head_dim,
    uint32_t n_rot,
    uint32_t pos0,
    uint32_t n_ctx_orig,
    bool inverse,
    float freq_base,
    float freq_scale,
    float ext_factor,
    float attn_factor,
    float beta_fast,
    float beta_slow);

/* Release decode fused KV finalizer: after the standalone RoPE kernel, this
 * performs DS4's FP8 non-RoPE KV round trip and writes the F16-rounded raw
 * attention cache row in one dispatch. */
int ds4_gpu_kv_fp8_store_raw_tensor(
    ds4_gpu_tensor *kv,
    ds4_gpu_tensor *raw_cache,
    uint32_t raw_cap,
    uint32_t row,
    uint32_t head_dim,
    uint32_t n_rot);

/* Reference/raw-cache primitive kept for prefill and diagnostics. Decode uses
 * ds4_gpu_kv_fp8_store_raw_tensor unless a diagnostic reference path is
 * explicitly selected by the graph driver. */
int ds4_gpu_store_raw_kv_tensor(
    ds4_gpu_tensor *raw_cache,
    const ds4_gpu_tensor *kv,
    uint32_t raw_cap,
    uint32_t row,
    uint32_t head_dim);

int ds4_gpu_store_raw_kv_batch_tensor(
    ds4_gpu_tensor *raw_cache,
    const ds4_gpu_tensor *kv,
    uint32_t raw_cap,
    uint32_t pos0,
    uint32_t n_tokens,
    uint32_t head_dim);
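A decode-time Q/KV step built from the primitives above would plausibly run a weighted RMS norm over the hidden row, a Q8_0 matmul against the mapped weights, the tail-only RoPE on the rotated slice of each head, and then the fused FP8 KV finalizer. The sketch below is an editorial reading of how these declarations compose, not code from the package; offsets, dimensions, and the ring-buffer row computation are assumptions.

/* Sketch: single-token projection path; all offsets/dims are placeholders. */
ds4_gpu_rms_norm_weight_tensor(xn, x, model_map, model_size,
                               attn_norm_offset, n_embd, rms_eps);
ds4_gpu_matmul_q8_0_tensor(q, model_map, model_size, wq_offset,
                           n_embd, n_head * head_dim, xn, /* n_tok */ 1);
ds4_gpu_rope_tail_tensor(q, /* n_tok */ 1, n_head, head_dim, n_rot, pos,
                         n_ctx_orig, /* inverse */ false, freq_base, freq_scale,
                         ext_factor, attn_factor, beta_fast, beta_slow);
ds4_gpu_kv_fp8_store_raw_tensor(kv, raw_cache, raw_cap,
                                pos % raw_cap /* assumed ring slot */,
                                head_dim, n_rot);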
/* =========================================================================
 * KV Compression and Attention.
 * =========================================================================
 *
 * Compressed layers maintain rolling score/KV state and append pooled rows at
 * ratio boundaries. Attention kernels consume raw SWA rows, compressed rows,
 * and optional indexer masks.
 */

int ds4_gpu_compressor_update_tensor(
    const ds4_gpu_tensor *kv_cur,
    const ds4_gpu_tensor *sc_cur,
    ds4_gpu_tensor *state_kv,
    ds4_gpu_tensor *state_score,
    ds4_gpu_tensor *comp_cache,
    const void *model_map,
    uint64_t model_size,
    uint64_t ape_offset,
    uint32_t ape_type,
    uint64_t norm_offset,
    uint32_t norm_type,
    uint32_t head_dim,
    uint32_t ratio,
    uint32_t pos,
    uint32_t comp_row,
    uint32_t n_rot,
    uint32_t n_ctx_orig,
    float freq_base,
    float freq_scale,
    float ext_factor,
    float attn_factor,
    float beta_fast,
    float beta_slow,
    float rms_eps);

int ds4_gpu_compressor_store_batch_tensor(
    const ds4_gpu_tensor *kv,
    const ds4_gpu_tensor *sc,
    ds4_gpu_tensor *state_kv,
    ds4_gpu_tensor *state_score,
    const void *model_map,
    uint64_t model_size,
    uint64_t ape_offset,
    uint32_t ape_type,
    uint32_t head_dim,
    uint32_t ratio,
    uint32_t pos0,
    uint32_t n_tokens);

int ds4_gpu_compressor_prefill_tensor(
    ds4_gpu_tensor *comp_cache,
    ds4_gpu_tensor *state_kv,
    ds4_gpu_tensor *state_score,
    const ds4_gpu_tensor *kv,
    const ds4_gpu_tensor *sc,
    const void *model_map,
    uint64_t model_size,
    uint64_t ape_offset,
    uint32_t ape_type,
    uint64_t norm_offset,
    uint32_t norm_type,
    uint32_t head_dim,
    uint32_t ratio,
    uint32_t pos0,
    uint32_t n_tokens,
    uint32_t n_rot,
    uint32_t n_ctx_orig,
    bool quantize_fp8,
    float freq_base,
    float freq_scale,
    float ext_factor,
    float attn_factor,
    float beta_fast,
    float beta_slow,
    float rms_eps);

int ds4_gpu_compressor_prefill_ratio4_replay_tensor(
    ds4_gpu_tensor *comp_cache,
    ds4_gpu_tensor *state_kv,
    ds4_gpu_tensor *state_score,
    const ds4_gpu_tensor *kv,
    const ds4_gpu_tensor *sc,
    const void *model_map,
    uint64_t model_size,
    uint64_t ape_offset,
    uint32_t ape_type,
    uint64_t norm_offset,
    uint32_t norm_type,
    uint32_t head_dim,
    uint32_t pos0,
    uint32_t n_tokens,
    uint32_t n_rot,
    uint32_t n_ctx_orig,
    bool quantize_fp8,
    float freq_base,
    float freq_scale,
    float ext_factor,
    float attn_factor,
    float beta_fast,
    float beta_slow,
    float rms_eps);

int ds4_gpu_compressor_prefill_state_ratio4_tensor(
    ds4_gpu_tensor *state_kv,
    ds4_gpu_tensor *state_score,
    const ds4_gpu_tensor *kv_tail,
    const ds4_gpu_tensor *sc_tail,
    const void *model_map,
    uint64_t model_size,
    uint64_t ape_offset,
    uint32_t ape_type,
    uint32_t head_dim,
    uint32_t pos0);

int ds4_gpu_attention_decode_heads_tensor(
    ds4_gpu_tensor *heads,
    const void *model_map,
    uint64_t model_size,
    uint64_t sinks_offset,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *raw_kv,
    uint32_t n_raw,
    uint32_t raw_cap,
    uint32_t raw_start,
    const ds4_gpu_tensor *comp_kv,
    uint32_t n_comp,
    const ds4_gpu_tensor *comp_mask,
    uint32_t use_mask,
    uint32_t n_head,
    uint32_t head_dim);

int ds4_gpu_attention_prefill_raw_heads_tensor(
    ds4_gpu_tensor *heads,
    const void *model_map,
    uint64_t model_size,
    uint64_t sinks_offset,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *raw_kv,
    uint32_t n_tokens,
    uint32_t window,
    uint32_t n_head,
    uint32_t head_dim);

int ds4_gpu_attention_decode_raw_batch_heads_tensor(
    ds4_gpu_tensor *heads,
    const void *model_map,
    uint64_t model_size,
    uint64_t sinks_offset,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *raw_kv,
    uint32_t n_tokens,
    uint32_t pos0,
    uint32_t n_raw,
    uint32_t raw_cap,
    uint32_t raw_start,
    uint32_t window,
    uint32_t n_head,
    uint32_t head_dim);

int ds4_gpu_attention_decode_mixed_batch_heads_tensor(
    ds4_gpu_tensor *heads,
    const void *model_map,
    uint64_t model_size,
    uint64_t sinks_offset,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *raw_kv,
    const ds4_gpu_tensor *comp_kv,
    const ds4_gpu_tensor *comp_mask,
    uint32_t use_comp_mask,
    uint32_t n_tokens,
    uint32_t pos0,
    uint32_t n_raw,
    uint32_t raw_cap,
    uint32_t raw_start,
    uint32_t n_comp,
    uint32_t window,
    uint32_t ratio,
    uint32_t n_head,
    uint32_t head_dim);

int ds4_gpu_attention_indexed_mixed_batch_heads_tensor(
    ds4_gpu_tensor *heads,
    const void *model_map,
    uint64_t model_size,
    uint64_t sinks_offset,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *raw_kv,
    const ds4_gpu_tensor *comp_kv,
    const ds4_gpu_tensor *topk,
    uint32_t n_tokens,
    uint32_t pos0,
    uint32_t n_raw,
    uint32_t raw_cap,
    uint32_t raw_start,
    uint32_t n_comp,
    uint32_t top_k,
    uint32_t window,
    uint32_t ratio,
    uint32_t n_head,
    uint32_t head_dim);

int ds4_gpu_attention_prefill_static_mixed_heads_tensor(
    ds4_gpu_tensor *heads,
    const void *model_map,
    uint64_t model_size,
    uint64_t sinks_offset,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *raw_kv,
    const ds4_gpu_tensor *comp_kv,
    uint32_t n_tokens,
    uint32_t n_comp,
    uint32_t window,
    uint32_t ratio,
    uint32_t n_head,
    uint32_t head_dim);

int ds4_gpu_attention_prefill_masked_mixed_heads_tensor(
    ds4_gpu_tensor *heads,
    const void *model_map,
    uint64_t model_size,
    uint64_t sinks_offset,
    const ds4_gpu_tensor *q,
    const ds4_gpu_tensor *raw_kv,
    const ds4_gpu_tensor *comp_kv,
    const ds4_gpu_tensor *comp_mask,
    uint32_t n_tokens,
    uint32_t n_comp,
    uint32_t window,
    uint32_t ratio,
    uint32_t n_head,
    uint32_t head_dim);

int ds4_gpu_attention_output_q8_batch_tensor(
    ds4_gpu_tensor *out,
    ds4_gpu_tensor *low,
    ds4_gpu_tensor *group_tmp,
    ds4_gpu_tensor *low_tmp,
    const void *model_map,
    uint64_t model_size,
    uint64_t out_a_offset,
    uint64_t out_b_offset,
    uint64_t group_dim,
    uint64_t rank,
    uint32_t n_groups,
    uint64_t out_dim,
    const ds4_gpu_tensor *heads,
    uint32_t n_tokens);

int ds4_gpu_attention_output_low_q8_tensor(
    ds4_gpu_tensor *low,
    const void *model_map,
    uint64_t model_size,
    uint64_t out_a_offset,
    uint64_t group_dim,
    uint64_t rank,
    uint32_t n_groups,
    const ds4_gpu_tensor *heads);
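During decode, the mixed attention kernel above reads the raw sliding-window rows plus the compressed rows (optionally restricted by an indexer mask) and emits per-head outputs, which the grouped Q8 output projection then folds back to the model width. The pairing below is a hedged sketch, not package code; the counts and weight offsets are placeholders.

/* Sketch: single-token decode attention over raw + compressed KV. */
ds4_gpu_attention_decode_heads_tensor(heads, model_map, model_size, sinks_offset,
                                      q, raw_kv, n_raw, raw_cap, raw_start,
                                      comp_kv, n_comp, comp_mask,
                                      /* use_mask */ 1, n_head, head_dim);
ds4_gpu_attention_output_q8_batch_tensor(out, low, group_tmp, low_tmp,
                                         model_map, model_size,
                                         out_a_offset, out_b_offset,
                                         group_dim, rank, n_groups, out_dim,
                                         heads, /* n_tokens */ 1);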
/* =========================================================================
 * Router, Shared Expert, and Routed MoE.
 * =========================================================================
 *
 * These kernels implement the FFN body: router probabilities/top-k or hash
 * routing, shared SwiGLU, and the IQ2_XXS/Q2_K/Q4_K routed experts.
 */

int ds4_gpu_swiglu_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *gate,
    const ds4_gpu_tensor *up,
    uint32_t n,
    float clamp,
    float weight);

int ds4_gpu_add_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *a,
    const ds4_gpu_tensor *b,
    uint32_t n);

int ds4_gpu_directional_steering_project_tensor(
    ds4_gpu_tensor *x,
    const ds4_gpu_tensor *directions,
    uint32_t layer,
    uint32_t width,
    uint32_t rows,
    float scale);

int ds4_gpu_router_select_tensor(
    ds4_gpu_tensor *selected,
    ds4_gpu_tensor *weights,
    ds4_gpu_tensor *probs,
    const void *model_map,
    uint64_t model_size,
    uint64_t bias_offset,
    uint64_t hash_offset,
    uint32_t hash_rows,
    uint32_t token,
    uint32_t n_expert_groups,
    uint32_t n_group_used,
    bool has_bias,
    bool hash_mode,
    const ds4_gpu_tensor *logits);

int ds4_gpu_router_select_batch_tensor(
    ds4_gpu_tensor *selected,
    ds4_gpu_tensor *weights,
    ds4_gpu_tensor *probs,
    const void *model_map,
    uint64_t model_size,
    uint64_t bias_offset,
    uint64_t hash_offset,
    uint32_t hash_rows,
    uint32_t n_expert_groups,
    uint32_t n_group_used,
    bool has_bias,
    bool hash_mode,
    const ds4_gpu_tensor *logits,
    const ds4_gpu_tensor *tokens,
    uint32_t n_tokens);

int ds4_gpu_routed_moe_one_tensor(
    ds4_gpu_tensor *out,
    ds4_gpu_tensor *gate,
    ds4_gpu_tensor *up,
    ds4_gpu_tensor *mid,
    ds4_gpu_tensor *experts,
    const void *model_map,
    uint64_t model_size,
    uint64_t gate_offset,
    uint64_t up_offset,
    uint64_t down_offset,
    uint32_t gate_type,
    uint32_t down_type,
    uint64_t gate_expert_bytes,
    uint64_t gate_row_bytes,
    uint64_t down_expert_bytes,
    uint64_t down_row_bytes,
    uint32_t expert_in_dim,
    uint32_t expert_mid_dim,
    uint32_t out_dim,
    const ds4_gpu_tensor *selected,
    const ds4_gpu_tensor *weights,
    uint32_t n_expert,
    float clamp,
    const ds4_gpu_tensor *x);

int ds4_gpu_routed_moe_batch_tensor(
    ds4_gpu_tensor *out,
    ds4_gpu_tensor *gate,
    ds4_gpu_tensor *up,
    ds4_gpu_tensor *mid,
    ds4_gpu_tensor *experts,
    const void *model_map,
    uint64_t model_size,
    uint64_t gate_offset,
    uint64_t up_offset,
    uint64_t down_offset,
    uint32_t gate_type,
    uint32_t down_type,
    uint64_t gate_expert_bytes,
    uint64_t gate_row_bytes,
    uint64_t down_expert_bytes,
    uint64_t down_row_bytes,
    uint32_t expert_in_dim,
    uint32_t expert_mid_dim,
    uint32_t out_dim,
    const ds4_gpu_tensor *selected,
    const ds4_gpu_tensor *weights,
    uint32_t n_expert,
    float clamp,
    const ds4_gpu_tensor *x,
    uint32_t n_tokens,
    bool *mid_is_f16);
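For a token batch, the FFN body implied by the declarations above is: select experts per token (top-k or hash routing), then run the routed expert pass over that selection, with the shared SwiGLU path handled by the fused shared-expert kernels elsewhere in this header. The call ordering below is an editorial assumption rather than package code, and every scalar and offset is a placeholder.

/* Sketch: batched routing followed by the routed expert pass. */
ds4_gpu_router_select_batch_tensor(selected, weights, probs,
                                   model_map, model_size, bias_offset,
                                   hash_offset, hash_rows,
                                   n_expert_groups, n_group_used,
                                   has_bias, hash_mode,
                                   logits, tokens, n_tokens);

bool mid_is_f16 = false;
ds4_gpu_routed_moe_batch_tensor(out, gate, up, mid, experts,
                                model_map, model_size,
                                gate_offset, up_offset, down_offset,
                                gate_type, down_type,
                                gate_expert_bytes, gate_row_bytes,
                                down_expert_bytes, down_row_bytes,
                                expert_in_dim, expert_mid_dim, out_dim,
                                selected, weights, n_expert, clamp,
                                x, n_tokens, &mid_is_f16);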
/* =========================================================================
 * Hyper-Connection Kernels.
 * =========================================================================
 *
 * HC kernels reduce four residual streams before a sublayer and expand the
 * sublayer output back into four streams afterward.
 */

int ds4_gpu_hc_split_sinkhorn_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *mix,
    const void *model_map,
    uint64_t model_size,
    uint64_t scale_offset,
    uint64_t base_offset,
    uint32_t n_hc,
    uint32_t sinkhorn_iters,
    float eps);

int ds4_gpu_hc_weighted_sum_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *residual_hc,
    const ds4_gpu_tensor *weights,
    uint32_t n_embd,
    uint32_t n_hc);

int ds4_gpu_hc_weighted_sum_split_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *residual_hc,
    const ds4_gpu_tensor *split,
    uint32_t n_embd,
    uint32_t n_hc);

/* Release decode fused HC pre-sublayer operation: split the HC mixer and
 * immediately reduce four HC streams into the active 4096-wide sublayer row. */
int ds4_gpu_hc_split_weighted_sum_tensor(
    ds4_gpu_tensor *out,
    ds4_gpu_tensor *split,
    const ds4_gpu_tensor *mix,
    const ds4_gpu_tensor *residual_hc,
    const void *model_map,
    uint64_t model_size,
    uint64_t scale_offset,
    uint64_t base_offset,
    uint32_t n_embd,
    uint32_t n_hc,
    uint32_t sinkhorn_iters,
    float eps);

int ds4_gpu_hc_split_weighted_sum_norm_tensor(
    ds4_gpu_tensor *out,
    ds4_gpu_tensor *norm_out,
    ds4_gpu_tensor *split,
    const ds4_gpu_tensor *mix,
    const ds4_gpu_tensor *residual_hc,
    const void *model_map,
    uint64_t model_size,
    uint64_t scale_offset,
    uint64_t base_offset,
    uint64_t norm_weight_offset,
    uint32_t n_embd,
    uint32_t n_hc,
    uint32_t sinkhorn_iters,
    float eps,
    float norm_eps);

int ds4_gpu_output_hc_weights_tensor(
    ds4_gpu_tensor *out,
    const ds4_gpu_tensor *pre,
    const void *model_map,
    uint64_t model_size,
    uint64_t scale_offset,
    uint64_t base_offset,
    uint32_t n_hc,
    float eps);

int ds4_gpu_hc_expand_tensor(
    ds4_gpu_tensor *out_hc,
    const ds4_gpu_tensor *block_out,
    const ds4_gpu_tensor *residual_hc,
    const ds4_gpu_tensor *post,
    const ds4_gpu_tensor *comb,
    uint32_t n_embd,
    uint32_t n_hc);

int ds4_gpu_hc_expand_split_tensor(
    ds4_gpu_tensor *out_hc,
    const ds4_gpu_tensor *block_out,
    const ds4_gpu_tensor *residual_hc,
    const ds4_gpu_tensor *split,
    uint32_t n_embd,
    uint32_t n_hc);

int ds4_gpu_hc_expand_add_split_tensor(
    ds4_gpu_tensor *out_hc,
    const ds4_gpu_tensor *block_out,
    const ds4_gpu_tensor *block_add,
    const ds4_gpu_tensor *residual_hc,
    const ds4_gpu_tensor *split,
    uint32_t n_embd,
    uint32_t n_hc);

int ds4_gpu_shared_down_hc_expand_q8_0_tensor(
    ds4_gpu_tensor *out_hc,
    ds4_gpu_tensor *shared_out,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint64_t in_dim,
    uint64_t out_dim,
    const ds4_gpu_tensor *shared_mid,
    const ds4_gpu_tensor *routed_out,
    const ds4_gpu_tensor *residual_hc,
    const ds4_gpu_tensor *split,
    uint32_t n_embd,
    uint32_t n_hc);

int ds4_gpu_matmul_q8_0_hc_expand_tensor(
    ds4_gpu_tensor *out_hc,
    ds4_gpu_tensor *block_out,
    const void *model_map,
    uint64_t model_size,
    uint64_t weight_offset,
    uint64_t in_dim,
    uint64_t out_dim,
    const ds4_gpu_tensor *x,
    const ds4_gpu_tensor *residual_hc,
    const ds4_gpu_tensor *split,
    uint32_t n_embd,
    uint32_t n_hc);

#endif
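Per the hyper-connection comment, each sublayer is wrapped by a reduce/expand pair over the four residual streams. A minimal sketch of that wrapping follows; it is not package code, the tensor names are placeholders, and writing the expansion into a separate output tensor (rather than in place) is an assumption.

/* Sketch: HC reduce -> sublayer -> HC expand for one decode row. */
ds4_gpu_hc_split_weighted_sum_tensor(x, split, mix, residual_hc,
                                     model_map, model_size,
                                     scale_offset, base_offset,
                                     n_embd, n_hc, sinkhorn_iters, eps);
/* ... run the attention or FFN sublayer on x, producing block_out ... */
ds4_gpu_hc_expand_split_tensor(next_residual_hc, block_out, residual_hc,
                               split, n_embd, n_hc);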