llama_cpp 0.14.5 → 0.14.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,999 @@
1
+ // -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*-
2
+ // vi: set et ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi
3
+ //
4
+ // Copyright 2024 Mozilla Foundation
5
+ //
6
+ // Permission is hereby granted, free of charge, to any person obtaining
7
+ // a copy of this software and associated documentation files (the
8
+ // "Software"), to deal in the Software without restriction, including
9
+ // without limitation the rights to use, copy, modify, merge, publish,
10
+ // distribute, sublicense, and/or sell copies of the Software, and to
11
+ // permit persons to whom the Software is furnished to do so, subject to
12
+ // the following conditions:
13
+ //
14
+ // The above copyright notice and this permission notice shall be
15
+ // included in all copies or substantial portions of the Software.
16
+ //
17
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18
+ // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19
+ // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20
+ // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
21
+ // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
22
+ // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23
+ // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24
+ // SOFTWARE.
25
+
26
+ //
27
+ // _ _ ___ _ _ ___
28
+ // | |_(_)_ _ _ _| _ ) | /_\ / __|
29
+ // | _| | ' \ || | _ \ |__ / _ \\__ \.
30
+ // \__|_|_||_\_, |___/____/_/ \_\___/
31
+ // |__/
32
+ //
33
+ // BASIC LINEAR ALGEBRA SUBPROGRAMS
34
+ //
35
+ //
36
+ // This file implements multithreaded CPU matrix multiplication for the
37
+ // common contiguous use case C = Aᵀ * B. These kernels are designed to
38
+ // have excellent performance[1] for matrices that fit in the CPU cache
39
+ // without imposing any overhead such as cache filling or malloc calls.
40
+ //
41
+ // This implementation does not guarantee any upper bound with rounding
42
+ // errors, which grow along with k. Our goal's to maximally exploit the
43
+ // hardware for performance, and then use whatever resources remain for
44
+ // improving numerical accuracy.
45
+ //
46
+ // [1] J. Tunney, ‘LLaMA Now Goes Faster on CPUs’, Mar. 2024. [Online].
47
+ // Available: https://justine.lol/matmul/. [Accessed: 29-Mar-2024].
48
+
49
+ #pragma GCC diagnostic ignored "-Wpedantic"
50
+ #pragma GCC diagnostic ignored "-Wignored-attributes"
51
+
52
+ #include "sgemm.h"
53
+ #include <algorithm>
54
+ #include "ggml-impl.h"
55
+ #include "ggml-quants.h"
56
+
57
+ #ifdef _MSC_VER
58
+ #define NOINLINE __declspec(noinline)
59
+ #else
60
+ #define NOINLINE __attribute__((__noinline__))
61
+ #endif
62
+
63
+ #if defined(__ARM_NEON) || defined(__AVX512F__)
64
+ #define VECTOR_REGISTERS 32
65
+ #else
66
+ #define VECTOR_REGISTERS 16
67
+ #endif
68
+
69
+ #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
70
+
71
+ namespace {
72
+
73
// Converts a half-precision (fp16) scalar — e.g. a quant block's `d` scale —
// to single precision via ggml's conversion macro.
inline float unhalf(ggml_fp16_t d) {
    return GGML_FP16_TO_FP32(d);
}
76
+
77
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
78
+ // VECTORIZED ARITHMETIC OPERATIONS
79
+
80
#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
// Elementwise float ops on 128-bit vectors (4 floats).
inline __m128 add(__m128 x, __m128 y) { return _mm_add_ps(x, y); }
inline __m128 sub(__m128 x, __m128 y) { return _mm_sub_ps(x, y); }
inline __m128 mul(__m128 x, __m128 y) { return _mm_mul_ps(x, y); }
#endif  // __SSE__

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
// Elementwise float ops on 256-bit vectors (8 floats).
inline __m256 add(__m256 x, __m256 y) { return _mm256_add_ps(x, y); }
inline __m256 sub(__m256 x, __m256 y) { return _mm256_sub_ps(x, y); }
inline __m256 mul(__m256 x, __m256 y) { return _mm256_mul_ps(x, y); }
#endif  // __AVX__

#if defined(__AVX512F__)
// Elementwise float ops on 512-bit vectors (16 floats).
inline __m512 add(__m512 x, __m512 y) { return _mm512_add_ps(x, y); }
inline __m512 sub(__m512 x, __m512 y) { return _mm512_sub_ps(x, y); }
inline __m512 mul(__m512 x, __m512 y) { return _mm512_mul_ps(x, y); }
#endif  // __AVX512F__

#if defined(__ARM_NEON)
// Elementwise float ops on NEON 128-bit vectors (4 floats).
inline float32x4_t add(float32x4_t x, float32x4_t y) { return vaddq_f32(x, y); }
inline float32x4_t sub(float32x4_t x, float32x4_t y) { return vsubq_f32(x, y); }
inline float32x4_t mul(float32x4_t x, float32x4_t y) { return vmulq_f32(x, y); }
#endif  // __ARM_NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
// Elementwise half-precision ops on NEON 128-bit vectors (8 fp16 lanes).
inline float16x8_t add(float16x8_t x, float16x8_t y) { return vaddq_f16(x, y); }
inline float16x8_t sub(float16x8_t x, float16x8_t y) { return vsubq_f16(x, y); }
inline float16x8_t mul(float16x8_t x, float16x8_t y) { return vmulq_f16(x, y); }
#endif  // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
109
+
110
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
111
+ // VECTORIZED FUSED MULTIPLY ADD
112
+
113
/**
 * Computes a * b + c.
 *
 * Generic fallback is an unfused multiply-then-add; the specializations
 * below route to the hardware fused-multiply-add instruction when the
 * target supports it (one rounding step instead of two).
 */
template <typename T, typename U>
inline U madd(T a, T b, U c) {
    return add(mul(a, b), c);
}

#if defined(__FMA__)
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
template <>
inline __m256 madd(__m256 a, __m256 b, __m256 c) {
    return _mm256_fmadd_ps(a, b, c);
}
#endif
#if defined(__AVX512F__)
template <>
inline __m512 madd(__m512 a, __m512 b, __m512 c) {
    return _mm512_fmadd_ps(a, b, c);
}
#endif
#endif

#if defined(__ARM_FEATURE_FMA)
template <>
inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) {
    // NEON's vfmaq takes the addend first: vfmaq_f32(c, b, a) == a*b + c.
    return vfmaq_f32(c, b, a);
}
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
template <>
inline float16x8_t madd(float16x8_t a, float16x8_t b, float16x8_t c) {
    return vfmaq_f16(c, b, a);
}
#endif
#endif
148
+
149
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
150
+ // VECTORIZED HORIZONTAL SUM
151
+
152
#if defined(__ARM_NEON)
// Horizontal sum: reduces all 4 float lanes to one scalar.
inline float hsum(float32x4_t x) {
    return vaddvq_f32(x);
}
#endif  // __ARM_NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
// Horizontal sum of 8 fp16 lanes; widens both halves to f32 first so the
// reduction itself is done in single precision.
inline float hsum(float16x8_t x) {
    return vaddvq_f32(vaddq_f32(vcvt_f32_f16(vget_low_f16(x)),
                                vcvt_f32_f16(vget_high_f16(x))));
}
#endif  // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
// Horizontal sum of a 128-bit vector (4 floats).
inline float hsum(__m128 x) {
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
    // SSE3+ path: fold high half onto low, then pairwise via movehdup.
    x = _mm_add_ps(x, _mm_movehl_ps(x, x));
    x = _mm_add_ss(x, _mm_movehdup_ps(x));
#else
    // Plain SSE path: shuffle-and-add twice.
    __m128 t;
    t = _mm_shuffle_ps(x, x, _MM_SHUFFLE(2, 3, 0, 1));
    x = _mm_add_ps(x, t);
    t = _mm_movehl_ps(t, x);
    x = _mm_add_ss(x, t);
#endif
    return _mm_cvtss_f32(x);
}
#endif

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
// Horizontal sum of a 256-bit vector: add the two 128-bit halves, then
// reduce with the __m128 overload above.
inline float hsum(__m256 x) {
    return hsum(_mm_add_ps(_mm256_extractf128_ps(x, 1),
                           _mm256_castps256_ps128(x)));
}
#endif  // __AVX__

#if defined(__AVX512F__)
// AVX-512 provides a single-instruction-sequence reduction.
inline float hsum(__m512 x) {
    return _mm512_reduce_add_ps(x);
}
#endif  // __AVX512F__
193
+
194
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
195
+ // VECTORIZED MEMORY LOADING
196
+
197
// Loads an unaligned vector of type T from an array of element type U,
// converting fp16 -> fp32 where the target vector type requires it.
// Only the explicit specializations below are defined.
template <typename T, typename U> T load(const U *);

#if defined(__ARM_NEON)
template <> inline float32x4_t load(const float *p) {
    return vld1q_f32(p);
}
#if !defined(_MSC_VER)
// MSVC lacks the float16_t NEON load/convert intrinsics used here.
template <> inline float16x8_t load(const ggml_fp16_t *p) {
    return vld1q_f16((const float16_t *)p);
}
template <> inline float32x4_t load(const ggml_fp16_t *p) {
    // Load 4 halves and widen to f32.
    return vcvt_f32_f16(vld1_f16((const float16_t *)p));
}
#endif  // _MSC_VER
#endif  // __ARM_NEON

#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
template <> inline __m128 load(const float *p) {
    return _mm_loadu_ps(p);
}
#endif  // __SSE__

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
template <> inline __m256 load(const float *p) {
    return _mm256_loadu_ps(p);
}
#endif  // __AVX__

#if defined(__F16C__)
// 8 halves -> 8 floats via F16C conversion.
template <> inline __m256 load(const ggml_fp16_t *p) {
    return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)p));
}
#endif  // __F16C__

#if defined(__AVX512F__)
template <> inline __m512 load(const float *p) {
    return _mm512_loadu_ps(p);
}
// 16 halves -> 16 floats.
template <> inline __m512 load(const ggml_fp16_t *p) {
    return _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)p));
}
#endif  // __AVX512F__
239
+
240
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
241
+ // FLOATING POINT MATRIX MULTIPLICATION
242
+
243
// Floating-point GEMM kernel computing C = Aᵀ * B (column major).
//
// Template parameters:
//   KN - vector width in elements (how many k-columns one load covers)
//   D  - accumulator vector type (e.g. __m256)
//   V  - load/operand vector type
//   TA, TB, TC - element types of A, B and C
//
// Constructor parameters mirror a classic BLAS call: k is the shared inner
// dimension, lda/ldb/ldc are row strides, ith/nth identify this thread
// within the caller's thread pool (tiles are statically partitioned).
template <int KN, typename D, typename V, typename TA, typename TB, typename TC>
class tinyBLAS {
  public:
    tinyBLAS(int k,
             const TA *A, int lda,
             const TB *B, int ldb,
             TC *C, int ldc,
             int ith, int nth)
        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
    }

    // Entry point; only the COMPUTE task phase does any work.
    void matmul(int m, int n, int task) {
        if (task == GGML_TASK_TYPE_COMPUTE)
            mnpack(0, m, 0, n);
    }

  private:
    // Recursively tiles the [m0,m) x [n0,n) output region. The switch
    // dispatches on min(rows,5) x min(cols,5) to pick the largest register
    // tile that fits; the two trailing recursive calls cover the right and
    // bottom remainders left over when the region isn't a multiple of the
    // tile size. Big tiles (5x5 etc.) only exist with 32 vector registers;
    // otherwise those cases fall through to a 4x3 kernel.
    NOINLINE void mnpack(int m0, int m, int n0, int n) {
        int mc, nc, mp, np;
        switch ((std::min(m - m0, 5) << 4) | std::min(n - n0, 5)) {
#if VECTOR_REGISTERS == 32
        case 0x55:
            mc = 5;
            nc = 5;
            gemm<5, 5>(m0, m, n0, n);
            break;
        case 0x45:
            mc = 4;
            nc = 5;
            gemm<4, 5>(m0, m, n0, n);
            break;
        case 0x54:
            mc = 5;
            nc = 4;
            gemm<5, 4>(m0, m, n0, n);
            break;
        case 0x44:
            mc = 4;
            nc = 4;
            gemm<4, 4>(m0, m, n0, n);
            break;
        case 0x53:
            mc = 5;
            nc = 3;
            gemm<5, 3>(m0, m, n0, n);
            break;
        case 0x35:
            mc = 3;
            nc = 5;
            gemm<3, 5>(m0, m, n0, n);
            break;
        case 0x43:
            mc = 4;
            nc = 3;
            gemm<4, 3>(m0, m, n0, n);
            break;
#else
        case 0x55:
        case 0x54:
        case 0x53:
        case 0x45:
        case 0x44:
        case 0x43:
            mc = 4;
            nc = 3;
            gemm<4, 3>(m0, m, n0, n);
            break;
        case 0x35:
#endif
        case 0x34:
            mc = 3;
            nc = 4;
            gemm<3, 4>(m0, m, n0, n);
            break;
        case 0x52:
            mc = 5;
            nc = 2;
            gemm<5, 2>(m0, m, n0, n);
            break;
        case 0x33:
            mc = 3;
            nc = 3;
            gemm<3, 3>(m0, m, n0, n);
            break;
        case 0x25:
            mc = 2;
            nc = 5;
            gemm<2, 5>(m0, m, n0, n);
            break;
        case 0x42:
            mc = 4;
            nc = 2;
            gemm<4, 2>(m0, m, n0, n);
            break;
        case 0x24:
            mc = 2;
            nc = 4;
            gemm<2, 4>(m0, m, n0, n);
            break;
        case 0x32:
            mc = 3;
            nc = 2;
            gemm<3, 2>(m0, m, n0, n);
            break;
        case 0x23:
            mc = 2;
            nc = 3;
            gemm<2, 3>(m0, m, n0, n);
            break;
        case 0x51:
            mc = 5;
            nc = 1;
            gemm<5, 1>(m0, m, n0, n);
            break;
        case 0x41:
            mc = 4;
            nc = 1;
            gemm<4, 1>(m0, m, n0, n);
            break;
        case 0x22:
            mc = 2;
            nc = 2;
            gemm<2, 2>(m0, m, n0, n);
            break;
        case 0x15:
            mc = 1;
            nc = 5;
            gemm<1, 5>(m0, m, n0, n);
            break;
        case 0x14:
            mc = 1;
            nc = 4;
            gemm<1, 4>(m0, m, n0, n);
            break;
        case 0x31:
            mc = 3;
            nc = 1;
            gemm<3, 1>(m0, m, n0, n);
            break;
        case 0x13:
            mc = 1;
            nc = 3;
            gemm<1, 3>(m0, m, n0, n);
            break;
        case 0x21:
            mc = 2;
            nc = 1;
            gemm<2, 1>(m0, m, n0, n);
            break;
        case 0x12:
            mc = 1;
            nc = 2;
            gemm<1, 2>(m0, m, n0, n);
            break;
        case 0x11:
            mc = 1;
            nc = 1;
            gemm<1, 1>(m0, m, n0, n);
            break;
        default:
            // Empty region (m <= m0 or n <= n0): nothing to do.
            return;
        }
        // mp/np: extent actually covered by whole tiles; recurse on the
        // right-edge and bottom-edge remainder strips.
        mp = m0 + (m - m0) / mc * mc;
        np = n0 + (n - n0) / nc * nc;
        mnpack(mp, m, n0, np);
        mnpack(m0, m, np, n);
    }

    // Computes one RM x RN register tile per job. Tiles are numbered in
    // row-major job order and split evenly across nth threads; thread ith
    // takes jobs [start, end). Accumulation runs over k in steps of KN
    // using vector FMAs, then each accumulator is horizontally summed into
    // its C element.
    template <int RM, int RN>
    NOINLINE void gemm(int m0, int m, int n0, int n) {
        int ytiles = (m - m0) / RM;
        int xtiles = (n - n0) / RN;
        int tiles = xtiles * ytiles;
        int duty = (tiles + nth - 1) / nth;
        int start = duty * ith;
        int end = start + duty;
        if (end > tiles)
            end = tiles;
        for (int job = start; job < end; ++job) {
            int ii = m0 + job / xtiles * RM;
            int jj = n0 + job % xtiles * RN;
            D Cv[RN][RM] = {};
            for (int l = 0; l < k; l += KN)
                for (int j = 0; j < RN; ++j)
                    for (int i = 0; i < RM; ++i)
                        Cv[j][i] = madd(load<V>(A + lda * (ii + i) + l),
                                        load<V>(B + ldb * (jj + j) + l),
                                        Cv[j][i]);
            for (int j = 0; j < RN; ++j)
                for (int i = 0; i < RM; ++i)
                    C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
        }
    }

    const TA *const A;   // m x k input (transposed operand)
    const TB *const B;   // n x k input
    TC *const C;         // m x n output
    const int k;         // shared inner dimension
    const int lda;       // row stride of A
    const int ldb;       // row stride of B
    const int ldc;       // row stride of C
    const int ith;       // this thread's id
    const int nth;       // total thread count
};
447
+
448
+ //////////////////////////////////////////////////////////////////////////////////////////
449
+ // QUANT ZERO MATRIX MULTIPLICATION
450
+
451
#if defined(__ARM_FEATURE_DOTPROD)
// Quantized GEMM kernel for ARM with the dot-product extension.
// A may be block_q8_0 or block_q4_0 (selected via TA); B is always
// block_q8_0; output C is float. Each block holds 32 quantized values
// plus an fp16 scale `d`, so one l-step consumes 32 k-columns.
template <typename TA>
class tinyBLAS_Q0_ARM {
  public:
    tinyBLAS_Q0_ARM(int k,
                    const TA *A, int lda,
                    const block_q8_0 *B, int ldb,
                    float *C, int ldc,
                    int ith, int nth)
        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
    }

    // Entry point; only the COMPUTE task phase does any work.
    void matmul(int m, int n, int task) {
        if (task == GGML_TASK_TYPE_COMPUTE)
            mnpack(0, m, 0, n);
    }

  private:
    // Tiling dispatcher — same scheme as tinyBLAS::mnpack but capped at
    // 3x3 register tiles; recursion covers the remainder strips.
    NOINLINE void mnpack(int m0, int m, int n0, int n) {
        int mc, nc, mp, np;
        switch ((std::min(m - m0, 3) << 4) | std::min(n - n0, 3)) {
        case 0x33:
            mc = 3;
            nc = 3;
            gemm<3, 3>(m0, m, n0, n);
            break;
        case 0x32:
            mc = 3;
            nc = 2;
            gemm<3, 2>(m0, m, n0, n);
            break;
        case 0x23:
            mc = 2;
            nc = 3;
            gemm<2, 3>(m0, m, n0, n);
            break;
        case 0x22:
            mc = 2;
            nc = 2;
            gemm<2, 2>(m0, m, n0, n);
            break;
        case 0x31:
            mc = 3;
            nc = 1;
            gemm<3, 1>(m0, m, n0, n);
            break;
        case 0x13:
            mc = 1;
            nc = 3;
            gemm<1, 3>(m0, m, n0, n);
            break;
        case 0x21:
            mc = 2;
            nc = 1;
            gemm<2, 1>(m0, m, n0, n);
            break;
        case 0x12:
            mc = 1;
            nc = 2;
            gemm<1, 2>(m0, m, n0, n);
            break;
        case 0x11:
            mc = 1;
            nc = 1;
            gemm<1, 1>(m0, m, n0, n);
            break;
        default:
            // Empty region: nothing to do.
            return;
        }
        mp = m0 + (m - m0) / mc * mc;
        np = n0 + (n - n0) / nc * nc;
        mnpack(mp, m, n0, np);
        mnpack(m0, m, np, n);
    }

    // Per-tile kernel. For each quant block pair: two chained vdotq_s32
    // calls sum the 32 int8 products into 4 int32 lanes, which are
    // converted to float and scaled by the product of the two block
    // scales (vmlaq_n_f32) into the running accumulator.
    template <int RM, int RN>
    NOINLINE void gemm(int m0, int m, int n0, int n) {
        int ytiles = (m - m0) / RM;
        int xtiles = (n - n0) / RN;
        int tiles = xtiles * ytiles;
        int duty = (tiles + nth - 1) / nth;
        int start = duty * ith;
        int end = start + duty;
        if (end > tiles)
            end = tiles;
        for (int job = start; job < end; ++job) {
            int ii = m0 + job / xtiles * RM;
            int jj = n0 + job % xtiles * RN;
            float32x4_t Cv[RN][RM] = {};
            for (int l = 0; l < k; ++l)
                for (int j = 0; j < RN; ++j)
                    for (int i = 0; i < RM; ++i)
                        Cv[j][i] = vmlaq_n_f32(Cv[j][i],
                                               vcvtq_f32_s32(vdotq_s32(
                                                   vdotq_s32(vdupq_n_s32(0),
                                                             load_lo(A + lda * (ii + i) + l),
                                                             load_lo(B + ldb * (jj + j) + l)),
                                                   load_hi(A + lda * (ii + i) + l),
                                                   load_hi(B + ldb * (jj + j) + l))),
                                               unhalf(A[lda * (ii + i) + l].d) *
                                               unhalf(B[ldb * (jj + j) + l].d));
            for (int j = 0; j < RN; ++j)
                for (int i = 0; i < RM; ++i)
                    C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
        }
    }

    // q8_0: 32 raw int8 values; lo/hi are the first/last 16.
    inline int8x16_t load_lo(const block_q8_0 *b) {
        return vld1q_s8(b->qs);
    }

    inline int8x16_t load_hi(const block_q8_0 *b) {
        return vld1q_s8(b->qs + 16);
    }

    // q4_0: 32 nibbles packed in 16 bytes; low nibbles then high nibbles,
    // each recentered from [0,15] to [-8,7] by subtracting 8.
    inline int8x16_t load_lo(const block_q4_0 *b) {
        return vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vld1q_u8(b->qs),
                                                     vdupq_n_u8(0x0f))),
                        vdupq_n_s8(0x8));
    }

    inline int8x16_t load_hi(const block_q4_0 *b) {
        return vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(vld1q_u8(b->qs), 4)),
                        vdupq_n_s8(0x8));
    }

    const TA *const A;              // m x k quantized input (q8_0 or q4_0 blocks)
    const block_q8_0 *const B;      // n x k quantized input
    float *const C;                 // m x n float output
    const int k;                    // inner dimension, in blocks
    const int lda;
    const int ldb;
    const int ldc;
    const int ith;
    const int nth;
};
#endif  // __ARM_FEATURE_DOTPROD
588
+
589
#if defined(__AVX2__) || defined(__AVX512F__)
// Quantized GEMM kernel for AVX2/AVX-512 hosts. TA/TB are quant block
// types (q8_0 or q4_0 for A, q8_0 for B); TC is float. Each block holds
// 32 quantized values plus an fp16 scale `d`.
template <typename TA, typename TB, typename TC>
class tinyBLAS_Q0_AVX2 {
  public:
    tinyBLAS_Q0_AVX2(int k,
                     const TA *A, int lda,
                     const TB *B, int ldb,
                     TC *C, int ldc,
                     int ith, int nth)
        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
    }

    // Entry point; only the COMPUTE task phase does any work.
    void matmul(int m, int n, int task) {
        if (task == GGML_TASK_TYPE_COMPUTE)
            mnpack(0, m, 0, n);
    }

  private:
    // Tiling dispatcher — capped at 4x4 tiles with 32 vector registers,
    // smaller shapes otherwise; recursion covers the remainder strips.
    void mnpack(int m0, int m, int n0, int n) {
        int mc, nc, mp, np;
        switch ((std::min(m - m0, 4) << 4) | std::min(n - n0, 4)) {
#if VECTOR_REGISTERS == 32
        case 0x44:
            mc = 4;
            nc = 4;
            gemm<4, 4>(m0, m, n0, n);
            break;
        case 0x43:
            mc = 4;
            nc = 3;
            gemm<4, 3>(m0, m, n0, n);
            break;
        case 0x34:
            mc = 3;
            nc = 4;
            gemm<3, 4>(m0, m, n0, n);
            break;
        case 0x33:
            mc = 3;
            nc = 3;
            gemm<3, 3>(m0, m, n0, n);
            break;
        case 0x42:
            mc = 4;
            nc = 2;
            gemm<4, 2>(m0, m, n0, n);
            break;
        case 0x24:
            mc = 2;
            nc = 4;
            gemm<2, 4>(m0, m, n0, n);
            break;
#else
        case 0x44:
        case 0x43:
        case 0x42:
            mc = 4;
            nc = 2;
            gemm<4, 2>(m0, m, n0, n);
            break;
        case 0x34:
        case 0x24:
            mc = 2;
            nc = 4;
            gemm<2, 4>(m0, m, n0, n);
            break;
        case 0x33:
#endif
        case 0x32:
            mc = 3;
            nc = 2;
            gemm<3, 2>(m0, m, n0, n);
            break;
        case 0x23:
            mc = 2;
            nc = 3;
            gemm<2, 3>(m0, m, n0, n);
            break;
        case 0x41:
            mc = 4;
            nc = 1;
            gemm<4, 1>(m0, m, n0, n);
            break;
        case 0x22:
            mc = 2;
            nc = 2;
            gemm<2, 2>(m0, m, n0, n);
            break;
        case 0x14:
            mc = 1;
            nc = 4;
            gemm<1, 4>(m0, m, n0, n);
            break;
        case 0x31:
            mc = 3;
            nc = 1;
            gemm<3, 1>(m0, m, n0, n);
            break;
        case 0x13:
            mc = 1;
            nc = 3;
            gemm<1, 3>(m0, m, n0, n);
            break;
        case 0x21:
            mc = 2;
            nc = 1;
            gemm<2, 1>(m0, m, n0, n);
            break;
        case 0x12:
            mc = 1;
            nc = 2;
            gemm<1, 2>(m0, m, n0, n);
            break;
        case 0x11:
            mc = 1;
            nc = 1;
            gemm<1, 1>(m0, m, n0, n);
            break;
        default:
            // Empty region: nothing to do.
            return;
        }
        mp = m0 + (m - m0) / mc * mc;
        np = n0 + (n - n0) / nc * nc;
        mnpack(mp, m, n0, np);
        mnpack(m0, m, np, n);
    }

    // Per-tile kernel. updot's underlying maddubs instruction needs an
    // unsigned first operand, so the A vector is passed as its absolute
    // value (_mm256_sign_epi8(a, a)) and A's sign is transferred onto the
    // B vector (_mm256_sign_epi8(b, a)) — the product is unchanged. The
    // int32 dot result is scaled by the two fp16 block scales and
    // accumulated with a fused multiply-add.
    template <int RM, int RN>
    NOINLINE void gemm(int m0, int m, int n0, int n) {
        int ytiles = (m - m0) / RM;
        int xtiles = (n - n0) / RN;
        int tiles = xtiles * ytiles;
        int duty = (tiles + nth - 1) / nth;
        int start = duty * ith;
        int end = start + duty;
        if (end > tiles)
            end = tiles;
        for (int job = start; job < end; ++job) {
            int ii = m0 + job / xtiles * RM;
            int jj = n0 + job % xtiles * RN;
            __m256 Cv[RN][RM] = {};
            for (int l = 0; l < k; ++l)
                for (int j = 0; j < RN; ++j)
                    for (int i = 0; i < RM; ++i)
                        Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) *
                                                       unhalf(B[ldb * (jj + j) + l].d)),
                                        updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
                                                               load(A + lda * (ii + i) + l)),
                                              _mm256_sign_epi8(load(B + ldb * (jj + j) + l),
                                                               load(A + lda * (ii + i) + l))),
                                        Cv[j][i]);
            for (int j = 0; j < RN; ++j)
                for (int i = 0; i < RM; ++i)
                    C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
        }
    }

    // q8_0: 32 raw int8 values in one 256-bit load.
    inline __m256i load(const block_q8_0 *b) {
        return _mm256_loadu_si256((const __m256i *)b->qs);
    }

    // q4_0: unpack 32 nibbles, then recenter [0,15] -> [-8,7].
    inline __m256i load(const block_q4_0 *b) {
        return _mm256_sub_epi8(denibble(b->qs), _mm256_set1_epi8(8));
    }

    // Dot product of 32 signed byte pairs into 8 int32 lanes, returned as
    // float. Uses the VNNI dpbusd instruction when available; otherwise
    // maddubs (u8 x s8 -> s16 pairs) followed by a madd with ones to
    // widen to int32.
    inline __m256 updot(__m256i u, __m256i s) {
        __m256i res;
#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__))
        res = _mm256_dpbusd_epi32(_mm256_setzero_si256(), u, s);
#else
        res = _mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_maddubs_epi16(u, s));
#endif
        return _mm256_cvtepi32_ps(res);
    }

    // Expands 16 bytes of packed nibbles into 32 bytes: low nibbles in
    // the low 128-bit lane, high nibbles in the high lane.
    static inline __m256i denibble(const uint8_t *p) {
        __m128i x = _mm_loadu_si128((const __m128i *)p);
        return _mm256_and_si256(_mm256_set1_epi8(15),
                                _mm256_insertf128_si256(_mm256_castsi128_si256(x),
                                                        _mm_srli_epi16(x, 4), 1));
    }

    const TA *const A;   // m x k quantized input
    const TB *const B;   // n x k quantized input
    TC *const C;         // m x n float output
    const int k;         // inner dimension, in blocks
    const int lda;
    const int ldb;
    const int ldc;
    const int ith;
    const int nth;
};
#endif  // __AVX2__
782
+
783
+ } // namespace
784
+
785
/**
 * Performs optimized matrix multiplication on CPU.
 *
 * This subroutine may compute C = Aᵀ * B with column major ordering.
 * Despite its name, this isn't a generalized implementation. Work is
 * only performed when a handwritten kernel is written and available.
 * Otherwise the caller should fall back to a general matmul routine.
 *
 * For example, for single-threaded single-precision GEMM you can say
 *
 *     llamafile_sgemm(m, n, k, A, lda, B, ldb, C, ldc,
 *                     0, 1, GGML_TASK_TYPE_COMPUTE,
 *                     GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32);
 *
 * @param m is rows in `A` and `C`
 * @param n is cols in `B` and `C`
 * @param k is cols in `A` and rows in `B`
 * @param A is first input matrix (always transposed)
 * @param lda is row stride of `A`
 * @param B is second input matrix (never transposed)
 * @param ldb is row stride of `B`
 * @param C is input/output array of output matrices
 * @param ldc is row stride of `C`
 * @param ith is thread id (must be less than `nth`)
 * @param nth is number of threads (must be greater than zero)
 * @param task is GGML task type
 * @param Atype is GGML data type of `A`
 * @param Btype is GGML data type of `B`
 * @param Ctype is GGML data type of `C`
 * @return true if this function was able to service the matmul request
 */
bool llamafile_sgemm(int m, int n, int k, const void *A, int lda, const void *B, int ldb, void *C,
                     int ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype) {

    assert(m >= 0);
    assert(n >= 0);
    assert(k >= 0);
    assert(lda >= k);
    assert(ldb >= k);
    assert(ldc >= m);
    assert(nth > 0);
    assert(ith < nth);
    // Total element counts must fit in the kernels' 32-bit int indexing.
    assert(1ll * lda * m <= 0x7fffffff);
    assert(1ll * ldb * n <= 0x7fffffff);
    assert(1ll * ldc * n <= 0x7fffffff);

    // All kernels produce float output.
    if (Ctype != GGML_TYPE_F32)
        return false;

    // Dispatch on A's type, then pick the widest kernel the build target
    // supports. Each branch also rejects k values that aren't a multiple
    // of the kernel's vector width, and B types the kernel can't consume.
    switch (Atype) {

    case GGML_TYPE_F32: {
        if (Btype != GGML_TYPE_F32)
            return false;
#if defined(__AVX512F__)
        if (k % 16)
            return false;
        tinyBLAS<16, __m512, __m512, float, float, float> tb{
            k, (const float *)A, lda,
            (const float *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#elif defined(__AVX__) || defined(__AVX2__)
        if (k % 8)
            return false;
        tinyBLAS<8, __m256, __m256, float, float, float> tb{
            k, (const float *)A, lda,
            (const float *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#elif defined(__ARM_NEON)
        if (n < 4)
            return false;
        if (k % 4)
            return false;
        tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{
            k, (const float *)A, lda,
            (const float *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#else
        return false;
#endif
    }

    case GGML_TYPE_F16: {
#if defined(__AVX512F__)
        if (k % 16)
            return false;
        if (Btype != GGML_TYPE_F32)
            return false;
        tinyBLAS<16, __m512, __m512, ggml_fp16_t, float, float> tb{
            k, (const ggml_fp16_t *)A, lda,
            (const float *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#elif (defined(__AVX__) || defined(__AVX2__)) && defined(__F16C__)
        if (k % 8)
            return false;
        if (Btype != GGML_TYPE_F32)
            return false;
        tinyBLAS<8, __m256, __m256, ggml_fp16_t, float, float> tb{
            k, (const ggml_fp16_t *)A, lda,
            (const float *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
        // Native fp16 arithmetic path: B must also be fp16.
        if (n < 8)
            return false;
        if (k % 8)
            return false;
        if (Btype != GGML_TYPE_F16)
            return false;
        tinyBLAS<8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, float> tb{
            k, (const ggml_fp16_t *)A, lda,
            (const ggml_fp16_t *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#elif defined(__ARM_NEON) && !defined(_MSC_VER)
        if (k % 4)
            return false;
        if (Btype != GGML_TYPE_F32)
            return false;
        tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, float, float> tb{
            k, (const ggml_fp16_t *)A, lda,
            (const float *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#else
        return false;
#endif
    }

    case GGML_TYPE_Q8_0: {
        if (Btype != GGML_TYPE_Q8_0)
            return false;
#if defined(__AVX2__) || defined(__AVX512F__)
        tinyBLAS_Q0_AVX2<block_q8_0, block_q8_0, float> tb{
            k, (const block_q8_0 *)A, lda,
            (const block_q8_0 *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#elif defined(__ARM_FEATURE_DOTPROD)
        tinyBLAS_Q0_ARM<block_q8_0> tb{
            k, (const block_q8_0 *)A, lda,
            (const block_q8_0 *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#else
        return false;
#endif
    }

    case GGML_TYPE_Q4_0: {
        if (Btype != GGML_TYPE_Q8_0)
            return false;
#if defined(__AVX2__) || defined(__AVX512F__)
        tinyBLAS_Q0_AVX2<block_q4_0, block_q8_0, float> tb{
            k, (const block_q4_0 *)A, lda,
            (const block_q8_0 *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#elif defined(__ARM_FEATURE_DOTPROD)
        tinyBLAS_Q0_ARM<block_q4_0> tb{
            k, (const block_q4_0 *)A, lda,
            (const block_q8_0 *)B, ldb,
            (float *)C, ldc,
            ith, nth};
        tb.matmul(m, n, task);
        return true;
#else
        return false;
#endif
    }

    default:
        return false;
    }

    // Unreachable on current control flow; these silence unused-parameter
    // warnings on build configurations where branches above compile out.
    (void)m;
    (void)n;
    (void)k;
    (void)A;
    (void)lda;
    (void)B;
    (void)ldb;
    (void)C;
    (void)ldc;
    (void)ith;
    (void)nth;
    (void)task;
    (void)Atype;
    (void)Btype;
    (void)Ctype;
}