cui-llama.rn 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/LICENSE +20 -0
  2. package/README.md +330 -0
  3. package/android/build.gradle +107 -0
  4. package/android/gradle.properties +5 -0
  5. package/android/src/main/AndroidManifest.xml +4 -0
  6. package/android/src/main/CMakeLists.txt +69 -0
  7. package/android/src/main/java/com/rnllama/LlamaContext.java +353 -0
  8. package/android/src/main/java/com/rnllama/RNLlama.java +446 -0
  9. package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -0
  10. package/android/src/main/jni.cpp +635 -0
  11. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +94 -0
  12. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +95 -0
  13. package/cpp/README.md +4 -0
  14. package/cpp/common.cpp +3237 -0
  15. package/cpp/common.h +467 -0
  16. package/cpp/ggml-aarch64.c +2193 -0
  17. package/cpp/ggml-aarch64.h +39 -0
  18. package/cpp/ggml-alloc.c +1041 -0
  19. package/cpp/ggml-alloc.h +76 -0
  20. package/cpp/ggml-backend-impl.h +153 -0
  21. package/cpp/ggml-backend.c +2225 -0
  22. package/cpp/ggml-backend.h +236 -0
  23. package/cpp/ggml-common.h +1829 -0
  24. package/cpp/ggml-impl.h +655 -0
  25. package/cpp/ggml-metal.h +65 -0
  26. package/cpp/ggml-metal.m +3273 -0
  27. package/cpp/ggml-quants.c +15022 -0
  28. package/cpp/ggml-quants.h +132 -0
  29. package/cpp/ggml.c +22034 -0
  30. package/cpp/ggml.h +2444 -0
  31. package/cpp/grammar-parser.cpp +536 -0
  32. package/cpp/grammar-parser.h +29 -0
  33. package/cpp/json-schema-to-grammar.cpp +1045 -0
  34. package/cpp/json-schema-to-grammar.h +8 -0
  35. package/cpp/json.hpp +24766 -0
  36. package/cpp/llama.cpp +21789 -0
  37. package/cpp/llama.h +1201 -0
  38. package/cpp/log.h +737 -0
  39. package/cpp/rn-llama.hpp +630 -0
  40. package/cpp/sampling.cpp +460 -0
  41. package/cpp/sampling.h +160 -0
  42. package/cpp/sgemm.cpp +1027 -0
  43. package/cpp/sgemm.h +14 -0
  44. package/cpp/unicode-data.cpp +7032 -0
  45. package/cpp/unicode-data.h +20 -0
  46. package/cpp/unicode.cpp +812 -0
  47. package/cpp/unicode.h +64 -0
  48. package/ios/RNLlama.h +11 -0
  49. package/ios/RNLlama.mm +302 -0
  50. package/ios/RNLlama.xcodeproj/project.pbxproj +278 -0
  51. package/ios/RNLlamaContext.h +39 -0
  52. package/ios/RNLlamaContext.mm +426 -0
  53. package/jest/mock.js +169 -0
  54. package/lib/commonjs/NativeRNLlama.js +10 -0
  55. package/lib/commonjs/NativeRNLlama.js.map +1 -0
  56. package/lib/commonjs/grammar.js +574 -0
  57. package/lib/commonjs/grammar.js.map +1 -0
  58. package/lib/commonjs/index.js +151 -0
  59. package/lib/commonjs/index.js.map +1 -0
  60. package/lib/module/NativeRNLlama.js +3 -0
  61. package/lib/module/NativeRNLlama.js.map +1 -0
  62. package/lib/module/grammar.js +566 -0
  63. package/lib/module/grammar.js.map +1 -0
  64. package/lib/module/index.js +129 -0
  65. package/lib/module/index.js.map +1 -0
  66. package/lib/typescript/NativeRNLlama.d.ts +107 -0
  67. package/lib/typescript/NativeRNLlama.d.ts.map +1 -0
  68. package/lib/typescript/grammar.d.ts +38 -0
  69. package/lib/typescript/grammar.d.ts.map +1 -0
  70. package/lib/typescript/index.d.ts +46 -0
  71. package/lib/typescript/index.d.ts.map +1 -0
  72. package/llama-rn.podspec +56 -0
  73. package/package.json +230 -0
  74. package/src/NativeRNLlama.ts +132 -0
  75. package/src/grammar.ts +849 -0
  76. package/src/index.ts +182 -0
package/cpp/sgemm.cpp ADDED
@@ -0,0 +1,1027 @@
1
+ // Copyright 2024 Mozilla Foundation
2
+ //
3
+ // Permission is hereby granted, free of charge, to any person obtaining
4
+ // a copy of this software and associated documentation files (the
5
+ // "Software"), to deal in the Software without restriction, including
6
+ // without limitation the rights to use, copy, modify, merge, publish,
7
+ // distribute, sublicense, and/or sell copies of the Software, and to
8
+ // permit persons to whom the Software is furnished to do so, subject to
9
+ // the following conditions:
10
+ //
11
+ // The above copyright notice and this permission notice shall be
12
+ // included in all copies or substantial portions of the Software.
13
+ //
14
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18
+ // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19
+ // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20
+ // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ // SOFTWARE.
22
+
23
+ //
24
+ //                   _   _          ___ _      _   ___
25
+ //                  | |_(_)_ _ _  _| _ ) |    /_\ / __|
26
+ //                  |  _| | ' \ || | _ \ |__ / _ \\__ \.
27
+ //                   \__|_|_||_\_, |___/____/_/ \_\___/
28
+ //                             |__/
29
+ //
30
+ // BASIC LINEAR ALGEBRA SUBPROGRAMS
31
+ //
32
+ //
33
+ // This file implements multithreaded CPU matrix multiplication for the
34
+ // common contiguous use case C = Aᵀ * B. These kernels are designed to
35
+ // have excellent performance[1] for matrices that fit in the CPU cache
36
+ // without imposing any overhead such as cache filling or malloc calls.
37
+ //
38
+ // This implementation does not guarantee any upper bound with rounding
39
+ // errors, which grow along with k. Our goal's to maximally exploit the
40
+ // hardware for performance, and then use whatever resources remain for
41
+ // improving numerical accuracy.
42
+ //
43
+ // [1] J. Tunney, ‘LLaMA Now Goes Faster on CPUs’, Mar. 2024. [Online].
44
+ // Available: https://justine.lol/matmul/. [Accessed: 29-Mar-2024].
45
+
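// [Editor's sketch -- not part of the upstream sgemm.cpp listed here.]
// A scalar reference for the layout convention the kernels below assume:
// both inputs keep k contiguous elements per row (row strides lda/ldb),
// and the output C is column major with leading dimension ldc, so the
// result is C = A^T * B. The indexing mirrors tinyBLAS::gemm() further down.
static void sgemm_reference(int64_t m, int64_t n, int64_t k,
                            const float *A, int64_t lda,
                            const float *B, int64_t ldb,
                            float *C, int64_t ldc) {
    for (int64_t j = 0; j < n; ++j)         // column of C, row of B
        for (int64_t i = 0; i < m; ++i) {   // row of C, row of A
            float sum = 0;
            for (int64_t l = 0; l < k; ++l)
                sum += A[lda * i + l] * B[ldb * j + l];
            C[ldc * j + i] = sum;
        }
}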
46
+ #if defined(__GNUC__)
47
+ #pragma GCC diagnostic ignored "-Wpedantic"
48
+ #pragma GCC diagnostic ignored "-Wignored-attributes"
49
+ #endif
50
+
51
+ #include "sgemm.h"
52
+ #include "ggml-impl.h"
53
+ #include "ggml-quants.h"
54
+
55
+ #ifdef _MSC_VER
56
+ #define NOINLINE __declspec(noinline)
57
+ #else
58
+ #define NOINLINE __attribute__((__noinline__))
59
+ #endif
60
+
61
+ #if defined(__ARM_NEON) || defined(__AVX512F__)
62
+ #define VECTOR_REGISTERS 32
63
+ #else
64
+ #define VECTOR_REGISTERS 16
65
+ #endif
66
+
67
+ #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
68
+
69
+ namespace {
70
+
71
+ inline float unhalf(lm_ggml_fp16_t d) {
72
+ return LM_GGML_FP16_TO_FP32(d);
73
+ }
74
+
75
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
76
+ // VECTORIZED ARITHMETIC OPERATIONS
77
+
78
+ #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
79
+ inline __m128 add(__m128 x, __m128 y) { return _mm_add_ps(x, y); }
80
+ inline __m128 sub(__m128 x, __m128 y) { return _mm_sub_ps(x, y); }
81
+ inline __m128 mul(__m128 x, __m128 y) { return _mm_mul_ps(x, y); }
82
+ #endif // __SSE__
83
+
84
+ #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
85
+ inline __m256 add(__m256 x, __m256 y) { return _mm256_add_ps(x, y); }
86
+ inline __m256 sub(__m256 x, __m256 y) { return _mm256_sub_ps(x, y); }
87
+ inline __m256 mul(__m256 x, __m256 y) { return _mm256_mul_ps(x, y); }
88
+ #endif // __AVX__
89
+
90
+ #if defined(__AVX512F__)
91
+ inline __m512 add(__m512 x, __m512 y) { return _mm512_add_ps(x, y); }
92
+ inline __m512 sub(__m512 x, __m512 y) { return _mm512_sub_ps(x, y); }
93
+ inline __m512 mul(__m512 x, __m512 y) { return _mm512_mul_ps(x, y); }
94
+ #endif // __AVX512F__
95
+
96
+ #if defined(__ARM_NEON)
97
+ inline float32x4_t add(float32x4_t x, float32x4_t y) { return vaddq_f32(x, y); }
98
+ inline float32x4_t sub(float32x4_t x, float32x4_t y) { return vsubq_f32(x, y); }
99
+ inline float32x4_t mul(float32x4_t x, float32x4_t y) { return vmulq_f32(x, y); }
100
+ #endif // __ARM_NEON
101
+
102
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
103
+ inline float16x8_t add(float16x8_t x, float16x8_t y) { return vaddq_f16(x, y); }
104
+ inline float16x8_t sub(float16x8_t x, float16x8_t y) { return vsubq_f16(x, y); }
105
+ inline float16x8_t mul(float16x8_t x, float16x8_t y) { return vmulq_f16(x, y); }
106
+ #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
107
+
108
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
109
+ // VECTORIZED FUSED MULTIPLY ADD
110
+
111
+ /**
112
+ * Computes a * b + c.
113
+ */
114
+ template <typename T, typename U>
115
+ inline U madd(T a, T b, U c) {
116
+ return add(mul(a, b), c);
117
+ }
118
+
119
+ #if defined(__FMA__)
120
+ #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
121
+ template <>
122
+ inline __m256 madd(__m256 a, __m256 b, __m256 c) {
123
+ return _mm256_fmadd_ps(a, b, c);
124
+ }
125
+ #endif
126
+ #if defined(__AVX512F__)
127
+ template <>
128
+ inline __m512 madd(__m512 a, __m512 b, __m512 c) {
129
+ return _mm512_fmadd_ps(a, b, c);
130
+ }
131
+ #endif
132
+ #endif
133
+
134
+ #if defined(__ARM_FEATURE_FMA)
135
+ template <>
136
+ inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) {
137
+ return vfmaq_f32(c, b, a);
138
+ }
139
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
140
+ template <>
141
+ inline float16x8_t madd(float16x8_t a, float16x8_t b, float16x8_t c) {
142
+ return vfmaq_f16(c, b, a);
143
+ }
144
+ #endif
145
+ #endif
146
+
147
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
148
+ // VECTORIZED HORIZONTAL SUM
149
+
150
+ #if defined(__ARM_NEON)
151
+ inline float hsum(float32x4_t x) {
152
+ return vaddvq_f32(x);
153
+ }
154
+ #endif // __ARM_NEON
155
+
156
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
157
+ inline float hsum(float16x8_t x) {
158
+ return vaddvq_f32(vaddq_f32(vcvt_f32_f16(vget_low_f16(x)),
159
+ vcvt_f32_f16(vget_high_f16(x))));
160
+ }
161
+ #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
162
+
163
+ #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
164
+ inline float hsum(__m128 x) {
165
+ #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
166
+ x = _mm_add_ps(x, _mm_movehl_ps(x, x));
167
+ x = _mm_add_ss(x, _mm_movehdup_ps(x));
168
+ #else
169
+ __m128 t;
170
+ t = _mm_shuffle_ps(x, x, _MM_SHUFFLE(2, 3, 0, 1));
171
+ x = _mm_add_ps(x, t);
172
+ t = _mm_movehl_ps(t, x);
173
+ x = _mm_add_ss(x, t);
174
+ #endif
175
+ return _mm_cvtss_f32(x);
176
+ }
177
+ #endif
178
+
179
+ #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
180
+ inline float hsum(__m256 x) {
181
+ return hsum(_mm_add_ps(_mm256_extractf128_ps(x, 1),
182
+ _mm256_castps256_ps128(x)));
183
+ }
184
+ #endif // __AVX__
185
+
186
+ #if defined(__AVX512F__)
187
+ inline float hsum(__m512 x) {
188
+ return _mm512_reduce_add_ps(x);
189
+ }
190
+ #endif // __AVX512F__
191
+
192
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
193
+ // VECTORIZED MEMORY LOADING
194
+
195
+ template <typename T, typename U> T load(const U *);
196
+
197
+ #if defined(__ARM_NEON)
198
+ template <> inline float32x4_t load(const float *p) {
199
+ return vld1q_f32(p);
200
+ }
201
+ #if !defined(_MSC_VER)
202
+ template <> inline float16x8_t load(const lm_ggml_fp16_t *p) {
203
+ return vld1q_f16((const float16_t *)p);
204
+ }
205
+ template <> inline float32x4_t load(const lm_ggml_fp16_t *p) {
206
+ return vcvt_f32_f16(vld1_f16((const float16_t *)p));
207
+ }
208
+ #endif // _MSC_VER
209
+ #endif // __ARM_NEON
210
+
211
+ #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
212
+ template <> inline __m128 load(const float *p) {
213
+ return _mm_loadu_ps(p);
214
+ }
215
+ #endif // __SSE__
216
+
217
+ #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
218
+ template <> inline __m256 load(const float *p) {
219
+ return _mm256_loadu_ps(p);
220
+ }
221
+ #endif // __AVX__
222
+
223
+ #if defined(__F16C__)
224
+ template <> inline __m256 load(const lm_ggml_fp16_t *p) {
225
+ return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)p));
226
+ }
227
+ #endif // __F16C__
228
+
229
+ #if defined(__AVX512F__)
230
+ template <> inline __m512 load(const float *p) {
231
+ return _mm512_loadu_ps(p);
232
+ }
233
+ template <> inline __m512 load(const lm_ggml_fp16_t *p) {
234
+ return _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)p));
235
+ }
236
+ #endif // __AVX512F__
237
+
238
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
239
+ // FLOATING POINT MATRIX MULTIPLICATION
240
+
241
+ template <int KN, typename D, typename V, typename TA, typename TB, typename TC>
242
+ class tinyBLAS {
243
+ public:
244
+ tinyBLAS(int64_t k,
245
+ const TA *A, int64_t lda,
246
+ const TB *B, int64_t ldb,
247
+ TC *C, int64_t ldc,
248
+ int ith, int nth)
249
+ : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
250
+ }
251
+
252
+ void matmul(int64_t m, int64_t n) {
253
+ mnpack(0, m, 0, n);
254
+ }
255
+
256
+ private:
257
+ NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
258
+ int64_t mc, nc, mp, np;
259
+ switch ((MIN(m - m0, 5) << 4) | MIN(n - n0, 5)) {
260
+ #if VECTOR_REGISTERS == 32
261
+ case 0x55:
262
+ mc = 5;
263
+ nc = 5;
264
+ gemm<5, 5>(m0, m, n0, n);
265
+ break;
266
+ case 0x45:
267
+ mc = 4;
268
+ nc = 5;
269
+ gemm<4, 5>(m0, m, n0, n);
270
+ break;
271
+ case 0x54:
272
+ mc = 5;
273
+ nc = 4;
274
+ gemm<5, 4>(m0, m, n0, n);
275
+ break;
276
+ case 0x44:
277
+ mc = 4;
278
+ nc = 4;
279
+ gemm<4, 4>(m0, m, n0, n);
280
+ break;
281
+ case 0x53:
282
+ mc = 5;
283
+ nc = 3;
284
+ gemm<5, 3>(m0, m, n0, n);
285
+ break;
286
+ case 0x35:
287
+ mc = 3;
288
+ nc = 5;
289
+ gemm<3, 5>(m0, m, n0, n);
290
+ break;
291
+ case 0x43:
292
+ mc = 4;
293
+ nc = 3;
294
+ gemm<4, 3>(m0, m, n0, n);
295
+ break;
296
+ #else
297
+ case 0x55:
298
+ case 0x54:
299
+ case 0x53:
300
+ case 0x45:
301
+ case 0x44:
302
+ case 0x43:
303
+ mc = 4;
304
+ nc = 3;
305
+ gemm<4, 3>(m0, m, n0, n);
306
+ break;
307
+ case 0x35:
308
+ #endif
309
+ case 0x34:
310
+ mc = 3;
311
+ nc = 4;
312
+ gemm<3, 4>(m0, m, n0, n);
313
+ break;
314
+ case 0x52:
315
+ mc = 5;
316
+ nc = 2;
317
+ gemm<5, 2>(m0, m, n0, n);
318
+ break;
319
+ case 0x33:
320
+ mc = 3;
321
+ nc = 3;
322
+ gemm<3, 3>(m0, m, n0, n);
323
+ break;
324
+ case 0x25:
325
+ mc = 2;
326
+ nc = 5;
327
+ gemm<2, 5>(m0, m, n0, n);
328
+ break;
329
+ case 0x42:
330
+ mc = 4;
331
+ nc = 2;
332
+ gemm<4, 2>(m0, m, n0, n);
333
+ break;
334
+ case 0x24:
335
+ mc = 2;
336
+ nc = 4;
337
+ gemm<2, 4>(m0, m, n0, n);
338
+ break;
339
+ case 0x32:
340
+ mc = 3;
341
+ nc = 2;
342
+ gemm<3, 2>(m0, m, n0, n);
343
+ break;
344
+ case 0x23:
345
+ mc = 2;
346
+ nc = 3;
347
+ gemm<2, 3>(m0, m, n0, n);
348
+ break;
349
+ case 0x51:
350
+ mc = 5;
351
+ nc = 1;
352
+ gemm<5, 1>(m0, m, n0, n);
353
+ break;
354
+ case 0x41:
355
+ mc = 4;
356
+ nc = 1;
357
+ gemm<4, 1>(m0, m, n0, n);
358
+ break;
359
+ case 0x22:
360
+ mc = 2;
361
+ nc = 2;
362
+ gemm<2, 2>(m0, m, n0, n);
363
+ break;
364
+ case 0x15:
365
+ mc = 1;
366
+ nc = 5;
367
+ gemm<1, 5>(m0, m, n0, n);
368
+ break;
369
+ case 0x14:
370
+ mc = 1;
371
+ nc = 4;
372
+ gemm<1, 4>(m0, m, n0, n);
373
+ break;
374
+ case 0x31:
375
+ mc = 3;
376
+ nc = 1;
377
+ gemm<3, 1>(m0, m, n0, n);
378
+ break;
379
+ case 0x13:
380
+ mc = 1;
381
+ nc = 3;
382
+ gemm<1, 3>(m0, m, n0, n);
383
+ break;
384
+ case 0x21:
385
+ mc = 2;
386
+ nc = 1;
387
+ gemm<2, 1>(m0, m, n0, n);
388
+ break;
389
+ case 0x12:
390
+ mc = 1;
391
+ nc = 2;
392
+ gemm<1, 2>(m0, m, n0, n);
393
+ break;
394
+ case 0x11:
395
+ mc = 1;
396
+ nc = 1;
397
+ gemm<1, 1>(m0, m, n0, n);
398
+ break;
399
+ default:
400
+ return;
401
+ }
402
+ mp = m0 + (m - m0) / mc * mc;
403
+ np = n0 + (n - n0) / nc * nc;
404
+ mnpack(mp, m, n0, np);
405
+ mnpack(m0, m, np, n);
406
+ }
407
+
408
+ template <int RM, int RN>
409
+ NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
410
+ int64_t ytiles = (m - m0) / RM;
411
+ int64_t xtiles = (n - n0) / RN;
412
+ int64_t tiles = xtiles * ytiles;
413
+ int64_t duty = (tiles + nth - 1) / nth;
414
+ int64_t start = duty * ith;
415
+ int64_t end = start + duty;
416
+ if (end > tiles)
417
+ end = tiles;
418
+ for (int64_t job = start; job < end; ++job) {
419
+ int64_t ii = m0 + job / xtiles * RM;
420
+ int64_t jj = n0 + job % xtiles * RN;
421
+ D Cv[RN][RM] = {};
422
+ for (int64_t l = 0; l < k; l += KN)
423
+ for (int64_t j = 0; j < RN; ++j)
424
+ for (int64_t i = 0; i < RM; ++i)
425
+ Cv[j][i] = madd(load<V>(A + lda * (ii + i) + l),
426
+ load<V>(B + ldb * (jj + j) + l),
427
+ Cv[j][i]);
428
+ for (int64_t j = 0; j < RN; ++j)
429
+ for (int64_t i = 0; i < RM; ++i)
430
+ C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
431
+ }
432
+ }
433
+
434
+ const TA *const A;
435
+ const TB *const B;
436
+ TC *const C;
437
+ const int64_t k;
438
+ const int64_t lda;
439
+ const int64_t ldb;
440
+ const int64_t ldc;
441
+ const int ith;
442
+ const int nth;
443
+ };
444
+
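// [Editor's note -- worked example, not part of the upstream file.]
// gemm() above divides the ((m-m0)/RM) x ((n-n0)/RN) grid of output tiles
// among threads in contiguous chunks of duty = ceil(tiles / nth) jobs, so
// thread ith handles jobs [duty*ith, min(duty*(ith+1), tiles)).
// E.g. with tiles = 7 and nth = 2: duty = 4, thread 0 takes jobs 0..3 and
// thread 1 takes jobs 4..6. mnpack() then recurses on the bottom rows
// [mp, m) and the right columns [np, n), so the edges of C that do not
// fill a whole RM x RN tile are still computed.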
445
+ //////////////////////////////////////////////////////////////////////////////////////////
446
+ // QUANT ZERO MATRIX MULTIPLICATION
447
+
448
+ #if defined(__ARM_FEATURE_DOTPROD)
449
+ template <typename TA>
450
+ class tinyBLAS_Q0_ARM {
451
+ public:
452
+ tinyBLAS_Q0_ARM(int64_t k,
453
+ const TA *A, int64_t lda,
454
+ const block_q8_0 *B, int64_t ldb,
455
+ float *C, int64_t ldc,
456
+ int ith, int nth)
457
+ : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
458
+ }
459
+
460
+ void matmul(int64_t m, int64_t n) {
461
+ mnpack(0, m, 0, n);
462
+ }
463
+
464
+ private:
465
+ NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
466
+ int64_t mc, nc, mp, np;
467
+ switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3ll)) {
468
+ case 0x33:
469
+ mc = 3;
470
+ nc = 3;
471
+ gemm<3, 3>(m0, m, n0, n);
472
+ break;
473
+ case 0x32:
474
+ mc = 3;
475
+ nc = 2;
476
+ gemm<3, 2>(m0, m, n0, n);
477
+ break;
478
+ case 0x23:
479
+ mc = 2;
480
+ nc = 3;
481
+ gemm<2, 3>(m0, m, n0, n);
482
+ break;
483
+ case 0x22:
484
+ mc = 2;
485
+ nc = 2;
486
+ gemm<2, 2>(m0, m, n0, n);
487
+ break;
488
+ case 0x31:
489
+ mc = 3;
490
+ nc = 1;
491
+ gemm<3, 1>(m0, m, n0, n);
492
+ break;
493
+ case 0x13:
494
+ mc = 1;
495
+ nc = 3;
496
+ gemm<1, 3>(m0, m, n0, n);
497
+ break;
498
+ case 0x21:
499
+ mc = 2;
500
+ nc = 1;
501
+ gemm<2, 1>(m0, m, n0, n);
502
+ break;
503
+ case 0x12:
504
+ mc = 1;
505
+ nc = 2;
506
+ gemm<1, 2>(m0, m, n0, n);
507
+ break;
508
+ case 0x11:
509
+ mc = 1;
510
+ nc = 1;
511
+ gemm<1, 1>(m0, m, n0, n);
512
+ break;
513
+ default:
514
+ return;
515
+ }
516
+ mp = m0 + (m - m0) / mc * mc;
517
+ np = n0 + (n - n0) / nc * nc;
518
+ mnpack(mp, m, n0, np);
519
+ mnpack(m0, m, np, n);
520
+ }
521
+
522
+ template <int RM, int RN>
523
+ NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
524
+ int64_t ytiles = (m - m0) / RM;
525
+ int64_t xtiles = (n - n0) / RN;
526
+ int64_t tiles = xtiles * ytiles;
527
+ int64_t duty = (tiles + nth - 1) / nth;
528
+ int64_t start = duty * ith;
529
+ int64_t end = start + duty;
530
+ if (end > tiles)
531
+ end = tiles;
532
+ for (int64_t job = start; job < end; ++job) {
533
+ int64_t ii = m0 + job / xtiles * RM;
534
+ int64_t jj = n0 + job % xtiles * RN;
535
+ float32x4_t Cv[RN][RM] = {};
536
+ for (int64_t l = 0; l < k; ++l)
537
+ for (int64_t j = 0; j < RN; ++j)
538
+ for (int64_t i = 0; i < RM; ++i)
539
+ Cv[j][i] = vmlaq_n_f32(Cv[j][i],
540
+ vcvtq_f32_s32(vdotq_s32(
541
+ vdotq_s32(vdupq_n_s32(0),
542
+ load_lo(A + lda * (ii + i) + l),
543
+ load_lo(B + ldb * (jj + j) + l)),
544
+ load_hi(A + lda * (ii + i) + l),
545
+ load_hi(B + ldb * (jj + j) + l))),
546
+ unhalf(A[lda * (ii + i) + l].d) *
547
+ unhalf(B[ldb * (jj + j) + l].d));
548
+ for (int64_t j = 0; j < RN; ++j)
549
+ for (int64_t i = 0; i < RM; ++i)
550
+ C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
551
+ }
552
+ }
553
+
554
+ inline int8x16_t load_lo(const block_q8_0 *b) {
555
+ return vld1q_s8(b->qs);
556
+ }
557
+
558
+ inline int8x16_t load_hi(const block_q8_0 *b) {
559
+ return vld1q_s8(b->qs + 16);
560
+ }
561
+
562
+ inline int8x16_t load_lo(const block_q4_0 *b) {
563
+ return vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vld1q_u8(b->qs),
564
+ vdupq_n_u8(0x0f))),
565
+ vdupq_n_s8(0x8));
566
+ }
567
+
568
+ inline int8x16_t load_hi(const block_q4_0 *b) {
569
+ return vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(vld1q_u8(b->qs), 4)),
570
+ vdupq_n_s8(0x8));
571
+ }
572
+
573
+ const TA *const A;
574
+ const block_q8_0 *const B;
575
+ float *const C;
576
+ const int64_t k;
577
+ const int64_t lda;
578
+ const int64_t ldb;
579
+ const int64_t ldc;
580
+ const int ith;
581
+ const int nth;
582
+ };
583
+ #endif // __ARM_FEATURE_DOTPROD
584
+
585
+ #if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
586
+ template <typename TA, typename TB, typename TC>
587
+ class tinyBLAS_Q0_AVX {
588
+ public:
589
+ tinyBLAS_Q0_AVX(int64_t k,
590
+ const TA *A, int64_t lda,
591
+ const TB *B, int64_t ldb,
592
+ TC *C, int64_t ldc,
593
+ int ith, int nth)
594
+ : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
595
+ }
596
+
597
+ void matmul(int64_t m, int64_t n) {
598
+ mnpack(0, m, 0, n);
599
+ }
600
+
601
+ private:
602
+ void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
603
+ int64_t mc, nc, mp, np;
604
+ switch ((MIN(m - m0, 4) << 4) | MIN(n - n0, 4)) {
605
+ #if VECTOR_REGISTERS == 32
606
+ case 0x44:
607
+ mc = 4;
608
+ nc = 4;
609
+ gemm<4, 4>(m0, m, n0, n);
610
+ break;
611
+ case 0x43:
612
+ mc = 4;
613
+ nc = 3;
614
+ gemm<4, 3>(m0, m, n0, n);
615
+ break;
616
+ case 0x34:
617
+ mc = 3;
618
+ nc = 4;
619
+ gemm<3, 4>(m0, m, n0, n);
620
+ break;
621
+ case 0x33:
622
+ mc = 3;
623
+ nc = 3;
624
+ gemm<3, 3>(m0, m, n0, n);
625
+ break;
626
+ case 0x42:
627
+ mc = 4;
628
+ nc = 2;
629
+ gemm<4, 2>(m0, m, n0, n);
630
+ break;
631
+ case 0x24:
632
+ mc = 2;
633
+ nc = 4;
634
+ gemm<2, 4>(m0, m, n0, n);
635
+ break;
636
+ #else
637
+ case 0x44:
638
+ case 0x43:
639
+ case 0x42:
640
+ mc = 4;
641
+ nc = 2;
642
+ gemm<4, 2>(m0, m, n0, n);
643
+ break;
644
+ case 0x34:
645
+ case 0x24:
646
+ mc = 2;
647
+ nc = 4;
648
+ gemm<2, 4>(m0, m, n0, n);
649
+ break;
650
+ case 0x33:
651
+ #endif
652
+ case 0x32:
653
+ mc = 3;
654
+ nc = 2;
655
+ gemm<3, 2>(m0, m, n0, n);
656
+ break;
657
+ case 0x23:
658
+ mc = 2;
659
+ nc = 3;
660
+ gemm<2, 3>(m0, m, n0, n);
661
+ break;
662
+ case 0x41:
663
+ mc = 4;
664
+ nc = 1;
665
+ gemm<4, 1>(m0, m, n0, n);
666
+ break;
667
+ case 0x22:
668
+ mc = 2;
669
+ nc = 2;
670
+ gemm<2, 2>(m0, m, n0, n);
671
+ break;
672
+ case 0x14:
673
+ mc = 1;
674
+ nc = 4;
675
+ gemm<1, 4>(m0, m, n0, n);
676
+ break;
677
+ case 0x31:
678
+ mc = 3;
679
+ nc = 1;
680
+ gemm<3, 1>(m0, m, n0, n);
681
+ break;
682
+ case 0x13:
683
+ mc = 1;
684
+ nc = 3;
685
+ gemm<1, 3>(m0, m, n0, n);
686
+ break;
687
+ case 0x21:
688
+ mc = 2;
689
+ nc = 1;
690
+ gemm<2, 1>(m0, m, n0, n);
691
+ break;
692
+ case 0x12:
693
+ mc = 1;
694
+ nc = 2;
695
+ gemm<1, 2>(m0, m, n0, n);
696
+ break;
697
+ case 0x11:
698
+ mc = 1;
699
+ nc = 1;
700
+ gemm<1, 1>(m0, m, n0, n);
701
+ break;
702
+ default:
703
+ return;
704
+ }
705
+ mp = m0 + (m - m0) / mc * mc;
706
+ np = n0 + (n - n0) / nc * nc;
707
+ mnpack(mp, m, n0, np);
708
+ mnpack(m0, m, np, n);
709
+ }
710
+
711
+ template <int RM, int RN>
712
+ NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
713
+ int64_t ytiles = (m - m0) / RM;
714
+ int64_t xtiles = (n - n0) / RN;
715
+ int64_t tiles = xtiles * ytiles;
716
+ int64_t duty = (tiles + nth - 1) / nth;
717
+ int64_t start = duty * ith;
718
+ int64_t end = start + duty;
719
+ if (end > tiles)
720
+ end = tiles;
721
+ for (int64_t job = start; job < end; ++job) {
722
+ int64_t ii = m0 + job / xtiles * RM;
723
+ int64_t jj = n0 + job % xtiles * RN;
724
+ __m256 Cv[RN][RM] = {};
725
+ for (int64_t l = 0; l < k; ++l)
726
+ for (int64_t j = 0; j < RN; ++j)
727
+ for (int64_t i = 0; i < RM; ++i) {
728
+ #if defined(__AVX2__)
729
+ __m256 udTmp = updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
730
+ load(A + lda * (ii + i) + l)),
731
+ _mm256_sign_epi8(load(B + ldb * (jj + j) + l),
732
+ load(A + lda * (ii + i) + l)));
733
+ #else
734
+ __m128i ali0 = load0(A + lda * (ii + i) + l);
735
+ __m128i ali1 = load1(A + lda * (ii + i) + l);
736
+ __m128i blj0 = load0(B + ldb * (jj + j) + l);
737
+ __m128i blj1 = load1(B + ldb * (jj + j) + l);
738
+
739
+ __m128i sepAA0 = _mm_sign_epi8(ali0, ali0);
740
+ __m128i sepAA1 = _mm_sign_epi8(ali1, ali1);
741
+ __m128i sepBA0 = _mm_sign_epi8(blj0, ali0);
742
+ __m128i sepBA1 = _mm_sign_epi8(blj1, ali1);
743
+
744
+ // updot
745
+ const __m128i oneFill = _mm_set1_epi16(1);
746
+ __m128i mad0 = _mm_maddubs_epi16(sepAA0, sepBA0);
747
+ __m128i mad1 = _mm_maddubs_epi16(sepAA1, sepBA1);
748
+ __m256 udTmp = _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_madd_epi16(oneFill, mad1), _mm_madd_epi16(oneFill, mad0)));
749
+ #endif
750
+ Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) *
751
+ unhalf(B[ldb * (jj + j) + l].d)),
752
+ udTmp,
753
+ Cv[j][i]);
754
+ }
755
+ for (int64_t j = 0; j < RN; ++j)
756
+ for (int64_t i = 0; i < RM; ++i)
757
+ C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
758
+ }
759
+ }
760
+
761
+ inline __m256i load(const block_q8_0 *b) {
762
+ return _mm256_loadu_si256((const __m256i *)b->qs);
763
+ }
764
+
765
+ inline __m128i load0(const block_q8_0 *b) {
766
+ return _mm_loadu_si128((const __m128i *)b->qs);
767
+ }
768
+
769
+ inline __m128i load1(const block_q8_0 *b) {
770
+ return _mm_loadu_si128(((const __m128i *)b->qs) + 1);
771
+ }
772
+
773
+ inline __m256i load(const block_q4_0 *b) {
774
+ return _mm256_sub_epi8(denibble(b->qs), _mm256_set1_epi8(8));
775
+ }
776
+
777
+ inline __m128i load0(const block_q4_0 *b) {
778
+ const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
779
+ return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), x), _mm_set1_epi8(8));
780
+ }
781
+
782
+ inline __m128i load1(const block_q4_0 *b) {
783
+ const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
784
+ return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4)), _mm_set1_epi8(8));
785
+ }
786
+
787
+ inline __m256 updot(__m256i u, __m256i s) {
788
+ __m256i res;
789
+ #if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__))
790
+ res = _mm256_dpbusd_epi32(_mm256_setzero_si256(), u, s);
791
+ #else
792
+ res = _mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_maddubs_epi16(u, s));
793
+ #endif
794
+ return _mm256_cvtepi32_ps(res);
795
+ }
796
+
797
+ static inline __m256i denibble(const uint8_t *p) {
798
+ __m128i x = _mm_loadu_si128((const __m128i *)p);
799
+ return _mm256_and_si256(_mm256_set1_epi8(15),
800
+ _mm256_insertf128_si256(_mm256_castsi128_si256(x),
801
+ _mm_srli_epi16(x, 4), 1));
802
+ }
803
+
804
+ const TA *const A;
805
+ const TB *const B;
806
+ TC *const C;
807
+ const int64_t k;
808
+ const int64_t lda;
809
+ const int64_t ldb;
810
+ const int64_t ldc;
811
+ const int ith;
812
+ const int nth;
813
+ };
814
+ #endif // __AVX__
815
+
816
+ } // namespace
817
+
818
+ /**
819
+ * Performs optimized matrix multiplication on CPU.
820
+ *
821
+ * This subroutine may compute C = Aᵀ * B with column major ordering.
822
+ * Despite its name, this isn't a generalized implementation. Work is
823
+ * only performed when a handwritten kernel is written and available.
824
+ * Otherwise the caller should fall back to a general matmul routine.
825
+ *
826
+ * For example, for single-threaded single-precision GEMM you can say
827
+ *
828
+ * llamafile_sgemm(m, n, k, A, lda, B, ldb, C, ldc,
829
+ * 0, 1,
830
+ * LM_GGML_TYPE_F32, LM_GGML_TYPE_F32, LM_GGML_TYPE_F32);
831
+ *
832
+ * @param m is rows in `A` and `C`
833
+ * @param n is cols in `B` and `C`
834
+ * @param k is cols in `A` and rows in `B`
835
+ * @param A is first input matrix (always transposed)
836
+ * @param lda is row stride of `A`
837
+ * @param B is second input matrix (never transposed)
838
+ * @param ldb is row stride of `B`
839
+ * @param C is input/output array of output matrices
840
+ * @param ldc is row stride of `C`
841
+ * @param ith is thread id (must be less than `nth`)
842
+ * @param nth is number of threads (must be greater than zero)
843
+ * @param Atype is GGML data type of `A`
844
+ * @param Btype is GGML data type of `B`
845
+ * @param Ctype is GGML data type of `C`
846
+ * @return true if this function was able to service the matmul request
847
+ */
848
+ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda, const void *B, int64_t ldb, void *C,
849
+ int64_t ldc, int ith, int nth, int Atype, int Btype, int Ctype) {
850
+
851
+ assert(m >= 0);
852
+ assert(n >= 0);
853
+ assert(k >= 0);
854
+ assert(lda >= k);
855
+ assert(ldb >= k);
856
+ assert(ldc >= m);
857
+ assert(nth > 0);
858
+ assert(ith < nth);
859
+
860
+ if (Ctype != LM_GGML_TYPE_F32)
861
+ return false;
862
+
863
+ switch (Atype) {
864
+
865
+ case LM_GGML_TYPE_F32: {
866
+ if (Btype != LM_GGML_TYPE_F32)
867
+ return false;
868
+ #if defined(__AVX512F__)
869
+ if (k % 16)
870
+ return false;
871
+ tinyBLAS<16, __m512, __m512, float, float, float> tb{
872
+ k, (const float *)A, lda,
873
+ (const float *)B, ldb,
874
+ (float *)C, ldc,
875
+ ith, nth};
876
+ tb.matmul(m, n);
877
+ return true;
878
+ #elif defined(__AVX__) || defined(__AVX2__)
879
+ if (k % 8)
880
+ return false;
881
+ tinyBLAS<8, __m256, __m256, float, float, float> tb{
882
+ k, (const float *)A, lda,
883
+ (const float *)B, ldb,
884
+ (float *)C, ldc,
885
+ ith, nth};
886
+ tb.matmul(m, n);
887
+ return true;
888
+ #elif defined(__ARM_NEON)
889
+ if (n < 4)
890
+ return false;
891
+ if (k % 4)
892
+ return false;
893
+ tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{
894
+ k, (const float *)A, lda,
895
+ (const float *)B, ldb,
896
+ (float *)C, ldc,
897
+ ith, nth};
898
+ tb.matmul(m, n);
899
+ return true;
900
+ #else
901
+ return false;
902
+ #endif
903
+ }
904
+
905
+ case LM_GGML_TYPE_F16: {
906
+ #if defined(__AVX512F__)
907
+ if (k % 16)
908
+ return false;
909
+ if (Btype != LM_GGML_TYPE_F32)
910
+ return false;
911
+ tinyBLAS<16, __m512, __m512, lm_ggml_fp16_t, float, float> tb{
912
+ k, (const lm_ggml_fp16_t *)A, lda,
913
+ (const float *)B, ldb,
914
+ (float *)C, ldc,
915
+ ith, nth};
916
+ tb.matmul(m, n);
917
+ return true;
918
+ #elif (defined(__AVX__) || defined(__AVX2__)) && defined(__F16C__)
919
+ if (k % 8)
920
+ return false;
921
+ if (Btype != LM_GGML_TYPE_F32)
922
+ return false;
923
+ tinyBLAS<8, __m256, __m256, lm_ggml_fp16_t, float, float> tb{
924
+ k, (const lm_ggml_fp16_t *)A, lda,
925
+ (const float *)B, ldb,
926
+ (float *)C, ldc,
927
+ ith, nth};
928
+ tb.matmul(m, n);
929
+ return true;
930
+ #elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
931
+ if (n < 8)
932
+ return false;
933
+ if (k % 8)
934
+ return false;
935
+ if (Btype != LM_GGML_TYPE_F16)
936
+ return false;
937
+ tinyBLAS<8, float16x8_t, float16x8_t, lm_ggml_fp16_t, lm_ggml_fp16_t, float> tb{
938
+ k, (const lm_ggml_fp16_t *)A, lda,
939
+ (const lm_ggml_fp16_t *)B, ldb,
940
+ (float *)C, ldc,
941
+ ith, nth};
942
+ tb.matmul(m, n);
943
+ return true;
944
+ #elif defined(__ARM_NEON) && !defined(_MSC_VER)
945
+ if (k % 4)
946
+ return false;
947
+ if (Btype != LM_GGML_TYPE_F32)
948
+ return false;
949
+ tinyBLAS<4, float32x4_t, float32x4_t, lm_ggml_fp16_t, float, float> tb{
950
+ k, (const lm_ggml_fp16_t *)A, lda,
951
+ (const float *)B, ldb,
952
+ (float *)C, ldc,
953
+ ith, nth};
954
+ tb.matmul(m, n);
955
+ return true;
956
+ #else
957
+ return false;
958
+ #endif
959
+ }
960
+
961
+ case LM_GGML_TYPE_Q8_0: {
962
+ if (Btype != LM_GGML_TYPE_Q8_0)
963
+ return false;
964
+ #if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
965
+ tinyBLAS_Q0_AVX<block_q8_0, block_q8_0, float> tb{
966
+ k, (const block_q8_0 *)A, lda,
967
+ (const block_q8_0 *)B, ldb,
968
+ (float *)C, ldc,
969
+ ith, nth};
970
+ tb.matmul(m, n);
971
+ return true;
972
+ #elif defined(__ARM_FEATURE_DOTPROD)
973
+ tinyBLAS_Q0_ARM<block_q8_0> tb{
974
+ k, (const block_q8_0 *)A, lda,
975
+ (const block_q8_0 *)B, ldb,
976
+ (float *)C, ldc,
977
+ ith, nth};
978
+ tb.matmul(m, n);
979
+ return true;
980
+ #else
981
+ return false;
982
+ #endif
983
+ }
984
+
985
+ case LM_GGML_TYPE_Q4_0: {
986
+ if (Btype != LM_GGML_TYPE_Q8_0)
987
+ return false;
988
+ #if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
989
+ tinyBLAS_Q0_AVX<block_q4_0, block_q8_0, float> tb{
990
+ k, (const block_q4_0 *)A, lda,
991
+ (const block_q8_0 *)B, ldb,
992
+ (float *)C, ldc,
993
+ ith, nth};
994
+ tb.matmul(m, n);
995
+ return true;
996
+ #elif defined(__ARM_FEATURE_DOTPROD)
997
+ tinyBLAS_Q0_ARM<block_q4_0> tb{
998
+ k, (const block_q4_0 *)A, lda,
999
+ (const block_q8_0 *)B, ldb,
1000
+ (float *)C, ldc,
1001
+ ith, nth};
1002
+ tb.matmul(m, n);
1003
+ return true;
1004
+ #else
1005
+ return false;
1006
+ #endif
1007
+ }
1008
+
1009
+ default:
1010
+ return false;
1011
+ }
1012
+
1013
+ (void)m;
1014
+ (void)n;
1015
+ (void)k;
1016
+ (void)A;
1017
+ (void)lda;
1018
+ (void)B;
1019
+ (void)ldb;
1020
+ (void)C;
1021
+ (void)ldc;
1022
+ (void)ith;
1023
+ (void)nth;
1024
+ (void)Atype;
1025
+ (void)Btype;
1026
+ (void)Ctype;
1027
+ }
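For readers of this diff: the comment above llamafile_sgemm documents the call contract (lda and ldb at least k, ldc at least m, ith less than nth, and GGML type tags for each operand). A minimal illustrative caller for plain F32 inputs on a single thread might look like the sketch below; the wrapper name and buffer setup are hypothetical and not part of the package.

#include "sgemm.h"
#include "ggml.h"   // LM_GGML_TYPE_F32 in this package's prefixed ggml headers

// Hypothetical wrapper: computes C = A^T * B for F32 data on one thread.
// Returns false when no handwritten kernel matches (for example, k is not
// a multiple of the SIMD width on this CPU), in which case the caller must
// fall back to a generic matmul routine.
static bool sgemm_f32_single_thread(int64_t m, int64_t n, int64_t k,
                                    const float *A, const float *B, float *C) {
    return llamafile_sgemm(m, n, k,
                           A, /*lda=*/k,
                           B, /*ldb=*/k,
                           C, /*ldc=*/m,
                           /*ith=*/0, /*nth=*/1,
                           LM_GGML_TYPE_F32, LM_GGML_TYPE_F32, LM_GGML_TYPE_F32);
}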