cui-llama.rn 1.6.0 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. package/README.md +35 -7
  2. package/android/src/main/CMakeLists.txt +16 -11
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +4 -1
  4. package/android/src/main/jni.cpp +20 -4
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  13. package/cpp/LICENSE +21 -0
  14. package/cpp/chat.cpp +1 -1
  15. package/cpp/common.cpp +17 -2
  16. package/cpp/common.h +7 -3
  17. package/cpp/ggml-alloc.c +4 -1
  18. package/cpp/ggml-cpp.h +1 -1
  19. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  20. package/cpp/ggml-cpu/amx/amx.h +8 -0
  21. package/cpp/ggml-cpu/amx/common.h +91 -0
  22. package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
  23. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  24. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/binary-ops.h +1 -1
  25. package/cpp/ggml-cpu/common.h +72 -0
  26. package/cpp/{ggml-cpu-aarch64.cpp → ggml-cpu/ggml-cpu-aarch64.cpp} +809 -101
  27. package/cpp/{ggml-cpu.c → ggml-cpu/ggml-cpu.c} +109 -42
  28. package/cpp/{ggml-cpu.cpp → ggml-cpu/ggml-cpu.cpp} +3 -0
  29. package/cpp/{ops.cpp → ggml-cpu/ops.cpp} +246 -160
  30. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/ops.h +2 -20
  31. package/cpp/{sgemm.cpp → ggml-cpu/sgemm.cpp} +501 -0
  32. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/simd-mappings.h +7 -3
  33. package/{ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers → cpp/ggml-cpu}/unary-ops.h +1 -1
  34. package/cpp/ggml-cpu.h +5 -0
  35. package/cpp/ggml-impl.h +16 -9
  36. package/cpp/ggml-llama-sim.metallib +0 -0
  37. package/cpp/ggml-llama.metallib +0 -0
  38. package/cpp/ggml-metal.m +492 -47
  39. package/cpp/ggml.c +134 -244
  40. package/cpp/ggml.h +61 -94
  41. package/cpp/json-schema-to-grammar.cpp +3 -0
  42. package/cpp/llama-arch.cpp +46 -17
  43. package/cpp/llama-arch.h +9 -0
  44. package/cpp/llama-batch.cpp +5 -1
  45. package/cpp/llama-batch.h +2 -1
  46. package/cpp/llama-chat.cpp +31 -10
  47. package/cpp/llama-chat.h +3 -2
  48. package/cpp/llama-context.cpp +104 -489
  49. package/cpp/llama-context.h +14 -30
  50. package/cpp/llama-graph.cpp +69 -62
  51. package/cpp/llama-graph.h +21 -18
  52. package/cpp/llama-hparams.h +5 -0
  53. package/cpp/llama-kv-cache.cpp +1497 -391
  54. package/cpp/llama-kv-cache.h +272 -80
  55. package/cpp/llama-memory.h +11 -1
  56. package/cpp/llama-model.cpp +502 -176
  57. package/cpp/llama-model.h +13 -3
  58. package/cpp/llama-sampling.cpp +2 -1
  59. package/cpp/llama-vocab.cpp +8 -1
  60. package/cpp/llama.h +14 -11
  61. package/cpp/rn-llama.cpp +20 -172
  62. package/cpp/rn-llama.h +1 -5
  63. package/ios/CMakeLists.txt +13 -10
  64. package/ios/RNLlama.h +6 -0
  65. package/ios/RNLlama.mm +5 -0
  66. package/ios/RNLlamaContext.mm +26 -28
  67. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +7 -3
  68. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
  69. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
  70. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
  71. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +61 -94
  72. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
  73. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
  74. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +3 -2
  75. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +14 -30
  76. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +21 -18
  77. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +5 -0
  78. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +272 -80
  79. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +11 -1
  80. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +13 -3
  81. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +14 -11
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +1 -5
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  85. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +7 -3
  86. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
  87. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
  88. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
  89. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +61 -94
  90. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
  91. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
  92. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +3 -2
  93. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +14 -30
  94. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +21 -18
  95. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +5 -0
  96. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +272 -80
  97. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +11 -1
  98. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +13 -3
  99. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +14 -11
  100. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +1 -5
  101. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  102. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  103. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +7 -3
  104. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
  105. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
  106. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
  107. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +61 -94
  108. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
  109. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
  110. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +3 -2
  111. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +14 -30
  112. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +21 -18
  113. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +5 -0
  114. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +272 -80
  115. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +11 -1
  116. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +13 -3
  117. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +14 -11
  118. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +1 -5
  119. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  120. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  121. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +7 -3
  122. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
  123. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
  124. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
  125. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +61 -94
  126. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
  127. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
  128. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +3 -2
  129. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +14 -30
  130. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +21 -18
  131. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +5 -0
  132. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +272 -80
  133. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +11 -1
  134. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +13 -3
  135. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +14 -11
  136. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +1 -5
  137. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  138. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  139. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  140. package/lib/module/NativeRNLlama.js.map +1 -1
  141. package/lib/typescript/NativeRNLlama.d.ts +4 -0
  142. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  143. package/package.json +1 -1
  144. package/src/NativeRNLlama.ts +5 -0
  145. package/cpp/binary-ops.h +0 -16
  146. package/cpp/ops.h +0 -128
  147. package/cpp/simd-mappings.h +0 -888
  148. package/cpp/unary-ops.h +0 -28
  149. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
  150. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  151. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  152. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  153. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  154. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +0 -128
  155. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +0 -14
  156. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
  157. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +0 -802
  158. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  159. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  160. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  161. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  162. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
  163. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
  164. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
  165. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
  166. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  167. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  168. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  169. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  170. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +0 -128
  171. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +0 -14
  172. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
  173. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +0 -28
  174. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/vec.h +0 -802
  175. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +0 -16
  176. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  177. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  178. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  179. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  180. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +0 -128
  181. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
  182. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +0 -888
  183. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
  184. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
  185. /package/cpp/{binary-ops.cpp → ggml-cpu/binary-ops.cpp} +0 -0
  186. /package/cpp/{ggml-cpu-aarch64.h → ggml-cpu/ggml-cpu-aarch64.h} +0 -0
  187. /package/cpp/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -0
  188. /package/cpp/{ggml-cpu-quants.c → ggml-cpu/ggml-cpu-quants.c} +0 -0
  189. /package/cpp/{ggml-cpu-quants.h → ggml-cpu/ggml-cpu-quants.h} +0 -0
  190. /package/cpp/{ggml-cpu-traits.cpp → ggml-cpu/ggml-cpu-traits.cpp} +0 -0
  191. /package/cpp/{ggml-cpu-traits.h → ggml-cpu/ggml-cpu-traits.h} +0 -0
  192. /package/cpp/{sgemm.h → ggml-cpu/sgemm.h} +0 -0
  193. /package/cpp/{unary-ops.cpp → ggml-cpu/unary-ops.cpp} +0 -0
  194. /package/cpp/{vec.cpp → ggml-cpu/vec.cpp} +0 -0
  195. /package/cpp/{vec.h → ggml-cpu/vec.h} +0 -0
@@ -1,8 +0,0 @@
- #pragma once
-
- #include "ggml-cpu-traits.h"
- #include "ggml.h"
-
- // GGML internal header
-
- lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_aarch64_buffer_type(void);
@@ -1,512 +0,0 @@
- #pragma once
-
- // GGML CPU internal header
-
- #include "ggml.h"
- #include "ggml-impl.h"
-
- #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
- //#include <stddef.h>
- #include <stdbool.h>
- #include <string.h> // memcpy
- #include <math.h> // fabsf
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- struct lm_ggml_compute_params {
- // ith = thread index, nth = number of threads
- int ith, nth;
-
- // work buffer for all threads
- size_t wsize;
- void * wdata;
-
- struct lm_ggml_threadpool * threadpool;
- };
-
-
- #if defined(_MSC_VER)
-
- #define m512bh(p) p
- #define m512i(p) p
-
- #else
-
- #define m512bh(p) (__m512bh)(p)
- #define m512i(p) (__m512i)(p)
-
- #endif
-
- // __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
- #if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
- #ifndef __FMA__
- #define __FMA__
- #endif
- #ifndef __F16C__
- #define __F16C__
- #endif
- #endif
-
- // __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available
- #if defined(_MSC_VER) && (defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__))
- #ifndef __SSE3__
- #define __SSE3__
- #endif
- #ifndef __SSSE3__
- #define __SSSE3__
- #endif
- #endif
-
- #if defined(__s390x__) && defined(__VEC__)
- #ifndef __VXE__
- #define __VXE__
- #endif
- #ifndef __VXE2__
- #define __VXE2__
- #endif
- #endif
-
- #if defined(__ARM_FEATURE_SVE)
- #include <sys/prctl.h>
- #endif
-
- #if defined(__ARM_NEON)
-
- // ref: https://github.com/ggml-org/llama.cpp/pull/5404
- #ifdef _MSC_VER
- #define lm_ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) }
- #else
- #define lm_ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) }
- #endif // _MSC_VER
-
- #if !defined(__aarch64__)
-
- // 32-bit ARM compatibility
-
- // vaddlvq_s16
- // vpaddq_s16
- // vpaddq_s32
- // vaddvq_s32
- // vaddvq_f32
- // vmaxvq_f32
- // vcvtnq_s32_f32
- // vzip1_u8
- // vzip2_u8
-
- inline static int32_t vaddlvq_s16(int16x8_t v) {
- int32x4_t v0 = vreinterpretq_s32_s64(vpaddlq_s32(vpaddlq_s16(v)));
- return vgetq_lane_s32(v0, 0) + vgetq_lane_s32(v0, 2);
- }
-
- inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
- int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
- int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
- return vcombine_s16(a0, b0);
- }
-
- inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
- int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
- int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
- return vcombine_s32(a0, b0);
- }
-
- inline static int32_t vaddvq_s32(int32x4_t v) {
- return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
- }
-
- inline static float vaddvq_f32(float32x4_t v) {
- return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
- }
-
- inline static float vmaxvq_f32(float32x4_t v) {
- return
- MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
- MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
- }
-
- inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
- int32x4_t res;
-
- res[0] = roundf(vgetq_lane_f32(v, 0));
- res[1] = roundf(vgetq_lane_f32(v, 1));
- res[2] = roundf(vgetq_lane_f32(v, 2));
- res[3] = roundf(vgetq_lane_f32(v, 3));
-
- return res;
- }
-
- inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
- uint8x8_t res;
-
- res[0] = a[0]; res[1] = b[0];
- res[2] = a[1]; res[3] = b[1];
- res[4] = a[2]; res[5] = b[2];
- res[6] = a[3]; res[7] = b[3];
-
- return res;
- }
-
- inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
- uint8x8_t res;
-
- res[0] = a[4]; res[1] = b[4];
- res[2] = a[5]; res[3] = b[5];
- res[4] = a[6]; res[5] = b[6];
- res[6] = a[7]; res[7] = b[7];
-
- return res;
- }
-
- // vld1q_s16_x2
- // vld1q_u8_x2
- // vld1q_u8_x4
- // vld1q_s8_x2
- // vld1q_s8_x4
- // TODO: double-check these work correctly
-
- typedef struct lm_ggml_int16x8x2_t {
- int16x8_t val[2];
- } lm_ggml_int16x8x2_t;
-
- inline static lm_ggml_int16x8x2_t lm_ggml_vld1q_s16_x2(const int16_t * ptr) {
- lm_ggml_int16x8x2_t res;
-
- res.val[0] = vld1q_s16(ptr + 0);
- res.val[1] = vld1q_s16(ptr + 8);
-
- return res;
- }
-
- typedef struct lm_ggml_uint8x16x2_t {
- uint8x16_t val[2];
- } lm_ggml_uint8x16x2_t;
-
- inline static lm_ggml_uint8x16x2_t lm_ggml_vld1q_u8_x2(const uint8_t * ptr) {
- lm_ggml_uint8x16x2_t res;
-
- res.val[0] = vld1q_u8(ptr + 0);
- res.val[1] = vld1q_u8(ptr + 16);
-
- return res;
- }
-
- typedef struct lm_ggml_uint8x16x4_t {
- uint8x16_t val[4];
- } lm_ggml_uint8x16x4_t;
-
- inline static lm_ggml_uint8x16x4_t lm_ggml_vld1q_u8_x4(const uint8_t * ptr) {
- lm_ggml_uint8x16x4_t res;
-
- res.val[0] = vld1q_u8(ptr + 0);
- res.val[1] = vld1q_u8(ptr + 16);
- res.val[2] = vld1q_u8(ptr + 32);
- res.val[3] = vld1q_u8(ptr + 48);
-
- return res;
- }
-
- typedef struct lm_ggml_int8x16x2_t {
- int8x16_t val[2];
- } lm_ggml_int8x16x2_t;
-
- inline static lm_ggml_int8x16x2_t lm_ggml_vld1q_s8_x2(const int8_t * ptr) {
- lm_ggml_int8x16x2_t res;
-
- res.val[0] = vld1q_s8(ptr + 0);
- res.val[1] = vld1q_s8(ptr + 16);
-
- return res;
- }
-
- typedef struct lm_ggml_int8x16x4_t {
- int8x16_t val[4];
- } lm_ggml_int8x16x4_t;
-
- inline static lm_ggml_int8x16x4_t lm_ggml_vld1q_s8_x4(const int8_t * ptr) {
- lm_ggml_int8x16x4_t res;
-
- res.val[0] = vld1q_s8(ptr + 0);
- res.val[1] = vld1q_s8(ptr + 16);
- res.val[2] = vld1q_s8(ptr + 32);
- res.val[3] = vld1q_s8(ptr + 48);
-
- return res;
- }
-
- // NOTE: not tested
- inline static int8x16_t lm_ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) {
- int8x16_t res;
-
- res[ 0] = a[b[ 0]];
- res[ 1] = a[b[ 1]];
- res[ 2] = a[b[ 2]];
- res[ 3] = a[b[ 3]];
- res[ 4] = a[b[ 4]];
- res[ 5] = a[b[ 5]];
- res[ 6] = a[b[ 6]];
- res[ 7] = a[b[ 7]];
- res[ 8] = a[b[ 8]];
- res[ 9] = a[b[ 9]];
- res[10] = a[b[10]];
- res[11] = a[b[11]];
- res[12] = a[b[12]];
- res[13] = a[b[13]];
- res[14] = a[b[14]];
- res[15] = a[b[15]];
-
- return res;
- }
-
- // NOTE: not tested
- inline static uint8x16_t lm_ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
- uint8x16_t res;
-
- res[ 0] = a[b[ 0]];
- res[ 1] = a[b[ 1]];
- res[ 2] = a[b[ 2]];
- res[ 3] = a[b[ 3]];
- res[ 4] = a[b[ 4]];
- res[ 5] = a[b[ 5]];
- res[ 6] = a[b[ 6]];
- res[ 7] = a[b[ 7]];
- res[ 8] = a[b[ 8]];
- res[ 9] = a[b[ 9]];
- res[10] = a[b[10]];
- res[11] = a[b[11]];
- res[12] = a[b[12]];
- res[13] = a[b[13]];
- res[14] = a[b[14]];
- res[15] = a[b[15]];
-
- return res;
- }
-
- #else
-
- #define lm_ggml_int16x8x2_t int16x8x2_t
- #define lm_ggml_uint8x16x2_t uint8x16x2_t
- #define lm_ggml_uint8x16x4_t uint8x16x4_t
- #define lm_ggml_int8x16x2_t int8x16x2_t
- #define lm_ggml_int8x16x4_t int8x16x4_t
-
- #define lm_ggml_vld1q_s16_x2 vld1q_s16_x2
- #define lm_ggml_vld1q_u8_x2 vld1q_u8_x2
- #define lm_ggml_vld1q_u8_x4 vld1q_u8_x4
- #define lm_ggml_vld1q_s8_x2 vld1q_s8_x2
- #define lm_ggml_vld1q_s8_x4 vld1q_s8_x4
- #define lm_ggml_vqtbl1q_s8 vqtbl1q_s8
- #define lm_ggml_vqtbl1q_u8 vqtbl1q_u8
-
- #endif // !defined(__aarch64__)
-
- #if !defined(__ARM_FEATURE_DOTPROD)
-
- inline static int32x4_t lm_ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
- const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
- const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));
-
- return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
- }
-
- #else
-
- #define lm_ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)
-
- #endif // !defined(__ARM_FEATURE_DOTPROD)
-
- #endif // defined(__ARM_NEON)
-
- #ifdef __wasm_simd128__
- #include <wasm_simd128.h>
- #else
- #ifdef __POWER9_VECTOR__
- #include <altivec.h>
- #else
- #if defined(_MSC_VER) || defined(__MINGW32__)
- #include <intrin.h>
- #else
- #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) || defined(__SSE__)
- #if !defined(__riscv)
- #include <immintrin.h>
- #endif
- #endif
- #endif
- #endif
- #endif
-
- #ifdef __riscv_v_intrinsic
- #include <riscv_vector.h>
- #endif
-
- #if defined(__loongarch64)
- #if defined(__loongarch_asx)
- #include <lasxintrin.h>
- #endif
- #if defined(__loongarch_sx)
- #include <lsxintrin.h>
- #endif
- #endif
-
- #if defined(__VXE__) || defined(__VXE2__)
- #include <vecintrin.h>
-
- #define vec_neg(a) (-(a)) // Vector Negate
- #define vec_add(a, b) ((a) + (b)) // Vector Add
- #define vec_sub(a, b) ((a) - (b)) // Vector Subtract
- #define vec_mul(a, b) ((a) * (b)) // Vector Multiply
- #define vec_div(a, b) ((a) / (b)) // Vector Divide
- #define vec_sl(a, b) ((a) << (b)) // Vector Shift Left
- #define vec_sra(a, b) ((a) >> (b)) // Vector Shift Right
- #define vec_sr(a, b) ((a) >> (b)) // Vector Shift Right Algebraic
- #define vec_slo(a, b) vec_slb(a, (b) << 64) // Vector Shift Left by Octet
- #define vec_sro(a, b) vec_srb(a, (b) << 64) // Vector Shift Right by Octet
-
- #ifndef vec_and
- #define vec_and(a, b) ((a) & (b)) // Vector AND
- #endif
-
- #ifndef vec_or
- #define vec_or(a, b) ((a) | (b)) // Vector OR
- #endif
-
- #ifndef vec_xor
- #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR
- #endif
-
- typedef signed char char8x16_t __attribute__((vector_size(16)));
- typedef unsigned char uchar8x16_t __attribute__((vector_size(16)));
-
- typedef int8_t int8x16_t __attribute__((vector_size(16)));
- typedef int16_t int16x8_t __attribute__((vector_size(16)));
- typedef int32_t int32x4_t __attribute__((vector_size(16)));
-
- typedef uint8_t uint8x16_t __attribute__((vector_size(16)));
- typedef uint16_t uint16x8_t __attribute__((vector_size(16)));
- typedef uint32_t uint32x4_t __attribute__((vector_size(16)));
-
- typedef float float32x4_t __attribute__((vector_size(16)));
- typedef double double64x2_t __attribute((vector_size(16)));
-
- typedef signed long long long64x2_t __attribute((vector_size(16)));
- typedef unsigned long long ulong64x2_t __attribute__((vector_size(16)));
-
- typedef struct lm_ggml_uint8x16x2_t {
- uint8x16_t val[2];
- } lm_ggml_uint8x16x2_t;
-
- inline static lm_ggml_uint8x16x2_t lm_ggml_vec_xl_u8x2(const uint8_t * ptr) {
- lm_ggml_uint8x16x2_t res;
-
- res.val[0] = vec_xl( 0, ptr);
- res.val[1] = vec_xl(16, ptr);
-
- return res;
- }
-
- typedef struct lm_ggml_uint8x16x4_t {
- uint8x16_t val[4];
- } lm_ggml_uint8x16x4_t;
-
- inline static lm_ggml_uint8x16x4_t lm_ggml_vec_xl_u8x4(const uint8_t * ptr) {
- lm_ggml_uint8x16x4_t res;
-
- res.val[0] = vec_xl( 0, ptr);
- res.val[1] = vec_xl(16, ptr);
- res.val[2] = vec_xl(32, ptr);
- res.val[3] = vec_xl(48, ptr);
-
- return res;
- }
-
- typedef struct lm_ggml_int8x16x4_t {
- int8x16_t val[4];
- } lm_ggml_int8x16x4_t;
-
- inline static lm_ggml_int8x16x4_t lm_ggml_vec_xl_s8x4(const int8_t * ptr) {
- lm_ggml_int8x16x4_t res;
-
- res.val[0] = vec_xl( 0, ptr);
- res.val[1] = vec_xl(16, ptr);
- res.val[2] = vec_xl(32, ptr);
- res.val[3] = vec_xl(48, ptr);
-
- return res;
- }
-
- typedef struct lm_ggml_int16x8x2_t {
- int16x8_t val[2];
- } lm_ggml_int16x8x2_t;
-
- inline static lm_ggml_int16x8x2_t lm_ggml_vec_xl_s16x2(const int16_t * ptr) {
- lm_ggml_int16x8x2_t res;
-
- res.val[0] = vec_xl( 0, ptr);
- res.val[1] = vec_xl(16, ptr);
-
- return res;
- }
-
- /*
- ! WARNING: Very slow. Use vec_perm if possible. Refer to iq4_xs
- ! or iq4_nl for example implementation.
- */
- inline static int8x16_t lm_ggml_vec_tbl(int8x16_t a, uint8x16_t b) {
- int8x16_t res;
-
- res[ 0] = a[b[ 0]];
- res[ 1] = a[b[ 1]];
- res[ 2] = a[b[ 2]];
- res[ 3] = a[b[ 3]];
- res[ 4] = a[b[ 4]];
- res[ 5] = a[b[ 5]];
- res[ 6] = a[b[ 6]];
- res[ 7] = a[b[ 7]];
- res[ 8] = a[b[ 8]];
- res[ 9] = a[b[ 9]];
- res[10] = a[b[10]];
- res[11] = a[b[11]];
- res[12] = a[b[12]];
- res[13] = a[b[13]];
- res[14] = a[b[14]];
- res[15] = a[b[15]];
-
- return res;
- }
-
- inline static int16x8_t vec_padd_s16(int16x8_t a, int16x8_t b) {
- const uchar8x16_t v_maske = { 0, 1, 4, 5, 8, 9, 12, 13,
- 16, 17, 20, 21, 24, 25, 28, 29 };
-
- const int16x8_t v_abo = vec_pack((int32x4_t)a, (int32x4_t)b);
- const int16x8_t v_abe = vec_perm(a, b, v_maske);
- return v_abo + v_abe;
- }
-
- inline static int32x4_t lm_ggml_vec_dot(int32x4_t acc, int8x16_t a, int8x16_t b) {
- const int16x8_t p = vec_mule(a, b) + vec_mulo(a, b);
- return acc + (vec_unpackh(p) + vec_unpackl(p));
- }
-
- #endif
-
- #if defined(__loongarch_asx)
- /* float type data load instructions */
- static __m128 __lsx_vreplfr2vr_s(const float val) {
- v4f32 res = {val, val, val, val};
- return (__m128)res;
- }
-
- static __m256 __lasx_xvreplfr2vr_s(const float val) {
- v8f32 res = {val, val, val, val, val, val, val, val};
- return (__m256)res;
- }
- #endif
-
- // TODO: move to ggml-threading
- void lm_ggml_barrier(struct lm_ggml_threadpool * tp);
-
- #ifdef __cplusplus
- }
- #endif
@@ -1,63 +0,0 @@
- #pragma once
-
- #define LM_GGML_COMMON_DECL_C
- #include "ggml-common.h"
-
- #include "ggml.h"
-
- // GGML CPU internal header
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- // Quantization
- void quantize_row_q4_0(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q4_1(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q5_0(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q5_1(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q8_0(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q8_1(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
-
- void quantize_row_q2_K(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q3_K(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q4_K(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q5_K(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q6_K(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_q8_K(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
-
- void quantize_row_tq1_0(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_tq2_0(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
-
- void quantize_row_iq4_nl (const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
- void quantize_row_iq4_xs (const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
-
- // Dot product
- void lm_ggml_vec_dot_q4_0_q8_0(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_q4_1_q8_1(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_q5_0_q8_0(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_q5_1_q8_1(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_q8_0_q8_0(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
-
- void lm_ggml_vec_dot_q2_K_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_q3_K_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_q4_K_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_q5_K_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_q6_K_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
-
- void lm_ggml_vec_dot_tq1_0_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_tq2_0_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
-
- void lm_ggml_vec_dot_iq2_xxs_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_iq2_xs_q8_K (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_iq2_s_q8_K (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_iq3_xxs_q8_K(int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_iq1_s_q8_K (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_iq1_m_q8_K (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_iq4_nl_q8_0 (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_iq4_xs_q8_K (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
- void lm_ggml_vec_dot_iq3_s_q8_K (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT vx, size_t bx, const void * LM_GGML_RESTRICT vy, size_t by, int nrc);
-
- #ifdef __cplusplus
- }
- #endif
@@ -1,38 +0,0 @@
- #pragma once
- #include "ggml-backend-impl.h"
- #include "ggml-cpu-impl.h"
- #include "ggml.h"
-
- #ifdef __cplusplus
- # include <vector>
- extern "C" {
- #endif
-
- // return true if op part of extra "accelerator"
- bool lm_ggml_cpu_extra_compute_forward(struct lm_ggml_compute_params * params, struct lm_ggml_tensor * op);
- bool lm_ggml_cpu_extra_work_size(int n_threads, const struct lm_ggml_tensor * op, size_t * size);
-
- #ifdef __cplusplus
- }
-
- namespace ggml::cpu {
- // register in tensor->extra
- class tensor_traits {
- public:
- virtual ~tensor_traits();
- virtual bool work_size(int n_threads, const struct lm_ggml_tensor * op, size_t & size) = 0;
- virtual bool compute_forward(struct lm_ggml_compute_params * params, struct lm_ggml_tensor * op) = 0;
- };
-
- class extra_buffer_type {
- public:
- virtual ~extra_buffer_type();
- virtual bool supports_op(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op) = 0;
- virtual tensor_traits * get_tensor_traits(const struct lm_ggml_tensor * op) = 0;
- };
- } // namespace ggml::cpu
-
- // implemented in ggml-cpu.cpp.
- std::vector<lm_ggml_backend_buffer_type_t> & lm_ggml_backend_cpu_get_extra_buffers_type();
-
- #endif
@@ -1,14 +0,0 @@
- #pragma once
- #include <stdint.h>
- #include <stdbool.h>
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- bool llamafile_sgemm(const struct lm_ggml_compute_params * params, int64_t, int64_t, int64_t,
- const void *, int64_t, const void *, int64_t, void *, int64_t,
- int, int, int);
-
- #ifdef __cplusplus
- }
- #endif
@@ -1,28 +0,0 @@
- #pragma once
-
- #include "cpu-common.h"
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- void lm_ggml_compute_forward_abs(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_sgn(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_neg(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_step(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_tanh(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_elu(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_relu(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_sigmoid(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_hardsigmoid(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_exp(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_hardswish(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_sqr(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_sqrt(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_sin(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_cos(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
- void lm_ggml_compute_forward_log(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
-
- #ifdef __cplusplus
- }
- #endif