cui-llama.rn 1.4.4 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (216)
  1. package/android/src/main/CMakeLists.txt +9 -2
  2. package/android/src/main/jni.cpp +54 -34
  3. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  11. package/cpp/binary-ops.cpp +158 -0
  12. package/cpp/binary-ops.h +16 -0
  13. package/cpp/chat.cpp +1769 -1085
  14. package/cpp/chat.h +143 -0
  15. package/cpp/common.cpp +1562 -1996
  16. package/cpp/common.h +677 -744
  17. package/cpp/cpu-common.h +72 -0
  18. package/cpp/ggml-alloc.c +1039 -1030
  19. package/cpp/ggml-alloc.h +1 -1
  20. package/cpp/ggml-backend-impl.h +255 -255
  21. package/cpp/ggml-backend-reg.cpp +586 -582
  22. package/cpp/ggml-backend.cpp +2004 -2002
  23. package/cpp/ggml-backend.h +354 -354
  24. package/cpp/ggml-common.h +1857 -1851
  25. package/cpp/ggml-cpp.h +39 -39
  26. package/cpp/ggml-cpu-aarch64.cpp +5725 -4247
  27. package/cpp/ggml-cpu-aarch64.h +8 -8
  28. package/cpp/ggml-cpu-impl.h +512 -380
  29. package/cpp/ggml-cpu-quants.c +13026 -11517
  30. package/cpp/ggml-cpu-traits.cpp +36 -36
  31. package/cpp/ggml-cpu-traits.h +38 -38
  32. package/cpp/ggml-cpu.c +3438 -14485
  33. package/cpp/ggml-cpu.cpp +655 -633
  34. package/cpp/ggml-cpu.h +138 -135
  35. package/cpp/ggml-impl.h +594 -567
  36. package/cpp/ggml-metal-impl.h +312 -3
  37. package/cpp/ggml-metal.h +66 -66
  38. package/cpp/ggml-metal.m +5360 -5002
  39. package/cpp/ggml-opt.cpp +854 -854
  40. package/cpp/ggml-opt.h +216 -216
  41. package/cpp/ggml-quants.c +5238 -5238
  42. package/cpp/ggml-threading.h +14 -14
  43. package/cpp/ggml.c +6618 -6524
  44. package/cpp/ggml.h +2222 -2194
  45. package/cpp/gguf.cpp +1330 -1329
  46. package/cpp/gguf.h +202 -202
  47. package/cpp/json-schema-to-grammar.cpp +1024 -1025
  48. package/cpp/json-schema-to-grammar.h +21 -22
  49. package/cpp/json.hpp +24766 -24766
  50. package/cpp/llama-adapter.cpp +382 -347
  51. package/cpp/llama-adapter.h +76 -74
  52. package/cpp/llama-arch.cpp +1714 -1492
  53. package/cpp/llama-arch.h +428 -402
  54. package/cpp/llama-batch.cpp +368 -368
  55. package/cpp/llama-batch.h +88 -88
  56. package/cpp/llama-chat.cpp +640 -587
  57. package/cpp/llama-chat.h +56 -53
  58. package/cpp/llama-context.cpp +2831 -1775
  59. package/cpp/llama-context.h +265 -128
  60. package/cpp/llama-cparams.cpp +1 -1
  61. package/cpp/llama-cparams.h +38 -37
  62. package/cpp/llama-cpp.h +30 -30
  63. package/cpp/llama-grammar.cpp +1219 -1219
  64. package/cpp/llama-grammar.h +173 -164
  65. package/cpp/llama-graph.cpp +1695 -0
  66. package/cpp/llama-graph.h +592 -0
  67. package/cpp/llama-hparams.cpp +79 -71
  68. package/cpp/llama-hparams.h +156 -139
  69. package/cpp/llama-impl.cpp +167 -167
  70. package/cpp/llama-impl.h +61 -61
  71. package/cpp/llama-io.cpp +15 -0
  72. package/cpp/llama-io.h +35 -0
  73. package/cpp/llama-kv-cache.cpp +1380 -718
  74. package/cpp/llama-kv-cache.h +213 -218
  75. package/cpp/llama-memory.cpp +1 -0
  76. package/cpp/llama-memory.h +21 -0
  77. package/cpp/llama-mmap.cpp +600 -590
  78. package/cpp/llama-mmap.h +68 -68
  79. package/cpp/llama-model-loader.cpp +1129 -1124
  80. package/cpp/llama-model-loader.h +169 -167
  81. package/cpp/llama-model.cpp +13080 -4023
  82. package/cpp/llama-model.h +409 -370
  83. package/cpp/llama-sampling.cpp +2563 -2525
  84. package/cpp/llama-sampling.h +32 -32
  85. package/cpp/llama-vocab.cpp +3295 -3252
  86. package/cpp/llama-vocab.h +125 -125
  87. package/cpp/llama.cpp +351 -10137
  88. package/cpp/llama.h +1434 -1340
  89. package/cpp/log.cpp +427 -423
  90. package/cpp/log.h +132 -132
  91. package/cpp/{chat-template.hpp → minja/chat-template.hpp} +537 -529
  92. package/cpp/{minja.hpp → minja/minja.hpp} +2941 -2883
  93. package/cpp/ops.cpp +8723 -0
  94. package/cpp/ops.h +128 -0
  95. package/cpp/rn-llama.cpp +45 -71
  96. package/cpp/rn-llama.h +3 -3
  97. package/cpp/sampling.cpp +573 -532
  98. package/cpp/sgemm.cpp +3043 -2598
  99. package/cpp/sgemm.h +14 -14
  100. package/cpp/simd-mappings.h +888 -0
  101. package/cpp/speculative.cpp +278 -277
  102. package/cpp/speculative.h +28 -28
  103. package/cpp/unary-ops.cpp +186 -0
  104. package/cpp/unary-ops.h +28 -0
  105. package/cpp/vec.cpp +258 -0
  106. package/cpp/vec.h +802 -0
  107. package/ios/CMakeLists.txt +5 -2
  108. package/ios/RNLlama.mm +2 -2
  109. package/ios/RNLlamaContext.mm +40 -24
  110. package/package.json +1 -1
  111. package/src/NativeRNLlama.ts +6 -4
  112. package/src/index.ts +3 -1
  113. package/android/src/main/build-arm64/CMakeCache.txt +0 -429
  114. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCCompiler.cmake +0 -81
  115. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCXXCompiler.cmake +0 -101
  116. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_C.bin +0 -0
  117. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_CXX.bin +0 -0
  118. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeSystem.cmake +0 -15
  119. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.c +0 -904
  120. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.o +0 -0
  121. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.cpp +0 -919
  122. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.o +0 -0
  123. package/android/src/main/build-arm64/CMakeFiles/CMakeConfigureLog.yaml +0 -431
  124. package/android/src/main/build-arm64/CMakeFiles/CMakeDirectoryInformation.cmake +0 -16
  125. package/android/src/main/build-arm64/CMakeFiles/Makefile.cmake +0 -165
  126. package/android/src/main/build-arm64/CMakeFiles/Makefile2 +0 -297
  127. package/android/src/main/build-arm64/CMakeFiles/Progress/1 +0 -1
  128. package/android/src/main/build-arm64/CMakeFiles/Progress/2 +0 -1
  129. package/android/src/main/build-arm64/CMakeFiles/Progress/3 +0 -1
  130. package/android/src/main/build-arm64/CMakeFiles/Progress/4 +0 -1
  131. package/android/src/main/build-arm64/CMakeFiles/Progress/5 +0 -1
  132. package/android/src/main/build-arm64/CMakeFiles/Progress/6 +0 -1
  133. package/android/src/main/build-arm64/CMakeFiles/Progress/count.txt +0 -1
  134. package/android/src/main/build-arm64/CMakeFiles/TargetDirectories.txt +0 -8
  135. package/android/src/main/build-arm64/CMakeFiles/cmake.check_cache +0 -1
  136. package/android/src/main/build-arm64/CMakeFiles/progress.marks +0 -1
  137. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o +0 -0
  138. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o.d +0 -58
  139. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o +0 -0
  140. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o.d +0 -756
  141. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o +0 -0
  142. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o.d +0 -709
  143. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o +0 -0
  144. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o.d +0 -714
  145. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o +0 -0
  146. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o.d +0 -62
  147. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o +0 -0
  148. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o.d +0 -708
  149. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o +0 -0
  150. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o.d +0 -113
  151. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o +0 -0
  152. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o.d +0 -713
  153. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o +0 -0
  154. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o.d +0 -763
  155. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o +0 -0
  156. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o.d +0 -61
  157. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o +0 -0
  158. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o.d +0 -707
  159. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o +0 -0
  160. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o.d +0 -104
  161. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o +0 -0
  162. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o.d +0 -714
  163. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o +0 -0
  164. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o.d +0 -723
  165. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/DependInfo.cmake +0 -62
  166. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/build.make +0 -722
  167. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/cmake_clean.cmake +0 -89
  168. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.make +0 -2
  169. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.ts +0 -2
  170. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/depend.make +0 -2
  171. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/flags.make +0 -17
  172. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/progress.make +0 -41
  173. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/DependInfo.cmake +0 -62
  174. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/build.make +0 -722
  175. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/cmake_clean.cmake +0 -89
  176. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.make +0 -2
  177. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.ts +0 -2
  178. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/depend.make +0 -2
  179. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/flags.make +0 -17
  180. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/progress.make +0 -41
  181. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/DependInfo.cmake +0 -62
  182. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/build.make +0 -722
  183. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/cmake_clean.cmake +0 -89
  184. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.make +0 -2
  185. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.ts +0 -2
  186. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/depend.make +0 -2
  187. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/flags.make +0 -17
  188. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/progress.make +0 -41
  189. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/DependInfo.cmake +0 -62
  190. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/build.make +0 -722
  191. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/cmake_clean.cmake +0 -89
  192. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.make +0 -2
  193. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.ts +0 -2
  194. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/depend.make +0 -2
  195. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/flags.make +0 -17
  196. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/progress.make +0 -41
  197. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/DependInfo.cmake +0 -62
  198. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/build.make +0 -722
  199. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/cmake_clean.cmake +0 -89
  200. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.make +0 -2
  201. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.ts +0 -2
  202. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/depend.make +0 -2
  203. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/flags.make +0 -17
  204. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/progress.make +0 -41
  205. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/DependInfo.cmake +0 -62
  206. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/build.make +0 -722
  207. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/cmake_clean.cmake +0 -89
  208. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.make +0 -2
  209. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.ts +0 -2
  210. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/depend.make +0 -2
  211. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/flags.make +0 -17
  212. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/progress.make +0 -41
  213. package/android/src/main/build-arm64/Makefile +0 -1862
  214. package/android/src/main/build-arm64/cmake_install.cmake +0 -66
  215. package/cpp/chat.hpp +0 -55
  216. package/cpp/rn-llama.hpp +0 -913
package/cpp/ggml-alloc.h CHANGED
@@ -19,7 +19,7 @@ struct lm_ggml_tallocr {
  };

  LM_GGML_API struct lm_ggml_tallocr lm_ggml_tallocr_new(lm_ggml_backend_buffer_t buffer);
- LM_GGML_API void lm_ggml_tallocr_alloc(struct lm_ggml_tallocr * talloc, struct lm_ggml_tensor * tensor);
+ LM_GGML_API enum lm_ggml_status lm_ggml_tallocr_alloc(struct lm_ggml_tallocr * talloc, struct lm_ggml_tensor * tensor);

  // Graph allocator
  /*
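Note: lm_ggml_tallocr_alloc now reports its result as enum lm_ggml_status instead of returning void, so callers can detect failed sub-allocations. A minimal caller sketch in C, assuming the package keeps ggml's prefixed status values (e.g. LM_GGML_STATUS_SUCCESS); the helper name below is illustrative, not part of this package:

    #include "ggml.h"
    #include "ggml-alloc.h"

    // Illustrative helper: place one tensor inside an already-created backend buffer
    // and report whether the sub-allocation succeeded.
    static bool try_alloc_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor) {
        struct lm_ggml_tallocr talloc = lm_ggml_tallocr_new(buffer);
        enum lm_ggml_status status = lm_ggml_tallocr_alloc(&talloc, tensor);
        return status == LM_GGML_STATUS_SUCCESS; // assumed value, mirroring upstream GGML_STATUS_SUCCESS
    }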
package/cpp/ggml-backend-impl.h CHANGED
@@ -1,255 +1,255 @@
- #pragma once
-
- // ggml-backend internal header
-
- #include "ggml-backend.h"
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- #define LM_GGML_BACKEND_API_VERSION 1
-
- //
- // Backend buffer type
- //
-
- struct lm_ggml_backend_buffer_type_i {
- const char * (*get_name) (lm_ggml_backend_buffer_type_t buft);
- // allocate a buffer of this type
- lm_ggml_backend_buffer_t (*alloc_buffer) (lm_ggml_backend_buffer_type_t buft, size_t size);
- // tensor alignment
- size_t (*get_alignment) (lm_ggml_backend_buffer_type_t buft);
- // (optional) max buffer size that can be allocated (defaults to SIZE_MAX)
- size_t (*get_max_size) (lm_ggml_backend_buffer_type_t buft);
- // (optional) data size needed to allocate the tensor, including padding (defaults to lm_ggml_nbytes)
- size_t (*get_alloc_size)(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
- // (optional) check if tensor data is in host memory and uses standard ggml tensor layout (defaults to false)
- bool (*is_host) (lm_ggml_backend_buffer_type_t buft);
- };
-
- struct lm_ggml_backend_buffer_type {
- struct lm_ggml_backend_buffer_type_i iface;
- lm_ggml_backend_dev_t device;
- void * context;
- };
-
- //
- // Backend buffer
- //
-
- struct lm_ggml_backend_buffer_i {
- // (optional) free the buffer
- void (*free_buffer) (lm_ggml_backend_buffer_t buffer);
- // base address of the buffer
- void * (*get_base) (lm_ggml_backend_buffer_t buffer);
- // (optional) initialize a tensor in the buffer (eg. add tensor extras)
- void (*init_tensor) (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
- // tensor data access
- void (*memset_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
- void (*set_tensor) (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
- void (*get_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
- // (optional) tensor copy: dst is in the buffer, src may be in any buffer, including buffers from a different backend (return false if not supported)
- bool (*cpy_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
- // clear the entire buffer
- void (*clear) (lm_ggml_backend_buffer_t buffer, uint8_t value);
- // (optional) reset any internal state due to tensor initialization, such as tensor extras
- void (*reset) (lm_ggml_backend_buffer_t buffer);
- };
-
- struct lm_ggml_backend_buffer {
- struct lm_ggml_backend_buffer_i iface;
- lm_ggml_backend_buffer_type_t buft;
- void * context;
- size_t size;
- enum lm_ggml_backend_buffer_usage usage;
- };
-
- LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
- lm_ggml_backend_buffer_type_t buft,
- struct lm_ggml_backend_buffer_i iface,
- void * context,
- size_t size);
-
- // do not use directly, use lm_ggml_backend_tensor_copy instead
- LM_GGML_API bool lm_ggml_backend_buffer_copy_tensor(const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
-
- // multi-buffer
- // buffer that contains a collection of buffers
- LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_multi_buffer_alloc_buffer(lm_ggml_backend_buffer_t * buffers, size_t n_buffers);
- LM_GGML_API bool lm_ggml_backend_buffer_is_multi_buffer(lm_ggml_backend_buffer_t buffer);
- LM_GGML_API void lm_ggml_backend_multi_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
-
- //
- // Backend (stream)
- //
-
- struct lm_ggml_backend_i {
- const char * (*get_name)(lm_ggml_backend_t backend);
-
- void (*free)(lm_ggml_backend_t backend);
-
- // (optional) asynchronous tensor data access
- void (*set_tensor_async)(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
- void (*get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
- bool (*cpy_tensor_async)(lm_ggml_backend_t backend_src, lm_ggml_backend_t backend_dst, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
-
- // (optional) complete all pending operations (required if the backend supports async operations)
- void (*synchronize)(lm_ggml_backend_t backend);
-
- // (optional) graph plans (not used currently)
- // compute graph with a plan
- lm_ggml_backend_graph_plan_t (*graph_plan_create) (lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
- void (*graph_plan_free) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
- // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
- void (*graph_plan_update) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan, const struct lm_ggml_cgraph * cgraph);
- // compute the graph with the plan
- enum lm_ggml_status (*graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
-
- // compute graph (always async if supported by the backend)
- enum lm_ggml_status (*graph_compute) (lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);
-
- // (optional) event synchronization
- // record an event on this stream
- void (*event_record)(lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
- // wait for an event on on a different stream
- void (*event_wait) (lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
- };
-
- struct lm_ggml_backend {
- lm_ggml_guid_t guid;
- struct lm_ggml_backend_i iface;
- lm_ggml_backend_dev_t device;
- void * context;
- };
-
- struct lm_ggml_backend_event {
- struct lm_ggml_backend_device * device;
- void * context;
- };
-
- //
- // Backend device
- //
-
- // Note: if additional properties are needed, we should add a struct with all of them
- // the current functions to obtain the properties can remain, since they are more convenient for often used properties
- struct lm_ggml_backend_device_i {
- // device name: short identifier for this device, such as "CPU" or "CUDA0"
- const char * (*get_name)(lm_ggml_backend_dev_t dev);
-
- // device description: short informative description of the device, could be the model name
- const char * (*get_description)(lm_ggml_backend_dev_t dev);
-
- // device memory in bytes
- void (*get_memory)(lm_ggml_backend_dev_t dev, size_t * free, size_t * total);
-
- // device type
- enum lm_ggml_backend_dev_type (*get_type)(lm_ggml_backend_dev_t dev);
-
- // device properties
- void (*get_props)(lm_ggml_backend_dev_t dev, struct lm_ggml_backend_dev_props * props);
-
- // backend (stream) initialization
- lm_ggml_backend_t (*init_backend)(lm_ggml_backend_dev_t dev, const char * params);
-
- // preferred buffer type
- lm_ggml_backend_buffer_type_t (*get_buffer_type)(lm_ggml_backend_dev_t dev);
-
- // (optional) host buffer type (in system memory, typically this is a pinned memory buffer for faster transfers between host and device)
- lm_ggml_backend_buffer_type_t (*get_host_buffer_type)(lm_ggml_backend_dev_t dev);
-
- // (optional) buffer from pointer: create a buffer from a host pointer (useful for memory mapped models and importing data from other libraries)
- lm_ggml_backend_buffer_t (*buffer_from_host_ptr)(lm_ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size);
-
- // check if the backend can compute an operation
- bool (*supports_op)(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op);
-
- // check if the backend can use tensors allocated in a buffer type
- bool (*supports_buft)(lm_ggml_backend_dev_t dev, lm_ggml_backend_buffer_type_t buft);
-
- // (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer
- // these should be expensive operations that may benefit from running on this backend instead of the CPU backend
- bool (*offload_op)(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op);
-
- // (optional) event synchronization
- lm_ggml_backend_event_t (*event_new) (lm_ggml_backend_dev_t dev);
- void (*event_free) (lm_ggml_backend_dev_t dev, lm_ggml_backend_event_t event);
- void (*event_synchronize) (lm_ggml_backend_dev_t dev, lm_ggml_backend_event_t event);
- };
-
- struct lm_ggml_backend_device {
- struct lm_ggml_backend_device_i iface;
- lm_ggml_backend_reg_t reg;
- void * context;
- };
-
- //
- // Backend (reg)
- //
-
- struct lm_ggml_backend_reg_i {
- const char * (*get_name)(lm_ggml_backend_reg_t reg);
-
- // enumerate available devices
- size_t (*get_device_count)(lm_ggml_backend_reg_t reg);
- lm_ggml_backend_dev_t (*get_device)(lm_ggml_backend_reg_t reg, size_t index);
-
- // (optional) get a pointer to a function in the backend
- // backends can add custom functions that are not part of the standard ggml-backend interface
- void * (*get_proc_address)(lm_ggml_backend_reg_t reg, const char * name);
- };
-
- struct lm_ggml_backend_reg {
- int api_version; // initialize to LM_GGML_BACKEND_API_VERSION
- struct lm_ggml_backend_reg_i iface;
- void * context;
- };
-
- // Internal backend registry API
- LM_GGML_API void lm_ggml_backend_register(lm_ggml_backend_reg_t reg);
-
- // Add backend dynamic loading support to the backend
-
- // Initialize the backend
- typedef lm_ggml_backend_reg_t (*lm_ggml_backend_init_t)(void);
- // Optional: obtain a score for the backend based on the system configuration
- // Higher scores are preferred, 0 means the backend is not supported in the current system
- typedef int (*lm_ggml_backend_score_t)(void);
-
- #ifdef LM_GGML_BACKEND_DL
- # ifdef __cplusplus
- # define LM_GGML_BACKEND_DL_IMPL(reg_fn) \
- extern "C" { \
- LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_init(void); \
- } \
- lm_ggml_backend_reg_t lm_ggml_backend_init(void) { \
- return reg_fn(); \
- }
- # define LM_GGML_BACKEND_DL_SCORE_IMPL(score_fn) \
- extern "C" { \
- LM_GGML_BACKEND_API int lm_ggml_backend_score(void); \
- } \
- int lm_ggml_backend_score(void) { \
- return score_fn(); \
- }
- # else
- # define LM_GGML_BACKEND_DL_IMPL(reg_fn) \
- LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_init(void); \
- lm_ggml_backend_reg_t lm_ggml_backend_init(void) { \
- return reg_fn(); \
- }
- # define LM_GGML_BACKEND_DL_SCORE_IMPL(score_fn) \
- LM_GGML_BACKEND_API int lm_ggml_backend_score(void); \
- int lm_ggml_backend_score(void) { \
- return score_fn(); \
- }
- # endif
- #else
- # define LM_GGML_BACKEND_DL_IMPL(reg_fn)
- # define LM_GGML_BACKEND_DL_SCORE_IMPL(score_fn)
- #endif
-
- #ifdef __cplusplus
- }
- #endif
+ #pragma once
+
+ // ggml-backend internal header
+
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define LM_GGML_BACKEND_API_VERSION 1
+
+ //
+ // Backend buffer type
+ //
+
+ struct lm_ggml_backend_buffer_type_i {
+ const char * (*get_name) (lm_ggml_backend_buffer_type_t buft);
+ // allocate a buffer of this type
+ lm_ggml_backend_buffer_t (*alloc_buffer) (lm_ggml_backend_buffer_type_t buft, size_t size);
+ // tensor alignment
+ size_t (*get_alignment) (lm_ggml_backend_buffer_type_t buft);
+ // (optional) max buffer size that can be allocated (defaults to SIZE_MAX)
+ size_t (*get_max_size) (lm_ggml_backend_buffer_type_t buft);
+ // (optional) data size needed to allocate the tensor, including padding (defaults to lm_ggml_nbytes)
+ size_t (*get_alloc_size)(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
+ // (optional) check if tensor data is in host memory and uses standard ggml tensor layout (defaults to false)
+ bool (*is_host) (lm_ggml_backend_buffer_type_t buft);
+ };
+
+ struct lm_ggml_backend_buffer_type {
+ struct lm_ggml_backend_buffer_type_i iface;
+ lm_ggml_backend_dev_t device;
+ void * context;
+ };
+
+ //
+ // Backend buffer
+ //
+
+ struct lm_ggml_backend_buffer_i {
+ // (optional) free the buffer
+ void (*free_buffer) (lm_ggml_backend_buffer_t buffer);
+ // base address of the buffer
+ void * (*get_base) (lm_ggml_backend_buffer_t buffer);
+ // (optional) initialize a tensor in the buffer (eg. add tensor extras)
+ enum lm_ggml_status (*init_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
+ // tensor data access
+ void (*memset_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+ void (*set_tensor) (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ void (*get_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ // (optional) tensor copy: dst is in the buffer, src may be in any buffer, including buffers from a different backend (return false if not supported)
+ bool (*cpy_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
+ // clear the entire buffer
+ void (*clear) (lm_ggml_backend_buffer_t buffer, uint8_t value);
+ // (optional) reset any internal state due to tensor initialization, such as tensor extras
+ void (*reset) (lm_ggml_backend_buffer_t buffer);
+ };
+
+ struct lm_ggml_backend_buffer {
+ struct lm_ggml_backend_buffer_i iface;
+ lm_ggml_backend_buffer_type_t buft;
+ void * context;
+ size_t size;
+ enum lm_ggml_backend_buffer_usage usage;
+ };
+
+ LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
+ lm_ggml_backend_buffer_type_t buft,
+ struct lm_ggml_backend_buffer_i iface,
+ void * context,
+ size_t size);
+
+ // do not use directly, use lm_ggml_backend_tensor_copy instead
+ LM_GGML_API bool lm_ggml_backend_buffer_copy_tensor(const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
+
+ // multi-buffer
+ // buffer that contains a collection of buffers
+ LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_multi_buffer_alloc_buffer(lm_ggml_backend_buffer_t * buffers, size_t n_buffers);
+ LM_GGML_API bool lm_ggml_backend_buffer_is_multi_buffer(lm_ggml_backend_buffer_t buffer);
+ LM_GGML_API void lm_ggml_backend_multi_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
+
+ //
+ // Backend (stream)
+ //
+
+ struct lm_ggml_backend_i {
+ const char * (*get_name)(lm_ggml_backend_t backend);
+
+ void (*free)(lm_ggml_backend_t backend);
+
+ // (optional) asynchronous tensor data access
+ void (*set_tensor_async)(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ void (*get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ bool (*cpy_tensor_async)(lm_ggml_backend_t backend_src, lm_ggml_backend_t backend_dst, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
+
+ // (optional) complete all pending operations (required if the backend supports async operations)
+ void (*synchronize)(lm_ggml_backend_t backend);
+
+ // (optional) graph plans (not used currently)
+ // compute graph with a plan
+ lm_ggml_backend_graph_plan_t (*graph_plan_create) (lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
+ void (*graph_plan_free) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
+ // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
+ void (*graph_plan_update) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan, const struct lm_ggml_cgraph * cgraph);
+ // compute the graph with the plan
+ enum lm_ggml_status (*graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
+
+ // compute graph (always async if supported by the backend)
+ enum lm_ggml_status (*graph_compute) (lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);
+
+ // (optional) event synchronization
+ // record an event on this stream
+ void (*event_record)(lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
+ // wait for an event on on a different stream
+ void (*event_wait) (lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
+ };
+
+ struct lm_ggml_backend {
+ lm_ggml_guid_t guid;
+ struct lm_ggml_backend_i iface;
+ lm_ggml_backend_dev_t device;
+ void * context;
+ };
+
+ struct lm_ggml_backend_event {
+ struct lm_ggml_backend_device * device;
+ void * context;
+ };
+
+ //
+ // Backend device
+ //
+
+ // Note: if additional properties are needed, we should add a struct with all of them
+ // the current functions to obtain the properties can remain, since they are more convenient for often used properties
+ struct lm_ggml_backend_device_i {
+ // device name: short identifier for this device, such as "CPU" or "CUDA0"
+ const char * (*get_name)(lm_ggml_backend_dev_t dev);
+
+ // device description: short informative description of the device, could be the model name
+ const char * (*get_description)(lm_ggml_backend_dev_t dev);
+
+ // device memory in bytes
+ void (*get_memory)(lm_ggml_backend_dev_t dev, size_t * free, size_t * total);
+
+ // device type
+ enum lm_ggml_backend_dev_type (*get_type)(lm_ggml_backend_dev_t dev);
+
+ // device properties
+ void (*get_props)(lm_ggml_backend_dev_t dev, struct lm_ggml_backend_dev_props * props);
+
+ // backend (stream) initialization
+ lm_ggml_backend_t (*init_backend)(lm_ggml_backend_dev_t dev, const char * params);
+
+ // preferred buffer type
+ lm_ggml_backend_buffer_type_t (*get_buffer_type)(lm_ggml_backend_dev_t dev);
+
+ // (optional) host buffer type (in system memory, typically this is a pinned memory buffer for faster transfers between host and device)
+ lm_ggml_backend_buffer_type_t (*get_host_buffer_type)(lm_ggml_backend_dev_t dev);
+
+ // (optional) buffer from pointer: create a buffer from a host pointer (useful for memory mapped models and importing data from other libraries)
+ lm_ggml_backend_buffer_t (*buffer_from_host_ptr)(lm_ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size);
+
+ // check if the backend can compute an operation
+ bool (*supports_op)(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op);
+
+ // check if the backend can use tensors allocated in a buffer type
+ bool (*supports_buft)(lm_ggml_backend_dev_t dev, lm_ggml_backend_buffer_type_t buft);
+
+ // (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer
+ // these should be expensive operations that may benefit from running on this backend instead of the CPU backend
+ bool (*offload_op)(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op);
+
+ // (optional) event synchronization
+ lm_ggml_backend_event_t (*event_new) (lm_ggml_backend_dev_t dev);
+ void (*event_free) (lm_ggml_backend_dev_t dev, lm_ggml_backend_event_t event);
+ void (*event_synchronize) (lm_ggml_backend_dev_t dev, lm_ggml_backend_event_t event);
+ };
+
+ struct lm_ggml_backend_device {
+ struct lm_ggml_backend_device_i iface;
+ lm_ggml_backend_reg_t reg;
+ void * context;
+ };
+
+ //
+ // Backend (reg)
+ //
+
+ struct lm_ggml_backend_reg_i {
+ const char * (*get_name)(lm_ggml_backend_reg_t reg);
+
+ // enumerate available devices
+ size_t (*get_device_count)(lm_ggml_backend_reg_t reg);
+ lm_ggml_backend_dev_t (*get_device)(lm_ggml_backend_reg_t reg, size_t index);
+
+ // (optional) get a pointer to a function in the backend
+ // backends can add custom functions that are not part of the standard ggml-backend interface
+ void * (*get_proc_address)(lm_ggml_backend_reg_t reg, const char * name);
+ };
+
+ struct lm_ggml_backend_reg {
+ int api_version; // initialize to LM_GGML_BACKEND_API_VERSION
+ struct lm_ggml_backend_reg_i iface;
+ void * context;
+ };
+
+ // Internal backend registry API
+ LM_GGML_API void lm_ggml_backend_register(lm_ggml_backend_reg_t reg);
+
+ // Add backend dynamic loading support to the backend
+
+ // Initialize the backend
+ typedef lm_ggml_backend_reg_t (*lm_ggml_backend_init_t)(void);
+ // Optional: obtain a score for the backend based on the system configuration
+ // Higher scores are preferred, 0 means the backend is not supported in the current system
+ typedef int (*lm_ggml_backend_score_t)(void);
+
+ #ifdef LM_GGML_BACKEND_DL
+ # ifdef __cplusplus
+ # define LM_GGML_BACKEND_DL_IMPL(reg_fn) \
+ extern "C" { \
+ LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_init(void); \
+ } \
+ lm_ggml_backend_reg_t lm_ggml_backend_init(void) { \
+ return reg_fn(); \
+ }
+ # define LM_GGML_BACKEND_DL_SCORE_IMPL(score_fn) \
+ extern "C" { \
+ LM_GGML_BACKEND_API int lm_ggml_backend_score(void); \
+ } \
+ int lm_ggml_backend_score(void) { \
+ return score_fn(); \
+ }
+ # else
+ # define LM_GGML_BACKEND_DL_IMPL(reg_fn) \
+ LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_init(void); \
+ lm_ggml_backend_reg_t lm_ggml_backend_init(void) { \
+ return reg_fn(); \
+ }
+ # define LM_GGML_BACKEND_DL_SCORE_IMPL(score_fn) \
+ LM_GGML_BACKEND_API int lm_ggml_backend_score(void); \
+ int lm_ggml_backend_score(void) { \
+ return score_fn(); \
+ }
+ # endif
+ #else
+ # define LM_GGML_BACKEND_DL_IMPL(reg_fn)
+ # define LM_GGML_BACKEND_DL_SCORE_IMPL(score_fn)
+ #endif
+
+ #ifdef __cplusplus
+ }
+ #endif
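The only functional change in this otherwise whole-file rewrite is the init_tensor callback, which now returns enum lm_ggml_status rather than void. A sketch of how a custom backend buffer implementation might adapt to the new signature; the context struct and the specific status values are assumptions for illustration, not APIs introduced by this diff:

    #include "ggml-backend-impl.h"

    // Hypothetical per-buffer state for a custom backend.
    struct my_buffer_context {
        void * base;
    };

    // New-style callback: failures during tensor initialization can now be reported.
    static enum lm_ggml_status my_buffer_init_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor) {
        struct my_buffer_context * ctx = (struct my_buffer_context *) buffer->context;
        if (ctx == NULL || ctx->base == NULL) {
            return LM_GGML_STATUS_FAILED;   // assumed value, mirroring upstream GGML_STATUS_FAILED
        }
        (void) tensor; // no tensor extras to set up in this sketch
        return LM_GGML_STATUS_SUCCESS;      // assumed value, mirroring upstream GGML_STATUS_SUCCESS
    }

    // Wiring stays the same as before; only the callback's return type changes:
    //     buffer_iface.init_tensor = my_buffer_init_tensor;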