@fugood/llama.node 0.3.2 → 0.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (286)
  1. package/CMakeLists.txt +7 -0
  2. package/bin/darwin/arm64/llama-node.node +0 -0
  3. package/bin/darwin/x64/llama-node.node +0 -0
  4. package/bin/linux/arm64/llama-node.node +0 -0
  5. package/bin/linux/x64/llama-node.node +0 -0
  6. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  7. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  8. package/bin/win32/arm64/llama-node.node +0 -0
  9. package/bin/win32/arm64/node.lib +0 -0
  10. package/bin/win32/x64/llama-node.node +0 -0
  11. package/bin/win32/x64/node.lib +0 -0
  12. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  13. package/bin/win32-vulkan/arm64/node.lib +0 -0
  14. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  15. package/bin/win32-vulkan/x64/node.lib +0 -0
  16. package/lib/binding.ts +18 -1
  17. package/package.json +1 -1
  18. package/src/DetokenizeWorker.cpp +1 -1
  19. package/src/EmbeddingWorker.cpp +17 -7
  20. package/src/EmbeddingWorker.h +2 -1
  21. package/src/LlamaCompletionWorker.cpp +8 -8
  22. package/src/LlamaCompletionWorker.h +2 -2
  23. package/src/LlamaContext.cpp +89 -27
  24. package/src/LlamaContext.h +2 -0
  25. package/src/TokenizeWorker.cpp +1 -1
  26. package/src/common.hpp +4 -4
  27. package/src/llama.cpp/.github/workflows/build.yml +240 -168
  28. package/src/llama.cpp/.github/workflows/docker.yml +8 -8
  29. package/src/llama.cpp/.github/workflows/python-lint.yml +8 -1
  30. package/src/llama.cpp/.github/workflows/server.yml +21 -14
  31. package/src/llama.cpp/CMakeLists.txt +14 -6
  32. package/src/llama.cpp/Sources/llama/llama.h +4 -0
  33. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +16 -0
  34. package/src/llama.cpp/cmake/common.cmake +33 -0
  35. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +11 -0
  36. package/src/llama.cpp/common/CMakeLists.txt +6 -4
  37. package/src/llama.cpp/common/arg.cpp +986 -770
  38. package/src/llama.cpp/common/arg.h +22 -22
  39. package/src/llama.cpp/common/common.cpp +212 -351
  40. package/src/llama.cpp/common/common.h +204 -117
  41. package/src/llama.cpp/common/json-schema-to-grammar.cpp +1 -1
  42. package/src/llama.cpp/common/log.cpp +50 -50
  43. package/src/llama.cpp/common/log.h +18 -18
  44. package/src/llama.cpp/common/ngram-cache.cpp +36 -36
  45. package/src/llama.cpp/common/ngram-cache.h +19 -19
  46. package/src/llama.cpp/common/sampling.cpp +163 -121
  47. package/src/llama.cpp/common/sampling.h +41 -20
  48. package/src/llama.cpp/common/speculative.cpp +274 -0
  49. package/src/llama.cpp/common/speculative.h +28 -0
  50. package/src/llama.cpp/docs/build.md +134 -161
  51. package/src/llama.cpp/examples/CMakeLists.txt +33 -14
  52. package/src/llama.cpp/examples/batched/CMakeLists.txt +1 -1
  53. package/src/llama.cpp/examples/batched/batched.cpp +19 -18
  54. package/src/llama.cpp/examples/batched-bench/CMakeLists.txt +1 -1
  55. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +10 -11
  56. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +1 -1
  57. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +1 -1
  58. package/src/llama.cpp/examples/cvector-generator/CMakeLists.txt +1 -1
  59. package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +9 -9
  60. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +1 -1
  61. package/src/llama.cpp/examples/embedding/CMakeLists.txt +1 -1
  62. package/src/llama.cpp/examples/embedding/embedding.cpp +12 -12
  63. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +3 -2
  64. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +8 -8
  65. package/src/llama.cpp/examples/export-lora/CMakeLists.txt +1 -1
  66. package/src/llama.cpp/examples/export-lora/export-lora.cpp +5 -5
  67. package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +1 -1
  68. package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +4 -7
  69. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +1 -1
  70. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +7 -7
  71. package/src/llama.cpp/examples/gguf/CMakeLists.txt +1 -1
  72. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +8 -1
  73. package/src/llama.cpp/examples/gguf-split/CMakeLists.txt +1 -1
  74. package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +2 -2
  75. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +1 -1
  76. package/src/llama.cpp/examples/gritlm/gritlm.cpp +18 -18
  77. package/src/llama.cpp/examples/imatrix/CMakeLists.txt +1 -1
  78. package/src/llama.cpp/examples/imatrix/imatrix.cpp +31 -13
  79. package/src/llama.cpp/examples/infill/CMakeLists.txt +1 -1
  80. package/src/llama.cpp/examples/infill/infill.cpp +41 -87
  81. package/src/llama.cpp/examples/llama-bench/CMakeLists.txt +1 -1
  82. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +439 -459
  83. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +2 -0
  84. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +11 -14
  85. package/src/llama.cpp/examples/llava/CMakeLists.txt +10 -3
  86. package/src/llama.cpp/examples/llava/clip.cpp +263 -66
  87. package/src/llama.cpp/examples/llava/clip.h +8 -2
  88. package/src/llama.cpp/examples/llava/llava-cli.cpp +23 -23
  89. package/src/llama.cpp/examples/llava/llava.cpp +83 -22
  90. package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +21 -21
  91. package/src/llama.cpp/examples/llava/qwen2vl-cli.cpp +581 -0
  92. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +1 -1
  93. package/src/llama.cpp/examples/lookahead/lookahead.cpp +26 -26
  94. package/src/llama.cpp/examples/lookup/CMakeLists.txt +4 -4
  95. package/src/llama.cpp/examples/lookup/lookup-create.cpp +7 -7
  96. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +4 -4
  97. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +16 -15
  98. package/src/llama.cpp/examples/lookup/lookup.cpp +30 -30
  99. package/src/llama.cpp/examples/main/CMakeLists.txt +1 -1
  100. package/src/llama.cpp/examples/main/main.cpp +73 -114
  101. package/src/llama.cpp/examples/main-cmake-pkg/CMakeLists.txt +1 -1
  102. package/src/llama.cpp/examples/parallel/CMakeLists.txt +1 -1
  103. package/src/llama.cpp/examples/parallel/parallel.cpp +18 -19
  104. package/src/llama.cpp/examples/passkey/CMakeLists.txt +1 -1
  105. package/src/llama.cpp/examples/passkey/passkey.cpp +14 -14
  106. package/src/llama.cpp/examples/perplexity/CMakeLists.txt +1 -1
  107. package/src/llama.cpp/examples/perplexity/perplexity.cpp +99 -120
  108. package/src/llama.cpp/examples/quantize/CMakeLists.txt +1 -1
  109. package/src/llama.cpp/examples/quantize/quantize.cpp +0 -3
  110. package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +1 -1
  111. package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +10 -9
  112. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +1 -1
  113. package/src/llama.cpp/examples/retrieval/retrieval.cpp +16 -16
  114. package/src/llama.cpp/examples/rpc/rpc-server.cpp +3 -1
  115. package/src/llama.cpp/examples/run/CMakeLists.txt +5 -0
  116. package/src/llama.cpp/examples/run/run.cpp +911 -0
  117. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +1 -1
  118. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +38 -21
  119. package/src/llama.cpp/examples/server/CMakeLists.txt +3 -16
  120. package/src/llama.cpp/examples/server/server.cpp +2073 -1339
  121. package/src/llama.cpp/examples/server/tests/requirements.txt +2 -2
  122. package/src/llama.cpp/examples/server/utils.hpp +354 -277
  123. package/src/llama.cpp/examples/simple/CMakeLists.txt +2 -2
  124. package/src/llama.cpp/examples/simple/simple.cpp +130 -94
  125. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +5 -0
  126. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +200 -0
  127. package/src/llama.cpp/examples/speculative/CMakeLists.txt +1 -1
  128. package/src/llama.cpp/examples/speculative/speculative.cpp +68 -64
  129. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +5 -0
  130. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +265 -0
  131. package/src/llama.cpp/examples/tokenize/CMakeLists.txt +1 -1
  132. package/src/llama.cpp/examples/tokenize/tokenize.cpp +3 -3
  133. package/src/llama.cpp/examples/tts/CMakeLists.txt +5 -0
  134. package/src/llama.cpp/examples/tts/tts.cpp +932 -0
  135. package/src/llama.cpp/ggml/CMakeLists.txt +54 -36
  136. package/src/llama.cpp/ggml/include/ggml-backend.h +63 -34
  137. package/src/llama.cpp/ggml/include/ggml-blas.h +5 -3
  138. package/src/llama.cpp/ggml/include/ggml-cann.h +9 -7
  139. package/src/llama.cpp/ggml/include/ggml-cpp.h +38 -0
  140. package/src/llama.cpp/ggml/include/ggml-cpu.h +135 -0
  141. package/src/llama.cpp/ggml/include/ggml-cuda.h +12 -12
  142. package/src/llama.cpp/ggml/include/ggml-kompute.h +7 -3
  143. package/src/llama.cpp/ggml/include/ggml-metal.h +11 -7
  144. package/src/llama.cpp/ggml/include/ggml-opencl.h +26 -0
  145. package/src/llama.cpp/ggml/include/ggml-opt.h +216 -0
  146. package/src/llama.cpp/ggml/include/ggml-rpc.h +9 -5
  147. package/src/llama.cpp/ggml/include/ggml-sycl.h +18 -11
  148. package/src/llama.cpp/ggml/include/ggml-vulkan.h +10 -8
  149. package/src/llama.cpp/ggml/include/ggml.h +159 -417
  150. package/src/llama.cpp/ggml/src/CMakeLists.txt +121 -1155
  151. package/src/llama.cpp/ggml/src/ggml-alloc.c +23 -28
  152. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +57 -36
  153. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +552 -0
  154. package/src/llama.cpp/ggml/src/ggml-backend.cpp +306 -867
  155. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +87 -0
  156. package/src/llama.cpp/ggml/src/{ggml-blas.cpp → ggml-blas/ggml-blas.cpp} +216 -65
  157. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +76 -0
  158. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +456 -111
  159. package/src/llama.cpp/ggml/src/ggml-cann/common.h +6 -3
  160. package/src/llama.cpp/ggml/src/{ggml-cann.cpp → ggml-cann/ggml-cann.cpp} +343 -177
  161. package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +2 -5
  162. package/src/llama.cpp/ggml/src/ggml-cann/kernels/dup.cpp +22 -9
  163. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f16.cpp +24 -13
  164. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f32.cpp +23 -13
  165. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +11 -0
  166. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +10 -0
  167. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +10 -0
  168. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +17 -0
  169. package/src/llama.cpp/ggml/src/ggml-common.h +42 -42
  170. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +336 -0
  171. package/src/llama.cpp/ggml/src/ggml-cpu/amx/amx.cpp +220 -0
  172. package/src/llama.cpp/ggml/src/ggml-cpu/amx/amx.h +8 -0
  173. package/src/llama.cpp/ggml/src/ggml-cpu/amx/common.h +91 -0
  174. package/src/llama.cpp/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
  175. package/src/llama.cpp/ggml/src/ggml-cpu/amx/mmq.h +10 -0
  176. package/src/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +323 -0
  177. package/src/llama.cpp/ggml/src/{ggml-aarch64.c → ggml-cpu/ggml-cpu-aarch64.cpp} +1299 -246
  178. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
  179. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
  180. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
  181. package/src/llama.cpp/ggml/src/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +14 -242
  182. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +10835 -0
  183. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
  184. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
  185. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
  186. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +14123 -0
  187. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +628 -0
  188. package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.cpp +666 -0
  189. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +152 -0
  190. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +8 -0
  191. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +104 -0
  192. package/src/llama.cpp/ggml/src/ggml-impl.h +393 -22
  193. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
  194. package/src/llama.cpp/ggml/src/{ggml-kompute.cpp → ggml-kompute/ggml-kompute.cpp} +360 -127
  195. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +105 -0
  196. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +288 -0
  197. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +107 -0
  198. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +147 -0
  199. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +4004 -0
  200. package/src/llama.cpp/ggml/src/ggml-opt.cpp +854 -0
  201. package/src/llama.cpp/ggml/src/ggml-quants.c +188 -10702
  202. package/src/llama.cpp/ggml/src/ggml-quants.h +78 -125
  203. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
  204. package/src/llama.cpp/ggml/src/{ggml-rpc.cpp → ggml-rpc/ggml-rpc.cpp} +478 -300
  205. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +84 -0
  206. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +3 -0
  207. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +36 -5
  208. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +259 -0
  209. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +3 -2
  210. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +1 -1
  211. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +5 -5
  212. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +34 -35
  213. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +1030 -0
  214. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +76 -0
  215. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +4 -4
  216. package/src/llama.cpp/ggml/src/{ggml-sycl.cpp → ggml-sycl/ggml-sycl.cpp} +3638 -4151
  217. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +3 -2
  218. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +6 -6
  219. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +75 -87
  220. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +7 -6
  221. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +56 -0
  222. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +11 -0
  223. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +6 -0
  224. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +4 -3
  225. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +7 -7
  226. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +1 -0
  227. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +4 -4
  228. package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.cpp +141 -0
  229. package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.hpp +10 -0
  230. package/src/llama.cpp/ggml/src/ggml-threading.cpp +12 -0
  231. package/src/llama.cpp/ggml/src/ggml-threading.h +14 -0
  232. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +92 -0
  233. package/src/llama.cpp/ggml/src/{ggml-vulkan.cpp → ggml-vulkan/ggml-vulkan.cpp} +2138 -887
  234. package/src/llama.cpp/ggml/src/{vulkan-shaders → ggml-vulkan/vulkan-shaders}/CMakeLists.txt +3 -1
  235. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +593 -0
  236. package/src/llama.cpp/ggml/src/ggml.c +4427 -20125
  237. package/src/llama.cpp/include/llama-cpp.h +25 -0
  238. package/src/llama.cpp/include/llama.h +93 -52
  239. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +112 -0
  240. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +46 -0
  241. package/src/llama.cpp/pocs/CMakeLists.txt +3 -1
  242. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +2 -2
  243. package/src/llama.cpp/pocs/vdot/q8dot.cpp +4 -3
  244. package/src/llama.cpp/pocs/vdot/vdot.cpp +8 -7
  245. package/src/llama.cpp/src/CMakeLists.txt +4 -8
  246. package/src/llama.cpp/src/llama-grammar.cpp +15 -15
  247. package/src/llama.cpp/src/llama-grammar.h +2 -5
  248. package/src/llama.cpp/src/llama-sampling.cpp +779 -194
  249. package/src/llama.cpp/src/llama-sampling.h +21 -2
  250. package/src/llama.cpp/src/llama-vocab.cpp +55 -10
  251. package/src/llama.cpp/src/llama-vocab.h +35 -11
  252. package/src/llama.cpp/src/llama.cpp +4317 -2979
  253. package/src/llama.cpp/src/unicode-data.cpp +2 -2
  254. package/src/llama.cpp/src/unicode.cpp +62 -51
  255. package/src/llama.cpp/src/unicode.h +9 -10
  256. package/src/llama.cpp/tests/CMakeLists.txt +48 -38
  257. package/src/llama.cpp/tests/test-arg-parser.cpp +15 -15
  258. package/src/llama.cpp/tests/test-backend-ops.cpp +324 -80
  259. package/src/llama.cpp/tests/test-barrier.cpp +1 -0
  260. package/src/llama.cpp/tests/test-chat-template.cpp +59 -9
  261. package/src/llama.cpp/tests/test-gguf.cpp +1303 -0
  262. package/src/llama.cpp/tests/test-grammar-integration.cpp +3 -6
  263. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +17 -4
  264. package/src/llama.cpp/tests/test-llama-grammar.cpp +2 -4
  265. package/src/llama.cpp/tests/test-log.cpp +2 -2
  266. package/src/llama.cpp/tests/test-opt.cpp +853 -142
  267. package/src/llama.cpp/tests/test-quantize-fns.cpp +24 -21
  268. package/src/llama.cpp/tests/test-quantize-perf.cpp +16 -14
  269. package/src/llama.cpp/tests/test-rope.cpp +62 -20
  270. package/src/llama.cpp/tests/test-sampling.cpp +163 -138
  271. package/src/llama.cpp/tests/test-tokenizer-0.cpp +7 -7
  272. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +5 -5
  273. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +5 -5
  274. package/src/llama.cpp/.github/workflows/nix-ci-aarch64.yml +0 -72
  275. package/src/llama.cpp/.github/workflows/nix-ci.yml +0 -79
  276. package/src/llama.cpp/.github/workflows/nix-flake-update.yml +0 -22
  277. package/src/llama.cpp/.github/workflows/nix-publish-flake.yml +0 -36
  278. package/src/llama.cpp/common/train.cpp +0 -1515
  279. package/src/llama.cpp/common/train.h +0 -233
  280. package/src/llama.cpp/examples/baby-llama/CMakeLists.txt +0 -5
  281. package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +0 -1639
  282. package/src/llama.cpp/ggml/src/ggml-aarch64.h +0 -39
  283. package/src/llama.cpp/ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp +0 -600
  284. package/src/llama.cpp/tests/test-grad0.cpp +0 -1683
  285. /package/src/llama.cpp/ggml/{cmake → src/ggml-cpu/cmake}/FindSIMD.cmake +0 -0
  286. /package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.h +0 -0
@@ -7,7 +7,7 @@
7
7
  extern "C" {
8
8
  #endif
9
9
 
10
- #ifdef GGML_USE_HIPBLAS
10
+ #ifdef GGML_USE_HIP
11
11
  #define GGML_CUDA_NAME "ROCm"
12
12
  #define GGML_CUBLAS_NAME "hipBLAS"
13
13
  #elif defined(GGML_USE_MUSA)
@@ -20,27 +20,27 @@ extern "C" {
20
20
  #define GGML_CUDA_MAX_DEVICES 16
21
21
 
22
22
  // backend API
23
- GGML_API ggml_backend_t ggml_backend_cuda_init(int device);
23
+ GGML_BACKEND_API ggml_backend_t ggml_backend_cuda_init(int device);
24
24
 
25
- GGML_API bool ggml_backend_is_cuda(ggml_backend_t backend);
25
+ GGML_BACKEND_API bool ggml_backend_is_cuda(ggml_backend_t backend);
26
26
 
27
27
  // device buffer
28
- GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
28
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
29
29
 
30
30
  // split tensor buffer that splits matrices by rows across multiple devices
31
- GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split);
31
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(int main_device, const float * tensor_split);
32
32
 
33
33
  // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
34
- GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
34
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
35
35
 
36
- GGML_API int ggml_backend_cuda_get_device_count(void);
37
- GGML_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
38
- GGML_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
36
+ GGML_BACKEND_API int ggml_backend_cuda_get_device_count(void);
37
+ GGML_BACKEND_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
38
+ GGML_BACKEND_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
39
39
 
40
- GGML_API bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
41
- GGML_API void ggml_backend_cuda_unregister_host_buffer(void * buffer);
40
+ GGML_BACKEND_API bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
41
+ GGML_BACKEND_API void ggml_backend_cuda_unregister_host_buffer(void * buffer);
42
42
 
43
- GGML_API ggml_backend_reg_t ggml_backend_cuda_reg(void);
43
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cuda_reg(void);
44
44
 
45
45
  #ifdef __cplusplus
46
46
  }
@@ -11,6 +11,8 @@
11
11
  extern "C" {
12
12
  #endif
13
13
 
14
+ #define GGML_KOMPUTE_MAX_DEVICES 16
15
+
14
16
  struct ggml_vk_device {
15
17
  int index;
16
18
  int type; // same as VkPhysicalDeviceType
@@ -35,11 +37,13 @@ struct ggml_vk_device ggml_vk_current_device(void);
35
37
  // forward declaration
36
38
  typedef struct ggml_backend * ggml_backend_t;
37
39
 
38
- GGML_API ggml_backend_t ggml_backend_kompute_init(int device);
40
+ GGML_BACKEND_API ggml_backend_t ggml_backend_kompute_init(int device);
41
+
42
+ GGML_BACKEND_API bool ggml_backend_is_kompute(ggml_backend_t backend);
39
43
 
40
- GGML_API bool ggml_backend_is_kompute(ggml_backend_t backend);
44
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device);
41
45
 
42
- GGML_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device);
46
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_kompute_reg(void);
43
47
 
44
48
  #ifdef __cplusplus
45
49
  }
@@ -39,23 +39,27 @@ extern "C" {
39
39
  // user-code should use only these functions
40
40
  //
41
41
 
42
- GGML_API ggml_backend_t ggml_backend_metal_init(void);
42
+ GGML_BACKEND_API ggml_backend_t ggml_backend_metal_init(void);
43
43
 
44
- GGML_API bool ggml_backend_is_metal(ggml_backend_t backend);
44
+ GGML_BACKEND_API bool ggml_backend_is_metal(ggml_backend_t backend);
45
45
 
46
- GGML_API ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size);
46
+ GGML_DEPRECATED(
47
+ GGML_BACKEND_API ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
48
+ "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");
47
49
 
48
- GGML_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data);
50
+ GGML_BACKEND_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data);
49
51
 
50
- GGML_API ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void);
52
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void);
51
53
 
52
54
  // helper to check if the device supports a specific family
53
55
  // ideally, the user code should be doing these checks
54
56
  // ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
55
- GGML_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family);
57
+ GGML_BACKEND_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family);
56
58
 
57
59
  // capture all command buffers committed the next time `ggml_backend_graph_compute` is called
58
- GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
60
+ GGML_BACKEND_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
61
+
62
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_metal_reg(void);
59
63
 
60
64
  #ifdef __cplusplus
61
65
  }
@@ -0,0 +1,26 @@
1
+ #ifndef GGML_OPENCL_H
2
+ #define GGML_OPENCL_H
3
+
4
+ #include "ggml.h"
5
+ #include "ggml-backend.h"
6
+
7
+ #ifdef __cplusplus
8
+ extern "C" {
9
+ #endif
10
+
11
+ //
12
+ // backend API
13
+ //
14
+ GGML_BACKEND_API ggml_backend_t ggml_backend_opencl_init(void);
15
+ GGML_BACKEND_API bool ggml_backend_is_opencl(ggml_backend_t backend);
16
+
17
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void);
18
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void);
19
+
20
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_opencl_reg(void);
21
+
22
+ #ifdef __cplusplus
23
+ }
24
+ #endif
25
+
26
+ #endif // GGML_OPENCL_H
@@ -0,0 +1,216 @@
1
+ // This file contains functionality for training models using GGML.
2
+ // It is not strictly needed vs. just vanilla GGML but it provides a more high-level interface for common needs such as datasets.
3
+ // At the bottom of this file especially there are relatively high-level functions that are suitable for use or adaptation in user code.
4
+ //
5
+ // Module maintainer: Johannes Gäßler (@JohannesGaessler, johannesg@5d6.de)
6
+
7
+ #pragma once
8
+
9
+ #include "ggml.h"
10
+ #include "ggml-backend.h"
11
+
12
+ #include <stdint.h>
13
+
14
+ #ifdef __cplusplus
15
+ extern "C" {
16
+ #endif
17
+
18
+ struct ggml_opt_dataset;
19
+ struct ggml_opt_context;
20
+ struct ggml_opt_result;
21
+
22
+ typedef struct ggml_opt_dataset * ggml_opt_dataset_t;
23
+ typedef struct ggml_opt_context * ggml_opt_context_t;
24
+ typedef struct ggml_opt_result * ggml_opt_result_t;
25
+
26
+ // ====== Loss ======
27
+
28
+ // built-in loss types, i.e. the built-in quantities minimized by the optimizer
29
+ // custom loss types can be defined via mean or sum which simply reduce the outputs for all datapoints to a single value
30
+ enum ggml_opt_loss_type {
31
+ GGML_OPT_LOSS_TYPE_MEAN,
32
+ GGML_OPT_LOSS_TYPE_SUM,
33
+ GGML_OPT_LOSS_TYPE_CROSS_ENTROPY,
34
+ GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR,
35
+ };
36
+
37
+ // ====== Dataset ======
38
+
39
+ GGML_API ggml_opt_dataset_t ggml_opt_dataset_init(
40
+ int64_t ne_datapoint, // number of elements per datapoint
41
+ int64_t ne_label, // number of elements per label
42
+ int64_t ndata, // total number of datapoints/labels
43
+ int64_t ndata_shard); // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied)
44
+ GGML_API void ggml_opt_dataset_free(ggml_opt_dataset_t dataset);
45
+
46
+ // get underlying tensors that store the data
47
+ GGML_API struct ggml_tensor * ggml_opt_dataset_data (ggml_opt_dataset_t dataset); // shape = [ne_datapoint, ndata]
48
+ GGML_API struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset); // shape = [nd_label, ndata]
49
+
50
+ // shuffle idata first datapoints from dataset with RNG from opt_ctx, shuffle all datapoints if idata is negative
51
+ GGML_API void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata);
52
+
53
+ // get batch at position ibatch from dataset and copy the data to data_batch and labels_batch
54
+ GGML_API void ggml_opt_dataset_get_batch(
55
+ ggml_opt_dataset_t dataset,
56
+ struct ggml_tensor * data_batch, // shape = [ne_datapoint, ndata_batch]
57
+ struct ggml_tensor * labels_batch, // shape = [ne_label, ndata_batch]
58
+ int64_t ibatch);
59
+
60
+ // ====== Model / Context ======
61
+
62
+ enum ggml_opt_build_type {
63
+ GGML_OPT_BUILD_TYPE_FORWARD,
64
+ GGML_OPT_BUILD_TYPE_GRAD,
65
+ GGML_OPT_BUILD_TYPE_OPT,
66
+ };
67
+
68
+ // parameters that control which optimizer is used and how said optimizer tries to find the minimal loss
69
+ struct ggml_opt_optimizer_params {
70
+ // AdamW optimizer parameters
71
+ struct {
72
+ float alpha; // learning rate
73
+ float beta1;
74
+ float beta2;
75
+ float eps; // epsilon for numerical stability
76
+ float wd; // weight decay for AdamW, use 0.0f to disable
77
+ } adamw;
78
+ };
79
+
80
+ // callback to calculate optimizer parameters prior to a backward pass
81
+ // userdata can be used to pass arbitrary data
82
+ typedef struct ggml_opt_optimizer_params (*ggml_opt_get_optimizer_params)(void * userdata);
83
+
84
+ // returns the default optimizer params (constant)
85
+ // userdata is not used
86
+ GGML_API struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata);
87
+
88
+ // parameters for initializing a new optimization context
89
+ struct ggml_opt_params {
90
+ ggml_backend_sched_t backend_sched; // defines which backends are used to construct the compute graphs
91
+
92
+ struct ggml_context * ctx_compute; // created in user code, holds non-static tensors
93
+
94
+ // the forward graph is defined by inputs and outputs
95
+ // those tensors and all tensors in between are not intended to be reusable between multiple optimization contexts
96
+ struct ggml_tensor * inputs;
97
+ struct ggml_tensor * outputs;
98
+
99
+ enum ggml_opt_loss_type loss_type;
100
+ enum ggml_opt_build_type build_type;
101
+
102
+ int32_t opt_period; // after how many gradient accumulation steps an optimizer step should be done
103
+
104
+ ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters
105
+ void * get_opt_pars_ud; // userdata for calculating optimizer parameters
106
+ };
107
+
108
+ // get parameters for an optimization context with defaults set where possible
109
+ // parameters for which no sensible defaults exist are supplied as arguments to this function
110
+ GGML_API ggml_opt_params ggml_opt_default_params(
111
+ ggml_backend_sched_t backend_sched,
112
+ struct ggml_context * ctx_compute,
113
+ struct ggml_tensor * inputs,
114
+ struct ggml_tensor * outputs,
115
+ enum ggml_opt_loss_type loss_type);
116
+
117
+ GGML_API ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params);
118
+ GGML_API void ggml_opt_free(ggml_opt_context_t opt_ctx);
119
+
120
+ // set gradients to zero, initialize loss, and optionally reset the optimizer
121
+ GGML_API void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer);
122
+
123
+ // get underlying tensors that store data
124
+ GGML_API struct ggml_tensor * ggml_opt_inputs( ggml_opt_context_t opt_ctx); // forward graph input tensor
125
+ GGML_API struct ggml_tensor * ggml_opt_outputs( ggml_opt_context_t opt_ctx); // forward graph output tensor
126
+ GGML_API struct ggml_tensor * ggml_opt_labels( ggml_opt_context_t opt_ctx); // labels to compare outputs against
127
+ GGML_API struct ggml_tensor * ggml_opt_loss( ggml_opt_context_t opt_ctx); // scalar tensor that contains the loss
128
+ GGML_API struct ggml_tensor * ggml_opt_pred( ggml_opt_context_t opt_ctx); // predictions made by outputs
129
+ GGML_API struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx); // number of matching predictions between outputs and labels
130
+
131
+ GGML_API struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node);
132
+
133
+ // ====== Optimization Result ======
134
+
135
+ GGML_API ggml_opt_result_t ggml_opt_result_init();
136
+ GGML_API void ggml_opt_result_free(ggml_opt_result_t result);
137
+ GGML_API void ggml_opt_result_reset(ggml_opt_result_t result);
138
+
139
+ // get data from result, uncertainties are optional and can be ignored by passing NULL
140
+ GGML_API void ggml_opt_result_ndata( ggml_opt_result_t result, int64_t * ndata); // writes 1 value, number of datapoints
141
+ GGML_API void ggml_opt_result_loss( ggml_opt_result_t result, double * loss, double * unc); // writes 1 value
142
+ GGML_API void ggml_opt_result_pred( ggml_opt_result_t result, int32_t * pred); // writes ndata values
143
+ GGML_API void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc); // writes 1 value
144
+
145
+ // ====== Computation ======
146
+
147
+ // do forward pass, increment result if not NULL
148
+ GGML_API void ggml_opt_forward(ggml_opt_context_t opt_ctx, ggml_opt_result_t result);
149
+
150
+ // do forward pass, increment result if not NULL, do backward pass
151
+ GGML_API void ggml_opt_forward_backward(ggml_opt_context_t opt_ctx, ggml_opt_result_t result);
152
+
153
+ // ############################################################################
154
+ // ## The high-level functions start here. They do not depend on any private ##
155
+ // ## functions or structs and can be copied to and adapted for user code. ##
156
+ // ############################################################################
157
+
158
+ // ====== Intended Usage ======
159
+ //
160
+ // 1. Select the appropriate loss for your problem.
161
+ // 2. Create a dataset and set the data for the "data" tensor. Also set the "labels" tensor if your loss needs them.
162
+ // Setting the shard size to 1 will be fine, it's the granularity with which data is shuffled/loaded (bigger values are faster).
163
+ // 3. Create a GGML graph for your model with no_alloc == true. Use two separate contexts for the tensors.
164
+ // The first context should contain the model parameters and inputs and be allocated statically in user code.
165
+ // The second context should contain all other tensors and will be (re)allocated automatically.
166
+ // Due to this automated allocation the data of the second context is not defined when accessed in user code.
167
+ // Note that the second dimension of the inputs/outputs are interpreted as the number of datapoints in those tensors.
168
+ // 4. Call ggml_opt_fit. If you need more control you can use ggml_opt_epoch instead.
169
+
170
+ // signature for a callback while evaluating opt_ctx on dataset, called after an evaluation
171
+ typedef void (*ggml_opt_epoch_callback)(
172
+ bool train, // true after training evaluation, false after validation evaluation
173
+ ggml_opt_context_t opt_ctx,
174
+ ggml_opt_dataset_t dataset,
175
+ ggml_opt_result_t result, // result associated with the dataset subsection
176
+ int64_t ibatch, // number of batches that have been evaluated so far
177
+ int64_t ibatch_max, // total number of batches in this dataset subsection
178
+ int64_t t_start_us); // time at which the evaluation on the dataset subsection was started
179
+
180
+ // do training on front of dataset, do evaluation only on back of dataset
181
+ GGML_API void ggml_opt_epoch(
182
+ ggml_opt_context_t opt_ctx,
183
+ ggml_opt_dataset_t dataset,
184
+ ggml_opt_result_t result_train, // result to increment during training, ignored if NULL
185
+ ggml_opt_result_t result_eval, // result to increment during evaluation, ignored if NULL
186
+ int64_t idata_split, // data index at which to split training and evaluation
187
+ ggml_opt_epoch_callback callback_train,
188
+ ggml_opt_epoch_callback callback_eval);
189
+
190
+ // callback that prints a progress bar on stderr
191
+ GGML_API void ggml_opt_epoch_callback_progress_bar(
192
+ bool train,
193
+ ggml_opt_context_t opt_ctx,
194
+ ggml_opt_dataset_t dataset,
195
+ ggml_opt_result_t result,
196
+ int64_t ibatch,
197
+ int64_t ibatch_max,
198
+ int64_t t_start_us);
199
+
200
+ // fit model defined by inputs and outputs to dataset
201
+ GGML_API void ggml_opt_fit(
202
+ ggml_backend_sched_t backend_sched, // backend scheduler for constructing the compute graphs
203
+ ggml_context * ctx_compute, // context with temporarily allocated tensors to calculate the outputs
204
+ ggml_tensor * inputs, // input tensor with shape [ne_datapoint, ndata_batch]
205
+ ggml_tensor * outputs, // output tensor, must have shape [ne_label, ndata_batch] if labels are used
206
+ ggml_opt_dataset_t dataset, // dataset with data and optionally also labels
207
+ enum ggml_opt_loss_type loss_type, // loss to minimize
208
+ ggml_opt_get_optimizer_params get_opt_pars, // callback to get optimizer params, userdata is pointer to epoch (of type int64_t)
209
+ int64_t nepoch, // how many times the dataset should be iterated over
210
+ int64_t nbatch_logical, // datapoints per optimizer step, must be a multiple of ndata_batch in inputs/outputs
211
+ float val_split, // fraction of the dataset to use for validation, must be in [0.0f, 1.0f)
212
+ bool silent); // whether or not info prints to stderr should be suppressed
213
+
214
+ #ifdef __cplusplus
215
+ }
216
+ #endif
@@ -10,14 +10,18 @@ extern "C" {
10
10
  #define GGML_RPC_MAX_SERVERS 16
11
11
 
12
12
  // backend API
13
- GGML_API ggml_backend_t ggml_backend_rpc_init(const char * endpoint);
14
- GGML_API bool ggml_backend_is_rpc(ggml_backend_t backend);
13
+ GGML_BACKEND_API ggml_backend_t ggml_backend_rpc_init(const char * endpoint);
14
+ GGML_BACKEND_API bool ggml_backend_is_rpc(ggml_backend_t backend);
15
15
 
16
- GGML_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint);
16
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint);
17
17
 
18
- GGML_API void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total);
18
+ GGML_BACKEND_API void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total);
19
19
 
20
- GGML_API void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem);
20
+ GGML_BACKEND_API void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem);
21
+
22
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_reg(void);
23
+
24
+ GGML_BACKEND_API ggml_backend_dev_t ggml_backend_rpc_add_device(const char * endpoint);
21
25
 
22
26
  #ifdef __cplusplus
23
27
  }
@@ -17,26 +17,33 @@ extern "C" {
17
17
  #endif
18
18
 
19
19
  // backend API
20
- GGML_API ggml_backend_t ggml_backend_sycl_init(int device);
20
+ GGML_BACKEND_API ggml_backend_t ggml_backend_sycl_init(int device);
21
+
22
+ GGML_BACKEND_API bool ggml_backend_is_sycl(ggml_backend_t backend);
21
23
 
22
24
  // device buffer
23
- GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device);
25
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device);
24
26
 
25
27
  // split tensor buffer that splits matrices by rows across multiple devices
26
- GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);
28
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);
27
29
 
28
30
  // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
29
- GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);
31
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);
30
32
 
31
- GGML_API void ggml_backend_sycl_print_sycl_devices(void);
32
- GGML_API void ggml_sycl_get_gpu_list(int *id_list, int max_len);
33
- GGML_API void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
34
- GGML_API int ggml_backend_sycl_get_device_count();
35
- GGML_API void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
33
+ GGML_BACKEND_API void ggml_backend_sycl_print_sycl_devices(void);
34
+ GGML_BACKEND_API void ggml_backend_sycl_get_gpu_list(int *id_list, int max_len);
35
+ GGML_BACKEND_API void ggml_backend_sycl_get_device_description(int device,
36
+ char *description,
37
+ size_t description_size);
38
+ GGML_BACKEND_API int ggml_backend_sycl_get_device_count();
39
+ GGML_BACKEND_API void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
36
40
 
37
41
  // SYCL doesn't support registering host memory, keep here for reference
38
- // GGML_API bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
39
- // GGML_API void ggml_backend_sycl_unregister_host_buffer(void * buffer);
42
+ // GGML_BACKEND_API bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
43
+ // GGML_BACKEND_API void ggml_backend_sycl_unregister_host_buffer(void * buffer);
44
+
45
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_sycl_reg(void);
46
+
40
47
  #ifdef __cplusplus
41
48
  }
42
49
  #endif
@@ -10,19 +10,21 @@ extern "C" {
10
10
  #define GGML_VK_NAME "Vulkan"
11
11
  #define GGML_VK_MAX_DEVICES 16
12
12
 
13
- GGML_API void ggml_vk_instance_init(void);
13
+ GGML_BACKEND_API void ggml_vk_instance_init(void);
14
14
 
15
15
  // backend API
16
- GGML_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
16
+ GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
17
17
 
18
- GGML_API bool ggml_backend_is_vk(ggml_backend_t backend);
19
- GGML_API int ggml_backend_vk_get_device_count(void);
20
- GGML_API void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
21
- GGML_API void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
18
+ GGML_BACKEND_API bool ggml_backend_is_vk(ggml_backend_t backend);
19
+ GGML_BACKEND_API int ggml_backend_vk_get_device_count(void);
20
+ GGML_BACKEND_API void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
21
+ GGML_BACKEND_API void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
22
22
 
23
- GGML_API ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
23
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
24
24
  // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
25
- GGML_API ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);
25
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);
26
+
27
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_vk_reg(void);
26
28
 
27
29
  #ifdef __cplusplus
28
30
  }