@fugood/llama.node 0.6.2 → 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (378)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -10
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/patches/node-api-headers+1.1.0.patch +0 -26
  27. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  28. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  29. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  30. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  31. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  32. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  33. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  34. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  35. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  36. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  37. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  38. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  39. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  40. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  41. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  42. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  43. package/src/llama.cpp/cmake/common.cmake +0 -35
  44. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  45. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  46. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  47. package/src/llama.cpp/docs/build.md +0 -561
  48. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  49. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  50. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  51. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  53. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  54. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  55. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  56. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  57. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  58. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  59. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  60. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  61. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  62. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  63. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  64. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  68. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  70. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  71. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  72. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  73. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  74. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  75. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  76. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  77. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  79. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  80. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  81. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  82. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  83. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  84. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  85. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  86. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  87. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  88. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  89. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  90. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  91. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  92. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  93. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  94. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  95. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  96. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  97. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  98. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  99. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  100. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  101. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  102. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  103. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  104. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  105. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  106. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  107. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  108. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  109. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  110. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  111. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  112. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  113. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  114. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  115. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  116. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  117. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  118. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  119. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  120. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  121. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  122. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  123. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  125. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  127. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  128. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  129. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  130. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  131. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  132. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  135. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  136. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  137. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  138. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  139. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  140. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  141. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  142. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  143. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  144. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  145. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  146. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  147. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  148. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  149. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  150. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  151. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  153. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  155. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  157. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  159. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  161. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  163. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  167. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  169. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  170. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  172. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  173. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  175. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  177. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  181. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  183. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  185. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  186. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  187. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  189. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  191. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  193. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  195. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  196. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  198. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  199. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  200. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  204. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  205. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  206. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  207. package/src/llama.cpp/models/.editorconfig +0 -1
  208. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  212. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  214. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  217. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  225. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  228. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  231. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  233. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  234. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  237. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  240. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  242. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  245. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  248. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  250. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  253. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  256. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  258. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  261. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  262. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  263. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  264. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  265. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  266. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  267. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  268. package/src/llama.cpp/prompts/assistant.txt +0 -31
  269. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  270. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  271. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  273. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  274. package/src/llama.cpp/prompts/chat.txt +0 -28
  275. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  276. package/src/llama.cpp/prompts/dan.txt +0 -1
  277. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  278. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  279. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  280. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  281. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  284. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  285. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  286. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  287. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  289. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  290. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  291. package/src/llama.cpp/requirements.txt +0 -13
  292. package/src/llama.cpp/scripts/build-info.sh +0 -30
  293. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  294. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  295. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  296. package/src/llama.cpp/tests/get-model.cpp +0 -21
  297. package/src/llama.cpp/tests/get-model.h +0 -2
  298. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  299. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  300. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  301. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  302. package/src/llama.cpp/tests/test-c.c +0 -7
  303. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  304. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  305. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  306. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  307. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  308. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  309. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  310. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  311. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  312. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  313. package/src/llama.cpp/tests/test-log.cpp +0 -39
  314. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  315. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  316. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  317. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  318. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  319. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  320. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  321. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  322. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  323. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  324. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  325. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  326. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  327. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  328. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  329. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  330. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  331. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  332. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  333. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  334. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  335. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  336. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  337. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  338. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  339. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  340. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  341. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  342. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  343. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  344. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  345. package/src/llama.cpp/tools/main/main.cpp +0 -977
  346. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  347. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  348. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  349. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  350. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  351. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  353. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  354. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  355. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  356. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  357. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  358. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  360. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  361. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  362. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  363. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  364. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  365. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  367. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  368. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  369. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  370. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  371. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  372. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  373. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  374. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  375. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  376. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  377. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  378. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092

--- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt
+++ /dev/null
@@ -1,7 +0,0 @@
- -r ./requirements-convert_legacy_llama.txt
- --extra-index-url https://download.pytorch.org/whl/cpu
- torch~=2.2.1; platform_machine != "s390x"
-
- # torch s390x packages can only be found from nightly builds
- --extra-index-url https://download.pytorch.org/whl/nightly
- torch>=0.0.0.dev0; platform_machine == "s390x"

--- package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt
+++ /dev/null
@@ -1,5 +0,0 @@
- numpy~=1.26.4
- sentencepiece~=0.2.0
- transformers>=4.45.1,<5.0.0
- gguf>=0.1.0
- protobuf>=4.21.0,<5.0.0

--- package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt
+++ /dev/null
@@ -1 +0,0 @@
- -r ./requirements-convert_legacy_llama.txt

--- package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt
+++ /dev/null
@@ -1,4 +0,0 @@
- -r ./requirements-convert_hf_to_gguf.txt
- --extra-index-url https://download.pytorch.org/whl/cpu
- # torch s390x packages can only be found from nightly builds
- --extra-index-url https://download.pytorch.org/whl/nightly

--- package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt
+++ /dev/null
@@ -1,3 +0,0 @@
- numpy~=1.26.4
- PySide6~=6.9.0
- gguf>=0.16.0

--- package/src/llama.cpp/requirements/requirements-pydantic.txt
+++ /dev/null
@@ -1,3 +0,0 @@
- docstring_parser~=0.15
- pydantic~=2.6.3
- requests

--- package/src/llama.cpp/requirements/requirements-tool_bench.txt
+++ /dev/null
@@ -1,12 +0,0 @@
- aiohttp~=3.9.3
- pytest~=8.3.3
- huggingface_hub~=0.23.2
- matplotlib~=3.10.0
- numpy~=1.26.4
- openai~=1.55.3
- pandas~=2.2.3
- prometheus-client~=0.20.0
- requests~=2.32.3
- wget~=3.2
- typer~=0.15.1
- seaborn~=0.13.2

--- package/src/llama.cpp/requirements.txt
+++ /dev/null
@@ -1,13 +0,0 @@
- # These requirements include all dependencies for all top-level python scripts
- # for llama.cpp. Avoid adding packages here directly.
- #
- # Package versions must stay compatible across all top-level python scripts.
- #
-
- -r ./requirements/requirements-convert_legacy_llama.txt
-
- -r ./requirements/requirements-convert_hf_to_gguf.txt
- -r ./requirements/requirements-convert_hf_to_gguf_update.txt
- -r ./requirements/requirements-convert_llama_ggml_to_gguf.txt
- -r ./requirements/requirements-convert_lora_to_gguf.txt
- -r ./requirements/requirements-tool_bench.txt

--- package/src/llama.cpp/scripts/build-info.sh
+++ /dev/null
@@ -1,30 +0,0 @@
- #!/bin/sh
-
- CC=$1
-
- build_number="0"
- build_commit="unknown"
- build_compiler="unknown"
- build_target="unknown"
-
- if out=$(git rev-list --count HEAD); then
-     # git is broken on WSL so we need to strip extra newlines
-     build_number=$(printf '%s' "$out" | tr -d '\n')
- fi
-
- if out=$(git rev-parse --short HEAD); then
-     build_commit=$(printf '%s' "$out" | tr -d '\n')
- fi
-
- if out=$($CC --version | head -1); then
-     build_compiler=$out
- fi
-
- if out=$($CC -dumpmachine); then
-     build_target=$out
- fi
-
- echo "int LLAMA_BUILD_NUMBER = ${build_number};"
- echo "char const *LLAMA_COMMIT = \"${build_commit}\";"
- echo "char const *LLAMA_COMPILER = \"${build_compiler}\";"
- echo "char const *LLAMA_BUILD_TARGET = \"${build_target}\";"

--- package/src/llama.cpp/scripts/install-oneapi.bat
+++ /dev/null
@@ -1,19 +0,0 @@
- :: MIT license
- :: Copyright (C) 2024 Intel Corporation
- :: SPDX-License-Identifier: MIT
-
-
- set URL=%1
- set COMPONENTS=%2
-
- curl.exe --output %TEMP%\webimage.exe --url %URL% --retry 5 --retry-delay 5
- start /b /wait %TEMP%\webimage.exe -s -x -f webimage_extracted --log extract.log
- del %TEMP%\webimage.exe
- if "%COMPONENTS%"=="" (
-     webimage_extracted\bootstrapper.exe -s --action install --eula=accept -p=NEED_VS2017_INTEGRATION=0 -p=NEED_VS2019_INTEGRATION=0 -p=NEED_VS2022_INTEGRATION=0 --log-dir=.
- ) else (
-     webimage_extracted\bootstrapper.exe -s --action install --components=%COMPONENTS% --eula=accept -p=NEED_VS2017_INTEGRATION=0 -p=NEED_VS2019_INTEGRATION=0 -p=NEED_VS2022_INTEGRATION=0 --log-dir=.
- )
- set installer_exit_code=%ERRORLEVEL%
- rd /s/q "webimage_extracted"
- exit /b %installer_exit_code%

--- package/src/llama.cpp/scripts/xxd.cmake
+++ /dev/null
@@ -1,16 +0,0 @@
- # CMake equivalent of `xxd -i ${INPUT} ${OUTPUT}`
- # Usage: cmake -DINPUT=tools/server/public/index.html -DOUTPUT=tools/server/index.html.hpp -P scripts/xxd.cmake
-
- SET(INPUT "" CACHE STRING "Input File")
- SET(OUTPUT "" CACHE STRING "Output File")
-
- get_filename_component(filename "${INPUT}" NAME)
- string(REGEX REPLACE "\\.|-" "_" name "${filename}")
-
- file(READ "${INPUT}" hex_data HEX)
- string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1," hex_sequence "${hex_data}")
-
- string(LENGTH ${hex_data} hex_len)
- math(EXPR len "${hex_len} / 2")
-
- file(WRITE "${OUTPUT}" "unsigned char ${name}[] = {${hex_sequence}};\nunsigned int ${name}_len = ${len};\n")

--- package/src/llama.cpp/tests/CMakeLists.txt
+++ /dev/null
@@ -1,177 +0,0 @@
- llama_add_compile_flags()
-
- function(llama_build source)
-     if (DEFINED LLAMA_TEST_NAME)
-         set(TEST_TARGET ${LLAMA_TEST_NAME})
-     else()
-         get_filename_component(TEST_TARGET ${source} NAME_WE)
-     endif()
-
-     add_executable(${TEST_TARGET} ${source})
-     target_link_libraries(${TEST_TARGET} PRIVATE common)
-     install(TARGETS ${TEST_TARGET} RUNTIME)
- endfunction()
-
- function(llama_test target)
-     include(CMakeParseArguments)
-     set(options)
-     set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
-     set(multiValueArgs ARGS)
-     cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
-     if (NOT DEFINED LLAMA_TEST_LABEL)
-         set(LLAMA_TEST_LABEL "main")
-     endif()
-     if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
-         set(LLAMA_TEST_WORKING_DIRECTORY .)
-     endif()
-     if (DEFINED LLAMA_TEST_NAME)
-         set(TEST_NAME ${LLAMA_TEST_NAME})
-     else()
-         set(TEST_NAME ${target})
-     endif()
-
-     set(TEST_TARGET ${target})
-
-     add_test(
-         NAME ${TEST_NAME}
-         WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
-         COMMAND $<TARGET_FILE:${TEST_TARGET}>
-         ${LLAMA_TEST_ARGS})
-
-     set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
- endfunction()
-
- # Builds and runs a test source file.
- # Optional args:
- # - NAME: name of the executable & test target (defaults to the source file name without extension)
- # - LABEL: label for the test (defaults to main)
- # - ARGS: arguments to pass to the test executable
- # - WORKING_DIRECTORY
- function(llama_build_and_test source)
-     include(CMakeParseArguments)
-     set(options)
-     set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
-     set(multiValueArgs ARGS)
-     cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
-     if (NOT DEFINED LLAMA_TEST_LABEL)
-         set(LLAMA_TEST_LABEL "main")
-     endif()
-     if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
-         set(LLAMA_TEST_WORKING_DIRECTORY .)
-     endif()
-     if (DEFINED LLAMA_TEST_NAME)
-         set(TEST_TARGET ${LLAMA_TEST_NAME})
-     else()
-         get_filename_component(TEST_TARGET ${source} NAME_WE)
-     endif()
-
-     add_executable(${TEST_TARGET} ${source} get-model.cpp)
-     install(TARGETS ${TEST_TARGET} RUNTIME)
-     target_link_libraries(${TEST_TARGET} PRIVATE common)
-
-     add_test(
-         NAME ${TEST_TARGET}
-         WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
-         COMMAND $<TARGET_FILE:${TEST_TARGET}>
-         ${LLAMA_TEST_ARGS})
-
-     set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${LLAMA_TEST_LABEL})
- endfunction()
-
- # build test-tokenizer-0 target once and add many tests
- llama_build(test-tokenizer-0.cpp)
-
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-coder.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-llm.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-phi-3.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-qwen2.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
- llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
-
- if (LLAMA_LLGUIDANCE)
-     llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
- endif ()
-
- if (NOT WIN32)
-     # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API
-     llama_build_and_test(test-sampling.cpp)
-     llama_build_and_test(test-grammar-parser.cpp)
-     llama_build_and_test(test-grammar-integration.cpp)
-     llama_build_and_test(test-llama-grammar.cpp)
-     llama_build_and_test(test-chat.cpp)
-     # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
-     if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
-         llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
-         target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../tools/server)
-     endif()
-
-     if (NOT GGML_BACKEND_DL)
-         llama_build(test-quantize-stats.cpp)
-     endif()
-
-     llama_build(test-gbnf-validator.cpp)
-
-     # build test-tokenizer-1-bpe target once and add many tests
-     llama_build(test-tokenizer-1-bpe.cpp)
-
-     # TODO: disabled due to slowness
-     #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
-     #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
-     #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
-     #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
-     #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf --ignore-merges)
-     #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
-     #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
-     #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
-
-     # build test-tokenizer-1-spm target once and add many tests
-     llama_build(test-tokenizer-1-spm.cpp)
-
-     llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
-     #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
-
-     # llama_build_and_test(test-double-float.cpp) # SLOW
- endif()
-
- llama_build_and_test(test-log.cpp)
- llama_build_and_test(test-chat-template.cpp)
- llama_build_and_test(test-regex-partial.cpp)
-
- # this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
- if (NOT WIN32)
-     llama_build_and_test(test-arg-parser.cpp)
- endif()
-
- # llama_build_and_test(test-opt.cpp) # SLOW
- llama_build_and_test(test-gguf.cpp)
- llama_build_and_test(test-backend-ops.cpp)
-
- llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
- llama_build_and_test(test-autorelease.cpp LABEL "model")
-
- if (NOT GGML_BACKEND_DL)
-     # these tests use the backends directly and cannot be built with dynamic loading
-     llama_build_and_test(test-barrier.cpp)
-     llama_build_and_test(test-quantize-fns.cpp)
-     llama_build_and_test(test-quantize-perf.cpp)
-     llama_build_and_test(test-rope.cpp)
- endif()
-
- # libmtmd
- set(LLAMA_TEST_NAME test-mtmd-c-api)
- llama_build_and_test(test-mtmd-c-api.c)
- target_link_libraries(${LLAMA_TEST_NAME} PRIVATE mtmd)
-
- # dummy executable - not installed
- get_filename_component(TEST_TARGET test-c.c NAME_WE)
- add_executable(${TEST_TARGET} test-c.c)
- target_link_libraries(${TEST_TARGET} PRIVATE llama)

--- package/src/llama.cpp/tests/get-model.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
- #include <cstdio>
- #include <cstdlib>
- #include <cstring>
-
- #include "get-model.h"
-
- char * get_model_or_exit(int argc, char *argv[]) {
-     char * model_path;
-     if (argc > 1) {
-         model_path = argv[1];
-
-     } else {
-         model_path = getenv("LLAMACPP_TEST_MODELFILE");
-         if (!model_path || strlen(model_path) == 0) {
-             fprintf(stderr, "\033[33mWARNING: No model file provided. Skipping this test. Set LLAMACPP_TEST_MODELFILE=<gguf_model_path> to silence this warning and run this test.\n\033[0m");
-             exit(EXIT_SUCCESS);
-         }
-     }
-
-     return model_path;
- }

--- package/src/llama.cpp/tests/get-model.h
+++ /dev/null
@@ -1,2 +0,0 @@
- #pragma once
- char * get_model_or_exit(int, char*[]);

--- package/src/llama.cpp/tests/test-arg-parser.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
- #include "arg.h"
- #include "common.h"
-
- #include <string>
- #include <vector>
- #include <sstream>
- #include <unordered_set>
-
- #undef NDEBUG
- #include <cassert>
-
- int main(void) {
-     common_params params;
-
-     printf("test-arg-parser: make sure there is no duplicated arguments in any examples\n\n");
-     for (int ex = 0; ex < LLAMA_EXAMPLE_COUNT; ex++) {
-         try {
-             auto ctx_arg = common_params_parser_init(params, (enum llama_example)ex);
-             std::unordered_set<std::string> seen_args;
-             std::unordered_set<std::string> seen_env_vars;
-             for (const auto & opt : ctx_arg.options) {
-                 // check for args duplications
-                 for (const auto & arg : opt.args) {
-                     if (seen_args.find(arg) == seen_args.end()) {
-                         seen_args.insert(arg);
-                     } else {
-                         fprintf(stderr, "test-arg-parser: found different handlers for the same argument: %s", arg);
-                         exit(1);
-                     }
-                 }
-                 // check for env var duplications
-                 if (opt.env) {
-                     if (seen_env_vars.find(opt.env) == seen_env_vars.end()) {
-                         seen_env_vars.insert(opt.env);
-                     } else {
-                         fprintf(stderr, "test-arg-parser: found different handlers for the same env var: %s", opt.env);
-                         exit(1);
-                     }
-                 }
-             }
-         } catch (std::exception & e) {
-             printf("%s\n", e.what());
-             assert(false);
-         }
-     }
-
-     auto list_str_to_char = [](std::vector<std::string> & argv) -> std::vector<char *> {
-         std::vector<char *> res;
-         for (auto & arg : argv) {
-             res.push_back(const_cast<char *>(arg.data()));
-         }
-         return res;
-     };
-
-     std::vector<std::string> argv;
-
-     printf("test-arg-parser: test invalid usage\n\n");
-
-     // missing value
-     argv = {"binary_name", "-m"};
-     assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-
-     // wrong value (int)
-     argv = {"binary_name", "-ngl", "hello"};
-     assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-
-     // wrong value (enum)
-     argv = {"binary_name", "-sm", "hello"};
-     assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-
-     // non-existence arg in specific example (--draft cannot be used outside llama-speculative)
-     argv = {"binary_name", "--draft", "123"};
-     assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_EMBEDDING));
-
-
-     printf("test-arg-parser: test valid usage\n\n");
-
-     argv = {"binary_name", "-m", "model_file.gguf"};
-     assert(true == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-     assert(params.model.path == "model_file.gguf");
-
-     argv = {"binary_name", "-t", "1234"};
-     assert(true == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-     assert(params.cpuparams.n_threads == 1234);
-
-     argv = {"binary_name", "--verbose"};
-     assert(true == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-     assert(params.verbosity > 1);
-
-     argv = {"binary_name", "-m", "abc.gguf", "--predict", "6789", "--batch-size", "9090"};
-     assert(true == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-     assert(params.model.path == "abc.gguf");
-     assert(params.n_predict == 6789);
-     assert(params.n_batch == 9090);
-
-     // --draft cannot be used outside llama-speculative
-     argv = {"binary_name", "--draft", "123"};
-     assert(true == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_SPECULATIVE));
-     assert(params.speculative.n_max == 123);
-
-     // skip this part on windows, because setenv is not supported
- #ifdef _WIN32
-     printf("test-arg-parser: skip on windows build\n");
- #else
-     printf("test-arg-parser: test environment variables (valid + invalid usages)\n\n");
-
-     setenv("LLAMA_ARG_THREADS", "blah", true);
-     argv = {"binary_name"};
-     assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-
-     setenv("LLAMA_ARG_MODEL", "blah.gguf", true);
-     setenv("LLAMA_ARG_THREADS", "1010", true);
-     argv = {"binary_name"};
-     assert(true == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-     assert(params.model.path == "blah.gguf");
-     assert(params.cpuparams.n_threads == 1010);
-
-
-     printf("test-arg-parser: test environment variables being overwritten\n\n");
-
-     setenv("LLAMA_ARG_MODEL", "blah.gguf", true);
-     setenv("LLAMA_ARG_THREADS", "1010", true);
-     argv = {"binary_name", "-m", "overwritten.gguf"};
-     assert(true == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_COMMON));
-     assert(params.model.path == "overwritten.gguf");
-     assert(params.cpuparams.n_threads == 1010);
- #endif // _WIN32
-
-     if (common_has_curl()) {
-         printf("test-arg-parser: test curl-related functions\n\n");
-         const char * GOOD_URL = "https://ggml.ai/";
-         const char * BAD_URL = "https://www.google.com/404";
-         const char * BIG_FILE = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-large-v1.bin";
-
-         {
-             printf("test-arg-parser: test good URL\n\n");
-             auto res = common_remote_get_content(GOOD_URL, {});
-             assert(res.first == 200);
-             assert(res.second.size() > 0);
-             std::string str(res.second.data(), res.second.size());
-             assert(str.find("llama.cpp") != std::string::npos);
-         }
-
-         {
-             printf("test-arg-parser: test bad URL\n\n");
-             auto res = common_remote_get_content(BAD_URL, {});
-             assert(res.first == 404);
-         }
-
-         {
-             printf("test-arg-parser: test max size error\n");
-             common_remote_params params;
-             params.max_size = 1;
-             try {
-                 common_remote_get_content(GOOD_URL, params);
-                 assert(false && "it should throw an error");
-             } catch (std::exception & e) {
-                 printf(" expected error: %s\n\n", e.what());
-             }
-         }
-
-         {
-             printf("test-arg-parser: test timeout error\n");
-             common_remote_params params;
-             params.timeout = 1;
-             try {
-                 common_remote_get_content(BIG_FILE, params);
-                 assert(false && "it should throw an error");
-             } catch (std::exception & e) {
-                 printf(" expected error: %s\n\n", e.what());
-             }
-         }
-     } else {
-         printf("test-arg-parser: no curl, skipping curl-related functions\n");
-     }
-
-     printf("test-arg-parser: all tests OK\n\n");
- }

--- package/src/llama.cpp/tests/test-autorelease.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
- // ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763
-
- #include <cstdio>
- #include <string>
- #include <thread>
-
- #include "llama.h"
- #include "get-model.h"
-
- // This creates a new context inside a pthread and then tries to exit cleanly.
- int main(int argc, char ** argv) {
-     auto * model_path = get_model_or_exit(argc, argv);
-
-     std::thread([&model_path]() {
-         llama_backend_init();
-         auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
-         auto * ctx = llama_init_from_model(model, llama_context_default_params());
-         llama_free(ctx);
-         llama_model_free(model);
-         llama_backend_free();
-     }).join();
-
-     return 0;
- }