@fugood/llama.node 0.6.2 → 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (378)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -10
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/patches/node-api-headers+1.1.0.patch +0 -26
  27. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  28. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  29. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  30. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  31. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  32. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  33. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  34. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  35. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  36. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  37. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  38. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  39. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  40. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  41. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  42. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  43. package/src/llama.cpp/cmake/common.cmake +0 -35
  44. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  45. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  46. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  47. package/src/llama.cpp/docs/build.md +0 -561
  48. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  49. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  50. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  51. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  53. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  54. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  55. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  56. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  57. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  58. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  59. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  60. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  61. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  62. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  63. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  64. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  68. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  70. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  71. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  72. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  73. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  74. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  75. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  76. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  77. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  79. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  80. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  81. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  82. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  83. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  84. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  85. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  86. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  87. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  88. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  89. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  90. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  91. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  92. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  93. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  94. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  95. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  96. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  97. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  98. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  99. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  100. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  101. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  102. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  103. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  104. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  105. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  106. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  107. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  108. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  109. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  110. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  111. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  112. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  113. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  114. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  115. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  116. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  117. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  118. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  119. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  120. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  121. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  122. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  123. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  125. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  127. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  128. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  129. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  130. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  131. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  132. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  135. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  136. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  137. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  138. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  139. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  140. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  141. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  142. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  143. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  144. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  145. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  146. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  147. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  148. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  149. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  150. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  151. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  153. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  155. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  157. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  159. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  161. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  163. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  167. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  169. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  170. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  172. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  173. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  175. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  177. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  181. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  183. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  185. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  186. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  187. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  189. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  191. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  193. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  195. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  196. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  198. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  199. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  200. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  204. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  205. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  206. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  207. package/src/llama.cpp/models/.editorconfig +0 -1
  208. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  212. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  214. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  217. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  225. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  228. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  231. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  233. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  234. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  237. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  240. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  242. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  245. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  248. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  250. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  253. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  256. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  258. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  261. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  262. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  263. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  264. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  265. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  266. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  267. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  268. package/src/llama.cpp/prompts/assistant.txt +0 -31
  269. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  270. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  271. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  273. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  274. package/src/llama.cpp/prompts/chat.txt +0 -28
  275. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  276. package/src/llama.cpp/prompts/dan.txt +0 -1
  277. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  278. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  279. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  280. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  281. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  284. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  285. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  286. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  287. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  289. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  290. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  291. package/src/llama.cpp/requirements.txt +0 -13
  292. package/src/llama.cpp/scripts/build-info.sh +0 -30
  293. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  294. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  295. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  296. package/src/llama.cpp/tests/get-model.cpp +0 -21
  297. package/src/llama.cpp/tests/get-model.h +0 -2
  298. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  299. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  300. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  301. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  302. package/src/llama.cpp/tests/test-c.c +0 -7
  303. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  304. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  305. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  306. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  307. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  308. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  309. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  310. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  311. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  312. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  313. package/src/llama.cpp/tests/test-log.cpp +0 -39
  314. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  315. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  316. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  317. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  318. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  319. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  320. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  321. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  322. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  323. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  324. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  325. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  326. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  327. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  328. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  329. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  330. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  331. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  332. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  333. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  334. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  335. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  336. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  337. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  338. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  339. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  340. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  341. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  342. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  343. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  344. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  345. package/src/llama.cpp/tools/main/main.cpp +0 -977
  346. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  347. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  348. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  349. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  350. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  351. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  353. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  354. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  355. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  356. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  357. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  358. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  360. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  361. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  362. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  363. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  364. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  365. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  367. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  368. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  369. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  370. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  371. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  372. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  373. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  374. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  375. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  376. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  377. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  378. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/tests/test-tokenizer-0.cpp
@@ -1,312 +0,0 @@
- #include "llama.h"
- #include "common.h"
- #include "console.h"
-
- #include <cstdio>
- #include <string>
- #include <map>
- #include <vector>
- #include <fstream>
- #include <thread>
-
- //static const std::map<std::string, std::vector<llama_token>> & k_tests() {
- //    static std::map<std::string, std::vector<llama_token>> _k_tests = {
- //        { "" , { }, },
- //        { " " , { 220, }, },
- //        { " " , { 256, }, },
- //        { " " , { 262, }, },
- //        { "\t" , { 197, }, },
- //        { "\n" , { 198, }, },
- //        { "\n\n" , { 271, }, },
- //        { "\n\n\n" , { 1432, }, },
- //        { "\t\n" , { 1602, }, },
- //        { "Hello world" , { 9906, 1917, }, },
- //        { " Hello world" , { 22691, 1917, }, },
- //        { "Hello World" , { 9906, 4435, }, },
- //        { " Hello World" , { 22691, 4435, }, },
- //        { " Hello World!" , { 22691, 4435, 0, }, },
- //        { "Hello, world!" , { 9906, 11, 1917, 0, }, },
- //        { " Hello, world!" , { 22691, 11, 1917, 0, }, },
- //        { " this is 🦙.cpp" , { 420, 374, 11410, 99, 247, 13, 11055, }, },
- //        { "w048 7tuijk dsdfhu" , { 86, 23904, 220, 22, 83, 2005, 42908, 11729, 3013, 17156, }, },
- //        { "нещо на Български" , { 79862, 102118, 13373, 64571, 34694, 3114, 112203, 80112, }, },
- //        { "កាន់តែពិសេសអាចខលចេញ" , { 21549, 222, 98629, 241, 45358, 233, 21549, 237, 45358, 224, 21549, 244, 21549, 115, 21549, 253, 45358, 223, 21549, 253, 21549, 95, 98629, 227, 21549, 223, 21549, 249, 21549, 227, 45358, 223, 21549, 231, }, },
- //        { "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)", { 9468, 248, 222, 320, 8416, 8, 27623, 114, 102470, 9468, 234, 104, 31643, 320, 36773, 100166, 98634, 8, 26602, 227, 320, 3323, 43465, 430, 706, 1202, 1866, 4037, 8, }, },
- //        { "Hello" , { 9906, }, },
- //        { " Hello" , { 22691, }, },
- //        { " Hello" , { 220, 22691, }, },
- //        { " Hello" , { 256, 22691, }, },
- //        { " Hello" , { 262, 22691, }, },
- //        { " Hello\n Hello" , { 262, 22691, 198, 262, 22691, }, },
- //        { " (" , { 320, }, },
- //        { "\n =" , { 198, 284, }, },
- //        { "' era" , { 6, 11639, }, },
- //        { "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~", { 9906, 11, 379, 65948, 0, 2650, 527, 499, 27623, 223, 949, 37046, 101067, 19000, 23182, 102301, 9263, 18136, 16, 36827, 21909, }, },
- //        { "3" , { 18, }, },
- //        { "33" , { 1644, }, },
- //        { "333" , { 8765, }, },
- //        { "3333" , { 8765, 18, }, },
- //        { "33333" , { 8765, 1644, }, },
- //        { "333333" , { 8765, 8765, }, },
- //        { "3333333" , { 8765, 8765, 18, }, },
- //        { "33333333" , { 8765, 8765, 1644, }, },
- //        { "333333333" , { 8765, 8765, 8765, }, },
- //    };
- //
- //    return _k_tests;
- //}
-
- using llama_tests = std::map<std::string, std::vector<llama_token>>;
-
- static llama_tests read_tests(const std::string & fname_inp, const std::string & fname_out) {
-     llama_tests tests;
-
-     std::ifstream ifs_inp(fname_inp);
-     if (!ifs_inp) {
-         fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_inp.c_str());
-         return tests;
-     }
-
-     std::string sraw((std::istreambuf_iterator<char>(ifs_inp)), std::istreambuf_iterator<char>());
-
-     std::ifstream ifs_out(fname_out);
-     if (!ifs_out) {
-         fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
-         return tests;
-     }
-
-     std::vector<std::string> sout;
-     for (std::string line; std::getline(ifs_out, line);) {
-         sout.push_back(line);
-     }
-
-     const std::string sep = "\n__ggml_vocab_test__\n";
-
-     std::vector<std::string> sinp;
-
-     size_t pos = 0;
-     while (pos < sraw.size()) {
-         const size_t next = sraw.find(sep, pos);
-         if (next == std::string::npos) {
-             sinp.push_back(sraw.substr(pos));
-             break;
-         }
-         sinp.push_back(sraw.substr(pos, next - pos));
-         pos = next + sep.size();
-     }
-
-     if (sinp.size() != sout.size()) {
-         fprintf(stderr, "%s : error: input and output files have different number of tests\n", __func__);
-         return tests;
-     }
-
-     for (size_t i = 0; i < sinp.size(); ++i) {
-         const std::string & s = sinp[i];
-         const std::string & o = string_strip(sout[i]);
-
-         std::vector<llama_token> toks;
-
-         size_t pos = 0;
-         while (pos < o.size()) {
-             size_t next = o.find(' ', pos);
-             if (next == std::string::npos) {
-                 next = o.size();
-             }
-             const std::string stok = o.substr(pos, next - pos);
-             toks.push_back(std::stoi(stok));
-             pos = next + 1;
-         }
-
-         tests[s] = toks;
-     }
-
-     return tests;
- }
-
- int main(int argc, char **argv) {
-     if (argc < 2) {
-         fprintf(stderr, "Usage: %s vocab-file [text-file]\n", argv[0]);
-         return 1;
-     }
-
-     const std::string fname = argv[1];
-
-     const std::string fname_inp = fname + ".inp";
-     const std::string fname_out = fname + ".out";
-
-     std::string fname_text;
-     if (argc > 2) {
-         fname_text = argv[2];
-     }
-
-     fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
-
-     llama_model * model;
-     llama_context * ctx;
-
-     llama_backend_init();
-
-     // load the vocab
-     {
-         auto mparams = llama_model_default_params();
-
-         mparams.vocab_only = true;
-
-         model = llama_model_load_from_file(fname.c_str(), mparams);
-
-         if (model == NULL) {
-             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-             return 1;
-         }
-
-         auto cparams = llama_context_default_params();
-
-         ctx = llama_init_from_model(model, cparams);
-
-         if (ctx == NULL) {
-             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-             llama_model_free(model);
-             return 1;
-         }
-     }
-
- #ifdef _WIN32
-     // We need this for unicode console support
-     console::init(false, false);
-     atexit([]() { console::cleanup(); });
- #endif
-
-     bool success = true;
-
-     const auto k_tests = [&]() -> llama_tests {
-         if (!fname_text.empty()) {
-             return {};
-         }
-
-         const auto res = read_tests(fname_inp, fname_out);
-
-         if (res.empty()) {
-             fprintf(stderr, "%s : error: no tests found\n", __func__);
-             exit(1);
-         }
-
-         return res;
-     }();
-
-     const bool add_special = false;
-
-     // multi-threaded tokenization
-     const int nthread = std::thread::hardware_concurrency();
-     std::vector<std::thread> threads(nthread);
-
-     for (int i = 0; i < nthread; i++) {
-         threads[i] = std::thread([&, i]() {
-             for (const auto & test_kv : k_tests) {
-                 const std::vector<llama_token> res = common_tokenize(ctx, test_kv.first, add_special, false);
-
-                 // here only print the result of the first thread
-                 // because the other threads are running the same tests
-                 if (i != 0) {
-                     continue;
-                 }
-
-                 printf("\n");
-                 printf("src: '%s'\n", test_kv.first.c_str());
-                 printf("res: '%s'\n", common_detokenize(ctx, res).c_str());
-                 printf("tok: ");
-                 for (const auto & tok : res) {
-                     printf("%d ", tok);
-                 }
-                 printf("\n");
-
-                 bool correct = res.size() == test_kv.second.size();
-                 for (int i = 0; i < (int) res.size() && correct; ++i) {
-                     if (test_kv.second[i] != res[i]) {
-                         correct = false;
-                     }
-                 }
-
-                 if (!correct) {
-                     fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
-                     fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
-                         common_detokenize(ctx, res).c_str(),
-                         common_detokenize(ctx, test_kv.second).c_str());
-                     fprintf(stderr, "%s : expected tokens: ", __func__);
-                     for (const auto & t : test_kv.second) {
-                         fprintf(stderr, "%6d '%s', ", t, common_token_to_piece(ctx, t).c_str());
-                     }
-                     fprintf(stderr, "\n");
-                     fprintf(stderr, "%s : got tokens: ", __func__);
-                     for (const auto & t : res) {
-                         fprintf(stderr, "%6d '%s', ", t, common_token_to_piece(ctx, t).c_str());
-                     }
-                     fprintf(stderr, "\n");
-
-                     success = false;
-                 }
-             }
-         });
-     }
-
-     for (int i = 0; i < nthread; i++) {
-         threads[i].join();
-     }
-
-     // single threaded tokenization
-     if (!fname_text.empty()) {
-         fprintf(stderr, "%s : tokenizing: '%s'\n", __func__, fname_text.c_str());
-
-         std::string text;
-         {
-             std::ifstream ifs(fname_text);
-             if (!ifs) {
-                 fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_text.c_str());
-                 return 1;
-             }
-             text = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>());
-         }
-
-         fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());
-
-         std::vector<llama_token> res;
-
-         {
-             const auto t_start = ggml_time_us();
-
-             res = common_tokenize(ctx, text, add_special, false);
-
-             const auto t_end = ggml_time_us();
-
-             fprintf(stderr, "%s : tokenized in %.3f ms (cpp)\n", __func__, (t_end - t_start) / 1000.0);
-         }
-
-         fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());
-
-         {
-             const std::string fname_out = fname_text + ".tokcpp";
-
-             std::ofstream ofs(fname_out);
-             if (!ofs) {
-                 fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
-                 return 1;
-             }
-
-             for (const auto & tok : res) {
-                 //ofs << tok << " '" << string_strip(llama_detokenize(ctx, std::vector<int>{tok})) << "'" << std::endl;
-                 ofs << tok << "\n";
-             }
-         }
-
-         fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
-     }
-
-     llama_model_free(model);
-     llama_free(ctx);
-
-     llama_backend_free();
-
-     printf("\n");
-     printf("Tests %s\n", success ? "passed" : "failed");
-
-     return success ? 0 : 3;
- }
package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp
@@ -1,155 +0,0 @@
- #include "llama.h"
- #include "common.h"
- #include "console.h"
-
- #include "../src/unicode.h"
-
- #include <cassert>
- #include <codecvt>
- #include <cstdio>
- #include <cstring>
- #include <locale>
- #include <string>
- #include <thread>
- #include <vector>
- #include <atomic>
-
- int main(int argc, char **argv) {
-     if (argc < 2 || argc > 3) {
-         fprintf(stderr, "Usage: %s <vocab-file> [--ignore-merges]\n", argv[0]);
-         return 1;
-     }
-
-     const std::string fname = argv[1];
-     bool ignore_merges = false;
-     if (argc == 3) {
-         if (std::strcmp(argv[2], "--ignore-merges") != 0) {
-             fprintf(stderr, "Usage: %s <vocab-file> [--ignore-merges]\n", argv[0]);
-             return 1;
-         }
-         ignore_merges = true;
-     }
-
-     fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
-
-     if (ignore_merges) {
-         fprintf(stderr, "%s : ignoring merges for tokens inside vocab\n", __func__);
-     }
-
-     llama_model * model;
-     llama_context * ctx;
-
-     llama_backend_init();
-
-     // load the vocab
-     {
-         auto mparams = llama_model_default_params();
-
-         mparams.vocab_only = true;
-
-         model = llama_model_load_from_file(fname.c_str(), mparams);
-
-         if (model == NULL) {
-             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-             return 1;
-         }
-
-         auto cparams = llama_context_default_params();
-
-         ctx = llama_init_from_model(model, cparams);
-
-         if (ctx == NULL) {
-             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-             llama_model_free(model);
-             return 1;
-         }
-     }
-
-     const llama_vocab * vocab = llama_model_get_vocab(model);
-
-     //GGML_ASSERT(llama_vocab_type(vocab) == LLAMA_VOCAB_TYPE_BPE);
-     if (llama_vocab_type(vocab) != LLAMA_VOCAB_TYPE_BPE) {
-         return 99;
-     }
-
- #ifdef _WIN32
-     // We need this for unicode console support
-     console::init(false, false);
-     atexit([]() { console::cleanup(); });
- #endif
-
-     const int n_vocab = llama_vocab_n_tokens(vocab);
-
-     for (int i = 0; i < n_vocab; ++i) {
-         std::string str = common_detokenize(ctx, std::vector<int>(1, i));
-         try {
-             auto cps = unicode_cpts_from_utf8(str);
-             std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
-             if (ignore_merges && tokens.size() > 1) {
-                 fprintf(stderr,
-                         "%s : error: token %d detokenizes to '%s'(%zu) but "
-                         "tokenization of this to multiple tokens: [",
-                         __func__, i, str.c_str(), str.length());
-                 fprintf(stderr, "%d", tokens[0]);
-                 for (size_t i = 1; i < tokens.size(); i++) {
-                     fprintf(stderr, ", %d", tokens[i]);
-                 }
-                 fprintf(stderr, "]\n");
-                 return 2;
-             }
-             std::string check = common_detokenize(ctx, tokens);
-             if (check != str) {
-                 fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
-                         __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
-                 return 2;
-             }
-         }
-         catch (const std::invalid_argument &) {
-             //fprintf(stderr, "%s : info: utf8 conversion %d '%s'\n", __func__, i, str.c_str());
-         }
-     }
-
-     // unicode
-     {
-         const int nthread = std::thread::hardware_concurrency();
-
-         std::vector<std::thread> threads(nthread);
-
-         std::atomic_int errcode = {};
-
-         for (int i = 0; i < nthread; ++i) {
-             threads[i] = std::thread([i, nthread, ctx, &errcode]() {
-                 for (uint32_t cp = i; !errcode && cp < 0x00110000; cp += nthread) {
-                     if ((0x0000D800 <= cp && cp <= 0x0000DFFF) || // surrogates \p{Cs}
-                         (0x00040000 <= cp && cp <= 0x000E0000)) { // undefined \p{Cn}
-                         continue;
-                     }
-
-                     std::string str = unicode_cpt_to_utf8(cp);
-                     std::vector<llama_token> tokens = common_tokenize(ctx, str, false);
-                     std::string check = common_detokenize(ctx, tokens);
-                     if (cp != 9601 && str != check) {
-                         fprintf(stderr, "error: codepoint 0x%x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
-                                 cp, check.c_str(), check.length(), str.c_str(), str.length());
-                         errcode = 3;
-                     }
-                 }
-             });
-         }
-
-         for (auto & t : threads) {
-             t.join();
-         }
-
-         if (errcode) {
-             return errcode;
-         }
-     }
-
-     llama_model_free(model);
-     llama_free(ctx);
-
-     llama_backend_free();
-
-     return 0;
- }
package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp
@@ -1,125 +0,0 @@
- #include "llama.h"
- #include "common.h"
- #include "console.h"
-
- #include "../src/unicode.h"
-
- #include <cassert>
- #include <codecvt>
- #include <cstdio>
- #include <cstring>
- #include <locale>
- #include <string>
- #include <thread>
- #include <vector>
- #include <atomic>
-
- int main(int argc, char ** argv) {
-     if (argc < 2) {
-         fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
-         return 1;
-     }
-
-     const std::string fname = argv[1];
-
-     fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
-
-     llama_model * model;
-     llama_context * ctx;
-
-     llama_backend_init();
-
-     // load the vocab
-     {
-         auto mparams = llama_model_default_params();
-
-         mparams.vocab_only = true;
-
-         model = llama_model_load_from_file(fname.c_str(), mparams);
-
-         if (model == NULL) {
-             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-             return 1;
-         }
-
-         auto cparams = llama_context_default_params();
-
-         ctx = llama_init_from_model(model, cparams);
-
-         if (ctx == NULL) {
-             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-             llama_model_free(model);
-             return 1;
-         }
-     }
-
-     const llama_vocab * vocab = llama_model_get_vocab(model);
-
-     //GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
-     if (llama_vocab_type(vocab) != LLAMA_VOCAB_TYPE_SPM) {
-         return 99;
-     }
-
- #ifdef _WIN32
-     // We need this for unicode console support
-     console::init(false, false);
-     atexit([]() { console::cleanup(); });
- #endif
-
-     const int n_vocab = llama_vocab_n_tokens(vocab);
-
-     for (int i = 0; i < n_vocab; ++i) {
-         std::string str = common_detokenize(ctx, std::vector<int>(1, i), true);
-         std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
-         std::string check = common_detokenize(ctx, tokens);
-         if (check != str) {
-             fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
-                     __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
-             return 2;
-         }
-     }
-
-     // unicode
-     {
-         const int nthread = std::thread::hardware_concurrency();
-
-         std::vector<std::thread> threads(nthread);
-
-         std::atomic_int errcode = {};
-
-         for (int i = 0; i < nthread; ++i) {
-             threads[i] = std::thread([i, nthread, ctx, &errcode]() {
-                 for (uint32_t cp = i; !errcode && cp < 0x00110000; cp += nthread) {
-                     if ((0x0000D800 <= cp && cp <= 0x0000DFFF) || // surrogates \p{Cs}
-                         (0x00040000 <= cp && cp <= 0x000E0000)) { // undefined \p{Cn}
-                         continue;
-                     }
-
-                     std::string str = unicode_cpt_to_utf8(cp);
-                     std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
-                     std::string check = common_detokenize(ctx, tokens);
-                     if (cp != 9601 && str != check) {
-                         fprintf(stderr, "error: codepoint 0x%x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
-                                 cp, check.c_str(), check.length(), str.c_str(), str.length());
-                         errcode = 3;
-                     }
-                 }
-             });
-         }
-
-         for (auto & t : threads) {
-             t.join();
-         }
-
-         if(errcode) {
-             return errcode;
-         }
-     }
-
-     llama_model_free(model);
-     llama_free(ctx);
-
-     llama_backend_free();
-
-     return 0;
- }
package/src/llama.cpp/tools/CMakeLists.txt
@@ -1,39 +0,0 @@
- # dependencies
-
- find_package(Threads REQUIRED)
-
- # third-party
-
- # ...
-
- # flags
-
- llama_add_compile_flags()
-
- # tools
-
- if (EMSCRIPTEN)
- else()
-     add_subdirectory(batched-bench)
-     add_subdirectory(gguf-split)
-     add_subdirectory(imatrix)
-     add_subdirectory(llama-bench)
-     add_subdirectory(main)
-     add_subdirectory(perplexity)
-     add_subdirectory(quantize)
-     if (LLAMA_BUILD_SERVER)
-         add_subdirectory(server)
-     endif()
-     add_subdirectory(run)
-     add_subdirectory(tokenize)
-     add_subdirectory(tts)
-     add_subdirectory(mtmd)
-     if (GGML_RPC)
-         add_subdirectory(rpc)
-     endif()
-     if (NOT GGML_BACKEND_DL)
-         # these examples use the backends directly and cannot be built with dynamic loading
-         add_subdirectory(cvector-generator)
-         add_subdirectory(export-lora)
-     endif()
- endif()
package/src/llama.cpp/tools/batched-bench/CMakeLists.txt
@@ -1,5 +0,0 @@
- set(TARGET llama-batched-bench)
- add_executable(${TARGET} batched-bench.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
- target_compile_features(${TARGET} PRIVATE cxx_std_17)