@fugood/llama.node 0.6.2 → 1.0.0-beta.1

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (378)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -10
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/patches/node-api-headers+1.1.0.patch +0 -26
  27. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  28. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  29. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  30. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  31. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  32. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  33. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  34. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  35. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  36. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  37. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  38. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  39. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  40. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  41. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  42. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  43. package/src/llama.cpp/cmake/common.cmake +0 -35
  44. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  45. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  46. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  47. package/src/llama.cpp/docs/build.md +0 -561
  48. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  49. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  50. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  51. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  53. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  54. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  55. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  56. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  57. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  58. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  59. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  60. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  61. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  62. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  63. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  64. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  68. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  70. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  71. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  72. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  73. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  74. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  75. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  76. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  77. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  79. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  80. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  81. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  82. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  83. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  84. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  85. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  86. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  87. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  88. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  89. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  90. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  91. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  92. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  93. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  94. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  95. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  96. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  97. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  98. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  99. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  100. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  101. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  102. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  103. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  104. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  105. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  106. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  107. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  108. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  109. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  110. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  111. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  112. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  113. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  114. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  115. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  116. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  117. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  118. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  119. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  120. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  121. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  122. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  123. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  125. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  127. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  128. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  129. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  130. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  131. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  132. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  135. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  136. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  137. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  138. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  139. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  140. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  141. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  142. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  143. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  144. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  145. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  146. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  147. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  148. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  149. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  150. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  151. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  153. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  155. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  157. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  159. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  161. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  163. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  167. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  169. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  170. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  172. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  173. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  175. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  177. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  181. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  183. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  185. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  186. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  187. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  189. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  191. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  193. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  195. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  196. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  198. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  199. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  200. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  204. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  205. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  206. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  207. package/src/llama.cpp/models/.editorconfig +0 -1
  208. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  212. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  214. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  217. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  225. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  228. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  231. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  233. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  234. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  237. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  240. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  242. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  245. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  248. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  250. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  253. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  256. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  258. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  261. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  262. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  263. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  264. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  265. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  266. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  267. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  268. package/src/llama.cpp/prompts/assistant.txt +0 -31
  269. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  270. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  271. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  273. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  274. package/src/llama.cpp/prompts/chat.txt +0 -28
  275. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  276. package/src/llama.cpp/prompts/dan.txt +0 -1
  277. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  278. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  279. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  280. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  281. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  284. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  285. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  286. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  287. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  289. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  290. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  291. package/src/llama.cpp/requirements.txt +0 -13
  292. package/src/llama.cpp/scripts/build-info.sh +0 -30
  293. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  294. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  295. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  296. package/src/llama.cpp/tests/get-model.cpp +0 -21
  297. package/src/llama.cpp/tests/get-model.h +0 -2
  298. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  299. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  300. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  301. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  302. package/src/llama.cpp/tests/test-c.c +0 -7
  303. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  304. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  305. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  306. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  307. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  308. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  309. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  310. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  311. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  312. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  313. package/src/llama.cpp/tests/test-log.cpp +0 -39
  314. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  315. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  316. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  317. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  318. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  319. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  320. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  321. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  322. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  323. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  324. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  325. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  326. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  327. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  328. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  329. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  330. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  331. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  332. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  333. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  334. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  335. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  336. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  337. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  338. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  339. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  340. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  341. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  342. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  343. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  344. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  345. package/src/llama.cpp/tools/main/main.cpp +0 -977
  346. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  347. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  348. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  349. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  350. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  351. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  353. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  354. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  355. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  356. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  357. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  358. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  360. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  361. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  362. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  363. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  364. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  365. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  367. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  368. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  369. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  370. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  371. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  372. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  373. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  374. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  375. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  376. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  377. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  378. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/examples/passkey/passkey.cpp
@@ -1,277 +0,0 @@
- #include "arg.h"
- #include "common.h"
- #include "log.h"
- #include "llama.h"
-
- #include <cmath>
- #include <cstdio>
- #include <string>
- #include <vector>
- #include <algorithm>
-
- static void print_usage(int, char ** argv) {
-     LOG("\nexample usage:\n");
-     LOG("\n %s -m model.gguf --junk 250 --pos 90 --keep 32 --grp-attn-n 2 [--seed 1234]\n", argv[0]);
-     LOG("\n");
- }
-
- int main(int argc, char ** argv) {
-     common_params params;
-
-     params.n_junk = 250;
-     params.n_keep = 32;
-     params.i_pos = -1;
-
-     if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PASSKEY, print_usage)) {
-         return 1;
-     }
-
-     common_init();
-
-     int n_junk = params.n_junk;
-     int n_keep = params.n_keep;
-     int n_grp = params.grp_attn_n;
-     int i_pos = params.i_pos;
-
-     if (i_pos == -1) {
-         i_pos = rand() % n_junk;
-     }
-
-     const std::string prompt_prefix = "There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.";
-     const std::string prompt_suffix = " What is the pass key? The pass key is";
-
-     // generate junk text
-     params.prompt = prompt_prefix;
-
-     const int passkey = rand() % 50000 + 1;
-
-     for (int i = 0; i < n_junk; i++) {
-         if (i % n_junk == i_pos) {
-             params.prompt += " The pass key is " + std::to_string(passkey) + ". Remember it. " + std::to_string(passkey) + " is the pass key.";
-         }
-
-         params.prompt += " The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again.";
-     }
-
-     params.prompt += prompt_suffix;
-
-     // init LLM
-
-     llama_backend_init();
-     llama_numa_init(params.numa);
-
-     // initialize the model
-
-     llama_model_params model_params = common_model_params_to_llama(params);
-
-     llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);
-
-     if (model == NULL) {
-         LOG_ERR("%s: unable to load model\n" , __func__);
-         return 1;
-     }
-
-     const llama_vocab * vocab = llama_model_get_vocab(model);
-
-     // initialize the context
-
-     llama_context_params ctx_params = common_context_params_to_llama(params);
-
-     ctx_params.n_ctx = llama_model_n_ctx_train(model)*n_grp + n_keep;
-
-     GGML_ASSERT(ctx_params.n_batch % n_grp == 0 && "n_batch must be divisible by n_grp");
-
-     llama_context * ctx = llama_init_from_model(model, ctx_params);
-     if (ctx == NULL) {
-         LOG_ERR("%s: failed to create the llama_context\n" , __func__);
-         return 1;
-     }
-
-     auto sparams = llama_sampler_chain_default_params();
-
-     llama_sampler * smpl = llama_sampler_chain_init(sparams);
-
-     llama_sampler_chain_add(smpl, llama_sampler_init_greedy());
-
-     // tokenize the prompt
-     std::vector<llama_token> tokens_list;
-     tokens_list = common_tokenize(ctx, params.prompt, true);
-
-     // tokenize the prefix and use it as a sink
-     const int n_tokens_prefix = common_tokenize(ctx, prompt_prefix, true).size();
-
-     const int n_tokens_all = tokens_list.size();
-
-     // we leave a margin of 16 tokens for the generated text - it should contain just the passkey
-     const int n_predict = 16;
-
-     // total length of the sequences including the prompt
-     const int n_len = n_tokens_all + n_predict;
-
-     const int n_ctx = llama_n_ctx(ctx) - n_keep;
-     const int n_kv_req = llama_n_ctx(ctx);
-     const int n_batch = ctx_params.n_batch;
-     const int n_batch_grp = ctx_params.n_batch/n_grp;
-
-     LOG_INF("\n%s: n_len = %d, n_ctx = %d, n_kv_req = %d, n_grp = %d, n_batch = %d, n_junk = %d, i_pos = %d\n", __func__, n_len, n_ctx, n_kv_req, n_grp, n_batch, n_junk, i_pos);
-
-     // print the prompt token-by-token
-
-     LOG_INF("\n");
-     LOG_INF("prefix tokens: %d\n", n_tokens_prefix);
-     LOG_INF("prompt tokens: %d\n", n_tokens_all);
-     //LOG_INF("prompt: %s\n", params.prompt.c_str());
-
-     llama_batch batch = llama_batch_init(params.n_batch, 0, 1);
-
-     int n_past = 0;
-
-     // fill the KV cache
-     for (int i = 0; i < n_ctx; i += n_batch) {
-         if (i > 0 && n_grp > 1) {
-             // if SelfExtend is enabled, we compress the position from the last batch by a factor of n_grp
-             const int ib = i/n_batch - 1;
-             const int bd = n_batch_grp*(n_grp - 1);
-
-             llama_kv_self_seq_add (ctx, 0, n_past - n_batch, n_past, ib*bd);
-             llama_kv_self_seq_div (ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp);
-             llama_kv_self_update (ctx);
-
-             n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1;
-         }
-
-         common_batch_clear(batch);
-
-         for (int j = 0; j < n_batch && i + j < n_tokens_all; j++) {
-             common_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
-         }
-
-         if (i + n_batch >= n_tokens_all) {
-             batch.logits[batch.n_tokens - 1] = true;
-         }
-
-         if (llama_decode(ctx, batch) != 0) {
-             LOG_INF("%s: llama_decode() failed\n", __func__);
-             return 1;
-         }
-
-         LOG_INF("%s: processed: [%6d, %6d)\n", __func__, i, std::min(i + n_batch, n_tokens_all));
-
-         if (i + n_batch >= n_tokens_all) {
-             break;
-         }
-     }
-
-     for (int i = n_ctx; i < n_tokens_all; i += n_batch) {
-         const int n_discard = n_batch;
-
-         LOG_INF("%s: shifting KV cache with %d\n", __func__, n_discard);
-
-         llama_kv_self_seq_rm (ctx, 0, n_keep , n_keep + n_discard);
-         llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard);
-         //llama_kv_self_defrag (ctx);
-         llama_kv_self_update (ctx);
-
-         n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1;
-
-         common_batch_clear(batch);
-
-         for (int j = 0; j < n_batch && i + j < n_tokens_all; j++) {
-             common_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
-         }
-
-         if (i + n_batch >= n_tokens_all) {
-             batch.logits[batch.n_tokens - 1] = true;
-         }
-
-         if (llama_decode(ctx, batch) != 0) {
-             LOG_ERR("%s: llama_decode() failed\n", __func__);
-             return 1;
-         }
-
-         LOG_INF("%s: processed: [%6d, %6d)\n", __func__, i, std::min(i + n_batch, n_tokens_all));
-     }
-
-     {
-         const int n_discard = n_past - n_ctx + n_predict;
-
-         if (n_discard > 0) {
-             LOG_INF("%s: shifting KV cache with %d to free space for the answer\n", __func__, n_discard);
-
-             llama_kv_self_seq_rm (ctx, 0, n_keep , n_keep + n_discard);
-             llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard);
-             //llama_kv_self_defrag (ctx);
-             llama_kv_self_update (ctx);
-
-             n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1;
-         }
-     }
-
-     LOG_INF("\n");
-     LOG_INF("%s: passkey = %d, inserted at position %d / %d (token pos: ~%d)\n", __func__, passkey, i_pos, n_junk, (i_pos * n_tokens_all) / n_junk);
-     LOG_INF("\n");
-
-     // main loop
-
-     int n_cur = n_tokens_all;
-     int n_decode = 0;
-
-     LOG_INF("%s", prompt_suffix.c_str());
-
-     const auto t_main_start = ggml_time_us();
-
-     while (n_cur <= n_len) {
-         // sample the next token
-         {
-             const llama_token new_token_id = llama_sampler_sample(smpl, ctx, batch.n_tokens - 1);
-
-             // is it an end of generation?
-             if (llama_vocab_is_eog(vocab, new_token_id) || n_cur == n_len) {
-                 LOG("\n");
-
-                 break;
-             }
-
-             LOG("%s", common_token_to_piece(ctx, new_token_id).c_str());
-
-             n_decode += 1;
-
-             // prepare the next batch
-             common_batch_clear(batch);
-
-             // push this new token for next evaluation
-             common_batch_add(batch, new_token_id, n_past++, { 0 }, true);
-         }
-
-         n_cur += 1;
-
-         // evaluate the current batch with the transformer model
-         if (llama_decode(ctx, batch)) {
-             LOG_ERR("%s : failed to eval, return code %d\n", __func__, 1);
-             return 1;
-         }
-     }
-
-     LOG("\n");
-
-     const auto t_main_end = ggml_time_us();
-
-     LOG_INF("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
-             __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));
-
-     LOG("\n");
-     llama_perf_context_print(ctx);
-
-     LOG("\n");
-
-     llama_sampler_free(smpl);
-
-     llama_batch_free(batch);
-
-     llama_free(ctx);
-     llama_model_free(model);
-
-     llama_backend_free();
-
-     return 0;
- }
package/src/llama.cpp/examples/retrieval/CMakeLists.txt
@@ -1,5 +0,0 @@
- set(TARGET llama-retrieval)
- add_executable(${TARGET} retrieval.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
- target_compile_features(${TARGET} PRIVATE cxx_std_17)
package/src/llama.cpp/examples/retrieval/retrieval.cpp
@@ -1,304 +0,0 @@
- #include "arg.h"
- #include "common.h"
- #include "log.h"
- #include "llama.h"
-
- #include <algorithm>
- #include <fstream>
- #include <iostream> // TODO: remove me
-
- static void print_usage(int, char ** argv) {
-     LOG("\nexample usage:\n");
-     LOG("\n %s --model ./models/bge-base-en-v1.5-f16.gguf --top-k 3 --context-file README.md --context-file License --chunk-size 100 --chunk-separator .\n", argv[0]);
-     LOG("\n");
- }
-
- struct chunk {
-     // filename
-     std::string filename;
-     // original file position
-     size_t filepos;
-     // original text data
-     std::string textdata;
-     // tokenized text data
-     std::vector<llama_token> tokens;
-     // embedding
-     std::vector<float> embedding;
- };
-
- // chunk file data to chunks of size >= chunk_size
- // chunk_separator is the separator between chunks
- static std::vector<chunk> chunk_file(const std::string & filename, int chunk_size, const std::string & chunk_separator) {
-     std::vector<chunk> chunks;
-     std::ifstream f(filename.c_str());
-
-     if (!f.is_open()) {
-         LOG_ERR("could not open file %s\n", filename.c_str());
-         return chunks;
-     }
-
-     chunk current_chunk;
-     char buffer[1024];
-     int64_t filepos = 0;
-     std::string current;
-     while (f.read(buffer, 1024)) {
-         current += std::string(buffer, f.gcount());
-         size_t pos;
-         while ((pos = current.find(chunk_separator)) != std::string::npos) {
-             current_chunk.textdata += current.substr(0, pos + chunk_separator.size());
-             if ((int) current_chunk.textdata.size() > chunk_size) {
-                 // save chunk
-                 current_chunk.filepos = filepos;
-                 current_chunk.filename = filename;
-                 chunks.push_back(current_chunk);
-                 // update filepos
-                 filepos += (int) current_chunk.textdata.size();
-                 // reset current_chunk
-                 current_chunk = chunk();
-             }
-             current = current.substr(pos + chunk_separator.size());
-         }
-
-     }
-     // add leftover data to last chunk
-     if (current_chunk.textdata.size() > 0) {
-         if (chunks.empty()) {
-             current_chunk.filepos = filepos;
-             current_chunk.filename = filename;
-             chunks.push_back(current_chunk);
-         } else {
-             chunks.back().textdata += current_chunk.textdata;
-         }
-     }
-     f.close();
-     return chunks;
- }
-
- static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
-     size_t n_tokens = tokens.size();
-     for (size_t i = 0; i < n_tokens; i++) {
-         common_batch_add(batch, tokens[i], i, { seq_id }, true);
-     }
- }
-
- static void batch_encode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) {
-     // clear previous kv_cache values (irrelevant for embeddings)
-     llama_kv_self_clear(ctx);
-
-     // run model
-     LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
-     if (llama_encode(ctx, batch) < 0) {
-         LOG_ERR("%s : failed to encode\n", __func__);
-     }
-
-     for (int i = 0; i < batch.n_tokens; i++) {
-         if (!batch.logits[i]) {
-             continue;
-         }
-
-         // try to get sequence embeddings - supported only when pooling_type is not NONE
-         const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
-         if (embd == NULL) {
-             embd = llama_get_embeddings_ith(ctx, i);
-             if (embd == NULL) {
-                 LOG_ERR("%s: failed to get embeddings for token %d\n", __func__, i);
-                 continue;
-             }
-         }
-
-         float * out = output + batch.seq_id[i][0] * n_embd;
-         common_embd_normalize(embd, out, n_embd, 2);
-     }
- }
-
- int main(int argc, char ** argv) {
-     common_params params;
-
-     if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_RETRIEVAL, print_usage)) {
-         return 1;
-     }
-
-     common_init();
-
-     // For BERT models, batch size must be equal to ubatch size
-     params.n_ubatch = params.n_batch;
-     params.embedding = true;
-
-     if (params.chunk_size <= 0) {
-         LOG_ERR("chunk_size must be positive\n");
-         return 1;
-     }
-     if (params.context_files.empty()) {
-         LOG_ERR("context_files must be specified\n");
-         return 1;
-     }
-
-     LOG_INF("processing files:\n");
-     for (auto & context_file : params.context_files) {
-         LOG_INF("%s\n", context_file.c_str());
-     }
-
-     std::vector<chunk> chunks;
-     for (auto & context_file : params.context_files) {
-         std::vector<chunk> file_chunk = chunk_file(context_file, params.chunk_size, params.chunk_separator);
-         chunks.insert(chunks.end(), file_chunk.begin(), file_chunk.end());
-     }
-     LOG_INF("Number of chunks: %zu\n", chunks.size());
-
-     llama_backend_init();
-     llama_numa_init(params.numa);
-
-     // load the model
-     common_init_result llama_init = common_init_from_params(params);
-
-     llama_model * model = llama_init.model.get();
-     llama_context * ctx = llama_init.context.get();
-
-     if (model == NULL) {
-         LOG_ERR("%s: unable to load model\n", __func__);
-         return 1;
-     }
-
-     const llama_vocab * vocab = llama_model_get_vocab(model);
-
-     const int n_ctx_train = llama_model_n_ctx_train(model);
-     const int n_ctx = llama_n_ctx(ctx);
-
-     const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
-     if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
-         LOG_ERR("%s: pooling type NONE not supported\n", __func__);
-         return 1;
-     }
-
-     if (n_ctx > n_ctx_train) {
-         LOG_WRN("%s: warning: model was trained on only %d context tokens (%d specified)\n",
-                 __func__, n_ctx_train, n_ctx);
-     }
-
-     // print system information
-     {
-         LOG_INF("\n");
-         LOG_INF("%s\n", common_params_get_system_info(params).c_str());
-     }
-
-     // max batch size
-     const uint64_t n_batch = params.n_batch;
-     GGML_ASSERT(params.n_batch >= params.n_ctx);
-
-     // tokenize the prompts and trim
-     for (auto & chunk : chunks) {
-         auto inp = common_tokenize(ctx, chunk.textdata, true, false);
-         if (inp.size() > n_batch) {
-             LOG_ERR("%s: chunk size (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
-                     __func__, (long long int) inp.size(), (long long int) n_batch);
-             return 1;
-         }
-         // add eos if not present
-         if (llama_vocab_eos(vocab) >= 0 && (inp.empty() || inp.back() != llama_vocab_eos(vocab))) {
-             inp.push_back(llama_vocab_eos(vocab));
-         }
-         chunk.tokens = inp;
-     }
-
-     // tokenization stats
-     if (params.verbose_prompt) {
-         for (int i = 0; i < (int) chunks.size(); i++) {
-             LOG_INF("%s: prompt %d: '%s'\n", __func__, i, chunks[i].textdata.c_str());
-             LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, chunks[i].tokens.size());
-             for (int j = 0; j < (int) chunks[i].tokens.size(); j++) {
-                 LOG_INF("%6d -> '%s'\n", chunks[i].tokens[j], common_token_to_piece(ctx, chunks[i].tokens[j]).c_str());
-             }
-             LOG_INF("\n\n");
-         }
-     }
-
-     // initialize batch
-     const int n_chunks = chunks.size();
-     struct llama_batch batch = llama_batch_init(n_batch, 0, 1);
-
-     // allocate output
-     const int n_embd = llama_model_n_embd(model);
-     std::vector<float> embeddings(n_chunks * n_embd, 0);
-     float * emb = embeddings.data();
-
-     // break into batches
-     int p = 0; // number of prompts processed already
-     int s = 0; // number of prompts in current batch
-     for (int k = 0; k < n_chunks; k++) {
-         // clamp to n_batch tokens
-         auto & inp = chunks[k].tokens;
-
-         const uint64_t n_toks = inp.size();
-
-         // encode if at capacity
-         if (batch.n_tokens + n_toks > n_batch) {
-             float * out = emb + p * n_embd;
-             batch_encode(ctx, batch, out, s, n_embd);
-             common_batch_clear(batch);
-             p += s;
-             s = 0;
-         }
-
-         // add to batch
-         batch_add_seq(batch, inp, s);
-         s += 1;
-     }
-
-     // final batch
-     float * out = emb + p * n_embd;
-     batch_encode(ctx, batch, out, s, n_embd);
-
-     // save embeddings to chunks
-     for (int i = 0; i < n_chunks; i++) {
-         chunks[i].embedding = std::vector<float>(emb + i * n_embd, emb + (i + 1) * n_embd);
-         // clear tokens as they are no longer needed
-         chunks[i].tokens.clear();
-     }
-
-     struct llama_batch query_batch = llama_batch_init(n_batch, 0, 1);
-
-     // start loop, receive query and return top k similar chunks based on cosine similarity
-     std::string query;
-     while (true) {
-         LOG("Enter query: ");
-         std::getline(std::cin, query);
-         std::vector<int32_t> query_tokens = common_tokenize(ctx, query, true);
-
-         batch_add_seq(query_batch, query_tokens, 0);
-
-         std::vector<float> query_emb(n_embd, 0);
-         batch_encode(ctx, query_batch, query_emb.data(), 1, n_embd);
-
-         common_batch_clear(query_batch);
-
-         // compute cosine similarities
-         {
-             std::vector<std::pair<int, float>> similarities;
-             for (int i = 0; i < n_chunks; i++) {
-                 float sim = common_embd_similarity_cos(chunks[i].embedding.data(), query_emb.data(), n_embd);
-                 similarities.push_back(std::make_pair(i, sim));
-             }
-
-             // sort similarities
-             std::sort(similarities.begin(), similarities.end(), [](const std::pair<int, float> & a, const std::pair<int, float> & b) {
-                 return a.second > b.second;
-             });
-
-             LOG("Top %d similar chunks:\n", params.sampling.top_k);
-             for (int i = 0; i < std::min(params.sampling.top_k, (int) chunks.size()); i++) {
-                 LOG("filename: %s\n", chunks[similarities[i].first].filename.c_str());
-                 LOG("filepos: %lld\n", (long long int) chunks[similarities[i].first].filepos);
-                 LOG("similarity: %f\n", similarities[i].second);
-                 LOG("textdata:\n%s\n", chunks[similarities[i].first].textdata.c_str());
-                 LOG("--------------------\n");
-             }
-         }
-     }
-
-     LOG("\n");
-     llama_perf_context_print(ctx);
-
-     // clean up
-     llama_batch_free(query_batch);
-     llama_backend_free();
- }
package/src/llama.cpp/examples/save-load-state/CMakeLists.txt
@@ -1,5 +0,0 @@
- set(TARGET llama-save-load-state)
- add_executable(${TARGET} save-load-state.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
- target_compile_features(${TARGET} PRIVATE cxx_std_17)