@fugood/llama.node 0.6.3 → 1.0.0-beta.1

This diff compares publicly available package versions as released to their respective public registries; it is provided for informational purposes only.
Files changed (377)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -7
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  27. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  28. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  29. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  30. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  31. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  32. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  33. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  34. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  35. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  36. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  37. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  38. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  39. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  40. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  41. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  42. package/src/llama.cpp/cmake/common.cmake +0 -35
  43. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  44. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  45. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  46. package/src/llama.cpp/docs/build.md +0 -561
  47. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  48. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  49. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  50. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  51. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  53. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  54. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  55. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  56. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  57. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  58. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  59. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  60. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  61. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  62. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  63. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  64. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  68. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  70. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  71. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  72. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  73. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  74. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  75. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  76. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  77. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  79. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  80. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  81. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  82. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  83. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  84. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  85. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  86. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  87. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  88. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  89. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  90. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  91. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  92. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  93. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  94. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  95. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  96. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  97. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  98. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  99. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  100. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  101. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  102. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  103. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  104. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  105. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  106. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  107. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  108. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  109. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  110. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  111. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  112. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  113. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  114. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  115. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  116. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  117. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  118. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  119. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  120. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  121. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  122. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  123. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  125. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  127. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  128. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  129. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  130. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  131. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  132. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  135. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  136. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  137. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  138. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  139. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  140. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  141. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  142. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  143. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  144. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  145. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  146. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  147. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  148. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  149. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  150. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  151. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  153. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  155. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  157. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  159. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  161. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  163. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  167. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  169. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  170. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  172. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  173. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  175. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  177. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  181. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  183. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  185. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  186. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  187. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  189. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  191. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  193. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  195. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  196. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  198. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  199. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  200. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  204. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  205. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  206. package/src/llama.cpp/models/.editorconfig +0 -1
  207. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  208. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  212. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  214. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  217. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  225. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  228. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  231. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  233. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  234. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  237. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  240. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  242. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  245. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  248. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  250. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  253. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  256. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  258. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  261. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  262. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  263. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  264. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  265. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  266. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  267. package/src/llama.cpp/prompts/assistant.txt +0 -31
  268. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  269. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  270. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  271. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  273. package/src/llama.cpp/prompts/chat.txt +0 -28
  274. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  275. package/src/llama.cpp/prompts/dan.txt +0 -1
  276. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  277. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  278. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  279. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  280. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  281. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  284. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  285. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  286. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  287. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  289. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  290. package/src/llama.cpp/requirements.txt +0 -13
  291. package/src/llama.cpp/scripts/build-info.sh +0 -30
  292. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  293. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  294. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  295. package/src/llama.cpp/tests/get-model.cpp +0 -21
  296. package/src/llama.cpp/tests/get-model.h +0 -2
  297. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  298. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  299. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  300. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  301. package/src/llama.cpp/tests/test-c.c +0 -7
  302. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  303. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  304. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  305. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  306. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  307. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  308. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  309. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  310. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  311. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  312. package/src/llama.cpp/tests/test-log.cpp +0 -39
  313. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  314. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  315. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  316. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  317. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  318. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  319. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  320. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  321. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  322. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  323. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  324. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  325. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  326. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  327. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  328. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  329. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  330. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  331. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  332. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  333. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  334. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  335. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  336. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  337. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  338. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  339. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  340. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  341. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  342. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  343. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  344. package/src/llama.cpp/tools/main/main.cpp +0 -977
  345. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  346. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  347. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  348. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  349. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  350. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  351. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  353. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  354. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  355. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  356. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  357. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  358. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  360. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  361. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  362. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  363. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  364. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  365. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  367. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  368. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  369. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  370. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  371. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  372. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  373. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  374. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  375. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  376. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  377. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
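
Two of the deletions are reproduced in full below: entry 340 (package/src/llama.cpp/tools/imatrix/imatrix.cpp) and entry 341 (package/src/llama.cpp/tools/llama-bench/CMakeLists.txt) from the list above. The imatrix tool gathers its statistics through ggml's scheduler eval callback, which visits each graph node twice: first with ask=true to ask whether the callback wants that node's data, then with ask=false once the data is available. A minimal sketch of that two-phase protocol, with a hypothetical my_eval_cb standing in for the tool's ik_collect_imatrix (the callback signature is taken verbatim from the diff below):

    #include "ggml.h"

    // called by the scheduler for every node of the compute graph
    static bool my_eval_cb(struct ggml_tensor * t, bool ask, void * user_data) {
        (void) user_data;
        if (ask) {
            // phase 1: return true to request a second call with this node's data
            return t->op == GGML_OP_MUL_MAT || t->op == GGML_OP_MUL_MAT_ID;
        }
        // phase 2: the node has been computed; t->src[1] holds the input
        // activations (imatrix.cpp copies them to host when the buffer is on GPU)
        // ... accumulate per-column statistics here ...
        return true; // keep executing the graph
    }

imatrix.cpp wires its collector up the same way near the end of main: params.cb_eval = ik_collect_imatrix; params.cb_eval_user_data = NULL;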
package/src/llama.cpp/tools/imatrix/imatrix.cpp
@@ -1,667 +0,0 @@
- #include "arg.h"
- #include "common.h"
- #include "log.h"
- #include "llama.h"
-
- #include <chrono>
- #include <cmath>
- #include <cstdio>
- #include <cstring>
- #include <ctime>
- #include <thread>
- #include <mutex>
- #include <vector>
- #include <fstream>
- #include <unordered_map>
- #include <algorithm>
-
- #if defined(_MSC_VER)
- #pragma warning(disable: 4244 4267) // possible loss of data
- #endif
-
- static void print_usage(int, char ** argv) {
-     LOG("\nexample usage:\n");
-     LOG("\n    %s \\\n"
-             "       -m model.gguf -f some-text.txt [-o imatrix.dat] [--process-output] \\\n"
-             "       [--no-ppl] [--chunk 123] [--output-frequency 10] [--save-frequency 0] \\\n"
-             "       [--in-file imatrix-prev-0.dat --in-file imatrix-prev-1.dat ...] \\\n"
-             "       [--parse-special]\n" , argv[0]);
-     LOG("\n");
- }
-
- struct Stats {
-     std::vector<float> values;
-     std::vector<int> counts;
-     int ncall = 0;
- };
-
- class IMatrixCollector {
- public:
-     IMatrixCollector() = default;
-     void set_params(common_params params) { m_params = std::move(params); }
-     bool collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data);
-     void save_imatrix(int ncall = -1) const;
-     bool load_imatrix(const char * fname);
- private:
-     std::unordered_map<std::string, Stats> m_stats;
-     common_params m_params;
-     std::mutex m_mutex;
-     int m_last_call = 0;
-     std::vector<char> m_src1_data;
-     std::vector<char> m_ids; // the expert ids from ggml_mul_mat_id
- };
-
- // remove any prefix and suffixes from the name
- // CUDA0#blk.0.attn_k.weight#0 => blk.0.attn_k.weight
- static std::string filter_tensor_name(const char * name) {
-     std::string wname;
-     const char * p = strchr(name, '#');
-     if (p != NULL) {
-         p = p + 1;
-         const char * q = strchr(p, '#');
-         if (q != NULL) {
-             wname = std::string(p, q - p);
-         } else {
-             wname = p;
-         }
-     } else {
-         wname = name;
-     }
-     return wname;
- }
-
- bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
-     GGML_UNUSED(user_data);
-
-     const struct ggml_tensor * src0 = t->src[0];
-     const struct ggml_tensor * src1 = t->src[1];
-     std::string wname = filter_tensor_name(src0->name);
-
-     // when ask is true, the scheduler wants to know if we are interested in data from this tensor
-     // if we return true, a follow-up call will be made with ask=false in which we can do the actual collection
-     if (ask) {
-         if (t->op == GGML_OP_MUL_MAT_ID) return true; // collect all indirect matrix multiplications
-         if (t->op != GGML_OP_MUL_MAT) return false;
-         // why are small batches ignored (<16 tokens)?
-         if (src1->ne[1] < 16 || src1->type != GGML_TYPE_F32) return false;
-         if (!(wname.substr(0, 4) == "blk." || (m_params.process_output && wname == "output.weight"))) return false;
-         return true;
-     }
-
-     std::lock_guard<std::mutex> lock(m_mutex);
-
-     // copy the data from the GPU memory if needed
-     const bool is_host = ggml_backend_buffer_is_host(src1->buffer);
-
-     if (!is_host) {
-         const size_t src1_nbytes = ggml_nbytes(src1);
-         m_src1_data.resize(src1_nbytes);
-         ggml_backend_tensor_get(src1, m_src1_data.data(), 0, src1_nbytes);
-     }
-
-     const char * data = is_host ? (const char *) src1->data : m_src1_data.data();
-     GGML_ASSERT(src1->nb[0] == ggml_element_size(src1));
-
-     // this has been adapted to the new format of storing merged experts in a single 3d tensor
-     // ref: https://github.com/ggml-org/llama.cpp/pull/6387
-     if (t->op == GGML_OP_MUL_MAT_ID) {
-         //   ids  -> [n_experts_used, n_tokens]
-         //   src1 -> [cols, n_expert_used, n_tokens]
-         const ggml_tensor * ids = t->src[2];
-         const int n_as = src0->ne[2];
-         const int n_ids = ids->ne[0];
-
-         // the top-k selected expert ids are stored in the ids tensor
-         // for simplicity, always copy ids to host, because it is small
-         // take into account that ids is not contiguous!
-
-         GGML_ASSERT(ids->ne[1] == src1->ne[2]);
-
-         m_ids.resize(ggml_nbytes(ids));
-         ggml_backend_tensor_get(ids, m_ids.data(), 0, ggml_nbytes(ids));
-
-         auto & e = m_stats[wname];
-
-         ++e.ncall;
-
-         if (e.values.empty()) {
-             e.values.resize(src1->ne[0]*n_as, 0);
-             e.counts.resize(src1->ne[0]*n_as, 0);
-         }
-         else if (e.values.size() != (size_t)src1->ne[0]*n_as) {
-             LOG_ERR("%s: inconsistent size for %s (%d vs %d)\n", __func__, wname.c_str(), (int)e.values.size(), (int)src1->ne[0]*n_as);
-             exit(1); //GGML_ABORT("fatal error");
-         }
-         LOG_DBGV(2, "%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, wname.c_str(), ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[2], (int)src1->type);
-         // loop over all possible experts, regardless of whether they are used in the batch
-         for (int ex = 0; ex < n_as; ++ex) {
-             size_t e_start = ex*src1->ne[0];
-
-             for (int idx = 0; idx < n_ids; ++idx) {
-                 for (int row = 0; row < (int)src1->ne[2]; ++row) {
-                     const int excur = *(const int32_t *) (m_ids.data() + row*ids->nb[1] + idx*ids->nb[0]);
-
-                     GGML_ASSERT(excur >= 0 && excur < n_as); // sanity check
-
-                     if (excur != ex) continue;
-
-                     const int64_t i11 = idx % src1->ne[1];
-                     const int64_t i12 = row;
-                     const float * x = (const float *)(data + i11*src1->nb[1] + i12*src1->nb[2]);
-
-                     for (int j = 0; j < (int)src1->ne[0]; ++j) {
-                         e.values[e_start + j] += x[j]*x[j];
-                         e.counts[e_start + j]++;
-                         if (!std::isfinite(e.values[e_start + j])) {
-                             LOG("\n");
-                             LOG_ERR("%f detected in %s\n", e.values[e_start + j], wname.c_str());
-                             exit(1);
-                         }
-                     }
-                 }
-             }
-             if (e.ncall > m_last_call) {
-                 m_last_call = e.ncall;
-                 if (m_last_call % m_params.n_out_freq == 0) {
-                     save_imatrix();
-                 }
-                 if (m_params.n_save_freq > 0 && m_last_call%m_params.n_save_freq == 0) {
-                     save_imatrix(m_last_call);
-                 }
-             }
-         }
-     } else {
-         auto & e = m_stats[wname];
-         if (e.values.empty()) {
-             e.values.resize(src1->ne[0], 0);
-             e.counts.resize(src1->ne[0], 0);
-         }
-         else if (e.values.size() != (size_t)src1->ne[0]) {
-             LOG_ERR("%s: inconsistent size for %s (%d vs %d)\n", __func__, wname.c_str(), (int)e.values.size(), (int)src1->ne[0]);
-             exit(1); //GGML_ABORT("fatal error");
-         }
-         ++e.ncall;
-         LOG_DBGV(2, "%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, wname.c_str(), ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type);
-         for (int row = 0; row < (int)src1->ne[1]; ++row) {
-             const float * x = (const float *) (data + row * src1->nb[1]);
-             for (int j = 0; j < (int)src1->ne[0]; ++j) {
-                 e.values[j] += x[j]*x[j];
-                 e.counts[j]++;
-                 if (!std::isfinite(e.values[j])) {
-                     LOG_ERR("%f detected in %s\n", e.values[j], wname.c_str());
-                     exit(1);
-                 }
-             }
-         }
-         if (e.ncall > m_last_call) {
-             m_last_call = e.ncall;
-             if (m_last_call % m_params.n_out_freq == 0) {
-                 save_imatrix();
-             }
-             if (m_params.n_save_freq > 0 && m_last_call%m_params.n_save_freq == 0) {
-                 save_imatrix(m_last_call);
-             }
-         }
-     }
-
-     return true;
- }
-
- void IMatrixCollector::save_imatrix(int ncall) const {
-     auto fname = m_params.out_file;
-
-     if (ncall > 0) {
-         fname += ".at_";
-         fname += std::to_string(ncall);
-     }
-
-     // avoid writing imatrix entries that do not have full data
-     // this can happen with MoE models where some of the experts end up not being exercised by the provided training data
-
-     int n_entries = 0;
-     std::vector<std::string> to_store;
-
-     bool is_first = true; // for printing
-     for (const auto & kv : m_stats) {
-         const int n_all = kv.second.counts.size();
-
-         if (n_all == 0) {
-             continue;
-         }
-
-         int n_zeros = 0;
-         for (const int c : kv.second.counts) {
-             if (c == 0) {
-                 n_zeros++;
-             }
-         }
-
-         if (n_zeros != 0 && is_first) {
-             LOG_INF("\n");
-             is_first = false;
-         }
-
-         if (n_zeros == n_all) {
-             LOG_WRN("%s: entry '%40s' has no data - skipping\n", __func__, kv.first.c_str());
-             continue;
-         }
-
-         if (n_zeros > 0) {
-             LOG_WRN("%s: entry '%40s' has partial data (%.2f%%) - skipping\n", __func__, kv.first.c_str(), 100.0f * (n_all - n_zeros) / n_all);
-             continue;
-         }
-
-         n_entries++;
-         to_store.push_back(kv.first);
-     }
-
-     if (to_store.size() < m_stats.size()) {
-         LOG_WRN("%s: storing only %zu out of %zu entries\n", __func__, to_store.size(), m_stats.size());
-     }
-
-     std::ofstream out(fname, std::ios::binary);
-     out.write((const char *) &n_entries, sizeof(n_entries));
-     for (const auto & name : to_store) {
-         const auto & stat = m_stats.at(name);
-         int len = name.size();
-         out.write((const char *) &len, sizeof(len));
-         out.write(name.c_str(), len);
-         out.write((const char *) &stat.ncall, sizeof(stat.ncall));
-         int nval = stat.values.size();
-         out.write((const char *) &nval, sizeof(nval));
-         if (nval > 0) {
-             std::vector<float> tmp(nval);
-             for (int i = 0; i < nval; i++) {
-                 tmp[i] = (stat.values[i] / static_cast<float>(stat.counts[i])) * static_cast<float>(stat.ncall);
-             }
-             out.write((const char*)tmp.data(), nval*sizeof(float));
-         }
-     }
-
-     // Write the number of calls the matrix was computed with
-     out.write((const char *) &m_last_call, sizeof(m_last_call));
-
-     // Write the input filename at the end of the file to later on specify it in quantize
-     {
-         int len = m_params.prompt_file.size();
-         out.write((const char *) &len, sizeof(len));
-         out.write(m_params.prompt_file.c_str(), len);
-     }
-
-     LOGV(1, "\n");
-     LOG_DBGV(1, "%s: stored collected data after %d chunks in %s\n", __func__, m_last_call, fname.c_str());
- }
-
- bool IMatrixCollector::load_imatrix(const char * fname) {
-     std::ifstream in(fname, std::ios::binary);
-     if (!in) {
-         LOG_ERR("%s: failed to open %s\n",__func__, fname);
-         return false;
-     }
-     int n_entries;
-     in.read((char*)&n_entries, sizeof(n_entries));
-     if (in.fail() || n_entries < 1) {
-         LOG_ERR("%s: no data in file %s\n", __func__, fname);
-         return false;
-     }
-     for (int i = 0; i < n_entries; ++i) {
-         int len; in.read((char *)&len, sizeof(len));
-         std::vector<char> name_as_vec(len+1);
-         in.read((char *)name_as_vec.data(), len);
-         if (in.fail()) {
-             LOG_ERR("%s: failed reading name for entry %d from %s\n",__func__,i+1, fname);
-             return false;
-         }
-         name_as_vec[len] = 0;
-         std::string name{name_as_vec.data()};
-         auto & e = m_stats[std::move(name)];
-         int ncall;
-         in.read((char*)&ncall, sizeof(ncall));
-         int nval;
-         in.read((char *)&nval, sizeof(nval));
-         if (in.fail() || nval < 1) {
-             LOG_ERR("%s: failed reading number of values for entry %d\n",__func__,i);
-             m_stats = {};
-             return false;
-         }
-
-         if (e.values.empty()) {
-             e.values.resize(nval, 0);
-             e.counts.resize(nval, 0);
-         }
-
-         std::vector<float> tmp(nval);
-         in.read((char*)tmp.data(), nval*sizeof(float));
-         if (in.fail()) {
-             LOG_ERR("%s: failed reading data for entry %d\n",__func__,i);
-             m_stats = {};
-             return false;
-         }
-
-         // Recreate the state as expected by save_imatrix(), and correct for weighted sum.
-         for (int i = 0; i < nval; i++) {
-             e.values[i] += tmp[i];
-             e.counts[i] += ncall;
-         }
-         e.ncall += ncall;
-
-     }
-     return true;
- }
-
- static IMatrixCollector g_collector;
-
- static bool ik_collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
-     return g_collector.collect_imatrix(t, ask, user_data);
- }
-
-
- struct results_log_softmax {
-     double log_softmax;
-     float logit;
-     float prob;
- };
-
- static std::vector<float> softmax(const std::vector<float> & logits) {
-     std::vector<float> probs(logits.size());
-     float max_logit = logits[0];
-     for (float v : logits) {
-         max_logit = std::max(max_logit, v);
-     }
-     double sum_exp = 0.0;
-     for (size_t i = 0; i < logits.size(); i++) {
-         // Subtract the maximum logit value from the current logit value for numerical stability
-         const float logit = logits[i] - max_logit;
-         const float exp_logit = expf(logit);
-         sum_exp += exp_logit;
-         probs[i] = exp_logit;
-     }
-     for (size_t i = 0; i < probs.size(); i++) {
-         probs[i] /= sum_exp;
-     }
-     return probs;
- }
-
- static results_log_softmax log_softmax(int n_vocab, const float * logits, int tok) {
-     float max_logit = logits[0];
-     for (int i = 1; i < n_vocab; ++i) {
-         max_logit = std::max(max_logit, logits[i]);
-     }
-     double sum_exp = 0.0;
-     for (int i = 0; i < n_vocab; ++i) {
-         sum_exp += expf(logits[i] - max_logit);
-     }
-     return {logits[tok] - max_logit - log(sum_exp), logits[tok], expf(logits[tok] - max_logit) / (float) sum_exp};
- }
-
- static void process_logits(
-     int n_vocab, const float * logits, const int * tokens, int n_token, std::vector<std::thread> & workers,
-     double & nll, double & nll2, float * logit_history, float * prob_history) {
-     std::mutex mutex;
-     int counter = 0;
-     auto compute = [&mutex, &counter, &nll, &nll2, logit_history, prob_history, n_vocab, logits, tokens, n_token] () {
-         double local_nll = 0;
-         double local_nll2 = 0;
-         while (true) {
-             std::unique_lock<std::mutex> lock(mutex);
-             int i = counter++;
-             if (i >= n_token) {
-                 nll += local_nll; nll2 += local_nll2;
-                 break;
-             }
-             lock.unlock();
-             const results_log_softmax results = log_softmax(n_vocab, logits + i*n_vocab, tokens[i+1]);
-             const double v = -results.log_softmax;
-             local_nll += v;
-             local_nll2 += v*v;
-
-             logit_history[i] = results.logit;
-             prob_history[i] = results.prob;
-         }
-     };
-     for (auto & w : workers) {
-         w = std::thread(compute);
-     }
-     compute();
-     for (auto & w : workers) {
-         w.join();
-     }
- }
-
- static bool compute_imatrix(llama_context * ctx, const common_params & params) {
-     const llama_model * model = llama_get_model(ctx);
-     const llama_vocab * vocab = llama_model_get_vocab(model);
-
-     const bool add_bos = llama_vocab_get_add_bos(vocab);
-     const int n_ctx = llama_n_ctx(ctx);
-
-     GGML_ASSERT(!llama_vocab_get_add_eos(vocab));
-
-     auto tim1 = std::chrono::high_resolution_clock::now();
-     LOG_INF("%s: tokenizing the input ..\n", __func__);
-
-     std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true, params.parse_special);
-
-     auto tim2 = std::chrono::high_resolution_clock::now();
-     LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
-
-     if (params.i_chunk > 0) {
-         if (size_t((params.i_chunk + 2)*n_ctx) >= tokens.size()) {
-             LOG_ERR("%s: there will not be enough tokens left after removing %d chunks\n", __func__, params.i_chunk);
-             return false;
-         }
-         LOG_INF("%s: removing initial %d chunks (%d tokens)\n", __func__, params.i_chunk, params.i_chunk*n_ctx);
-         tokens.erase(tokens.begin(), tokens.begin() + params.i_chunk*n_ctx);
-     }
-
-     if (int(tokens.size()) < 2*n_ctx) {
-         LOG_ERR("%s: you need at least %d tokens for a context of %d tokens\n", __func__, 2*n_ctx, n_ctx);
-         LOG_ERR("%s: the data file you provided tokenizes to only %zu tokens\n", __func__, tokens.size());
-         return false;
-     }
-
-     std::vector<float> logit_history;
-     std::vector<float> prob_history;
-
-     if (params.compute_ppl) {
-         logit_history.resize(tokens.size());
-         prob_history.resize(tokens.size());
-     }
-
-     const int n_chunk_max = tokens.size() / n_ctx;
-
-     const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
-     const int n_vocab = llama_vocab_n_tokens(vocab);
-     const int n_batch = params.n_batch;
-
-     int count = 0;
-     double nll = 0.0;
-     double nll2 = 0.0;
-
-     LOG_INF("%s: computing over %d chunks with batch_size %d\n", __func__, n_chunk, n_batch);
-
-     std::vector<std::thread> workers(std::thread::hardware_concurrency() - 1);
-
-     const int num_batches = (n_ctx + n_batch - 1) / n_batch;
-
-     std::vector<float> logits;
-     if (params.compute_ppl && num_batches > 1) {
-         logits.reserve((size_t)n_ctx * n_vocab);
-     }
-
-     for (int i = 0; i < n_chunk; ++i) {
-         const int start = i * n_ctx;
-         const int end = start + n_ctx;
-
-         std::vector<float> logits;
-
-         const auto t_start = std::chrono::high_resolution_clock::now();
-
-         // clear the KV cache
-         llama_kv_self_clear(ctx);
-
-         llama_batch batch = llama_batch_init(n_batch, 0, 1);
-
-         for (int j = 0; j < num_batches; ++j) {
-             const int batch_start = start + j * n_batch;
-             const int batch_size = std::min(end - batch_start, n_batch);
-
-             // save original token and restore it after eval
-             const auto token_org = tokens[batch_start];
-
-             // add BOS token for the first batch of each chunk
-             if (add_bos && j == 0) {
-                 tokens[batch_start] = llama_vocab_bos(vocab);
-             }
-
-             common_batch_clear(batch);
-             for (int i = 0; i < batch_size; i++) {
-                 common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
-             }
-
-             if (llama_decode(ctx, batch)) {
-                 LOG_ERR("%s : failed to eval\n", __func__);
-                 llama_batch_free(batch);
-                 return false;
-             }
-
-             // restore the original token in case it was set to BOS
-             tokens[batch_start] = token_org;
-
-             if (params.compute_ppl && num_batches > 1) {
-                 const auto * batch_logits = llama_get_logits(ctx);
-                 logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
-             }
-         }
-
-         llama_batch_free(batch);
-
-         const auto t_end = std::chrono::high_resolution_clock::now();
-
-         if (i == 0) {
-             const float t_total = std::chrono::duration<float>(t_end - t_start).count();
-             LOG_INF("%s: %.2f seconds per pass - ETA ", __func__, t_total);
-             int total_seconds = (int)(t_total * n_chunk);
-             if (total_seconds >= 60*60) {
-                 LOG("%d hours ", total_seconds / (60*60));
-                 total_seconds = total_seconds % (60*60);
-             }
-             LOG("%.2f minutes\n", total_seconds / 60.0);
-         }
-
-         if (params.compute_ppl) {
-             const int first = n_ctx/2;
-             const auto * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
-             process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
-                     workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
-             count += n_ctx - first - 1;
-
-             LOG("[%d]%.4lf,", i + 1, std::exp(nll / count));
-             fflush(stdout);
-
-             logits.clear();
-         }
-     }
-     LOG("\n");
-
-     if (params.compute_ppl) {
-         nll2 /= count;
-         nll /= count;
-         const double ppl = exp(nll);
-         nll2 -= nll * nll;
-         if (nll2 > 0) {
-             nll2 = sqrt(nll2/(count-1));
-             LOG("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl);
-         } else {
-             LOG("Unexpected negative standard deviation of log(prob)\n");
-         }
-     }
-
-     return true;
- }
-
- int main(int argc, char ** argv) {
-     common_params params;
-
-     params.out_file = "imatrix.dat";
-
-     params.n_ctx = 512;
-     params.escape = false;
-
-     if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_IMATRIX, print_usage)) {
-         return 1;
-     }
-
-     common_init();
-
-     params.n_batch = std::min(params.n_batch, params.n_ctx);
-
-     g_collector.set_params(params);
-
-     for (const auto & in_file : params.in_files) {
-         LOG_INF("%s : loading imatrix from '%s'\n", __func__, in_file.c_str());
-         if (!g_collector.load_imatrix(in_file.c_str())) {
-             LOG_ERR("%s : failed to load %s\n", __func__, in_file.c_str());
-             return 1;
-         }
-     }
-
-     if (params.in_files.size() > 1) {
-         LOG_INF("%s : saving combined imatrix to '%s'\n", __func__, params.out_file.c_str());
-         g_collector.save_imatrix();
-     }
-
-     llama_backend_init();
-     llama_numa_init(params.numa);
-
-     // pass the callback to the backend scheduler
-     // it will be executed for each node during the graph computation
-     params.cb_eval = ik_collect_imatrix;
-     params.cb_eval_user_data = NULL;
-     params.warmup = false;
-
-     // init
-     common_init_result llama_init = common_init_from_params(params);
-
-     llama_model * model = llama_init.model.get();
-     llama_context * ctx = llama_init.context.get();
-
-     if (model == nullptr || ctx == nullptr) {
-         LOG_ERR("%s : failed to init\n", __func__);
-         return 1;
-     }
-
-     const int n_ctx_train = llama_model_n_ctx_train(model);
-     if (params.n_ctx > n_ctx_train) {
-         LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n",
-                 __func__, n_ctx_train, params.n_ctx);
-     }
-
-     // print system information
-     {
-         LOG_INF("\n");
-         LOG_INF("%s\n", common_params_get_system_info(params).c_str());
-     }
-
-     if (params.prompt.empty()) {
-         if (params.in_files.empty()) {
-             LOG_ERR("Error: No prompt provided and no precomputed matrices (--in-file) to combine.\n");
-             return 1;
-         }
-         LOG_INF("No prompt provided; combining precomputed matrices only.\n");
-     } else {
-         if (!compute_imatrix(ctx, params)) {
-             return 1;
-         }
-     }
-
-
-     g_collector.save_imatrix();
-
-     LOG("\n");
-     llama_perf_context_print(ctx);
-
-     llama_backend_free();
-
-     return 0;
- }
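
For reference, the arithmetic this removed tool implements, written out (notation mine, derived from the code above): for each weight column j, collect_imatrix accumulates squared activations into values and a contribution count into counts, and save_imatrix writes

    values_j = \sum_t x_{t,j}^2, \qquad \text{stored}_j = \frac{values_j}{counts_j} \cdot n_{call}

The optional perplexity pass uses the numerically stable log-softmax and reports the mean with its standard error:

    \log p(tok) = z_{tok} - z_{max} - \log \sum_i e^{z_i - z_{max}}

    PPL = \exp\Big(\frac{1}{N} \sum_{n=1}^{N} -\log p(tok_n)\Big), \qquad \sigma_{PPL} \approx PPL \cdot \sqrt{\frac{\operatorname{Var}(-\log p)}{N-1}}

which is exactly the "Final estimate: PPL = ... +/- ..." line printed by compute_imatrix.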
package/src/llama.cpp/tools/llama-bench/CMakeLists.txt
@@ -1,5 +0,0 @@
- set(TARGET llama-bench)
- add_executable(${TARGET} llama-bench.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
- target_compile_features(${TARGET} PRIVATE cxx_std_17)