@fugood/llama.node 0.6.2 → 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (378)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -10
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/patches/node-api-headers+1.1.0.patch +0 -26
  27. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  28. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  29. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  30. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  31. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  32. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  33. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  34. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  35. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  36. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  37. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  38. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  39. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  40. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  41. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  42. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  43. package/src/llama.cpp/cmake/common.cmake +0 -35
  44. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  45. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  46. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  47. package/src/llama.cpp/docs/build.md +0 -561
  48. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  49. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  50. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  51. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  53. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  54. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  55. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  56. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  57. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  58. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  59. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  60. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  61. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  62. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  63. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  64. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  68. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  70. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  71. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  72. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  73. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  74. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  75. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  76. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  77. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  79. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  80. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  81. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  82. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  83. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  84. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  85. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  86. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  87. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  88. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  89. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  90. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  91. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  92. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  93. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  94. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  95. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  96. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  97. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  98. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  99. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  100. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  101. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  102. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  103. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  104. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  105. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  106. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  107. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  108. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  109. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  110. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  111. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  112. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  113. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  114. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  115. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  116. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  117. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  118. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  119. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  120. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  121. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  122. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  123. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  125. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  127. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  128. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  129. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  130. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  131. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  132. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  135. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  136. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  137. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  138. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  139. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  140. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  141. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  142. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  143. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  144. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  145. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  146. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  147. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  148. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  149. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  150. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  151. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  153. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  155. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  157. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  159. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  161. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  163. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  167. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  169. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  170. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  172. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  173. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  175. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  177. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  181. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  183. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  185. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  186. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  187. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  189. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  191. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  193. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  195. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  196. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  198. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  199. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  200. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  204. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  205. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  206. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  207. package/src/llama.cpp/models/.editorconfig +0 -1
  208. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  212. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  214. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  217. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  225. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  228. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  231. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  233. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  234. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  237. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  240. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  242. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  245. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  248. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  250. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  253. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  256. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  258. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  261. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  262. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  263. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  264. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  265. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  266. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  267. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  268. package/src/llama.cpp/prompts/assistant.txt +0 -31
  269. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  270. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  271. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  273. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  274. package/src/llama.cpp/prompts/chat.txt +0 -28
  275. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  276. package/src/llama.cpp/prompts/dan.txt +0 -1
  277. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  278. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  279. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  280. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  281. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  284. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  285. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  286. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  287. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  289. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  290. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  291. package/src/llama.cpp/requirements.txt +0 -13
  292. package/src/llama.cpp/scripts/build-info.sh +0 -30
  293. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  294. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  295. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  296. package/src/llama.cpp/tests/get-model.cpp +0 -21
  297. package/src/llama.cpp/tests/get-model.h +0 -2
  298. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  299. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  300. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  301. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  302. package/src/llama.cpp/tests/test-c.c +0 -7
  303. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  304. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  305. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  306. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  307. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  308. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  309. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  310. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  311. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  312. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  313. package/src/llama.cpp/tests/test-log.cpp +0 -39
  314. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  315. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  316. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  317. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  318. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  319. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  320. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  321. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  322. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  323. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  324. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  325. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  326. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  327. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  328. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  329. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  330. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  331. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  332. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  333. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  334. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  335. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  336. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  337. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  338. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  339. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  340. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  341. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  342. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  343. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  344. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  345. package/src/llama.cpp/tools/main/main.cpp +0 -977
  346. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  347. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  348. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  349. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  350. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  351. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  353. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  354. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  355. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  356. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  357. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  358. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  360. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  361. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  362. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  363. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  364. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  365. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  367. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  368. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  369. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  370. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  371. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  372. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  373. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  374. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  375. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  376. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  377. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  378. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/examples/parallel/parallel.cpp
@@ -1,492 +0,0 @@
- // A basic application simulating a server with multiple clients.
- // The clients submit requests to the server and they are processed in parallel.
-
- #include "arg.h"
- #include "common.h"
- #include "sampling.h"
- #include "log.h"
- #include "llama.h"
-
- #include <cmath>
- #include <cstdio>
- #include <string>
- #include <vector>
- #include <ctime>
- #include <algorithm>
-
- // trim whitespace from the beginning and end of a string
- static std::string trim(const std::string & str) {
-     size_t start = 0;
-     size_t end = str.size();
-
-     while (start < end && isspace(str[start])) {
-         start += 1;
-     }
-
-     while (end > start && isspace(str[end - 1])) {
-         end -= 1;
-     }
-
-     return str.substr(start, end - start);
- }
-
- static std::string k_system =
- R"(Transcript of a never ending dialog, where the User interacts with an Assistant.
- The Assistant is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
-
- User:
- Recommend a nice restaurant in the area.
- Assistant:
- I recommend the restaurant "The Golden Duck". It is a 5 star restaurant with a great view of the city. The food is delicious and the service is excellent. The prices are reasonable and the portions are generous. The restaurant is located at 123 Main Street, New York, NY 10001. The phone number is (212) 555-1234. The hours are Monday through Friday from 11:00 am to 10:00 pm. The restaurant is closed on Saturdays and Sundays.
- User:
- Who is Richard Feynman?
- Assistant:
- Richard Feynman was an American physicist who is best known for his work in quantum mechanics and particle physics. He was awarded the Nobel Prize in Physics in 1965 for his contributions to the development of quantum electrodynamics. He was a popular lecturer and author, and he wrote several books, including "Surely You're Joking, Mr. Feynman!" and "What Do You Care What Other People Think?".
- )";
-
- static std::vector<std::string> k_questions = {
-     "What is the tallest mountain in the world?",
-     "Who was the first person to win two Nobel Prizes?",
-     "Which country invented paper?",
-     "What organ is primarily responsible for pumping blood throughout the body?",
-     "Which planet is known for its prominent ring system?",
-     "Who directed the movie 'Inception'?",
-     "What is the freezing point of water in Fahrenheit?",
-     "Which animal is known to have the longest lifespan?",
-     "What language has the most native speakers worldwide?",
-     "What is the capital city of Canada?",
-     "Who is credited with inventing the World Wide Web?",
-     "Which metal is liquid at room temperature?",
-     "What is the term for an animal that eats both plants and meat?",
-     "Who painted 'The Starry Night'?",
-     "What gas do humans exhale that plants use for photosynthesis?",
-     "What year did World War II end?",
-     "Which continent has the most countries?",
-     "Who wrote the novel 'Frankenstein'?",
-     "What does DNA stand for?",
-     "What is the main ingredient in traditional Japanese miso soup?"
- };
-
- static std::vector<std::string> k_answers = {
-     "The tallest mountain in the world is Mount Everest.",
-     "Marie Curie was the first person to win two Nobel Prizes.",
-     "Paper was invented in China.",
-     "The heart is the organ responsible for pumping blood.",
-     "Saturn is known for its prominent ring system.",
-     "Christopher Nolan directed the movie 'Inception'.",
-     "The freezing point of water in Fahrenheit is 32°F.",
-     "The bowhead whale is known to have the longest lifespan among mammals.",
-     "Mandarin Chinese has the most native speakers in the world.",
-     "The capital city of Canada is Ottawa.",
-     "Tim Berners-Lee is credited with inventing the World Wide Web.",
-     "Mercury is the metal that is liquid at room temperature.",
-     "An animal that eats both plants and meat is called an omnivore.",
-     "'The Starry Night' was painted by Vincent van Gogh.",
-     "Humans exhale carbon dioxide, which plants use in photosynthesis.",
-     "World War II ended in 1945.",
-     "Africa is the continent with the most countries.",
-     "The novel 'Frankenstein' was written by Mary Shelley.",
-     "DNA stands for Deoxyribonucleic Acid.",
-     "The main ingredient in traditional Japanese miso soup is fermented soybean paste."
- };
-
- static std::vector<std::string> k_prompts = {
-     "What is the meaning of life?",
-     "Tell me an interesting fact about llamas.",
-     "What is the best way to cook a steak?",
-     "Are you familiar with the Special Theory of Relativity and can you explain it to me?",
-     "Recommend some interesting books to read.",
-     "What is the best way to learn a new language?",
-     "How to get a job at Google?",
-     "If you could have any superpower, what would it be?",
-     "I want to learn how to play the piano. What would be the best way to do it?",
- };
-
- struct client {
-     ~client() {
-         if (smpl) {
-             common_sampler_free(smpl);
-         }
-     }
-
-     int32_t id = 0;
-
-     llama_seq_id seq_id = -1;
-
-     llama_token sampled;
-
-     int64_t t_start_prompt;
-     int64_t t_start_gen;
-
-     int32_t n_past = 0;
-     int32_t n_prompt = 0;
-     int32_t n_decoded = 0;
-     int32_t i_batch = -1;
-
-     std::string input;
-     std::string prompt;
-     std::string response;
-
-     struct common_sampler * smpl = nullptr;
- };
-
- static void print_date_time() {
-     std::time_t current_time = std::time(nullptr);
-     std::tm* local_time = std::localtime(&current_time);
-     char buffer[80];
-     strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", local_time);
-
-     LOG_INF("\n");
-     LOG_INF("\033[35mrun parameters as of %s\033[0m\n", buffer);
-     LOG_INF("\n");
- }
-
- // Define a split string function to ...
- static std::vector<std::string> split_string(const std::string& input, char delimiter) {
-     std::vector<std::string> tokens;
-     std::istringstream stream(input);
-     std::string token;
-     while (std::getline(stream, token, delimiter)) {
-         tokens.push_back(token);
-     }
-     return tokens;
- }
-
- int main(int argc, char ** argv) {
-     srand(1234);
-
-     common_params params;
-
-     params.n_predict = 128;
-     params.n_junk = 0;
-
-     if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PARALLEL)) {
-         return 1;
-     }
-
-     common_init();
-
-     // number of simultaneous "clients" to simulate
-     const int32_t n_clients = params.n_parallel;
-
-     // dedicate one sequence to the system prompt
-     params.n_parallel += 1;
-
-     // requests to simulate
-     const int32_t n_seq = params.n_sequences;
-
-     // insert new requests as soon as the previous one is done
-     const bool cont_batching = params.cont_batching;
-
-     // is the system prompt shared in the cache
-     const bool is_sp_shared = params.is_pp_shared;
-
-     // extra text to insert in each client's prompt in order to make it larger
-     const int32_t n_junk = params.n_junk;
-
-     // init llama.cpp
-     llama_backend_init();
-     llama_numa_init(params.numa);
-
-     // load the target model
-     common_init_result llama_init = common_init_from_params(params);
-
-     llama_model * model = llama_init.model.get();
-     llama_context * ctx = llama_init.context.get();
-
-     const llama_vocab * vocab = llama_model_get_vocab(model);
-
-     // load the prompts from an external file if there are any
-     if (params.prompt.empty()) {
-         LOG_INF("\033[32mNo new questions so proceed with build-in defaults.\033[0m\n");
-     } else {
-         // Output each line of the input params.prompts vector and copy to k_prompts
-         int index = 0;
-         LOG_INF("\033[32mNow printing the external prompt file %s\033[0m\n\n", params.prompt_file.c_str());
-
-         std::vector<std::string> prompts = split_string(params.prompt, '\n');
-         for (const auto& prompt : prompts) {
-             k_prompts.resize(index + 1);
-             k_prompts[index] = prompt;
-             index++;
-             LOG_INF("%3d prompt: %s\n", index, prompt.c_str());
-         }
-     }
-
-     LOG_INF("\n\n");
-
-     const int n_ctx = llama_n_ctx(ctx);
-
-     std::vector<client> clients(n_clients);
-     for (size_t i = 0; i < clients.size(); ++i) {
-         auto & client = clients[i];
-         client.id = i;
-         client.smpl = common_sampler_init(model, params.sampling);
-     }
-
-     std::vector<llama_token> tokens_system;
-
-     tokens_system = common_tokenize(ctx, k_system, true);
-     const int32_t n_tokens_system = tokens_system.size();
-
-     llama_seq_id g_seq_id = 0;
-
-     // the max batch size is as large as the context to handle cases where we get very long input prompt from multiple
-     // users. regardless of the size, the main loop will chunk the batch into a maximum of params.n_batch tokens at a time
-     llama_batch batch = llama_batch_init(n_ctx, 0, 1);
-
-     int32_t n_total_prompt = 0;
-     int32_t n_total_gen = 0;
-     int32_t n_cache_miss = 0;
-
-     const auto t_main_start = ggml_time_us();
-
-     LOG_INF("%s: Simulating parallel requests from clients:\n", __func__);
-     LOG_INF("%s: n_parallel = %d, n_sequences = %d, cont_batching = %d, system tokens = %d\n", __func__, n_clients, n_seq, cont_batching, n_tokens_system);
-     LOG_INF("\n");
-
-     if (is_sp_shared) {
-         LOG_INF("%s: Evaluating the system prompt ...\n", __func__);
-
-         for (int32_t i = 0; i < n_tokens_system; ++i) {
-             common_batch_add(batch, tokens_system[i], i, { 0 }, false);
-         }
-
-         if (llama_decode(ctx, batch) != 0) {
-             LOG_ERR("%s: llama_decode() failed\n", __func__);
-             return 1;
-         }
-
-         // assign the system KV cache to all parallel sequences
-         for (int32_t i = 1; i <= n_clients; ++i) {
-             llama_kv_self_seq_cp(ctx, 0, i, -1, -1);
-         }
-
-         LOG_INF("\n");
-     }
-
-     LOG_INF("Processing requests ...\n\n");
-
-     while (true) {
-         common_batch_clear(batch);
-
-         // decode any currently ongoing sequences
-         for (auto & client : clients) {
-             if (client.seq_id == -1) {
-                 continue;
-             }
-
-             client.i_batch = batch.n_tokens;
-
-             common_batch_add(batch, client.sampled, client.n_past++, { client.id + 1 }, true);
-
-             client.n_decoded += 1;
-         }
-
-         if (batch.n_tokens == 0) {
-             // all sequences have ended - clear the entire KV cache
-             for (int i = 1; i <= n_clients; ++i) {
-                 llama_kv_self_seq_rm(ctx, i, -1, -1);
-                 // but keep the system prompt
-                 llama_kv_self_seq_cp(ctx, 0, i, -1, -1);
-             }
-
-             LOG_INF("%s: clearing the KV cache\n", __func__);
-         }
-
-         // insert new sequences for decoding
-         if (cont_batching || batch.n_tokens == 0) {
-             for (auto & client : clients) {
-                 if (client.seq_id == -1 && g_seq_id < n_seq) {
-                     client.seq_id = g_seq_id;
-
-                     client.t_start_prompt = ggml_time_us();
-                     client.t_start_gen = 0;
-
-                     client.input = k_prompts[rand() % k_prompts.size()];
-                     client.response = "";
-
-                     // construct the prompt:
-                     // [system prompt] + [junk] + [user prompt]
-                     client.n_past = 0;
-                     client.prompt = "";
-                     if (is_sp_shared) {
-                         client.n_past = n_tokens_system;
-                     } else {
-                         client.prompt += k_system;
-                     }
-                     for (int i = 0; i < n_junk; ++i) {
-                         const int r = rand() % k_questions.size();
-                         client.prompt += "User:\n" + k_questions[r] + "\nAssistant:\n " + k_answers[r] + "\n";
-                     }
-                     client.prompt += "User:\n" + client.input + "\nAssistant:\n";
-
-                     common_sampler_reset(client.smpl);
-
-                     // do not prepend BOS because we have a system prompt!
-                     std::vector<llama_token> tokens_prompt;
-                     tokens_prompt = common_tokenize(ctx, client.prompt, false);
-
-                     for (size_t i = 0; i < tokens_prompt.size(); ++i) {
-                         common_batch_add(batch, tokens_prompt[i], client.n_past++, { client.id + 1 }, false);
-                     }
-
-                     // extract the logits only for the last token
-                     if (batch.n_tokens > 0) {
-                         batch.logits[batch.n_tokens - 1] = true;
-                     }
-
-                     client.n_prompt = tokens_prompt.size();
-                     client.n_decoded = 0;
-                     client.i_batch = batch.n_tokens - 1;
-
-                     LOG_INF("\033[31mClient %3d, seq %4d, started decoding ...\033[0m\n", client.id, client.seq_id);
-
-                     g_seq_id += 1;
-
-                     // insert new requests one-by-one
-                     //if (cont_batching) {
-                     //    break;
-                     //}
-                 }
-             }
-         }
-
-         if (batch.n_tokens == 0) {
-             break;
-         }
-
-         // process in chunks of params.n_batch
-         int32_t n_batch = params.n_batch;
-
-         for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
-             // experiment: process in powers of 2
-             //if (i + n_batch > (int32_t) batch.n_tokens && n_batch > 32) {
-             //    n_batch /= 2;
-             //    i -= n_batch;
-             //    continue;
-             //}
-
-             const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
-
-             llama_batch batch_view = {
-                 n_tokens,
-                 batch.token + i,
-                 nullptr,
-                 batch.pos + i,
-                 batch.n_seq_id + i,
-                 batch.seq_id + i,
-                 batch.logits + i,
-             };
-
-             const int ret = llama_decode(ctx, batch_view);
-             if (ret != 0) {
-                 if (n_batch == 1 || ret < 0) {
-                     // if you get here, it means the KV cache is full - try increasing it via the context size
-                     LOG_ERR("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret);
-                     return 1;
-                 }
-
-                 LOG_ERR("%s : failed to decode the batch, retrying with n_batch = %d\n", __func__, n_batch / 2);
-
-                 n_cache_miss += 1;
-
-                 // retry with half the batch size to try to find a free slot in the KV cache
-                 n_batch /= 2;
-                 i -= n_batch;
-
-                 continue;
-             }
-
-             LOG_DBG("%s : decoded batch of %d tokens\n", __func__, n_tokens);
-
-             for (auto & client : clients) {
-                 if (client.i_batch < (int) i || client.i_batch >= (int) (i + n_tokens)) {
-                     continue;
-                 }
-
-                 //printf("client %d, seq %d, token %d, pos %d, batch %d\n",
-                 //    client.id, client.seq_id, client.sampled, client.n_decoded, client.i_batch);
-
-                 const llama_token id = common_sampler_sample(client.smpl, ctx, client.i_batch - i);
-
-                 common_sampler_accept(client.smpl, id, true);
-
-                 if (client.n_decoded == 1) {
-                     // start measuring generation time after the first token to make sure all concurrent clients
-                     // have their prompt already processed
-                     client.t_start_gen = ggml_time_us();
-                 }
-
-                 const std::string token_str = common_token_to_piece(ctx, id);
-
-                 client.response += token_str;
-                 client.sampled = id;
-
-                 //printf("client %d, seq %d, token %d, pos %d, batch %d: %s\n",
-                 //    client.id, client.seq_id, id, client.n_decoded, client.i_batch, token_str.c_str());
-
-                 if (client.n_decoded > 2 &&
-                         (llama_vocab_is_eog(vocab, id) ||
-                          (params.n_predict > 0 && client.n_decoded >= params.n_predict) ||
-                          client.response.find("User:") != std::string::npos)) {
-                     // basic reverse prompt
-                     const size_t pos = client.response.find("User:");
-                     if (pos != std::string::npos) {
-                         client.response = client.response.substr(0, pos);
-                     }
-
-                     // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
-                     llama_kv_self_seq_rm(ctx, client.id + 1, -1, -1);
-                     llama_kv_self_seq_cp(ctx, 0, client.id + 1, -1, -1);
-
-                     const auto t_main_end = ggml_time_us();
-
-                     LOG_INF("\033[31mClient %3d, seq %3d/%3d, prompt %4d t, response %4d t, time %5.2f s, speed %5.2f t/s, cache miss %d \033[0m \n\nInput: %s\n\033[35mResponse: %s\033[0m\n\n",
-                             client.id, client.seq_id, n_seq, client.n_prompt, client.n_decoded,
-                             (t_main_end - client.t_start_prompt) / 1e6,
-                             (double) (client.n_prompt + client.n_decoded) / (t_main_end - client.t_start_prompt) * 1e6,
-                             n_cache_miss,
-                             ::trim(client.input).c_str(),
-                             ::trim(client.response).c_str());
-
-                     n_total_prompt += client.n_prompt;
-                     n_total_gen += client.n_decoded;
-
-                     client.seq_id = -1;
-                 }
-
-                 client.i_batch = -1;
-             }
-         }
-     }
-
-     const auto t_main_end = ggml_time_us();
-
-     print_date_time();
-
-     LOG_INF("%s: n_parallel = %d, n_sequences = %d, cont_batching = %d, system tokens = %d\n", __func__, n_clients, n_seq, cont_batching, n_tokens_system);
-     if (params.prompt_file.empty()) {
-         params.prompt_file = "used built-in defaults";
-     }
-     LOG_INF("External prompt file: \033[32m%s\033[0m\n", params.prompt_file.c_str());
-     LOG_INF("Model and path used: \033[32m%s\033[0m\n\n", params.model.path.c_str());
-
-     LOG_INF("Total prompt tokens: %6d, speed: %5.2f t/s\n", n_total_prompt, (double) (n_total_prompt ) / (t_main_end - t_main_start) * 1e6);
-     LOG_INF("Total gen tokens: %6d, speed: %5.2f t/s\n", n_total_gen, (double) (n_total_gen ) / (t_main_end - t_main_start) * 1e6);
-     LOG_INF("Total speed (AVG): %6s speed: %5.2f t/s\n", "", (double) (n_total_prompt + n_total_gen) / (t_main_end - t_main_start) * 1e6);
-     LOG_INF("Cache misses: %6d\n", n_cache_miss);
-
-     LOG_INF("\n");
-
-     // TODO: print sampling/grammar timings for all clients
-     llama_perf_context_print(ctx);
-
-     llama_batch_free(batch);
-
-     llama_backend_free();
-
-     LOG("\n\n");
-
-     return 0;
- }
package/src/llama.cpp/examples/passkey/CMakeLists.txt
@@ -1,5 +0,0 @@
- set(TARGET llama-passkey)
- add_executable(${TARGET} passkey.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
- target_compile_features(${TARGET} PRIVATE cxx_std_17)