@fugood/llama.node 0.6.2 → 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (378)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -10
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/patches/node-api-headers+1.1.0.patch +0 -26
  27. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  28. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  29. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  30. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  31. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  32. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  33. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  34. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  35. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  36. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  37. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  38. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  39. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  40. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  41. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  42. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  43. package/src/llama.cpp/cmake/common.cmake +0 -35
  44. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  45. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  46. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  47. package/src/llama.cpp/docs/build.md +0 -561
  48. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  49. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  50. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  51. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  53. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  54. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  55. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  56. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  57. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  58. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  59. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  60. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  61. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  62. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  63. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  64. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  68. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  70. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  71. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  72. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  73. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  74. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  75. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  76. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  77. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  79. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  80. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  81. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  82. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  83. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  84. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  85. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  86. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  87. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  88. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  89. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  90. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  91. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  92. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  93. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  94. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  95. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  96. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  97. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  98. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  99. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  100. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  101. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  102. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  103. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  104. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  105. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  106. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  107. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  108. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  109. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  110. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  111. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  112. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  113. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  114. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  115. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  116. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  117. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  118. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  119. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  120. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  121. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  122. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  123. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  125. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  127. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  128. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  129. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  130. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  131. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  132. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  135. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  136. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  137. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  138. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  139. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  140. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  141. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  142. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  143. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  144. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  145. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  146. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  147. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  148. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  149. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  150. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  151. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  153. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  155. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  157. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  159. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  161. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  163. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  167. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  169. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  170. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  172. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  173. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  175. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  177. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  181. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  183. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  185. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  186. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  187. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  189. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  191. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  193. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  195. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  196. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  198. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  199. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  200. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  204. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  205. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  206. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  207. package/src/llama.cpp/models/.editorconfig +0 -1
  208. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  212. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  214. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  217. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  225. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  228. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  231. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  233. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  234. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  237. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  240. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  242. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  245. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  248. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  250. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  253. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  256. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  258. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  261. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  262. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  263. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  264. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  265. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  266. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  267. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  268. package/src/llama.cpp/prompts/assistant.txt +0 -31
  269. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  270. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  271. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  273. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  274. package/src/llama.cpp/prompts/chat.txt +0 -28
  275. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  276. package/src/llama.cpp/prompts/dan.txt +0 -1
  277. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  278. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  279. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  280. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  281. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  284. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  285. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  286. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  287. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  289. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  290. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  291. package/src/llama.cpp/requirements.txt +0 -13
  292. package/src/llama.cpp/scripts/build-info.sh +0 -30
  293. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  294. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  295. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  296. package/src/llama.cpp/tests/get-model.cpp +0 -21
  297. package/src/llama.cpp/tests/get-model.h +0 -2
  298. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  299. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  300. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  301. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  302. package/src/llama.cpp/tests/test-c.c +0 -7
  303. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  304. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  305. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  306. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  307. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  308. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  309. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  310. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  311. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  312. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  313. package/src/llama.cpp/tests/test-log.cpp +0 -39
  314. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  315. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  316. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  317. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  318. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  319. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  320. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  321. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  322. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  323. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  324. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  325. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  326. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  327. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  328. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  329. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  330. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  331. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  332. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  333. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  334. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  335. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  336. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  337. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  338. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  339. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  340. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  341. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  342. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  343. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  344. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  345. package/src/llama.cpp/tools/main/main.cpp +0 -977
  346. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  347. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  348. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  349. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  350. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  351. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  353. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  354. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  355. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  356. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  357. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  358. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  360. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  361. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  362. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  363. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  364. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  365. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  367. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  368. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  369. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  370. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  371. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  372. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  373. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  374. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  375. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  376. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  377. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  378. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/examples/speculative/speculative.cpp
@@ -1,644 +0,0 @@
- #include "arg.h"
- #include "common.h"
- #include "sampling.h"
- #include "log.h"
- #include "llama.h"
-
- #include <algorithm>
- #include <cstdio>
- #include <cstring>
- #include <random>
- #include <set>
- #include <string>
- #include <vector>
-
- #define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 128
- #define SPEC_VOCAB_CHECK_START_TOKEN_ID 5
-
- struct seq_draft {
-     bool active = false;
-     bool drafting = false;
-     bool skip = false;
-
-     int i_batch_dft = 0;
-     std::vector<int> i_batch_tgt;
-
-     std::vector<llama_token> tokens;
-     std::vector<std::vector<llama_token_data>> dists;
-
-     struct common_sampler * smpl = nullptr;
- };
-
- int main(int argc, char ** argv) {
-     common_params params;
-
-     // needed to get candidate probs even for temp <= 0.0
-     params.sampling.n_probs = 128;
-
-     if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SPECULATIVE)) {
-         return 1;
-     }
-
-     if (params.n_predict < -1) {
-         LOG_ERR("%s: --n-predict must be >= -1\n", __func__);
-         return 1;
-     }
-
-     common_init();
-
-     if (params.speculative.model.path.empty()) {
-         LOG_ERR("%s: --model-draft is required\n", __func__);
-         return 1;
-     }
-
-     // max number of parallel drafting sequences (i.e. tree branches)
-     const int n_seq_dft = params.n_parallel;
-
-     // probability threshold for splitting a draft branch (only for n_seq_dft > 1)
-     const float p_draft_split = params.speculative.p_split;
-
-     std::default_random_engine rng(params.sampling.seed == LLAMA_DEFAULT_SEED ? std::random_device()() : params.sampling.seed);
-     std::uniform_real_distribution<> u_dist;
-
-     // init llama.cpp
-     llama_backend_init();
-     llama_numa_init(params.numa);
-
-     llama_model * model_tgt = NULL;
-     llama_model * model_dft = NULL;
-
-     llama_context * ctx_tgt = NULL;
-     llama_context * ctx_dft = NULL;
-
-     // load the target model
-     common_init_result llama_init_tgt = common_init_from_params(params);
-
-     model_tgt = llama_init_tgt.model.get();
-     ctx_tgt = llama_init_tgt.context.get();
-
-     // load the draft model
-     params.devices = params.speculative.devices;
-     params.model = params.speculative.model;
-     params.n_gpu_layers = params.speculative.n_gpu_layers;
-     if (params.speculative.cpuparams.n_threads > 0) {
-         params.cpuparams.n_threads = params.speculative.cpuparams.n_threads;
-     }
-
-     params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads;
-     common_init_result llama_init_dft = common_init_from_params(params);
-
-     model_dft = llama_init_dft.model.get();
-     ctx_dft = llama_init_dft.context.get();
-
-     const llama_vocab * vocab_tgt = llama_model_get_vocab(model_tgt);
-     const llama_vocab * vocab_dft = llama_model_get_vocab(model_dft);
-
-     const bool vocab_type_tgt = llama_vocab_type(vocab_tgt);
-     LOG_DBG("vocab_type tgt: %d\n", vocab_type_tgt);
-
-     const bool vocab_type_dft = llama_vocab_type(vocab_dft);
-     LOG_DBG("vocab_type dft: %d\n", vocab_type_dft);
-
-     if (vocab_type_tgt != vocab_type_dft) {
-         LOG_ERR("%s: draft model vocab type must match target model to use speculation but ", __func__);
-         LOG_ERR("vocab_type_dft = %d while vocab_type_tgt = %d\n", vocab_type_dft, vocab_type_tgt);
-         return 1;
-     }
-
-     if (
-         llama_vocab_get_add_bos(vocab_tgt) != llama_vocab_get_add_bos(vocab_dft) ||
-         llama_vocab_get_add_eos(vocab_tgt) != llama_vocab_get_add_eos(vocab_dft) ||
-         llama_vocab_bos(vocab_tgt) != llama_vocab_bos(vocab_dft) ||
-         llama_vocab_eos(vocab_tgt) != llama_vocab_eos(vocab_dft)
-     ) {
-         LOG_ERR("%s: draft model special tokens must match target model to use speculation\n", __func__);
-         return 1;
-     }
-
-     {
-         const int n_vocab_tgt = llama_vocab_n_tokens(vocab_tgt);
-         const int n_vocab_dft = llama_vocab_n_tokens(vocab_dft);
-         const int vocab_diff = n_vocab_tgt > n_vocab_dft
-             ? n_vocab_tgt - n_vocab_dft
-             : n_vocab_dft - n_vocab_tgt;
-
-         if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
-             LOG_ERR("%s: draft model vocab must closely match target model to use speculation but ", __func__);
-             LOG_ERR("target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
-                 n_vocab_tgt, llama_vocab_n_tokens(vocab_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
-             return 1;
-         }
-
-         for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
-             const char * token_text_tgt = llama_vocab_get_text(vocab_tgt, i);
-             const char * token_text_dft = llama_vocab_get_text(vocab_dft, i);
-             if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
-                 LOG_ERR("%s: draft model vocab must match target model to use speculation but ", __func__);
-                 LOG_ERR("token %d content differs - target '%s', draft '%s'\n", i,
-                     common_token_to_piece(ctx_tgt, i).c_str(),
-                     common_token_to_piece(ctx_dft, i).c_str());
-                 return 1;
-             }
-         }
-     }
-
-
-     // Tokenize the prompt
-     std::vector<llama_token> inp;
-     inp = common_tokenize(ctx_tgt, params.prompt, true, true);
-
-     const int max_context_size = llama_n_ctx(ctx_tgt);
-     const int max_tokens_list_size = max_context_size - 4;
-
-     if ((int) inp.size() > max_tokens_list_size) {
-         LOG_ERR("%s: prompt too long (%d tokens, max %d)\n", __func__, (int) inp.size(), max_tokens_list_size);
-         return 1;
-     }
-
-     LOG("\n\n");
-
-     for (auto id : inp) {
-         LOG("%s", common_token_to_piece(ctx_tgt, id).c_str());
-     }
-
-     const int n_input = inp.size();
-
-     const auto t_enc_start = ggml_time_us();
-
-     // eval the prompt with both models
-     llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1));
-     llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(), 1));
-     llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input));
-
-     const auto t_enc_end = ggml_time_us();
-
-     // the 2 models should have the same vocab
-     //GGML_ASSERT(n_vocab == llama_vocab_n_tokens(model_dft));
-
-     // how many tokens to draft each time
-     int n_draft = params.speculative.n_max;
-
-     int n_predict = 0;
-     int n_drafted = 0;
-     int n_accept = 0;
-
-     int n_past_tgt = inp.size();
-     int n_past_dft = inp.size();
-
-     // used to determine end of generation
-     bool has_eos = false;
-
-     // target model sampling context (reuse the llama_context's sampling instance)
-     struct common_sampler * smpl = common_sampler_init(model_tgt, params.sampling);
-
-     // draft sequence data
-     std::vector<seq_draft> drafts(n_seq_dft);
-
-     for (int s = 0; s < n_seq_dft; ++s) {
-         // allocate llama_sampler for each draft sequence
-         drafts[s].smpl = common_sampler_init(model_dft, params.sampling);
-     }
-
-     llama_batch batch_dft = llama_batch_init(llama_n_batch(ctx_dft), 0, 1);
-     llama_batch batch_tgt = llama_batch_init(llama_n_batch(ctx_tgt), 0, n_seq_dft);
-
-     const auto t_dec_start = ggml_time_us();
-
-     // sample from the last token of the prompt
-     drafts[0].i_batch_tgt.resize(1);
-     drafts[0].i_batch_tgt[0] = 0;
-
-     while (true) {
-         std::set<int> active_seqs = {};
-
-         // print current draft sequences
-         for (int s = 0; s < n_seq_dft; ++s) {
-             if (!drafts[s].active) {
-                 continue;
-             }
-
-             active_seqs.insert(s);
-             const auto & tokens = drafts[s].tokens;
-
-             LOG_DBG("draft %d: %s\n", s, string_from(ctx_dft, tokens).c_str());
-         }
-
-         int i_dft = 0;
-         int s_keep = 0;
-
-         llama_token token_id;
-         std::string token_str;
-
-         // loop until we fail to accept a drafted token or we run out of drafted tokens
-         while (true) {
-
-             // check if the target token matches any of the drafts
-             // for stochastic sampling, attempt to match the token with the drafted tokens
-             {
-                 bool accept = false;
-                 if (params.sampling.temp > 0) {
-                     // stochastic verification
-                     common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft], true);
-
-                     auto & dist_tgt = *common_sampler_get_candidates(smpl);
-
-                     float p_tgt = 0.0f;
-                     float p_dft = 0.0f;
-
-                     while (active_seqs.size() > 0) {
-                         // randomly select a sequence to verify from active sequences
-                         std::uniform_int_distribution<unsigned int> u_int_dist(0, active_seqs.size() - 1);
-                         int s = *std::next(active_seqs.begin(), u_int_dist(rng));
-                         if (i_dft >= (int) drafts[s].tokens.size()) {
-                             drafts[s].active = false;
-                             active_seqs.erase(s);
-                             continue;
-                         }
-                         if (accept) {
-                             // if we already accepted a token, we can skip the rest
-                             if (drafts[s].tokens[i_dft] != drafts[s_keep].tokens[i_dft]) {
-                                 drafts[s].active = false;
-                                 active_seqs.erase(s);
-                             }
-                             continue;
-                         }
-
-                         LOG_DBG("verifying sequence #%d at pos #%d from %d active sequence(s)\n", s, i_dft, (int) active_seqs.size());
-                         float r = u_dist(rng);
-                         llama_token_data_array dist_dft = { drafts[s].dists[i_dft].data() , drafts[s].dists[i_dft].size(), LLAMA_TOKEN_NULL, true };
-
-                         //GGML_ASSERT(dist_tgt.size <= dist_dft.size);
-
-                         // acquire the token probabilities assigned by the draft and target models
-                         for (size_t i = 0; i < dist_tgt.size; i++) {
-                             if (dist_tgt.data[i].id == drafts[s].tokens[i_dft]) {
-                                 p_tgt = dist_tgt.data[i].p;
-                                 break;
-                             }
-                         }
-                         for (size_t i = 0; i < dist_dft.size; i++) {
-                             if (dist_dft.data[i].id == drafts[s].tokens[i_dft]) {
-                                 p_dft = dist_dft.data[i].p;
-                                 break;
-                             }
-                         }
-                         LOG_DBG("r = %f, p_dft = %f, p_tgt = %f\n", r, p_dft, p_tgt);
-                         if (r <= p_tgt / p_dft) {
-                             s_keep = s;
-                             accept = true;
-                             token_id = drafts[s].tokens[i_dft];
-                             token_str = common_token_to_piece(ctx_tgt, token_id);
-                             common_sampler_accept(smpl, token_id, true);
-
-                             LOG_DBG("draft token %d of sequence %d (%d, '%s') accepted\n", i_dft, s, token_id, token_str.c_str());
-                             break;
-                         } else {
-                             LOG_DBG("draft token %d of sequence %d (%d, '%s') rejected\n", i_dft, s, drafts[s].tokens[i_dft], common_token_to_piece(ctx_tgt, drafts[s].tokens[i_dft]).c_str());
-                             drafts[s].active = false;
-
-                             // calculate residual probability
-                             GGML_ASSERT(dist_tgt.sorted);
-                             GGML_ASSERT(dist_dft.sorted);
-
-                             // sort dist by id
-                             std::sort(dist_tgt.data, dist_tgt.data + dist_tgt.size, [](const llama_token_data &a, const llama_token_data &b) {
-                                 return a.id < b.id;
-                             });
-                             std::sort(dist_dft.data, dist_dft.data + dist_dft.size, [](const llama_token_data &a, const llama_token_data &b) {
-                                 return a.id < b.id;
-                             });
-
-                             float sum_probs = 0.0f;
-
-                             for (size_t i = 0; i < dist_tgt.size; i++) {
-                                 if (i < dist_dft.size) {
-                                     dist_tgt.data[i].p = std::max(0.0f, dist_tgt.data[i].p - dist_dft.data[i].p);
-                                 } else {
-                                     dist_tgt.data[i].p = std::max(0.0f, dist_tgt.data[i].p);
-                                 }
-
-                                 sum_probs += dist_tgt.data[i].p;
-                             }
-
-                             for (size_t i = 0; i < dist_tgt.size; i++) {
-                                 dist_tgt.data[i].p /= sum_probs;
-                             }
-
-                             // sort dist_tgt by p desc
-                             std::sort(dist_tgt.data, dist_tgt.data + dist_tgt.size, [](const llama_token_data &a, const llama_token_data &b) {
-                                 return a.p > b.p;
-                             });
-                         }
-
-                         active_seqs.erase(s);
-                         for (int i = 0; i < n_seq_dft; i++) {
-                             if (i == s) {
-                                 continue;
-                             }
-                             if (drafts[i].active && drafts[i].tokens[i_dft] == drafts[s].tokens[i_dft]) {
-                                 // synchronize active status for sequences with the same drafted token
-                                 drafts[i].active = drafts[i].active && accept;
-                                 if (!drafts[i].active) {
-                                     active_seqs.erase(s);
-                                 }
-                             }
-                         }
-                     }
-
-                     if (!accept) {
-                         // all drafted tokens were rejected
-                         // sample from the target model
-                         LOG_DBG("all drafted tokens were rejected, sampling from residual distribution\n");
-                         std::vector<float> probs(dist_tgt.size);
-                         for (size_t i = 0; i < dist_tgt.size; ++i) {
-                             probs[i] = dist_tgt.data[i].p;
-                         }
-
-                         std::discrete_distribution<> dist(probs.begin(), probs.end());
-
-                         const int idx = dist(rng);
-
-                         token_id = dist_tgt.data[idx].id;
-                         common_sampler_accept(smpl, token_id, true);
-                         token_str = common_token_to_piece(ctx_tgt, token_id);
-                     }
-                 } else {
-                     // greedy verification
-
-                     // sample from the target model
-                     LOG_DBG("sampling target: s_keep = %3d, i_dft = %3d, i_batch_tgt = %3d\n", s_keep, i_dft, drafts[s_keep].i_batch_tgt[i_dft]);
-                     token_id = common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft]);
-
-                     common_sampler_accept(smpl, token_id, true);
-
-                     token_str = common_token_to_piece(ctx_tgt, token_id);
-
-                     for (int s = 0; s < n_seq_dft; ++s) {
-                         if (!drafts[s].active) {
-                             continue;
-                         }
-
-                         if (i_dft < (int) drafts[s].tokens.size() && token_id == drafts[s].tokens[i_dft]) {
-                             LOG_DBG("the sampled target token matches the %dth drafted token of sequence %d (%d, '%s') - accepted\n", i_dft, s, token_id, token_str.c_str());
-
-                             s_keep = s;
-                             accept = true;
-                         } else {
-                             drafts[s].active = false;
-                         }
-                     }
-                 }
-
-                 if (llama_vocab_is_eog(vocab_tgt, token_id)) {
-                     has_eos = true;
-                 }
-                 ++n_predict;
-
-                 if (accept) {
-                     ++n_accept;
-                     ++n_past_tgt;
-                     ++n_past_dft;
-                     ++i_dft;
-                     if (params.use_color) {
-                         // Color token according to its origin sequence
-                         LOG("\u001b[%dm%s\u001b[37m", (36 - s_keep % 6), token_str.c_str());
-                     } else {
-                         LOG("%s", token_str.c_str());
-                     }
-                     continue;
-                 } else {
-                     LOG("%s", token_str.c_str());
-                     break;
-                 }
-             }
-         }
-
-         {
-             LOG_DBG("the sampled target token (%d, '%s') did not match, or we ran out of drafted tokens\n", token_id, token_str.c_str());
-
-             // TODO: simplify
-             {
-                 LOG_DBG("keeping sequence %d, n_past_tgt = %d, n_past_dft = %d\n", s_keep, n_past_tgt, n_past_dft);
-
-                 llama_kv_self_seq_keep(ctx_dft, s_keep);
-                 llama_kv_self_seq_cp (ctx_dft, s_keep, 0, -1, -1);
-                 llama_kv_self_seq_keep(ctx_dft, 0);
-
-                 llama_kv_self_seq_rm (ctx_tgt, s_keep, n_past_tgt, -1);
-                 llama_kv_self_seq_keep(ctx_tgt, s_keep);
-                 llama_kv_self_seq_cp (ctx_tgt, s_keep, 0, -1, -1);
-                 llama_kv_self_seq_keep(ctx_tgt, 0);
-             }
-
-             for (int s = 0; s < n_seq_dft; ++s) {
-                 drafts[s].active = false;
-                 drafts[s].tokens.clear();
-                 drafts[s].i_batch_tgt.clear();
-                 drafts[s].dists.clear();
-             }
-             // note: will be erased after the speculation phase
-             drafts[0].tokens.push_back(token_id);
-             drafts[0].dists.push_back(std::vector<llama_token_data>());
-             drafts[0].i_batch_tgt.push_back(0);
-
-             common_batch_clear(batch_dft);
-             common_batch_add (batch_dft, token_id, n_past_dft, { 0 }, true);
-
-             llama_kv_self_seq_rm(ctx_dft, 0, n_past_dft, -1);
-             // LOG_DBG("dft batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_dft, batch_dft).c_str());
-             llama_decode(ctx_dft, batch_dft);
-
-             ++n_past_dft;
-         }
-
-         if ((params.n_predict >= 0 && n_predict > params.n_predict) || has_eos) {
-             break;
-         }
-
-         if (drafts[0].smpl) {
-             common_sampler_free(drafts[0].smpl);
-         }
-         drafts[0].smpl = common_sampler_clone(smpl);
-
-         int n_seq_cur = 1;
-         int n_past_cur = n_past_dft;
-
-         for (int s = 0; s < n_seq_dft; ++s) {
-             drafts[s].active = false;
-             drafts[s].drafting = false;
-         }
-         drafts[0].active = true;
-         drafts[0].drafting = true;
-         drafts[0].i_batch_dft = 0;
-
-         common_batch_clear(batch_tgt);
-         common_batch_add (batch_tgt, drafts[0].tokens[0], n_past_tgt, { 0 }, true);
-
-         // sample n_draft tokens from the draft model using tree-based sampling
-         for (int i = 0; i < n_draft; ++i) {
-             batch_dft.n_tokens = 0;
-
-             for (int s = 0; s < n_seq_dft; ++s) {
-                 drafts[s].skip = false;
-             }
-
-             for (int s = 0; s < n_seq_dft; ++s) {
-                 if (!drafts[s].drafting || drafts[s].skip) {
-                     continue;
-                 }
-
-                 common_sampler_sample(drafts[s].smpl, ctx_dft, drafts[s].i_batch_dft, true);
-
-                 const auto * cur_p = common_sampler_get_candidates(drafts[s].smpl);
-
-                 for (int k = 0; k < std::min(n_seq_dft + 3, (int) cur_p->size); ++k) {
-                     LOG_DBG(" - draft candidate %3d for seq %3d, pos %3d: %6d (%8.3f) '%s'\n",
-                         k, s, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
-                 }
-
-                 std::vector<int> sa(1, s);
-
-                 // attempt to split the branch if the probability is high enough
-                 for (int f = 1; f < 8; ++f) {
-                     if (n_seq_cur < n_seq_dft && cur_p->data[f].p > p_draft_split) {
-                         LOG_DBG("splitting seq %3d into %3d\n", s, n_seq_cur);
-
-                         llama_kv_self_seq_rm(ctx_dft, n_seq_cur, -1, -1);
-                         llama_kv_self_seq_cp(ctx_dft, s, n_seq_cur, -1, -1);
-
-                         // all previous tokens from this branch are now also part of the new branch
-                         for (int t = 0; t < batch_tgt.n_tokens; ++t) {
-                             for (int p = 0; p < batch_tgt.n_seq_id[t]; ++p) {
-                                 if (batch_tgt.seq_id[t][p] == s) {
-                                     batch_tgt.seq_id[t][batch_tgt.n_seq_id[t]] = n_seq_cur;
-                                     batch_tgt.n_seq_id[t]++;
-                                     break;
-                                 }
-                             }
-                         }
-
-                         // copy the draft state
-                         drafts[n_seq_cur].active = true;
-                         drafts[n_seq_cur].drafting = true;
-                         drafts[n_seq_cur].skip = true;
-
-                         drafts[n_seq_cur].tokens = drafts[s].tokens;
-                         drafts[n_seq_cur].dists = drafts[s].dists;
-                         drafts[n_seq_cur].i_batch_dft = drafts[s].i_batch_dft;
-                         drafts[n_seq_cur].i_batch_tgt = drafts[s].i_batch_tgt;
-
-                         if (drafts[n_seq_cur].smpl) {
-                             common_sampler_free(drafts[n_seq_cur].smpl);
-                         }
-                         drafts[n_seq_cur].smpl = common_sampler_clone(drafts[s].smpl);
-
-                         sa.push_back(n_seq_cur);
-
-                         n_seq_cur++;
-                     } else {
-                         break;
-                     }
-                 }
-
-                 // add drafted token for each sequence
-                 for (int is = 0; is < (int) sa.size(); ++is) {
-                     const llama_token id = cur_p->data[is].id;
-
-                     const int s = sa[is];
-
-                     common_sampler_accept(drafts[s].smpl, id, true);
-
-                     drafts[s].tokens.push_back(id);
-                     // save cur_p.data into drafts[s].dists
-                     drafts[s].dists.push_back({cur_p->data, cur_p->data + cur_p->size});
-
-                     // add unique drafted tokens to the target batch
-                     drafts[s].i_batch_tgt.push_back(batch_tgt.n_tokens);
-
-                     common_batch_add(batch_tgt, id, n_past_tgt + i + 1, { s }, true);
-
-                     // add the token to the batch for batched decoding with the draft model
-                     drafts[s].i_batch_dft = batch_dft.n_tokens;
-
-                     common_batch_add(batch_dft, id, n_past_cur, { s }, true);
-
-                     if (batch_tgt.n_tokens > n_draft) {
-                         drafts[s].drafting = false;
-                     }
-                 }
-             }
-
-             // no sequence is drafting anymore
-             if (batch_dft.n_tokens == 0) {
-                 break;
-             }
-
-             // evaluate the drafted tokens on the draft model
-             llama_decode(ctx_dft, batch_dft);
-             ++n_past_cur;
-             ++n_drafted;
-
-             if (batch_tgt.n_tokens > n_draft) {
-                 break;
-             }
-         }
-
-         // evaluate the target model on the drafted tokens
-         {
-             llama_kv_self_seq_keep(ctx_tgt, 0);
-             for (int s = 1; s < n_seq_dft; ++s) {
-                 llama_kv_self_seq_cp(ctx_tgt, 0, s, -1, -1);
-             }
-
-             // LOG_DBG("target batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_tgt, batch_tgt).c_str());
-             llama_decode(ctx_tgt, batch_tgt);
-             ++n_past_tgt;
-         }
-
-         // the first token is always proposed by the target model before the speculation loop so we erase it here
-         for (int s = 0; s < n_seq_dft; ++s) {
-             if (!drafts[s].active) {
-                 continue;
-             }
-
-             drafts[s].tokens.erase(drafts[s].tokens.begin());
-             drafts[s].dists.erase(drafts[s].dists.begin());
-         }
-     }
-
-     auto t_dec_end = ggml_time_us();
-
-     LOG("\n\n");
-
-     LOG_INF("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
-     LOG_INF("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f));
-
-     LOG_INF("\n");
-     LOG_INF("n_draft = %d\n", n_draft);
-     LOG_INF("n_predict = %d\n", n_predict);
-     LOG_INF("n_drafted = %d\n", n_drafted);
-     LOG_INF("n_accept = %d\n", n_accept);
-     LOG_INF("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
-
-     LOG_INF("\n");
-     LOG_INF("draft:\n\n");
-     // TODO: print sampling/grammar timings for all drafts
-     llama_perf_context_print(ctx_dft);
-
-     LOG_INF("\n");
-     LOG_INF("target:\n\n");
-     common_perf_print(ctx_tgt, smpl);
-
-     common_sampler_free(smpl);
-     for (int s = 0; s < n_seq_dft; ++s) {
-         common_sampler_free(drafts[s].smpl);
-     }
-
-     llama_batch_free(batch_dft);
-
-     llama_backend_free();
-
-     LOG("\n\n");
-
-     return 0;
- }
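
Note: the removed speculative.cpp above implements stochastic speculative decoding. The core of its verification step is the acceptance rule visible in the diff: a drafted token with target probability p_tgt and draft probability p_dft is accepted when a uniform draw r satisfies r <= p_tgt / p_dft; on rejection, the target distribution is renormalized to the residual max(0, p_tgt - p_dft) and resampled. Below is a minimal, self-contained C++ sketch of just that rule, using toy hard-coded distributions and a hypothetical drafted token index; it is an illustration, not the llama.cpp API.

// accept_rule_sketch.cpp - illustrative only; toy values, not the llama.cpp API
#include <algorithm>
#include <cstdio>
#include <random>
#include <vector>

int main() {
    std::mt19937 rng(42);
    std::uniform_real_distribution<float> u_dist(0.0f, 1.0f);

    // assumed toy probabilities over a 4-token vocabulary
    std::vector<float> p_tgt = {0.50f, 0.30f, 0.15f, 0.05f}; // target model
    std::vector<float> p_dft = {0.40f, 0.40f, 0.10f, 0.10f}; // draft model

    const int drafted = 1;       // token proposed by the draft model (hypothetical)
    const float r = u_dist(rng); // uniform draw, as in the removed speculative.cpp

    if (r <= p_tgt[drafted] / p_dft[drafted]) {
        // accepted with probability min(1, p_tgt / p_dft)
        std::printf("accepted drafted token %d\n", drafted);
    } else {
        // rejected: resample from the normalized residual max(0, p_tgt - p_dft)
        std::vector<float> residual(p_tgt.size());
        float sum = 0.0f;
        for (size_t i = 0; i < residual.size(); ++i) {
            residual[i] = std::max(0.0f, p_tgt[i] - p_dft[i]);
            sum += residual[i];
        }
        for (float & p : residual) {
            p /= sum;
        }
        std::discrete_distribution<int> dist(residual.begin(), residual.end());
        std::printf("rejected; resampled token %d\n", dist(rng));
    }
    return 0;
}

Run repeatedly, the accept-or-resample procedure draws tokens distributed exactly as p_tgt, which is why the removed example can use a small draft model without changing output quality.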
package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt
@@ -1,5 +0,0 @@
- set(TARGET llama-speculative-simple)
- add_executable(${TARGET} speculative-simple.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
- target_compile_features(${TARGET} PRIVATE cxx_std_17)