@fugood/llama.node 0.6.3 → 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (377)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -7
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  27. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  28. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  29. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  30. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  31. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  32. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  33. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  34. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  35. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  36. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  37. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  38. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  39. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  40. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  41. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  42. package/src/llama.cpp/cmake/common.cmake +0 -35
  43. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  44. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  45. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  46. package/src/llama.cpp/docs/build.md +0 -561
  47. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  48. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  49. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  50. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  51. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  53. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  54. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  55. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  56. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  57. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  58. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  59. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  60. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  61. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  62. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  63. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  64. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  68. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  70. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  71. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  72. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  73. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  74. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  75. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  76. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  77. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  79. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  80. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  81. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  82. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  83. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  84. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  85. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  86. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  87. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  88. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  89. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  90. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  91. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  92. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  93. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  94. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  95. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  96. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  97. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  98. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  99. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  100. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  101. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  102. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  103. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  104. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  105. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  106. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  107. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  108. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  109. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  110. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  111. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  112. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  113. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  114. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  115. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  116. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  117. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  118. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  119. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  120. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  121. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  122. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  123. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  125. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  127. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  128. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  129. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  130. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  131. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  132. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  135. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  136. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  137. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  138. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  139. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  140. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  141. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  142. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  143. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  144. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  145. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  146. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  147. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  148. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  149. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  150. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  151. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  153. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  155. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  157. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  159. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  161. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  163. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  167. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  169. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  170. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  172. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  173. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  175. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  177. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  181. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  183. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  185. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  186. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  187. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  189. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  191. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  193. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  195. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  196. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  198. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  199. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  200. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  204. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  205. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  206. package/src/llama.cpp/models/.editorconfig +0 -1
  207. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  208. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  212. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  214. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  217. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  225. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  228. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  231. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  233. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  234. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  237. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  240. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  242. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  245. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  248. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  250. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  253. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  256. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  258. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  261. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  262. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  263. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  264. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  265. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  266. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  267. package/src/llama.cpp/prompts/assistant.txt +0 -31
  268. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  269. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  270. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  271. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  273. package/src/llama.cpp/prompts/chat.txt +0 -28
  274. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  275. package/src/llama.cpp/prompts/dan.txt +0 -1
  276. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  277. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  278. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  279. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  280. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  281. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  284. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  285. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  286. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  287. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  289. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  290. package/src/llama.cpp/requirements.txt +0 -13
  291. package/src/llama.cpp/scripts/build-info.sh +0 -30
  292. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  293. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  294. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  295. package/src/llama.cpp/tests/get-model.cpp +0 -21
  296. package/src/llama.cpp/tests/get-model.h +0 -2
  297. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  298. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  299. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  300. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  301. package/src/llama.cpp/tests/test-c.c +0 -7
  302. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  303. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  304. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  305. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  306. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  307. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  308. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  309. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  310. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  311. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  312. package/src/llama.cpp/tests/test-log.cpp +0 -39
  313. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  314. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  315. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  316. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  317. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  318. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  319. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  320. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  321. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  322. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  323. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  324. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  325. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  326. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  327. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  328. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  329. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  330. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  331. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  332. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  333. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  334. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  335. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  336. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  337. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  338. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  339. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  340. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  341. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  342. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  343. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  344. package/src/llama.cpp/tools/main/main.cpp +0 -977
  345. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  346. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  347. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  348. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  349. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  350. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  351. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  353. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  354. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  355. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  356. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  357. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  358. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  360. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  361. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  362. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  363. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  364. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  365. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  367. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  368. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  369. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  370. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  371. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  372. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  373. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  374. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  375. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  376. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  377. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/tools/main/main.cpp
@@ -1,977 +0,0 @@
- #include "arg.h"
- #include "common.h"
- #include "console.h"
- #include "log.h"
- #include "sampling.h"
- #include "llama.h"
- #include "chat.h"
-
- #include <cstdio>
- #include <cstring>
- #include <ctime>
- #include <fstream>
- #include <iostream>
- #include <sstream>
- #include <string>
- #include <vector>
-
- #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
- #include <signal.h>
- #include <unistd.h>
- #elif defined (_WIN32)
- #define WIN32_LEAN_AND_MEAN
- #ifndef NOMINMAX
- #define NOMINMAX
- #endif
- #include <windows.h>
- #include <signal.h>
- #endif
-
- #if defined(_MSC_VER)
- #pragma warning(disable: 4244 4267) // possible loss of data
- #endif
-
- static llama_context ** g_ctx;
- static llama_model ** g_model;
- static common_sampler ** g_smpl;
- static common_params * g_params;
- static std::vector<llama_token> * g_input_tokens;
- static std::ostringstream * g_output_ss;
- static std::vector<llama_token> * g_output_tokens;
- static bool is_interacting = false;
- static bool need_insert_eot = false;
-
- static void print_usage(int argc, char ** argv) {
- (void) argc;
-
- LOG("\nexample usage:\n");
- LOG("\n text generation: %s -m your_model.gguf -p \"I believe the meaning of life is\" -n 128 -no-cnv\n", argv[0]);
- LOG("\n chat (conversation): %s -m your_model.gguf -sys \"You are a helpful assistant\"\n", argv[0]);
- LOG("\n");
- }
-
- static bool file_exists(const std::string & path) {
- std::ifstream f(path.c_str());
- return f.good();
- }
-
- static bool file_is_empty(const std::string & path) {
- std::ifstream f;
- f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
- f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
- return f.tellg() == 0;
- }
-
- #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
- static void sigint_handler(int signo) {
- if (signo == SIGINT) {
- if (!is_interacting && g_params->interactive) {
- is_interacting = true;
- need_insert_eot = true;
- } else {
- console::cleanup();
- LOG("\n");
- common_perf_print(*g_ctx, *g_smpl);
-
- // make sure all logs are flushed
- LOG("Interrupted by user\n");
- common_log_pause(common_log_main());
-
- _exit(130);
- }
- }
- }
- #endif
-
- int main(int argc, char ** argv) {
- common_params params;
- g_params = &params;
- if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {
- return 1;
- }
-
- common_init();
-
- auto & sparams = params.sampling;
-
- // save choice to use color for later
- // (note for later: this is a slightly awkward choice)
- console::init(params.simple_io, params.use_color);
- atexit([]() { console::cleanup(); });
-
- if (params.embedding) {
- LOG_ERR("************\n");
- LOG_ERR("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
- LOG_ERR("************\n\n");
-
- return 0;
- }
-
- if (params.n_ctx != 0 && params.n_ctx < 8) {
- LOG_WRN("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
- params.n_ctx = 8;
- }
-
- if (params.rope_freq_base != 0.0) {
- LOG_WRN("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
- }
-
- if (params.rope_freq_scale != 0.0) {
- LOG_WRN("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
- }
-
- LOG_INF("%s: llama backend init\n", __func__);
-
- llama_backend_init();
- llama_numa_init(params.numa);
-
- llama_model * model = nullptr;
- llama_context * ctx = nullptr;
- common_sampler * smpl = nullptr;
-
- g_model = &model;
- g_ctx = &ctx;
- g_smpl = &smpl;
-
- std::vector<common_chat_msg> chat_msgs;
-
- // load the model and apply lora adapter, if any
- LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
- common_init_result llama_init = common_init_from_params(params);
-
- model = llama_init.model.get();
- ctx = llama_init.context.get();
-
- if (model == NULL) {
- LOG_ERR("%s: error: unable to load model\n", __func__);
- return 1;
- }
-
- const llama_vocab * vocab = llama_model_get_vocab(model);
- auto chat_templates = common_chat_templates_init(model, params.chat_template);
-
- LOG_INF("%s: llama threadpool init, n_threads = %d\n", __func__, (int) params.cpuparams.n_threads);
-
- auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
- if (!cpu_dev) {
- LOG_ERR("%s: no CPU backend found\n", __func__);
- return 1;
- }
- auto * reg = ggml_backend_dev_backend_reg(cpu_dev);
- auto * ggml_threadpool_new_fn = (decltype(ggml_threadpool_new) *) ggml_backend_reg_get_proc_address(reg, "ggml_threadpool_new");
- auto * ggml_threadpool_free_fn = (decltype(ggml_threadpool_free) *) ggml_backend_reg_get_proc_address(reg, "ggml_threadpool_free");
-
- struct ggml_threadpool_params tpp_batch =
- ggml_threadpool_params_from_cpu_params(params.cpuparams_batch);
- struct ggml_threadpool_params tpp =
- ggml_threadpool_params_from_cpu_params(params.cpuparams);
-
- set_process_priority(params.cpuparams.priority);
-
- struct ggml_threadpool * threadpool_batch = NULL;
- if (!ggml_threadpool_params_match(&tpp, &tpp_batch)) {
- threadpool_batch = ggml_threadpool_new_fn(&tpp_batch);
- if (!threadpool_batch) {
- LOG_ERR("%s: batch threadpool create failed : n_threads %d\n", __func__, tpp_batch.n_threads);
- return 1;
- }
-
- // Start the non-batch threadpool in the paused state
- tpp.paused = true;
- }
-
- struct ggml_threadpool * threadpool = ggml_threadpool_new_fn(&tpp);
- if (!threadpool) {
- LOG_ERR("%s: threadpool create failed : n_threads %d\n", __func__, tpp.n_threads);
- return 1;
- }
-
- llama_attach_threadpool(ctx, threadpool, threadpool_batch);
-
- const int n_ctx_train = llama_model_n_ctx_train(model);
- const int n_ctx = llama_n_ctx(ctx);
-
- if (n_ctx > n_ctx_train) {
- LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, n_ctx);
- }
-
- // auto enable conversation mode if chat template is available
- const bool has_chat_template = common_chat_templates_was_explicit(chat_templates.get());
- if (params.conversation_mode == COMMON_CONVERSATION_MODE_AUTO) {
- if (has_chat_template) {
- LOG_INF("%s: chat template is available, enabling conversation mode (disable it with -no-cnv)\n", __func__);
- params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
- } else {
- params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
- }
- }
-
- // in case user force-activate conversation mode (via -cnv) without proper chat template, we show a warning
- if (params.conversation_mode && !has_chat_template) {
- LOG_WRN("%s: chat template is not available or is not supported. This may cause the model to output suboptimal responses\n", __func__);
- }
-
- // print chat template example in conversation mode
- if (params.conversation_mode) {
- if (params.enable_chat_template) {
- if (!params.prompt.empty() && params.system_prompt.empty()) {
- LOG_WRN("*** User-specified prompt will pre-start conversation, did you mean to set --system-prompt (-sys) instead?\n");
- }
-
- LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(chat_templates.get(), params.use_jinja).c_str());
- } else {
- LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
- }
- }
-
- // print system information
- {
- LOG_INF("\n");
- LOG_INF("%s\n", common_params_get_system_info(params).c_str());
- LOG_INF("\n");
- }
-
- std::string path_session = params.path_prompt_cache;
- std::vector<llama_token> session_tokens;
-
- if (!path_session.empty()) {
- LOG_INF("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
- if (!file_exists(path_session)) {
- LOG_INF("%s: session file does not exist, will create.\n", __func__);
- } else if (file_is_empty(path_session)) {
- LOG_INF("%s: The session file is empty. A new session will be initialized.\n", __func__);
- } else {
- // The file exists and is not empty
- session_tokens.resize(n_ctx);
- size_t n_token_count_out = 0;
- if (!llama_state_load_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
- LOG_ERR("%s: failed to load session file '%s'\n", __func__, path_session.c_str());
- return 1;
- }
- session_tokens.resize(n_token_count_out);
- LOG_INF("%s: loaded a session with prompt size of %d tokens\n", __func__, (int)session_tokens.size());
- }
- }
-
- const bool add_bos = llama_vocab_get_add_bos(vocab) && !params.use_jinja;
- if (!llama_model_has_encoder(model)) {
- GGML_ASSERT(!llama_vocab_get_add_eos(vocab));
- }
-
- LOG_DBG("n_ctx: %d, add_bos: %d\n", n_ctx, add_bos);
-
- std::vector<llama_token> embd_inp;
-
- bool waiting_for_first_input = false;
- auto chat_add_and_format = [&chat_msgs, &chat_templates](const std::string & role, const std::string & content) {
- common_chat_msg new_msg;
- new_msg.role = role;
- new_msg.content = content;
- auto formatted = common_chat_format_single(chat_templates.get(), chat_msgs, new_msg, role == "user", g_params->use_jinja);
- chat_msgs.push_back(new_msg);
- LOG_DBG("formatted: '%s'\n", formatted.c_str());
- return formatted;
- };
-
- std::string prompt;
- {
- if (params.conversation_mode && params.enable_chat_template) {
- if (!params.system_prompt.empty()) {
- // format the system prompt (will use template default if empty)
- chat_add_and_format("system", params.system_prompt);
- }
-
- if (!params.prompt.empty()) {
- // format and append the user prompt
- chat_add_and_format("user", params.prompt);
- } else {
- waiting_for_first_input = true;
- }
-
- if (!params.system_prompt.empty() || !params.prompt.empty()) {
- common_chat_templates_inputs inputs;
- inputs.messages = chat_msgs;
- inputs.add_generation_prompt = !params.prompt.empty();
-
- prompt = common_chat_templates_apply(chat_templates.get(), inputs).prompt;
- }
- } else {
- // otherwise use the prompt as is
- prompt = params.prompt;
- }
-
- if (params.interactive_first || !prompt.empty() || session_tokens.empty()) {
- LOG_DBG("tokenize the prompt\n");
- embd_inp = common_tokenize(ctx, prompt, true, true);
- } else {
- LOG_DBG("use session tokens\n");
- embd_inp = session_tokens;
- }
-
- LOG_DBG("prompt: \"%s\"\n", prompt.c_str());
- LOG_DBG("tokens: %s\n", string_from(ctx, embd_inp).c_str());
- }
-
- // Should not run without any tokens
- if (!waiting_for_first_input && embd_inp.empty()) {
- if (add_bos) {
- embd_inp.push_back(llama_vocab_bos(vocab));
- LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str());
- } else {
- LOG_ERR("input is empty\n");
- return -1;
- }
- }
-
- // Tokenize negative prompt
- if ((int) embd_inp.size() > n_ctx - 4) {
- LOG_ERR("%s: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
- return 1;
- }
-
- // debug message about similarity of saved session, if applicable
- size_t n_matching_session_tokens = 0;
- if (!session_tokens.empty()) {
- for (llama_token id : session_tokens) {
- if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
- break;
- }
- n_matching_session_tokens++;
- }
- if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) {
- LOG_INF("%s: using full prompt from session file\n", __func__);
- } else if (n_matching_session_tokens >= embd_inp.size()) {
- LOG_INF("%s: session file has exact match for prompt!\n", __func__);
- } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
- LOG_WRN("%s: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
- __func__, n_matching_session_tokens, embd_inp.size());
- } else {
- LOG_INF("%s: session file matches %zu / %zu tokens of prompt\n",
- __func__, n_matching_session_tokens, embd_inp.size());
- }
-
- // remove any "future" tokens that we might have inherited from the previous session
- llama_kv_self_seq_rm(ctx, -1, n_matching_session_tokens, -1);
- }
-
- LOG_DBG("recalculate the cached logits (check): embd_inp.size() %zu, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu\n",
- embd_inp.size(), n_matching_session_tokens, embd_inp.size(), session_tokens.size());
-
- // if we will use the cache for the full prompt without reaching the end of the cache, force
- // reevaluation of the last token to recalculate the cached logits
- if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
- LOG_DBG("recalculate the cached logits (do): session_tokens.resize( %zu )\n", embd_inp.size() - 1);
-
- session_tokens.resize(embd_inp.size() - 1);
- }
-
- // number of tokens to keep when resetting context
- if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
- params.n_keep = (int)embd_inp.size();
- } else {
- params.n_keep += add_bos; // always keep the BOS token
- }
-
- if (params.conversation_mode) {
- if (params.single_turn && !params.prompt.empty()) {
- params.interactive = false;
- params.interactive_first = false;
- } else {
- params.interactive_first = true;
- }
- }
-
- // enable interactive mode if interactive start is specified
- if (params.interactive_first) {
- params.interactive = true;
- }
-
- if (params.verbose_prompt) {
- LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
- LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
- for (int i = 0; i < (int) embd_inp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", embd_inp[i], common_token_to_piece(ctx, embd_inp[i]).c_str());
- }
-
- if (params.n_keep > add_bos) {
- LOG_INF("%s: static prompt based on n_keep: '", __func__);
- for (int i = 0; i < params.n_keep; i++) {
- LOG_CNT("%s", common_token_to_piece(ctx, embd_inp[i]).c_str());
- }
- LOG_CNT("'\n");
- }
- LOG_INF("\n");
- }
-
- // ctrl+C handling
- {
- #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
- struct sigaction sigint_action;
- sigint_action.sa_handler = sigint_handler;
- sigemptyset (&sigint_action.sa_mask);
- sigint_action.sa_flags = 0;
- sigaction(SIGINT, &sigint_action, NULL);
- #elif defined (_WIN32)
- auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
- return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
- };
- SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
- #endif
- }
-
- if (params.interactive) {
- LOG_INF("%s: interactive mode on.\n", __func__);
-
- if (!params.antiprompt.empty()) {
- for (const auto & antiprompt : params.antiprompt) {
- LOG_INF("Reverse prompt: '%s'\n", antiprompt.c_str());
- if (params.verbose_prompt) {
- auto tmp = common_tokenize(ctx, antiprompt, false, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
- }
- }
- }
- }
-
- if (params.input_prefix_bos) {
- LOG_INF("Input prefix with BOS\n");
- }
-
- if (!params.input_prefix.empty()) {
- LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
- if (params.verbose_prompt) {
- auto tmp = common_tokenize(ctx, params.input_prefix, true, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
- }
- }
- }
-
- if (!params.input_suffix.empty()) {
- LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
- if (params.verbose_prompt) {
- auto tmp = common_tokenize(ctx, params.input_suffix, false, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
- }
- }
- }
- }
-
- smpl = common_sampler_init(model, sparams);
- if (!smpl) {
- LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
- return 1;
- }
-
- LOG_INF("sampler seed: %u\n", common_sampler_get_seed(smpl));
- LOG_INF("sampler params: \n%s\n", sparams.print().c_str());
- LOG_INF("sampler chain: %s\n", common_sampler_print(smpl).c_str());
-
- LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
-
- // group-attention state
- // number of grouped KV tokens so far (used only if params.grp_attn_n > 1)
- int ga_i = 0;
-
- const int ga_n = params.grp_attn_n;
- const int ga_w = params.grp_attn_w;
-
- if (ga_n != 1) {
- GGML_ASSERT(ga_n > 0 && "grp_attn_n must be positive"); // NOLINT
- GGML_ASSERT(ga_w % ga_n == 0 && "grp_attn_w must be a multiple of grp_attn_n"); // NOLINT
- //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of grp_attn_w"); // NOLINT
- //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
- LOG_INF("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
- }
- LOG_INF("\n");
-
- if (params.interactive) {
- const char * control_message;
- if (params.multiline_input) {
- control_message = " - To return control to the AI, end your input with '\\'.\n"
- " - To return control without starting a new line, end your input with '/'.\n";
- } else {
- control_message = " - Press Return to return control to the AI.\n"
- " - To return control without starting a new line, end your input with '/'.\n"
- " - If you want to submit another line, end your input with '\\'.\n";
- }
- LOG_INF("== Running in interactive mode. ==\n");
- #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
- LOG_INF( " - Press Ctrl+C to interject at any time.\n");
- #endif
- LOG_INF( "%s", control_message);
- if (params.conversation_mode && params.enable_chat_template && params.system_prompt.empty()) {
- LOG_INF( " - Not using system message. To change it, set a different value via -sys PROMPT\n");
- }
- LOG_INF("\n");
-
- is_interacting = params.interactive_first;
- }
-
- bool is_antiprompt = false;
- bool input_echo = true;
- bool display = true;
- bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size();
-
- int n_past = 0;
- int n_remain = params.n_predict;
- int n_consumed = 0;
- int n_session_consumed = 0;
-
- std::vector<int> input_tokens; g_input_tokens = &input_tokens;
- std::vector<int> output_tokens; g_output_tokens = &output_tokens;
- std::ostringstream output_ss; g_output_ss = &output_ss;
- std::ostringstream assistant_ss; // for storing current assistant message, used in conversation mode
-
- // the first thing we will do is to output the prompt, so set color accordingly
- console::set_display(console::prompt);
- display = params.display_prompt;
-
- std::vector<llama_token> embd;
-
- // single-token antiprompts
- std::vector<llama_token> antiprompt_token;
-
- for (const std::string & antiprompt : params.antiprompt) {
- auto ids = ::common_tokenize(ctx, antiprompt, false, true);
- if (ids.size() == 1) {
- antiprompt_token.push_back(ids[0]);
- }
- }
-
- if (llama_model_has_encoder(model)) {
- int enc_input_size = embd_inp.size();
- llama_token * enc_input_buf = embd_inp.data();
-
- if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size))) {
- LOG_ERR("%s : failed to eval\n", __func__);
- return 1;
- }
-
- llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
- if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
- decoder_start_token_id = llama_vocab_bos(vocab);
- }
-
- embd_inp.clear();
- embd_inp.push_back(decoder_start_token_id);
- }
-
- while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
- // predict
- if (!embd.empty()) {
- // Note: (n_ctx - 4) here is to match the logic for commandline prompt handling via
- // --prompt or --file which uses the same value.
- int max_embd_size = n_ctx - 4;
-
- // Ensure the input doesn't exceed the context size by truncating embd if necessary.
- if ((int) embd.size() > max_embd_size) {
- const int skipped_tokens = (int) embd.size() - max_embd_size;
- embd.resize(max_embd_size);
-
- console::set_display(console::error);
- LOG_WRN("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
- console::set_display(console::reset);
- }
-
- if (ga_n == 1) {
- // infinite text generation via context shifting
- // if we run out of context:
- // - take the n_keep first tokens from the original prompt (via n_past)
- // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
-
- if (n_past + (int) embd.size() >= n_ctx) {
- if (!params.ctx_shift){
- LOG_DBG("\n\n%s: context full and context shift is disabled => stopping\n", __func__);
- break;
- }
-
- if (params.n_predict == -2) {
- LOG_DBG("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
- break;
- }
-
- const int n_left = n_past - params.n_keep;
- const int n_discard = n_left/2;
-
- LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
- n_past, n_left, n_ctx, params.n_keep, n_discard);
-
- llama_kv_self_seq_rm (ctx, 0, params.n_keep , params.n_keep + n_discard);
- llama_kv_self_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);
-
- n_past -= n_discard;
-
- LOG_DBG("after swap: n_past = %d\n", n_past);
-
- LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());
-
- LOG_DBG("clear session path\n");
- path_session.clear();
- }
- } else {
- // context extension via Self-Extend
- while (n_past >= ga_i + ga_w) {
- const int ib = (ga_n*ga_i)/ga_w;
- const int bd = (ga_w/ga_n)*(ga_n - 1);
- const int dd = (ga_w/ga_n) - ib*bd - ga_w;
-
- LOG_DBG("\n");
- LOG_DBG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i, n_past, ib*bd, ga_i + ib*bd, n_past + ib*bd);
- LOG_DBG("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n);
- LOG_DBG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd);
-
- llama_kv_self_seq_add(ctx, 0, ga_i, n_past, ib*bd);
- llama_kv_self_seq_div(ctx, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n);
- llama_kv_self_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd);
-
- n_past -= bd;
-
- ga_i += ga_w/ga_n;
-
- LOG_DBG("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", n_past + bd, n_past, ga_i);
- }
- }
-
- // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
- if (n_session_consumed < (int) session_tokens.size()) {
- size_t i = 0;
- for ( ; i < embd.size(); i++) {
- if (embd[i] != session_tokens[n_session_consumed]) {
- session_tokens.resize(n_session_consumed);
- break;
- }
-
- n_past++;
- n_session_consumed++;
-
- if (n_session_consumed >= (int) session_tokens.size()) {
- ++i;
- break;
- }
- }
- if (i > 0) {
- embd.erase(embd.begin(), embd.begin() + i);
- }
- }
-
- for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
- int n_eval = (int) embd.size() - i;
- if (n_eval > params.n_batch) {
- n_eval = params.n_batch;
- }
-
- LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());
-
- if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) {
- LOG_ERR("%s : failed to eval\n", __func__);
- return 1;
- }
-
- n_past += n_eval;
-
- LOG_DBG("n_past = %d\n", n_past);
- // Display total tokens alongside total time
- if (params.n_print > 0 && n_past % params.n_print == 0) {
- LOG_DBG("\n\033[31mTokens consumed so far = %d / %d \033[0m\n", n_past, n_ctx);
- }
- }
-
- if (!embd.empty() && !path_session.empty()) {
- session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
- n_session_consumed = session_tokens.size();
- }
- }
-
- embd.clear();
-
- if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
- // optionally save the session on first sample (for faster prompt loading next time)
- if (!path_session.empty() && need_to_save_session && !params.prompt_cache_ro) {
- need_to_save_session = false;
- llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
-
- LOG_DBG("saved session to %s\n", path_session.c_str());
- }
-
- const llama_token id = common_sampler_sample(smpl, ctx, -1);
-
- common_sampler_accept(smpl, id, /* accept_grammar= */ true);
-
- // LOG_DBG("last: %s\n", string_from(ctx, smpl->prev.to_vector()).c_str());
-
- embd.push_back(id);
-
- // echo this to console
- input_echo = true;
-
- // decrement remaining sampling budget
- --n_remain;
-
- LOG_DBG("n_remain: %d\n", n_remain);
- } else {
- // some user input remains from prompt or interaction, forward it to processing
- LOG_DBG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
- while ((int) embd_inp.size() > n_consumed) {
- embd.push_back(embd_inp[n_consumed]);
-
- // push the prompt in the sampling context in order to apply repetition penalties later
- // for the prompt, we don't apply grammar rules
- common_sampler_accept(smpl, embd_inp[n_consumed], /* accept_grammar= */ false);
-
- ++n_consumed;
- if ((int) embd.size() >= params.n_batch) {
- break;
- }
- }
- }
-
- // display text
- if (input_echo && display) {
- for (auto id : embd) {
- const std::string token_str = common_token_to_piece(ctx, id, params.special);
-
- // Console/Stream Output
- LOG("%s", token_str.c_str());
-
- // Record Displayed Tokens To Log
- // Note: Generated tokens are created one by one hence this check
- if (embd.size() > 1) {
- // Incoming Requested Tokens
- input_tokens.push_back(id);
- } else {
- // Outgoing Generated Tokens
- output_tokens.push_back(id);
- output_ss << token_str;
- }
- }
- }
-
- // reset color to default if there is no pending user input
- if (input_echo && (int) embd_inp.size() == n_consumed) {
- console::set_display(console::reset);
- display = true;
- }
-
- // if not currently processing queued inputs;
- if ((int) embd_inp.size() <= n_consumed) {
- // check for reverse prompt in the last n_prev tokens
- if (!params.antiprompt.empty()) {
- const int n_prev = 32;
- const std::string last_output = common_sampler_prev_str(smpl, ctx, n_prev);
-
- is_antiprompt = false;
- // Check if each of the reverse prompts appears at the end of the output.
- // If we're not running interactively, the reverse prompt might be tokenized with some following characters
- // so we'll compensate for that by widening the search window a bit.
- for (std::string & antiprompt : params.antiprompt) {
- size_t extra_padding = params.interactive ? 0 : 2;
- size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
- ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
- : 0;
-
- if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
- if (params.interactive) {
- is_interacting = true;
- }
- is_antiprompt = true;
- break;
- }
- }
-
- // check for reverse prompt using special tokens
- llama_token last_token = common_sampler_last(smpl);
- for (auto token : antiprompt_token) {
- if (token == last_token) {
- if (params.interactive) {
- is_interacting = true;
- }
- is_antiprompt = true;
- break;
- }
- }
-
- if (is_antiprompt) {
- LOG_DBG("found antiprompt: %s\n", last_output.c_str());
- }
- }
-
- // deal with end of generation tokens in interactive mode
- if (!waiting_for_first_input && llama_vocab_is_eog(vocab, common_sampler_last(smpl))) {
- LOG_DBG("found an EOG token\n");
-
- if (params.interactive) {
- if (!params.antiprompt.empty()) {
- // tokenize and inject first reverse prompt
- const auto first_antiprompt = common_tokenize(ctx, params.antiprompt.front(), false, true);
- embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
- is_antiprompt = true;
- }
-
- if (params.enable_chat_template) {
- chat_add_and_format("assistant", assistant_ss.str());
- }
- is_interacting = true;
- LOG("\n");
- }
- }
-
- // if current token is not EOG, we add it to current assistant message
- if (params.conversation_mode && !waiting_for_first_input) {
- const auto id = common_sampler_last(smpl);
- assistant_ss << common_token_to_piece(ctx, id, false);
-
- if (!prompt.empty()) {
- prompt.clear();
- is_interacting = false;
- }
- }
-
- if ((n_past > 0 || waiting_for_first_input) && is_interacting) {
- LOG_DBG("waiting for user input\n");
-
- if (params.conversation_mode) {
- LOG("\n> ");
- }
-
- if (params.input_prefix_bos) {
- LOG_DBG("adding input prefix BOS token\n");
- embd_inp.push_back(llama_vocab_bos(vocab));
- }
-
- std::string buffer;
- if (!params.input_prefix.empty() && !params.conversation_mode) {
- LOG_DBG("appending input prefix: '%s'\n", params.input_prefix.c_str());
- LOG("%s", params.input_prefix.c_str());
- }
-
- // color user input only
- console::set_display(console::user_input);
- display = params.display_prompt;
-
- std::string line;
- bool another_line = true;
- do {
- another_line = console::readline(line, params.multiline_input);
- buffer += line;
- } while (another_line);
-
- // done taking input, reset color
- console::set_display(console::reset);
- display = true;
-
- if (buffer.empty()) { // Ctrl+D on empty line exits
- LOG("EOF by user\n");
- break;
- }
-
- if (buffer.back() == '\n') {
- // Implement #587:
- // If the user wants the text to end in a newline,
- // this should be accomplished by explicitly adding a newline by using \ followed by return,
- // then returning control by pressing return again.
- buffer.pop_back();
- }
-
- if (buffer.empty()) { // Enter key on empty line lets the user pass control back
- LOG_DBG("empty line, passing control back\n");
- } else { // Add tokens to embd only if the input buffer is non-empty
- // append input suffix if any
- if (!params.input_suffix.empty() && !params.conversation_mode) {
- LOG_DBG("appending input suffix: '%s'\n", params.input_suffix.c_str());
- LOG("%s", params.input_suffix.c_str());
- }
-
- LOG_DBG("buffer: '%s'\n", buffer.c_str());
-
- const size_t original_size = embd_inp.size();
-
- if (params.escape) {
- string_process_escapes(buffer);
- }
-
- bool format_chat = params.conversation_mode && params.enable_chat_template;
- std::string user_inp = format_chat
- ? chat_add_and_format("user", std::move(buffer))
- : std::move(buffer);
- // TODO: one inconvenient of current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
- const auto line_pfx = common_tokenize(ctx, params.input_prefix, false, true);
- const auto line_inp = common_tokenize(ctx, user_inp, false, format_chat);
- const auto line_sfx = common_tokenize(ctx, params.input_suffix, false, true);
-
- LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());
-
- // if user stop generation mid-way, we must add EOT to finish model's last response
- if (need_insert_eot && format_chat) {
- llama_token eot = llama_vocab_eot(vocab);
- embd_inp.push_back(eot == LLAMA_TOKEN_NULL ? llama_vocab_eos(vocab) : eot);
- need_insert_eot = false;
- }
-
- embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
- embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
- embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());
-
- for (size_t i = original_size; i < embd_inp.size(); ++i) {
- const llama_token token = embd_inp[i];
- output_tokens.push_back(token);
- output_ss << common_token_to_piece(ctx, token);
- }
-
- // reset assistant message
- assistant_ss.str("");
-
- n_remain -= line_inp.size();
- LOG_DBG("n_remain: %d\n", n_remain);
- }
-
- input_echo = false; // do not echo this again
- }
-
- if (n_past > 0 || waiting_for_first_input) {
- if (is_interacting) {
- common_sampler_reset(smpl);
- }
- is_interacting = false;
-
- if (waiting_for_first_input && params.single_turn) {
- params.interactive = false;
- params.interactive_first = false;
- }
- waiting_for_first_input = false;
- }
- }
-
- // end of generation
- if (!embd.empty() && llama_vocab_is_eog(vocab, embd.back()) && !(params.interactive)) {
- LOG(" [end of text]\n");
- break;
- }
-
- // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
- // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
- if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
- n_remain = params.n_predict;
- is_interacting = true;
- }
- }
-
- if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
- LOG("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
- llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
- }
-
- LOG("\n\n");
- common_perf_print(ctx, smpl);
-
- common_sampler_free(smpl);
-
- llama_backend_free();
-
- ggml_threadpool_free_fn(threadpool);
- ggml_threadpool_free_fn(threadpool_batch);
-
- return 0;
- }