@fugood/llama.node 0.6.3 → 1.0.0-beta.1

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (377)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -7
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  27. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  28. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  29. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  30. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  31. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  32. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  33. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  34. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  35. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  36. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  37. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  38. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  39. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  40. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  41. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  42. package/src/llama.cpp/cmake/common.cmake +0 -35
  43. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  44. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  45. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  46. package/src/llama.cpp/docs/build.md +0 -561
  47. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  48. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  49. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  50. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  51. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  53. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  54. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  55. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  56. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  57. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  58. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  59. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  60. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  61. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  62. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  63. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  64. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  68. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  70. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  71. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  72. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  73. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  74. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  75. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  76. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  77. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  79. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  80. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  81. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  82. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  83. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  84. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  85. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  86. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  87. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  88. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  89. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  90. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  91. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  92. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  93. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  94. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  95. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  96. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  97. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  98. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  99. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  100. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  101. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  102. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  103. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  104. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  105. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  106. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  107. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  108. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  109. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  110. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  111. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  112. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  113. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  114. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  115. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  116. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  117. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  118. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  119. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  120. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  121. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  122. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  123. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  125. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  127. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  128. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  129. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  130. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  131. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  132. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  135. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  136. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  137. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  138. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  139. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  140. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  141. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  142. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  143. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  144. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  145. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  146. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  147. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  148. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  149. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  150. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  151. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  153. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  155. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  157. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  159. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  161. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  163. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  167. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  169. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  170. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  172. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  173. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  175. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  177. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  181. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  183. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  185. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  186. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  187. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  189. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  191. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  193. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  195. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  196. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  198. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  199. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  200. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  204. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  205. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  206. package/src/llama.cpp/models/.editorconfig +0 -1
  207. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  208. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  212. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  214. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  217. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  225. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  228. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  231. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  233. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  234. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  237. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  240. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  242. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  245. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  248. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  250. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  253. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  256. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  258. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  261. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  262. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  263. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  264. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  265. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  266. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  267. package/src/llama.cpp/prompts/assistant.txt +0 -31
  268. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  269. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  270. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  271. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  273. package/src/llama.cpp/prompts/chat.txt +0 -28
  274. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  275. package/src/llama.cpp/prompts/dan.txt +0 -1
  276. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  277. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  278. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  279. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  280. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  281. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  284. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  285. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  286. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  287. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  289. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  290. package/src/llama.cpp/requirements.txt +0 -13
  291. package/src/llama.cpp/scripts/build-info.sh +0 -30
  292. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  293. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  294. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  295. package/src/llama.cpp/tests/get-model.cpp +0 -21
  296. package/src/llama.cpp/tests/get-model.h +0 -2
  297. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  298. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  299. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  300. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  301. package/src/llama.cpp/tests/test-c.c +0 -7
  302. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  303. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  304. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  305. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  306. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  307. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  308. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  309. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  310. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  311. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  312. package/src/llama.cpp/tests/test-log.cpp +0 -39
  313. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  314. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  315. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  316. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  317. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  318. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  319. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  320. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  321. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  322. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  323. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  324. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  325. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  326. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  327. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  328. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  329. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  330. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  331. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  332. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  333. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  334. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  335. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  336. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  337. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  338. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  339. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  340. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  341. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  342. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  343. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  344. package/src/llama.cpp/tools/main/main.cpp +0 -977
  345. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  346. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  347. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  348. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  349. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  350. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  351. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  353. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  354. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  355. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  356. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  357. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  358. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  360. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  361. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  362. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  363. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  364. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  365. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  367. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  368. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  369. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  370. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  371. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  372. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  373. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  374. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  375. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  376. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  377. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/tools/quantize/CMakeLists.txt
@@ -1,6 +0,0 @@
- set(TARGET llama-quantize)
- add_executable(${TARGET} quantize.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
- target_include_directories(${TARGET} PRIVATE ../../common)
- target_compile_features(${TARGET} PRIVATE cxx_std_17)
package/src/llama.cpp/tools/quantize/quantize.cpp
@@ -1,519 +0,0 @@
- #include "common.h"
- #include "llama.h"
-
- #include <cstdio>
- #include <cstring>
- #include <vector>
- #include <string>
- #include <unordered_map>
- #include <fstream>
- #include <cmath>
- #include <cctype>
- #include <algorithm>
-
- struct quant_option {
-     std::string name;
-     llama_ftype ftype;
-     std::string desc;
- };
-
- static const std::vector<quant_option> QUANT_OPTIONS = {
-     { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
-     { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 4.78G, +0.4511 ppl @ Llama-3-8B", },
-     { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 5.21G, +0.1316 ppl @ Llama-3-8B", },
-     { "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 5.65G, +0.1062 ppl @ Llama-3-8B", },
-     { "IQ2_XXS", LLAMA_FTYPE_MOSTLY_IQ2_XXS, " 2.06 bpw quantization", },
-     { "IQ2_XS", LLAMA_FTYPE_MOSTLY_IQ2_XS, " 2.31 bpw quantization", },
-     { "IQ2_S", LLAMA_FTYPE_MOSTLY_IQ2_S, " 2.5 bpw quantization", },
-     { "IQ2_M", LLAMA_FTYPE_MOSTLY_IQ2_M, " 2.7 bpw quantization", },
-     { "IQ1_S", LLAMA_FTYPE_MOSTLY_IQ1_S, " 1.56 bpw quantization", },
-     { "IQ1_M", LLAMA_FTYPE_MOSTLY_IQ1_M, " 1.75 bpw quantization", },
-     { "TQ1_0", LLAMA_FTYPE_MOSTLY_TQ1_0, " 1.69 bpw ternarization", },
-     { "TQ2_0", LLAMA_FTYPE_MOSTLY_TQ2_0, " 2.06 bpw ternarization", },
-     { "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.96G, +3.5199 ppl @ Llama-3-8B", },
-     { "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.96G, +3.1836 ppl @ Llama-3-8B", },
-     { "IQ3_XXS", LLAMA_FTYPE_MOSTLY_IQ3_XXS, " 3.06 bpw quantization", },
-     { "IQ3_S", LLAMA_FTYPE_MOSTLY_IQ3_S, " 3.44 bpw quantization", },
-     { "IQ3_M", LLAMA_FTYPE_MOSTLY_IQ3_M, " 3.66 bpw quantization mix", },
-     { "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" },
-     { "IQ3_XS", LLAMA_FTYPE_MOSTLY_IQ3_XS, " 3.3 bpw quantization", },
-     { "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 3.41G, +1.6321 ppl @ Llama-3-8B", },
-     { "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.74G, +0.6569 ppl @ Llama-3-8B", },
-     { "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 4.03G, +0.5562 ppl @ Llama-3-8B", },
-     { "IQ4_NL", LLAMA_FTYPE_MOSTLY_IQ4_NL, " 4.50 bpw non-linear quantization", },
-     { "IQ4_XS", LLAMA_FTYPE_MOSTLY_IQ4_XS, " 4.25 bpw non-linear quantization", },
-     { "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
-     { "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 4.37G, +0.2689 ppl @ Llama-3-8B", },
-     { "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 4.58G, +0.1754 ppl @ Llama-3-8B", },
-     { "Q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M, "alias for Q5_K_M", },
-     { "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 5.21G, +0.1049 ppl @ Llama-3-8B", },
-     { "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 5.33G, +0.0569 ppl @ Llama-3-8B", },
-     { "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 6.14G, +0.0217 ppl @ Llama-3-8B", },
-     { "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 7.96G, +0.0026 ppl @ Llama-3-8B", },
-     { "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, +0.0020 ppl @ Mistral-7B", },
-     { "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
-     { "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
-     // Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
-     { "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
- };
-
- // Quantization types. Changes to this struct must be replicated in llama-quantize.cpp
- struct tensor_quantization {
-     std::string name;
-     ggml_type quant = GGML_TYPE_COUNT;
- };
-
- static const char * const LLM_KV_QUANTIZE_IMATRIX_FILE = "quantize.imatrix.file";
- static const char * const LLM_KV_QUANTIZE_IMATRIX_DATASET = "quantize.imatrix.dataset";
- static const char * const LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES = "quantize.imatrix.entries_count";
- static const char * const LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS = "quantize.imatrix.chunks_count";
-
- static bool striequals(const char * a, const char * b) {
-     while (*a && *b) {
-         if (std::tolower(*a) != std::tolower(*b)) {
-             return false;
-         }
-         a++; b++;
-     }
-     return *a == *b;
- }
-
- static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std::string & ftype_str_out) {
-     std::string ftype_str;
-
-     for (auto ch : ftype_str_in) {
-         ftype_str.push_back(std::toupper(ch));
-     }
-     for (auto & it : QUANT_OPTIONS) {
-         if (striequals(it.name.c_str(), ftype_str.c_str())) {
-             ftype = it.ftype;
-             ftype_str_out = it.name;
-             return true;
-         }
-     }
-     try {
-         int ftype_int = std::stoi(ftype_str);
-         for (auto & it : QUANT_OPTIONS) {
-             if (it.ftype == ftype_int) {
-                 ftype = it.ftype;
-                 ftype_str_out = it.name;
-                 return true;
-             }
-         }
-     }
-     catch (...) {
-         // stoi failed
-     }
-     return false;
- }
-
- // usage:
- //  ./llama-quantize [--allow-requantize] [--leave-output-tensor] [--pure] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads]
- //
- [[noreturn]]
- static void usage(const char * executable) {
-     printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable);
-     printf(" [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
-     printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
-     printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
-     printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
-     printf(" --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
-     printf(" --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
-     printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
-     printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
-     printf(" --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
-     printf(" --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n");
-     printf(" Advanced option to selectively quantize tensors. May be specified multiple times.\n");
-     printf(" --keep-split: will generate quantized model in the same shards as input\n");
-     printf(" --override-kv KEY=TYPE:VALUE\n");
-     printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
-     printf("Note: --include-weights and --exclude-weights cannot be used together\n");
-     printf("\nAllowed quantization types:\n");
-     for (auto & it : QUANT_OPTIONS) {
-         if (it.name != "COPY") {
-             printf(" %2d or ", it.ftype);
-         } else {
-             printf(" ");
-         }
-         printf("%-7s : %s\n", it.name.c_str(), it.desc.c_str());
-     }
-     exit(1);
- }
-
- static int load_imatrix(const std::string & imatrix_file, std::string & imatrix_dataset, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
-     std::ifstream in(imatrix_file.c_str(), std::ios::binary);
-     if (!in) {
-         printf("%s: failed to open %s\n",__func__, imatrix_file.c_str());
-         exit(1);
-     }
-     int n_entries;
-     in.read((char *)&n_entries, sizeof(n_entries));
-     if (in.fail() || n_entries < 1) {
-         printf("%s: no data in file %s\n", __func__, imatrix_file.c_str());
-         exit(1);
-     }
-     for (int i = 0; i < n_entries; ++i) {
-         int len; in.read((char *)&len, sizeof(len));
-         std::vector<char> name_as_vec(len+1);
-         in.read((char *)name_as_vec.data(), len);
-         if (in.fail()) {
-             printf("%s: failed reading name for entry %d from %s\n", __func__, i+1, imatrix_file.c_str());
-             exit(1);
-         }
-         name_as_vec[len] = 0;
-         std::string name{name_as_vec.data()};
-         auto & e = imatrix_data[name];
-         int ncall;
-         in.read((char *)&ncall, sizeof(ncall));
-         int nval;
-         in.read((char *)&nval, sizeof(nval));
-         if (in.fail() || nval < 1) {
-             printf("%s: failed reading number of values for entry %d\n", __func__, i);
-             imatrix_data = {};
-             exit(1);
-         }
-         e.resize(nval);
-         in.read((char *)e.data(), nval*sizeof(float));
-         if (in.fail()) {
-             printf("%s: failed reading data for entry %d\n", __func__, i);
-             imatrix_data = {};
-             exit(1);
-         }
-         if (ncall > 0) {
-             for (auto& v : e) v /= ncall;
-         }
-
-         if (getenv("LLAMA_TRACE")) {
-             printf("%s: loaded data (size = %6d, ncall = %6d) for '%s'\n", __func__, int(e.size()), ncall, name.c_str());
-         }
-     }
-
-     // latest imatrix version contains the dataset filename at the end of the file
-     int m_last_call = 0;
-     if (in.peek() != EOF) {
-         in.read((char *)&m_last_call, sizeof(m_last_call));
-         int dataset_len;
-         in.read((char *)&dataset_len, sizeof(dataset_len));
-         std::vector<char> dataset_as_vec(dataset_len);
-         in.read(dataset_as_vec.data(), dataset_len);
-         imatrix_dataset.assign(dataset_as_vec.begin(), dataset_as_vec.end());
-         printf("%s: imatrix dataset='%s'\n", __func__, imatrix_dataset.c_str());
-     }
-     printf("%s: loaded %d importance matrix entries from %s computed on %d chunks\n", __func__, int(imatrix_data.size()), imatrix_file.c_str(), m_last_call);
-     return m_last_call;
- }
-
- static int prepare_imatrix(const std::string & imatrix_file,
-         std::string & imatrix_dataset,
-         const std::vector<std::string> & included_weights,
-         const std::vector<std::string> & excluded_weights,
-         std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
-     int m_last_call = -1;
-     if (!imatrix_file.empty()) {
-         m_last_call = load_imatrix(imatrix_file, imatrix_dataset, imatrix_data);
-     }
-     if (imatrix_data.empty()) {
-         return m_last_call;
-     }
-     if (!excluded_weights.empty()) {
-         for (auto& name : excluded_weights) {
-             for (auto it = imatrix_data.begin(); it != imatrix_data.end(); ) {
-                 auto pos = it->first.find(name);
-                 if (pos != std::string::npos) it = imatrix_data.erase(it);
-                 else ++it;
-             }
-         }
-     }
-     if (!included_weights.empty()) {
-         std::unordered_map<std::string, std::vector<float>> tmp;
-         for (auto& name : included_weights) {
-             for (auto& e : imatrix_data) {
-                 auto pos = e.first.find(name);
-                 if (pos != std::string::npos) {
-                     tmp.emplace(std::move(e));
-                 }
-             }
-         }
-         imatrix_data = std::move(tmp);
-     }
-     if (!imatrix_data.empty()) {
-         printf("%s: have %d importance matrix entries\n", __func__, int(imatrix_data.size()));
-     }
-     return m_last_call;
- }
-
- static ggml_type parse_ggml_type(const char * arg) {
-     for (int i = 0; i < GGML_TYPE_COUNT; ++i) {
-         auto type = (ggml_type)i;
-         const auto * name = ggml_type_name(type);
-         if (name && striequals(name, arg)) {
-             return type;
-         }
-     }
-     fprintf(stderr, "\n%s: invalid ggml_type '%s'\n\n", __func__, arg);
-     return GGML_TYPE_COUNT;
- }
-
- static bool parse_tensor_type(const char * data, std::vector<tensor_quantization> & tensor_type) {
-     const char * sep = strchr(data, '=');
-     if (sep == nullptr) {
-         printf("\n%s: malformed tensor type '%s'\n\n", __func__, data);
-         return false;
-     }
-
-     const size_t tn_len = sep - data;
-     if (tn_len == 0) {
-         printf("\n%s: missing tensor name\n\n", __func__);
-         return false;
-     }
-     if (const size_t qt_len = strlen(sep); qt_len == 1) {
-         printf("\n%s: missing quantization type\n\n", __func__);
-         return false;
-     }
-
-     std::string tn(data, tn_len);
-     std::transform(tn.begin(), tn.end(), tn.begin(), tolower);
-     sep++;
-     tensor_quantization tqz;
-     tqz.name = tn;
-     tqz.quant = parse_ggml_type(sep);
-     tensor_type.emplace_back(std::move(tqz));
-     if (tqz.quant == GGML_TYPE_COUNT) {
-         printf("\n%s: invalid quantization type '%s'\n\n", __func__, sep);
-         return false;
-     }
-
-     return true;
- }
-
- int main(int argc, char ** argv) {
-     if (argc < 3) {
-         usage(argv[0]);
-     }
-
-     llama_model_quantize_params params = llama_model_quantize_default_params();
-
-     int arg_idx = 1;
-     std::string imatrix_file;
-     std::vector<std::string> included_weights, excluded_weights;
-     std::vector<llama_model_kv_override> kv_overrides;
-     std::vector<tensor_quantization> tensor_types;
-
-     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
-         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
-             params.quantize_output_tensor = false;
-         } else if (strcmp(argv[arg_idx], "--output-tensor-type") == 0) {
-             if (arg_idx < argc-1) {
-                 params.output_tensor_type = parse_ggml_type(argv[++arg_idx]);
-                 if (params.output_tensor_type == GGML_TYPE_COUNT) {
-                     usage(argv[0]);
-                 }
-             } else {
-                 usage(argv[0]);
-             }
-         } else if (strcmp(argv[arg_idx], "--token-embedding-type") == 0) {
-             if (arg_idx < argc-1) {
-                 params.token_embedding_type = parse_ggml_type(argv[++arg_idx]);
-                 if (params.token_embedding_type == GGML_TYPE_COUNT) {
-                     usage(argv[0]);
-                 }
-             } else {
-                 usage(argv[0]);
-             }
-         } else if (strcmp(argv[arg_idx], "--tensor-type") == 0) {
-             if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) {
-                 usage(argv[0]);
-             }
-         } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
-             if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
-                 usage(argv[0]);
-             }
-         } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {
-             params.allow_requantize = true;
-         } else if (strcmp(argv[arg_idx], "--pure") == 0) {
-             params.pure = true;
-         } else if (strcmp(argv[arg_idx], "--imatrix") == 0) {
-             if (arg_idx < argc-1) {
-                 imatrix_file = argv[++arg_idx];
-             } else {
-                 usage(argv[0]);
-             }
-         } else if (strcmp(argv[arg_idx], "--include-weights") == 0) {
-             if (arg_idx < argc-1) {
-                 included_weights.emplace_back(argv[++arg_idx]);
-             } else {
-                 usage(argv[0]);
-             }
-         } else if (strcmp(argv[arg_idx], "--exclude-weights") == 0) {
-             if (arg_idx < argc-1) {
-                 excluded_weights.emplace_back(argv[++arg_idx]);
-             } else {
-                 usage(argv[0]);
-             }
-         } else if (strcmp(argv[arg_idx], "--keep-split") == 0) {
-             params.keep_split = true;
-         } else {
-             usage(argv[0]);
-         }
-     }
-
-     if (argc - arg_idx < 2) {
-         printf("%s: bad arguments\n", argv[0]);
-         usage(argv[0]);
-     }
-     if (!included_weights.empty() && !excluded_weights.empty()) {
-         usage(argv[0]);
-     }
-
-     std::string imatrix_dataset;
-     std::unordered_map<std::string, std::vector<float>> imatrix_data;
-     int m_last_call = prepare_imatrix(imatrix_file, imatrix_dataset, included_weights, excluded_weights, imatrix_data);
-     if (!imatrix_data.empty()) {
-         params.imatrix = &imatrix_data;
-         {
-             llama_model_kv_override kvo;
-             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_FILE);
-             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
-             strncpy(kvo.val_str, imatrix_file.c_str(), 127);
-             kvo.val_str[127] = '\0';
-             kv_overrides.emplace_back(std::move(kvo));
-         }
-         if (!imatrix_dataset.empty()) {
-             llama_model_kv_override kvo;
-             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_DATASET);
-             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
-             strncpy(kvo.val_str, imatrix_dataset.c_str(), 127);
-             kvo.val_str[127] = '\0';
-             kv_overrides.emplace_back(std::move(kvo));
-         }
-
-         {
-             llama_model_kv_override kvo;
-             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES);
-             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
-             kvo.val_i64 = imatrix_data.size();
-             kv_overrides.emplace_back(std::move(kvo));
-         }
-
-         if (m_last_call > 0) {
-             llama_model_kv_override kvo;
-             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS);
-             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
-             kvo.val_i64 = m_last_call;
-             kv_overrides.emplace_back(std::move(kvo));
-         }
-     }
-     if (!kv_overrides.empty()) {
-         kv_overrides.emplace_back();
-         kv_overrides.back().key[0] = 0;
-         params.kv_overrides = &kv_overrides;
-     }
-     if (!tensor_types.empty()) {
-         params.tensor_types = &tensor_types;
-     }
-
-     llama_backend_init();
-
-     // parse command line arguments
-     const std::string fname_inp = argv[arg_idx];
-     arg_idx++;
-     std::string fname_out;
-
-     std::string ftype_str;
-     std::string suffix = ".gguf";
-     if (try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
-         std::string fpath;
-         const size_t pos = fname_inp.find_last_of("/\\");
-         if (pos != std::string::npos) {
-             fpath = fname_inp.substr(0, pos + 1);
-         }
-
-         // export as [inp path]/ggml-model-[ftype]. Only add extension if there is no splitting
-         fname_out = fpath + "ggml-model-" + ftype_str;
-         if (!params.keep_split) {
-             fname_out += suffix;
-         }
-         arg_idx++;
-         if (ftype_str == "COPY") {
-             params.only_copy = true;
-         }
-     } else {
-         fname_out = argv[arg_idx];
-         if (params.keep_split && fname_out.find(suffix) != std::string::npos) {
-             fname_out = fname_out.substr(0, fname_out.length() - suffix.length());
-         }
-         arg_idx++;
-
-         if (argc <= arg_idx) {
-             fprintf(stderr, "%s: missing ftype\n", __func__);
-             return 1;
-         }
-         if (!try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
-             fprintf(stderr, "%s: invalid ftype '%s'\n", __func__, argv[3]);
-             return 1;
-         }
-         if (ftype_str == "COPY") {
-             params.only_copy = true;
-         }
-         arg_idx++;
-     }
-
-     // parse nthreads
-     if (argc > arg_idx) {
-         try {
-             params.nthread = std::stoi(argv[arg_idx]);
-         }
-         catch (const std::exception & e) {
-             fprintf(stderr, "%s: invalid nthread '%s' (%s)\n", __func__, argv[arg_idx], e.what());
-             return 1;
-         }
-     }
-
-     if ((params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS ||
-          params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S ||
-          params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
-          params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
-          params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && imatrix_data.empty()) {
-         fprintf(stderr, "\n==========================================================================================================\n");
-         fprintf(stderr, "Please do not use IQ1_S, IQ1_M, IQ2_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n");
-         fprintf(stderr, "==========================================================================================================\n\n\n");
-         return 1;
-     }
-
-     print_build_info();
-
-     fprintf(stderr, "%s: quantizing '%s' to '%s' as %s", __func__, fname_inp.c_str(), fname_out.c_str(), ftype_str.c_str());
-     if (params.nthread > 0) {
-         fprintf(stderr, " using %d threads", params.nthread);
-     }
-     fprintf(stderr, "\n");
-
-     const int64_t t_main_start_us = llama_time_us();
-
-     int64_t t_quantize_us = 0;
-
-     // load the model
-     {
-         const int64_t t_start_us = llama_time_us();
-
-         if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), &params)) {
-             fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
-             return 1;
-         }
-
-         t_quantize_us = llama_time_us() - t_start_us;
-     }
-
-     // report timing
-     {
-         const int64_t t_main_end_us = llama_time_us();
-
-         printf("\n");
-         printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0);
-         printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
-     }
-
-     llama_backend_free();
-
-     return 0;
- }
package/src/llama.cpp/tools/rpc/CMakeLists.txt
@@ -1,4 +0,0 @@
- set(TARGET rpc-server)
- add_executable(${TARGET} rpc-server.cpp)
- target_link_libraries(${TARGET} PRIVATE ggml)
- target_compile_features(${TARGET} PRIVATE cxx_std_17)