@fugood/llama.node 0.6.3 → 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (377)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -7
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  27. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  28. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  29. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  30. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  31. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  32. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  33. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  34. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  35. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  36. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  37. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  38. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  39. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  40. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  41. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  42. package/src/llama.cpp/cmake/common.cmake +0 -35
  43. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  44. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  45. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  46. package/src/llama.cpp/docs/build.md +0 -561
  47. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  48. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  49. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  50. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  51. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  53. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  54. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  55. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  56. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  57. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  58. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  59. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  60. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  61. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  62. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  63. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  64. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  68. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  70. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  71. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  72. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  73. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  74. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  75. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  76. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  77. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  79. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  80. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  81. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  82. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  83. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  84. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  85. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  86. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  87. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  88. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  89. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  90. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  91. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  92. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  93. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  94. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  95. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  96. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  97. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  98. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  99. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  100. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  101. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  102. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  103. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  104. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  105. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  106. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  107. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  108. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  109. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  110. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  111. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  112. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  113. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  114. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  115. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  116. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  117. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  118. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  119. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  120. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  121. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  122. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  123. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  125. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  127. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  128. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  129. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  130. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  131. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  132. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  135. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  136. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  137. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  138. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  139. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  140. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  141. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  142. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  143. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  144. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  145. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  146. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  147. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  148. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  149. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  150. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  151. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  153. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  155. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  157. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  159. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  161. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  163. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  167. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  169. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  170. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  172. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  173. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  175. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  177. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  181. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  183. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  185. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  186. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  187. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  189. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  191. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  193. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  195. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  196. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  198. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  199. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  200. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  204. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  205. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  206. package/src/llama.cpp/models/.editorconfig +0 -1
  207. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  208. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  212. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  214. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  217. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  225. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  228. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  231. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  233. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  234. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  237. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  240. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  242. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  245. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  248. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  250. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  253. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  256. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  258. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  261. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  262. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  263. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  264. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  265. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  266. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  267. package/src/llama.cpp/prompts/assistant.txt +0 -31
  268. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  269. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  270. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  271. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  273. package/src/llama.cpp/prompts/chat.txt +0 -28
  274. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  275. package/src/llama.cpp/prompts/dan.txt +0 -1
  276. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  277. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  278. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  279. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  280. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  281. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  284. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  285. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  286. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  287. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  289. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  290. package/src/llama.cpp/requirements.txt +0 -13
  291. package/src/llama.cpp/scripts/build-info.sh +0 -30
  292. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  293. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  294. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  295. package/src/llama.cpp/tests/get-model.cpp +0 -21
  296. package/src/llama.cpp/tests/get-model.h +0 -2
  297. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  298. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  299. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  300. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  301. package/src/llama.cpp/tests/test-c.c +0 -7
  302. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  303. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  304. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  305. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  306. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  307. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  308. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  309. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  310. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  311. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  312. package/src/llama.cpp/tests/test-log.cpp +0 -39
  313. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  314. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  315. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  316. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  317. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  318. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  319. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  320. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  321. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  322. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  323. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  324. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  325. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  326. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  327. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  328. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  329. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  330. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  331. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  332. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  333. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  334. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  335. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  336. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  337. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  338. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  339. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  340. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  341. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  342. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  343. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  344. package/src/llama.cpp/tools/main/main.cpp +0 -977
  345. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  346. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  347. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  348. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  349. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  350. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  351. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  353. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  354. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  355. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  356. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  357. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  358. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  360. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  361. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  362. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  363. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  364. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  365. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  367. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  368. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  369. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  370. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  371. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  372. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  373. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  374. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  375. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  376. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  377. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/tests/test-quantize-stats.cpp
@@ -1,424 +0,0 @@
-#include "ggml.h"
-#include "ggml-cpu.h"
-#include "llama.h"
-#include "common.h"
-
-#include "../src/llama-model.h"
-
-#include <algorithm>
-#include <cassert>
-#include <cinttypes>
-#include <cmath>
-#include <cstdio>
-#include <cstring>
-#include <numeric>
-#include <regex>
-#include <string>
-#include <vector>
-#include <thread>
-#include <mutex>
-
-#if defined(_MSC_VER)
-#pragma warning(disable: 4244 4267) // possible loss of data
-#endif
-
-struct quantize_stats_params {
-    std::string model = DEFAULT_MODEL_PATH;
-    bool verbose = false;
-    bool per_layer_stats = false;
-    bool print_histogram = false;
-    bool reference = false;
-    std::vector<std::string> include_layers;
-    std::vector<std::string> exclude_layers;
-    std::vector<enum ggml_type> include_types;
-};
-
-constexpr size_t HISTOGRAM_BUCKETS = 150;
-constexpr double HISTOGRAM_RANGE = 0.03;
-
-struct error_stats {
-    size_t num_samples;
-    double total_error;
-    double max_error;
-    uint64_t error_histogram[HISTOGRAM_BUCKETS];
-};
-
-static void quantize_stats_print_usage(int /*argc*/, char ** argv) {
-    quantize_stats_params params;
-    fprintf(stderr, "usage: %s [options]\n", argv[0]);
-    fprintf(stderr, "\n");
-    fprintf(stderr, "options:\n");
-    fprintf(stderr, "  -h, --help            show this help message and exit\n");
-    fprintf(stderr, "  -m FNAME, --model FNAME\n");
-    fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
-    fprintf(stderr, "  -r, --reference\n");
-    fprintf(stderr, "                        use reference implementation (default: false)\n");
-    fprintf(stderr, "  -v, --verbose\n");
-    fprintf(stderr, "                        verbose output (default: false)\n");
-    fprintf(stderr, "  -p, --per-layer-stats\n");
-    fprintf(stderr, "                        print stats per layer (default: false)\n");
-    fprintf(stderr, "  --histogram\n");
-    fprintf(stderr, "                        print error histogram (default: false)\n");
-    fprintf(stderr, "  -l LAYER, --include-layer LAYER\n");
-    fprintf(stderr, "                        only test layers matching pattern\n");
-    fprintf(stderr, "  -L LAYER, --exclude-layer LAYER\n");
-    fprintf(stderr, "                        exclude layers matching pattern\n");
-    fprintf(stderr, "  -t TYPE, --type TYPE\n");
-    fprintf(stderr, "                        only test given type (q4_0, q4_1)\n");
-    fprintf(stderr, "\n");
-}
-
-// Check if a layer is included/excluded by command line
-static bool layer_included(const quantize_stats_params & params, const std::string & layer) {
-    for (const auto& excluded : params.exclude_layers) {
-        if (std::regex_search(layer, std::regex(excluded))) {
-            return false;
-        }
-    }
-    for (const auto& included : params.include_layers) {
-        if (std::regex_search(layer, std::regex(included))) {
-            return true;
-        }
-    }
-    return params.include_layers.empty();
-}
-
-// Update error statistics given vectors with the before/after result of quantization
-static void update_error_stats(int64_t nelements, const float * input, const float * output, error_stats & stats) {
-    for (int64_t i = 0; i < nelements; i++) {
-        double diff = input[i] - output[i];
-        stats.total_error += diff * diff;
-        stats.max_error = fmax(fabs(diff), stats.max_error);
-        stats.error_histogram[std::max(std::min((size_t) floor(fabs(diff) / HISTOGRAM_RANGE * HISTOGRAM_BUCKETS), HISTOGRAM_BUCKETS-1), (size_t) 0)]++;
-    }
-    stats.num_samples += nelements;
-}
-
-static void combine_error_stats(error_stats & into, const error_stats & from) {
-    into.num_samples += from.num_samples;
-    into.total_error += from.total_error;
-    if (from.max_error > into.max_error) into.max_error = from.max_error;
-    for (size_t i=0; i<HISTOGRAM_BUCKETS; ++i) into.error_histogram[i] += from.error_histogram[i];
-}
-
-static double find_quantile(const error_stats & stats, double quantile) {
-    double sum = std::accumulate(std::begin(stats.error_histogram), std::end(stats.error_histogram), 0.0);
-
-    double accum = 0;
-    for (size_t i = 0; i < HISTOGRAM_BUCKETS; i++) {
-        accum += stats.error_histogram[i];
-        if (accum >= sum*quantile) {
-            return (i+1) * HISTOGRAM_RANGE / HISTOGRAM_BUCKETS;
-        }
-    }
-    return INFINITY;
-}
-
-static void print_error_stats(const std::string & name, const error_stats & stats, bool print_histogram) {
-    double rmse = sqrt(stats.total_error / (double) stats.num_samples);
-    double median = find_quantile(stats, .5);
-    double pct95 = find_quantile(stats, .95);
-    printf("%-50s: rmse %.8f, maxerr %.8f, 95pct<%.4f, median<%.4f\n", name.c_str(), rmse, stats.max_error, pct95, median);
-    if (print_histogram) {
-        printf("Error distribution:\n");
-        for (size_t i = 0; i < HISTOGRAM_BUCKETS; i++) {
-            double lower = i * HISTOGRAM_RANGE / HISTOGRAM_BUCKETS;
-            double upper = (i+1) * HISTOGRAM_RANGE / HISTOGRAM_BUCKETS;
-            if (i == HISTOGRAM_BUCKETS -1) upper = INFINITY;
-            printf("[%3.4f, %3.4f): %11" PRIu64 "\n", lower, upper, stats.error_histogram[i]);
-        }
-    }
-}
-
-// copied from ggml.h - verify that we can access this as a flat array
-static bool tensor_is_contiguous(const struct ggml_tensor * tensor) {
-    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
-
-    return
-        tensor->nb[0] == ggml_type_size(tensor->type) &&
-        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
-        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
-        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
-}
-
-static void test_roundtrip_on_chunk(
-    const ggml_tensor * layer, int64_t offset, int64_t chunk_size, const ggml_type_traits & qfns, const ggml_type_traits_cpu & qfns_cpu, bool use_reference,
-    float * input_scratch, char * quantized_scratch, float * output_scratch, error_stats & stats
-) {
-    if (layer->type == GGML_TYPE_F16) {
-        for (int i = 0; i < chunk_size; i++) {
-            input_scratch[i] = ggml_get_f32_1d(layer, i + offset);
-        }
-    } else {
-        input_scratch = ggml_get_data_f32(layer) + offset;
-    }
-
-    if (use_reference) {
-        qfns.from_float_ref(input_scratch, quantized_scratch, chunk_size);
-    } else {
-        qfns_cpu.from_float(input_scratch, quantized_scratch, chunk_size);
-    }
-    qfns.to_float(quantized_scratch, output_scratch, chunk_size);
-
-    update_error_stats(chunk_size, input_scratch, output_scratch, stats);
-}
-
-
-// Run quantization function for a single layer and update error stats
-static void test_roundtrip_on_layer(
-    std::string & name, bool print_layer_stats, const ggml_type_traits & qfns, const ggml_type_traits_cpu & qfns_cpu, bool use_reference,
-    const ggml_tensor * layer, std::vector<float> & input_scratch, std::vector<char> & quantized_scratch,
-    std::vector<float> & output_scratch, error_stats & total_error, int max_thread = 0
-) {
-    assert(tensor_is_contiguous(layer));
-    error_stats layer_error {};
-    uint64_t nelements = ggml_nelements(layer);
-
-    float* input_scratch_ptr = nullptr;
-    if (layer->type == GGML_TYPE_F16) {
-        if (input_scratch.size() < nelements) input_scratch.resize(nelements);
-        input_scratch_ptr = input_scratch.data();
-    }
-    if (quantized_scratch.size() < 4*nelements) quantized_scratch.resize(4*nelements);
-    if (output_scratch.size() < nelements) output_scratch.resize(nelements);
-
-    if (max_thread < 1) max_thread = std::thread::hardware_concurrency();
-    int chunk_size = 32*512;
-    int num_chunks = (nelements + chunk_size - 1)/chunk_size;
-
-    if (num_chunks < 2 || max_thread < 2) {
-        test_roundtrip_on_chunk(layer, 0, nelements, qfns, qfns_cpu, use_reference, input_scratch_ptr, quantized_scratch.data(),
-                output_scratch.data(), print_layer_stats ? layer_error : total_error);
-    } else {
-        auto & stats = print_layer_stats ? layer_error : total_error;
-        std::mutex mutex;
-        uint64_t counter = 0;
-        auto compute = [&mutex, &counter, &stats, &qfns, &qfns_cpu, nelements, layer, use_reference, input_scratch_ptr,
-                &quantized_scratch, &output_scratch, chunk_size] () {
-            error_stats local_stats {};
-            while (true) {
-                std::unique_lock<std::mutex> lock(mutex);
-                uint64_t offset = counter; counter += chunk_size;
-                if (offset >= nelements) {
-                    combine_error_stats(stats, local_stats);
-                    break;
-                }
-                lock.unlock();
-                uint64_t chunk = offset + chunk_size < nelements ? chunk_size : nelements - offset;
-                test_roundtrip_on_chunk(layer, offset, chunk, qfns, qfns_cpu, use_reference, input_scratch_ptr + offset,
-                        quantized_scratch.data() + 4*offset, output_scratch.data() + offset, local_stats);
-            }
-        };
-        int nthread = std::min(num_chunks, max_thread);
-        std::vector<std::thread> workers(nthread-1);
-        for (auto& w : workers) w = std::thread(compute);
-        compute();
-        for (auto& w : workers) w.join();
-    }
-
-    if (print_layer_stats) {
-        print_error_stats(name, layer_error, false);
-        combine_error_stats(total_error, layer_error);
-    }
-}
-
-int main(int argc, char ** argv) {
-    ggml_time_init();
-
-    quantize_stats_params params;
-
-    // read command line
-
-    int max_thread = 0;
-    bool invalid_param = false;
-    std::string arg;
-    for (int i = 1; i < argc; i++) {
-        arg = argv[i];
-
-        if (arg == "-h" || arg == "--help") {
-            quantize_stats_print_usage(argc, argv);
-            exit(0);
-        } else if (arg == "-r" || arg == "--reference") {
-            params.reference = true;
-        } else if (arg == "-v") {
-            params.verbose = true;
-        } else if (arg == "-p" || arg == "--per-layer-stats") {
-            params.per_layer_stats = true;
-        } else if (arg == "--histogram") {
-            params.print_histogram = true;
-        } else if (arg == "-m" || arg == "--model") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.model = argv[i];
-        } else if (arg == "-l" || arg == "--include-layer") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.include_layers.emplace_back(argv[i]);
-        } else if (arg == "-L" || arg == "--exclude-layer") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.exclude_layers.emplace_back(argv[i]);
-        } else if (arg == "-t" || arg == "--type") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            int j;
-            for (j = 0; j < GGML_TYPE_COUNT; ++j) {
-                const auto * name = ggml_type_name((ggml_type) j);
-                if (name && strcmp(argv[i], name) == 0) break;
-            }
-            if (j < GGML_TYPE_COUNT) {
-                params.include_types.push_back((ggml_type) j);
-            } else {
-                fprintf(stderr, "error: %s not in list of types\n", argv[i]);
-                invalid_param = true;
-            }
-        } else if (arg == "-n" || arg == "--num-threads") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            max_thread = atoi(argv[i]);
-        } else {
-            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
-            quantize_stats_print_usage(argc, argv);
-            return 1;
-        }
-    }
-    if (invalid_param) {
-        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
-        quantize_stats_print_usage(argc, argv);
-        return 1;
-    }
-
-    print_build_info();
-
-    // load the model
-    fprintf(stderr, "Loading model\n");
-
-    const int64_t t_main_start_us = ggml_time_us();
-    llama_model * model;
-    llama_context * ctx;
-
-    {
-        auto mparams = llama_model_default_params();
-        mparams.use_mlock = false;
-
-        model = llama_model_load_from_file(params.model.c_str(), mparams);
-
-        if (model == NULL) {
-            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
-            return 1;
-        }
-
-        auto cparams = llama_context_default_params();
-        cparams.n_ctx = 256;
-
-        ctx = llama_init_from_model(model, cparams);
-
-        if (ctx == NULL) {
-            fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str());
-            llama_model_free(model);
-            return 1;
-        }
-    }
-
-    const auto & tensors = llama_internal_get_tensor_map(model);
-
-    // check layer tensors
-    int included_layers = 0;
-    int64_t max_nelements = 0;
-    bool is_f16 = false;
-    for (const auto & kv_tensor : tensors) {
-        if (!layer_included(params, kv_tensor.first)) {
-            continue;
-        }
-        if (params.verbose) {
-            printf("%s: type %s, size %" PRId64 "\n", kv_tensor.first.c_str(), ggml_type_name(kv_tensor.second->type), ggml_nelements(kv_tensor.second));
-        }
-        if (kv_tensor.second->type == GGML_TYPE_F16) {
-            is_f16 = true;
-        } else if (kv_tensor.second->type != GGML_TYPE_F32) {
-            fprintf(stderr, "%s: error: Quantization should be tested with a float model, "
-                "this model contains already quantized layers (%s is type %d)\n", __func__, kv_tensor.first.c_str(), kv_tensor.second->type);
-            llama_free(ctx);
-            llama_model_free(model);
-            return 1;
-        }
-        included_layers++;
-        max_nelements = std::max(max_nelements, ggml_nelements(kv_tensor.second));
-    }
-
-    if (is_f16) {
-        printf("note: source model is f16\n");
-    }
-    printf("testing %d layers with max size %" PRId64 "\n", included_layers, max_nelements);
-    // allocate scratch space
-    std::vector<float> input_scratch;
-    std::vector<char> quantized_scratch;
-    std::vector<float> output_scratch;
-
-    // loop throught quantization types
-    for (int i = 0; i < GGML_TYPE_COUNT; i++) {
-        const ggml_type type = (ggml_type) i;
-        if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) {
-            continue;
-        }
-        const auto * qfns = ggml_get_type_traits(type);
-        const auto * qfns_cpu = ggml_get_type_traits_cpu(type);
-        if (qfns_cpu->from_float && qfns->to_float) {
-            if (params.verbose) {
-                printf("testing %s ...\n", ggml_type_name(type));
-            }
-
-            ggml_quantize_init(type);
-
-            error_stats global_stats {};
-
-            for (const auto & kv_tensor : tensors) {
-                if (!layer_included(params, kv_tensor.first)) {
-                    continue;
-                }
-                if (params.verbose) {
-                    printf("  %s ...\n", kv_tensor.first.c_str());
-                }
-                std::string layer_name { ggml_type_name(type) };
-                layer_name += "::" + kv_tensor.first;
-                test_roundtrip_on_layer(
-                        layer_name,
-                        params.per_layer_stats,
-                        *qfns, *qfns_cpu,
-                        params.reference,
-                        kv_tensor.second,
-                        input_scratch,
-                        quantized_scratch,
-                        output_scratch,
-                        global_stats,
-                        max_thread
-                );
-            }
-
-            print_error_stats(ggml_type_name(type), global_stats, params.print_histogram);
-        }
-    }
-
-
-    llama_free(ctx);
-    llama_model_free(model);
-    // report timing
-    {
-        const int64_t t_main_end_us = ggml_time_us();
-
-        printf("\n");
-        printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
-    }
-
-    return 0;
-}