@fugood/llama.node 0.6.2 → 1.0.0-beta.1

This diff represents the content of publicly available package versions as released to their public registries, and is provided for informational purposes only.
Files changed (378)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -10
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/patches/node-api-headers+1.1.0.patch +0 -26
  27. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  28. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  29. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  30. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  31. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  32. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  33. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  34. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  35. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  36. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  37. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  38. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  39. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  40. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  41. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  42. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  43. package/src/llama.cpp/cmake/common.cmake +0 -35
  44. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  45. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  46. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  47. package/src/llama.cpp/docs/build.md +0 -561
  48. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  49. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  50. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  51. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  53. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  54. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  55. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  56. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  57. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  58. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  59. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  60. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  61. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  62. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  63. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  64. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  68. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  70. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  71. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  72. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  73. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  74. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  75. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  76. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  77. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  79. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  80. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  81. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  82. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  83. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  84. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  85. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  86. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  87. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  88. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  89. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  90. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  91. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  92. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  93. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  94. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  95. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  96. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  97. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  98. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  99. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  100. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  101. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  102. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  103. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  104. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  105. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  106. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  107. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  108. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  109. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  110. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  111. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  112. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  113. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  114. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  115. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  116. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  117. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  118. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  119. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  120. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  121. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  122. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  123. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  125. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  127. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  128. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  129. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  130. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  131. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  132. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  135. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  136. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  137. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  138. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  139. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  140. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  141. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  142. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  143. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  144. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  145. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  146. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  147. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  148. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  149. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  150. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  151. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  153. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  155. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  157. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  159. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  161. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  163. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  167. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  169. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  170. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  172. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  173. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  175. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  177. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  181. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  183. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  185. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  186. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  187. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  189. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  191. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  193. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  195. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  196. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  198. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  199. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  200. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  204. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  205. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  206. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  207. package/src/llama.cpp/models/.editorconfig +0 -1
  208. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  212. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  214. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  217. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  225. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  228. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  231. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  233. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  234. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  237. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  240. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  242. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  245. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  248. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  250. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  253. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  256. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  258. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  261. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  262. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  263. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  264. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  265. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  266. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  267. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  268. package/src/llama.cpp/prompts/assistant.txt +0 -31
  269. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  270. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  271. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  273. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  274. package/src/llama.cpp/prompts/chat.txt +0 -28
  275. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  276. package/src/llama.cpp/prompts/dan.txt +0 -1
  277. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  278. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  279. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  280. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  281. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  284. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  285. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  286. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  287. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  289. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  290. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  291. package/src/llama.cpp/requirements.txt +0 -13
  292. package/src/llama.cpp/scripts/build-info.sh +0 -30
  293. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  294. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  295. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  296. package/src/llama.cpp/tests/get-model.cpp +0 -21
  297. package/src/llama.cpp/tests/get-model.h +0 -2
  298. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  299. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  300. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  301. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  302. package/src/llama.cpp/tests/test-c.c +0 -7
  303. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  304. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  305. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  306. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  307. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  308. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  309. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  310. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  311. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  312. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  313. package/src/llama.cpp/tests/test-log.cpp +0 -39
  314. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  315. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  316. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  317. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  318. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  319. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  320. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  321. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  322. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  323. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  324. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  325. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  326. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  327. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  328. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  329. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  330. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  331. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  332. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  333. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  334. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  335. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  336. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  337. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  338. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  339. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  340. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  341. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  342. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  343. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  344. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  345. package/src/llama.cpp/tools/main/main.cpp +0 -977
  346. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  347. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  348. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  349. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  350. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  351. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  353. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  354. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  355. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  356. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  357. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  358. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  360. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  361. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  362. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  363. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  364. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  365. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  367. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  368. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  369. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  370. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  371. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  372. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  373. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  374. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  375. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  376. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  377. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  378. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/tools/export-lora/export-lora.cpp
@@ -1,434 +0,0 @@
- #include "ggml.h"
- #include "ggml-alloc.h"
- #include "gguf.h"
-
- #include "arg.h"
- #include "common.h"
-
- #include <map>
- #include <vector>
- #include <string>
- #include <fstream>
-
- static bool g_verbose = false;
-
- struct tensor_transformation {
-     struct ggml_tensor * in;
-     struct ggml_tensor * out;
-     bool is_copy;
- };
-
- static std::string get_kv_str(struct gguf_context * ctx_gguf, const std::string & key){
-     int id = gguf_find_key(ctx_gguf, key.c_str());
-     return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf, id));
- }
-
- static float get_kv_f32(struct gguf_context * ctx_gguf, const std::string & key) {
-     int id = gguf_find_key(ctx_gguf, key.c_str());
-     return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf, id);
- }
-
- static void zeros(std::ofstream & file, size_t n) {
-     char zero = 0;
-     for (size_t i = 0; i < n; ++i) {
-         file.write(&zero, 1);
-     }
- }
-
- static std::string ggml_ne_string(const ggml_tensor * t) {
-     std::string str;
-     for (int i = 0; i < GGML_MAX_DIMS; ++i) {
-         str += std::to_string(t->ne[i]);
-         if (i + 1 < GGML_MAX_DIMS) {
-             str += ", ";
-         }
-     }
-     return str;
- }
-
- static struct gguf_context * load_gguf(std::string & fname, struct ggml_context ** ctx_ggml) {
-     struct gguf_init_params params = {
-         /*.no_alloc = */ true,
-         /*.ctx = */ ctx_ggml,
-     };
-     struct gguf_context * ctx_gguf = gguf_init_from_file(fname.c_str(), params);
-     if (!ctx_gguf) {
-         throw std::runtime_error("failed to load input GGUF from " + fname);
-     }
-     return ctx_gguf;
- }
-
- struct file_input {
-     struct ggml_context * ctx_meta = nullptr;
-     struct gguf_context * ctx_gguf = nullptr;
-     std::ifstream f_in;
-     std::map<std::string, ggml_tensor *> tensors;
-     float alpha;
-     float scale;
-
-     file_input(std::string & fname, float scale): f_in(fname, std::ios::binary), scale(scale) {
-         if (!f_in.is_open()) {
-             throw std::runtime_error("failed to open input gguf from " + fname);
-         }
-
-         ctx_gguf = load_gguf(fname, &ctx_meta);
-         alpha = get_kv_f32(ctx_gguf, "adapter.lora.alpha");
-         printf("%s: loaded gguf from %s\n", __func__, fname.c_str());
-
-         for (ggml_tensor * cur = ggml_get_first_tensor(ctx_meta); cur; cur = ggml_get_next_tensor(ctx_meta, cur)) {
-             std::string name(cur->name);
-             tensors[name] = cur;
-             if (g_verbose) {
-                 printf("%s: %s\n", __func__, cur->name);
-             }
-         }
-     }
-
-     ggml_tensor * get_tensor(std::string name) {
-         if (tensors.find(name) == tensors.end()) {
-             return nullptr;
-         }
-         return tensors[name];
-     }
-
-     void read_tensor_data(std::string name, std::vector<uint8_t> & buf) {
-         if (tensors.find(name) == tensors.end()) {
-             throw std::runtime_error("cannot find tensor with name: " + name);
-         }
-         auto len = ggml_nbytes(tensors[name]);
-         if (buf.size() < len) {
-             buf.resize(len);
-         }
-         auto i_tensor_in = gguf_find_tensor(ctx_gguf, name.c_str()); // idx of tensor in the input file
-         auto offset = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i_tensor_in);
-         f_in.seekg(offset);
-         f_in.read((char *)buf.data(), len);
-     }
-
-     ~file_input() {
-         gguf_free(ctx_gguf);
-         ggml_free(ctx_meta);
-     }
- };
-
- struct lora_merge_ctx {
-     // input base model + adapters
-     file_input base_model;
-     std::vector<std::unique_ptr<file_input>> adapters;
-
-     // for computing merged tensor
-     int n_threads;
-     ggml_backend_t backend = nullptr;
-     ggml_gallocr_t allocr = nullptr;
-     std::vector<uint8_t> read_buf;
-
-     // output file
-     struct gguf_context * ctx_out;
-     struct ggml_context * ctx_out_ggml;
-     std::ofstream fout;
-
-     lora_merge_ctx(
-             std::string & base_fname,
-             std::vector<common_adapter_lora_info> & lora_files,
-             std::string & outfile,
-             int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
-         fout.exceptions(std::ofstream::failbit); // fail fast on write errors
-
-         if (gguf_find_key(base_model.ctx_gguf, LLM_KV_SPLIT_COUNT) >= 0) {
-             throw std::runtime_error("split model is not yet supported");
-         }
-
-         for (auto & lora_inp : lora_files) {
-             auto fname = lora_inp.path;
-             auto scale = lora_inp.scale;
-             std::unique_ptr<file_input> adapter(new file_input(fname, scale));
-             check_metadata_lora(adapter.get());
-             adapters.push_back(std::move(adapter));
-         }
-
-         ctx_out = gguf_init_empty();
-         struct ggml_init_params params = {
-             /*.mem_size =*/ gguf_get_n_tensors(base_model.ctx_gguf)*ggml_tensor_overhead(),
-             /*.mem_buffer =*/ NULL,
-             /*.no_alloc =*/ true,
-         };
-         ctx_out_ggml = ggml_init(params);
-         backend = ggml_backend_cpu_init();
-         allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
-     }
-
-     void check_metadata_lora(file_input * adapter) {
-         auto general_type = get_kv_str(adapter->ctx_gguf, "general.type");
-         if (general_type != "adapter") {
-             throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
-         }
-
-         auto adapter_type = get_kv_str(adapter->ctx_gguf, "adapter.type");
-         if (adapter_type != "lora") {
-             throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
-         }
-
-         auto general_arch_base = get_kv_str(base_model.ctx_gguf, "general.architecture");
-         auto general_arch_lora = get_kv_str(adapter->ctx_gguf, "general.architecture");
-         if (general_arch_base != general_arch_lora) {
-             throw std::runtime_error("model arch and LoRA arch mismatch");
-         }
-     }
-
-     ggml_type get_out_tensor_type(struct ggml_tensor * t) {
-         if (t->type == GGML_TYPE_F32) {
-             return GGML_TYPE_F32;
-         } else {
-             return GGML_TYPE_F16;
-         }
-     }
-
-     void run_merge() {
-         // prepare metadata
-         gguf_set_kv(ctx_out, base_model.ctx_gguf);
-         // output is forced to f16 for now
-         gguf_set_val_u32(ctx_out, "general.file_type", LLAMA_FTYPE_MOSTLY_F16);
-
-         // check if all lora adapters have the same tensors
-         // TODO: remove this when we can support merging subset of adapters. Ref: https://github.com/ggerganov/llama.cpp/pull/8607#discussion_r1686027777
-         static const char * err_no_subset_adapter = "Input adapters do not have the same list of tensors. This is not yet supported. Please merge the adapter one-by-one instead of merging all at once.";
-         if (adapters.size() > 1) {
-             for (size_t i = 1; i < adapters.size(); ++i) {
-                 if (adapters[0]->tensors.size() != adapters[i]->tensors.size()) {
-                     throw std::runtime_error(err_no_subset_adapter);
-                 }
-                 for (auto & it : adapters[i]->tensors) {
-                     if (adapters[0]->get_tensor(it.first) == nullptr) {
-                         throw std::runtime_error(err_no_subset_adapter);
-                     }
-                 }
-             }
-         }
-
-         // mapping base tensor to out tensor (same shape with base, but different type)
-         std::vector<tensor_transformation> trans;
-         for (auto & it : base_model.tensors) {
-             bool t_a = true;
-             bool t_b = true;
-             for (auto & adapter : adapters) {
-                 t_a &= nullptr != adapter->get_tensor(it.first + ".lora_a");
-                 t_b &= nullptr != adapter->get_tensor(it.first + ".lora_b");
-             }
-             auto base_tensor = it.second;
-             if (!t_a && !t_b) {
-                 // only copy
-                 struct ggml_tensor * cpy_tensor = ggml_dup_tensor(ctx_out_ggml, base_tensor);
-                 ggml_set_name(cpy_tensor, base_tensor->name);
-                 trans.push_back({
-                     cpy_tensor,
-                     cpy_tensor,
-                     true,
-                 });
-                 gguf_add_tensor(ctx_out, cpy_tensor);
-             } else if (t_a && t_b) {
-                 // need merging
-                 struct ggml_tensor * out_tensor = ggml_new_tensor(
-                     ctx_out_ggml, get_out_tensor_type(base_tensor), GGML_MAX_DIMS, base_tensor->ne);
-                 ggml_set_name(out_tensor, base_tensor->name);
-                 trans.push_back({
-                     base_tensor,
-                     out_tensor,
-                     false,
-                 });
-                 gguf_add_tensor(ctx_out, out_tensor);
-             } else {
-                 throw std::runtime_error("tensor " + it.first + " missing either lora_a or lora_b");
-             }
-         }
-
-         // placeholder for the meta data
-         {
-             size_t meta_size = gguf_get_meta_size(ctx_out);
-             zeros(fout, meta_size);
-         }
-
-         // process base model tensors
-         size_t n_merged = 0;
-         for (auto & it : trans) {
-             if (!it.is_copy) {
-                 merge_tensor(it.in, it.out);
-                 n_merged++;
-             } else {
-                 copy_tensor(it.in);
-             }
-         }
-
-         // write output metadata
-         {
-             std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
-             gguf_get_meta_data(ctx_out, data.data());
-             fout.seekp(0);
-             fout.write((const char *)data.data(), data.size());
-         }
-
-         printf("%s : merged %zu tensors with lora adapters\n", __func__, n_merged);
-         printf("%s : wrote %zu tensors to output file\n", __func__, trans.size());
-     }
-
-     void copy_tensor(struct ggml_tensor * base) {
-         printf("%s : %s [%s]\n", __func__, base->name, ggml_ne_string(base).c_str());
-         size_t len = ggml_nbytes(base);
-         base_model.read_tensor_data(base->name, read_buf);
-         fout.write((char *)read_buf.data(), len);
-         zeros(fout, GGML_PAD(len, GGUF_DEFAULT_ALIGNMENT) - len);
-     }
-
-     void merge_tensor(struct ggml_tensor * base, struct ggml_tensor * out) {
-         std::string name_base(base->name);
-         std::string name_lora_a = name_base + ".lora_a";
-         std::string name_lora_b = name_base + ".lora_b";
-
-         printf("%s : %s [%s]\n", __func__, base->name, ggml_ne_string(base).c_str());
-
-         // context for input tensor
-         std::vector<struct ggml_tensor *> inp_a(adapters.size());
-         std::vector<struct ggml_tensor *> inp_b(adapters.size());
-         struct ggml_init_params params {
-             /*.mem_size =*/ ggml_tensor_overhead()*(2+adapters.size()*2),
-             /*.mem_buffer =*/ NULL,
-             /*.no_alloc =*/ true,
-         };
-         struct ggml_context * ctx = ggml_init(params);
-
-         // alloc tensors
-         struct ggml_tensor * inp_base = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, base->ne);
-         for (size_t i = 0; i < adapters.size(); ++i) {
-             auto t_a = adapters[i]->get_tensor(name_lora_a);
-             auto t_b = adapters[i]->get_tensor(name_lora_b);
-             // TODO: add support for quantized lora
-             if (ggml_is_quantized(t_a->type) || ggml_is_quantized(t_b->type)) {
-                 throw std::runtime_error("quantized LoRA adapters is not supported, please retry with f16 or f32");
-             }
-             inp_a[i] = ggml_dup_tensor(ctx, t_a);
-             inp_b[i] = ggml_dup_tensor(ctx, t_b);
-         }
-         ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);
-
-         // load base tensor to backend buffer
-         base_model.read_tensor_data(name_base, read_buf);
-         if (base->type != GGML_TYPE_F32) {
-             // optionally dequantize it
-             printf("%s : + dequantize base tensor from %s to F32\n", __func__, ggml_type_name(base->type));
-             auto nels = ggml_nelements(inp_base);
-             const auto * qtype = ggml_get_type_traits(base->type);
-             std::vector<uint8_t> dequant_buf(nels * sizeof(float));
-             qtype->to_float(read_buf.data(), (float *)dequant_buf.data(), nels);
-             ggml_backend_tensor_set(inp_base, dequant_buf.data(), 0, dequant_buf.size());
-         } else {
-             ggml_backend_tensor_set(inp_base, read_buf.data(), 0, ggml_nbytes(inp_base));
-         }
-
-         // load lora tensors to backend buffer
-         for (size_t i = 0; i < adapters.size(); ++i) {
-             adapters[i]->read_tensor_data(name_lora_a, read_buf);
-             ggml_backend_tensor_set(inp_a[i], read_buf.data(), 0, ggml_nbytes(inp_a[i]));
-             adapters[i]->read_tensor_data(name_lora_b, read_buf);
-             ggml_backend_tensor_set(inp_b[i], read_buf.data(), 0, ggml_nbytes(inp_b[i]));
-         }
-
-         // build graph
-         struct ggml_cgraph * gf;
-         {
-             static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
-             static std::vector<uint8_t> buf(buf_size);
-             struct ggml_init_params params0 = {
-                 /*.mem_size =*/ buf_size,
-                 /*.mem_buffer =*/ buf.data(),
-                 /*.no_alloc =*/ true,
-             };
-             struct ggml_context * ctx0 = ggml_init(params0);
-             gf = ggml_new_graph(ctx0);
-             struct ggml_tensor * cur = inp_base;
-             for (size_t i = 0; i < adapters.size(); ++i) {
-                 struct ggml_tensor * delta;
-                 bool is_tok_embd = string_starts_with(name_base, "token_embd");
-                 if (is_tok_embd) {
-                     printf("%s : detected token embeddings tensor\n", __func__);
-                     delta = ggml_mul_mat(ctx0,
-                         ggml_cast(ctx0, inp_b[i], GGML_TYPE_F32),
-                         ggml_cast(ctx0, inp_a[i], GGML_TYPE_F32));
-                 } else {
-                     delta = ggml_mul_mat(ctx0,
-                         ggml_cont(ctx0, ggml_transpose(ctx0, ggml_cast(ctx0, inp_a[i], GGML_TYPE_F32))),
-                         ggml_cast(ctx0, inp_b[i], GGML_TYPE_F32));
-                 }
-                 // scale
-                 const float alpha = adapters[i]->alpha;
-                 const float rank = (float) inp_b[i]->ne[0];
-                 const float scale = alpha ? adapters[i]->scale * alpha / rank : adapters[i]->scale;
-                 delta = ggml_scale(ctx0, delta, scale);
-                 cur = ggml_add(ctx0, delta, cur);
-                 printf("%s : + merging from adapter[%zu] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
-                 printf("%s : input_scale=%f calculated_scale=%f rank=%d\n", __func__, adapters[i]->scale, scale, (int) inp_b[i]->ne[0]);
-             }
-             cur = ggml_cast(ctx0, cur, out->type);
-             printf("%s : + output type is %s\n", __func__, ggml_type_name(out->type));
-             ggml_build_forward_expand(gf, cur);
-             ggml_free(ctx0);
-         }
-
-         // compute
-         {
-             ggml_gallocr_alloc_graph(allocr, gf);
-             ggml_backend_cpu_set_n_threads(backend, n_threads);
-             ggml_backend_graph_compute(backend, gf);
-         }
-
-         // write data to output file
-         {
-             auto * result = ggml_graph_node(gf, -1);
-             size_t len = ggml_nbytes(result);
-             if (read_buf.size() < len) {
-                 read_buf.resize(len);
-             }
-             ggml_backend_tensor_get(result, read_buf.data(), 0, len);
-             fout.write((char *)read_buf.data(), len);
-             zeros(fout, GGML_PAD(len, GGUF_DEFAULT_ALIGNMENT) - len);
-         }
-
-         ggml_free(ctx);
-         ggml_backend_buffer_free(buffer);
-     }
-
-     ~lora_merge_ctx() {
-         ggml_gallocr_free(allocr);
-         ggml_backend_free(backend);
-         gguf_free(ctx_out);
-         ggml_free(ctx_out_ggml);
-     }
- };
-
- static void print_usage(int, char ** argv) {
-     printf("\nexample usage:\n");
-     printf("\n %s -m base-model.gguf --lora lora-file.gguf -o merged-model-f16.gguf\n", argv[0]);
-     printf("\nNOTE: output model is F16\n");
-     printf("\n");
- }
-
- int main(int argc, char ** argv) {
-     common_params params;
-
-     params.out_file = "ggml-lora-merged-f16.gguf";
-
-     if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) {
-         return 1;
-     }
-
-     g_verbose = (params.verbosity > 1);
-     try {
-         lora_merge_ctx ctx(params.model.path, params.lora_adapters, params.out_file, params.cpuparams.n_threads);
-         ctx.run_merge();
-     } catch (const std::exception & err) {
-         fprintf(stderr, "%s\n", err.what());
-         exit(EXIT_FAILURE);
-     }
-
-     printf("done, output file is %s\n", params.out_file.c_str());
-
-     return 0;
- }
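Note on the removed tool: merge_tensor above applies the standard LoRA update, adding delta = scale * (B x A) to each base weight, where scale = adapter_scale * alpha / rank when adapter.lora.alpha is set and plain adapter_scale otherwise. Below is a minimal standalone sketch of that arithmetic on row-major float matrices, without ggml; the helper name merge_lora and the toy sizes are illustrative, not part of the llama.cpp API.

    #include <cstdio>
    #include <vector>

    // Sketch of the LoRA merge arithmetic used by the removed llama-export-lora
    // tool: W' = W + scale * (B * A), with scale = adapter_scale * alpha / rank
    // when alpha is nonzero (mirroring the scale rule in merge_tensor above).
    static void merge_lora(std::vector<float> & W, int rows, int cols,
                           const std::vector<float> & B,   // rows x rank
                           const std::vector<float> & A,   // rank x cols
                           int rank, float adapter_scale, float alpha) {
        const float scale = alpha ? adapter_scale * alpha / (float) rank : adapter_scale;
        for (int r = 0; r < rows; ++r) {
            for (int c = 0; c < cols; ++c) {
                float delta = 0.0f;
                for (int k = 0; k < rank; ++k) {
                    delta += B[r*rank + k] * A[k*cols + c];
                }
                W[r*cols + c] += scale * delta;
            }
        }
    }

    int main() {
        // toy 2x2 identity weight with a rank-1 adapter (values are arbitrary)
        std::vector<float> W = {1, 0, 0, 1};
        std::vector<float> B = {0.5f, 0.25f}; // 2x1
        std::vector<float> A = {1.0f, 1.0f};  // 1x2
        merge_lora(W, 2, 2, B, A, /*rank=*/1, /*adapter_scale=*/1.0f, /*alpha=*/16.0f);
        printf("%.2f %.2f\n%.2f %.2f\n", W[0], W[1], W[2], W[3]);
        return 0;
    }

With alpha = 16 and rank = 1 the effective scale is 16, the same alpha/rank convention the tool reports in its calculated_scale log line.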
package/src/llama.cpp/tools/gguf-split/CMakeLists.txt
@@ -1,5 +0,0 @@
- set(TARGET llama-gguf-split)
- add_executable(${TARGET} gguf-split.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
- target_compile_features(${TARGET} PRIVATE cxx_std_17)