@novastera-oss/llamarn 0.0.1-alpha.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (989)
  1. package/INTERFACE.md +389 -0
  2. package/LICENSE +201 -0
  3. package/README.md +235 -0
  4. package/RNLlamaCpp.podspec +69 -0
  5. package/android/CMakeLists.txt +107 -0
  6. package/android/build.gradle +111 -0
  7. package/android/generated/java/com/novastera/llamarn/NativeRNLlamaCppSpec.java +47 -0
  8. package/android/generated/jni/CMakeLists.txt +36 -0
  9. package/android/generated/jni/RNLlamaCppSpec-generated.cpp +44 -0
  10. package/android/generated/jni/RNLlamaCppSpec.h +31 -0
  11. package/android/generated/jni/react/renderer/components/RNLlamaCppSpec/RNLlamaCppSpecJSI-generated.cpp +42 -0
  12. package/android/generated/jni/react/renderer/components/RNLlamaCppSpec/RNLlamaCppSpecJSI.h +336 -0
  13. package/android/gradle.properties +5 -0
  14. package/android/src/main/AndroidManifest.xml +3 -0
  15. package/android/src/main/AndroidManifestNew.xml +2 -0
  16. package/android/src/main/cpp/include/llama-cpp.h +30 -0
  17. package/android/src/main/cpp/include/llama.h +1440 -0
  18. package/android/src/main/java/com/novastera/llamarn/RNLlamaCppPackage.kt +21 -0
  19. package/android/src/main/jniLibs/arm64-v8a/libOpenCL.so +0 -0
  20. package/android/src/main/jniLibs/arm64-v8a/libggml-base.so +0 -0
  21. package/android/src/main/jniLibs/arm64-v8a/libggml-cpu.so +0 -0
  22. package/android/src/main/jniLibs/arm64-v8a/libggml.so +0 -0
  23. package/android/src/main/jniLibs/arm64-v8a/libllama.so +0 -0
  24. package/android/src/main/jniLibs/x86_64/libOpenCL.so +0 -0
  25. package/android/src/main/jniLibs/x86_64/libggml-base.so +0 -0
  26. package/android/src/main/jniLibs/x86_64/libggml-cpu.so +0 -0
  27. package/android/src/main/jniLibs/x86_64/libggml.so +0 -0
  28. package/android/src/main/jniLibs/x86_64/libllama.so +0 -0
  29. package/cpp/LlamaCppModel.cpp +984 -0
  30. package/cpp/LlamaCppModel.h +162 -0
  31. package/cpp/PureCppImpl.cpp +308 -0
  32. package/cpp/PureCppImpl.h +59 -0
  33. package/cpp/SystemUtils.cpp +180 -0
  34. package/cpp/SystemUtils.h +74 -0
  35. package/cpp/build-info.cpp +4 -0
  36. package/cpp/llama.cpp/AUTHORS +1106 -0
  37. package/cpp/llama.cpp/CMakeLists.txt +254 -0
  38. package/cpp/llama.cpp/CMakePresets.json +84 -0
  39. package/cpp/llama.cpp/CODEOWNERS +11 -0
  40. package/cpp/llama.cpp/CONTRIBUTING.md +127 -0
  41. package/cpp/llama.cpp/LICENSE +21 -0
  42. package/cpp/llama.cpp/Makefile +1608 -0
  43. package/cpp/llama.cpp/README.md +575 -0
  44. package/cpp/llama.cpp/SECURITY.md +68 -0
  45. package/cpp/llama.cpp/build-xcframework.sh +540 -0
  46. package/cpp/llama.cpp/cmake/arm64-apple-clang.cmake +16 -0
  47. package/cpp/llama.cpp/cmake/arm64-windows-llvm.cmake +16 -0
  48. package/cpp/llama.cpp/cmake/build-info.cmake +64 -0
  49. package/cpp/llama.cpp/cmake/common.cmake +35 -0
  50. package/cpp/llama.cpp/cmake/git-vars.cmake +22 -0
  51. package/cpp/llama.cpp/cmake/llama-config.cmake.in +30 -0
  52. package/cpp/llama.cpp/cmake/llama.pc.in +10 -0
  53. package/cpp/llama.cpp/cmake/x64-windows-llvm.cmake +5 -0
  54. package/cpp/llama.cpp/common/CMakeLists.txt +170 -0
  55. package/cpp/llama.cpp/common/arg.cpp +3337 -0
  56. package/cpp/llama.cpp/common/arg.h +89 -0
  57. package/cpp/llama.cpp/common/base64.hpp +392 -0
  58. package/cpp/llama.cpp/common/build-info.cpp.in +4 -0
  59. package/cpp/llama.cpp/common/chat.cpp +1781 -0
  60. package/cpp/llama.cpp/common/chat.h +135 -0
  61. package/cpp/llama.cpp/common/cmake/build-info-gen-cpp.cmake +24 -0
  62. package/cpp/llama.cpp/common/common.cpp +1567 -0
  63. package/cpp/llama.cpp/common/common.h +668 -0
  64. package/cpp/llama.cpp/common/console.cpp +504 -0
  65. package/cpp/llama.cpp/common/console.h +19 -0
  66. package/cpp/llama.cpp/common/json-schema-to-grammar.cpp +1027 -0
  67. package/cpp/llama.cpp/common/json-schema-to-grammar.h +21 -0
  68. package/cpp/llama.cpp/common/json.hpp +24766 -0
  69. package/cpp/llama.cpp/common/llguidance.cpp +254 -0
  70. package/cpp/llama.cpp/common/log.cpp +393 -0
  71. package/cpp/llama.cpp/common/log.h +103 -0
  72. package/cpp/llama.cpp/common/minja/chat-template.hpp +537 -0
  73. package/cpp/llama.cpp/common/minja/minja.hpp +2941 -0
  74. package/cpp/llama.cpp/common/ngram-cache.cpp +286 -0
  75. package/cpp/llama.cpp/common/ngram-cache.h +101 -0
  76. package/cpp/llama.cpp/common/sampling.cpp +580 -0
  77. package/cpp/llama.cpp/common/sampling.h +107 -0
  78. package/cpp/llama.cpp/common/speculative.cpp +278 -0
  79. package/cpp/llama.cpp/common/speculative.h +28 -0
  80. package/cpp/llama.cpp/common/stb_image.h +7988 -0
  81. package/cpp/llama.cpp/convert_hf_to_gguf.py +6195 -0
  82. package/cpp/llama.cpp/convert_hf_to_gguf_update.py +393 -0
  83. package/cpp/llama.cpp/convert_llama_ggml_to_gguf.py +450 -0
  84. package/cpp/llama.cpp/convert_lora_to_gguf.py +461 -0
  85. package/cpp/llama.cpp/flake.lock +58 -0
  86. package/cpp/llama.cpp/flake.nix +185 -0
  87. package/cpp/llama.cpp/ggml/CMakeLists.txt +388 -0
  88. package/cpp/llama.cpp/ggml/cmake/GitVars.cmake +22 -0
  89. package/cpp/llama.cpp/ggml/cmake/common.cmake +26 -0
  90. package/cpp/llama.cpp/ggml/cmake/ggml-config.cmake.in +152 -0
  91. package/cpp/llama.cpp/ggml/include/ggml-alloc.h +76 -0
  92. package/cpp/llama.cpp/ggml/include/ggml-backend.h +354 -0
  93. package/cpp/llama.cpp/ggml/include/ggml-blas.h +25 -0
  94. package/cpp/llama.cpp/ggml/include/ggml-cann.h +123 -0
  95. package/cpp/llama.cpp/ggml/include/ggml-cpp.h +39 -0
  96. package/cpp/llama.cpp/ggml/include/ggml-cpu.h +143 -0
  97. package/cpp/llama.cpp/ggml/include/ggml-cuda.h +47 -0
  98. package/cpp/llama.cpp/ggml/include/ggml-kompute.h +50 -0
  99. package/cpp/llama.cpp/ggml/include/ggml-metal.h +66 -0
  100. package/cpp/llama.cpp/ggml/include/ggml-opencl.h +26 -0
  101. package/cpp/llama.cpp/ggml/include/ggml-opt.h +216 -0
  102. package/cpp/llama.cpp/ggml/include/ggml-rpc.h +33 -0
  103. package/cpp/llama.cpp/ggml/include/ggml-sycl.h +49 -0
  104. package/cpp/llama.cpp/ggml/include/ggml-vulkan.h +29 -0
  105. package/cpp/llama.cpp/ggml/include/ggml.h +2192 -0
  106. package/cpp/llama.cpp/ggml/include/gguf.h +202 -0
  107. package/cpp/llama.cpp/ggml/src/CMakeLists.txt +345 -0
  108. package/cpp/llama.cpp/ggml/src/ggml-alloc.c +1042 -0
  109. package/cpp/llama.cpp/ggml/src/ggml-backend-impl.h +255 -0
  110. package/cpp/llama.cpp/ggml/src/ggml-backend-reg.cpp +586 -0
  111. package/cpp/llama.cpp/ggml/src/ggml-backend.cpp +2008 -0
  112. package/cpp/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +87 -0
  113. package/cpp/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +517 -0
  114. package/cpp/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +74 -0
  115. package/cpp/llama.cpp/ggml/src/ggml-cann/Doxyfile +2579 -0
  116. package/cpp/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +179 -0
  117. package/cpp/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +258 -0
  118. package/cpp/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +2589 -0
  119. package/cpp/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +1083 -0
  120. package/cpp/llama.cpp/ggml/src/ggml-cann/common.h +420 -0
  121. package/cpp/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +2554 -0
  122. package/cpp/llama.cpp/ggml/src/ggml-common.h +1857 -0
  123. package/cpp/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +495 -0
  124. package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/amx.cpp +221 -0
  125. package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/amx.h +8 -0
  126. package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/common.h +91 -0
  127. package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
  128. package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/mmq.h +10 -0
  129. package/cpp/llama.cpp/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
  130. package/cpp/llama.cpp/ggml/src/ggml-cpu/binary-ops.h +16 -0
  131. package/cpp/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +100 -0
  132. package/cpp/llama.cpp/ggml/src/ggml-cpu/common.h +72 -0
  133. package/cpp/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +327 -0
  134. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +6431 -0
  135. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
  136. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
  137. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
  138. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +512 -0
  139. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +13131 -0
  140. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
  141. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
  142. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
  143. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +3492 -0
  144. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +671 -0
  145. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +254 -0
  146. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +60 -0
  147. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +287 -0
  148. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
  149. package/cpp/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +3544 -0
  150. package/cpp/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
  151. package/cpp/llama.cpp/ggml/src/ggml-cpu/ops.cpp +8796 -0
  152. package/cpp/llama.cpp/ggml/src/ggml-cpu/ops.h +110 -0
  153. package/cpp/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +892 -0
  154. package/cpp/llama.cpp/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
  155. package/cpp/llama.cpp/ggml/src/ggml-cpu/unary-ops.h +28 -0
  156. package/cpp/llama.cpp/ggml/src/ggml-cpu/vec.cpp +252 -0
  157. package/cpp/llama.cpp/ggml/src/ggml-cpu/vec.h +802 -0
  158. package/cpp/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +184 -0
  159. package/cpp/llama.cpp/ggml/src/ggml-cuda/acc.cu +47 -0
  160. package/cpp/llama.cpp/ggml/src/ggml-cuda/acc.cuh +5 -0
  161. package/cpp/llama.cpp/ggml/src/ggml-cuda/arange.cu +34 -0
  162. package/cpp/llama.cpp/ggml/src/ggml-cuda/arange.cuh +5 -0
  163. package/cpp/llama.cpp/ggml/src/ggml-cuda/argmax.cu +91 -0
  164. package/cpp/llama.cpp/ggml/src/ggml-cuda/argmax.cuh +3 -0
  165. package/cpp/llama.cpp/ggml/src/ggml-cuda/argsort.cu +104 -0
  166. package/cpp/llama.cpp/ggml/src/ggml-cuda/argsort.cuh +3 -0
  167. package/cpp/llama.cpp/ggml/src/ggml-cuda/binbcast.cu +363 -0
  168. package/cpp/llama.cpp/ggml/src/ggml-cuda/binbcast.cuh +9 -0
  169. package/cpp/llama.cpp/ggml/src/ggml-cuda/clamp.cu +45 -0
  170. package/cpp/llama.cpp/ggml/src/ggml-cuda/clamp.cuh +5 -0
  171. package/cpp/llama.cpp/ggml/src/ggml-cuda/common.cuh +828 -0
  172. package/cpp/llama.cpp/ggml/src/ggml-cuda/concat.cu +221 -0
  173. package/cpp/llama.cpp/ggml/src/ggml-cuda/concat.cuh +5 -0
  174. package/cpp/llama.cpp/ggml/src/ggml-cuda/conv-transpose-1d.cu +89 -0
  175. package/cpp/llama.cpp/ggml/src/ggml-cuda/conv-transpose-1d.cuh +5 -0
  176. package/cpp/llama.cpp/ggml/src/ggml-cuda/convert.cu +730 -0
  177. package/cpp/llama.cpp/ggml/src/ggml-cuda/convert.cuh +26 -0
  178. package/cpp/llama.cpp/ggml/src/ggml-cuda/count-equal.cu +64 -0
  179. package/cpp/llama.cpp/ggml/src/ggml-cuda/count-equal.cuh +5 -0
  180. package/cpp/llama.cpp/ggml/src/ggml-cuda/cp-async.cuh +57 -0
  181. package/cpp/llama.cpp/ggml/src/ggml-cuda/cpy.cu +695 -0
  182. package/cpp/llama.cpp/ggml/src/ggml-cuda/cpy.cuh +11 -0
  183. package/cpp/llama.cpp/ggml/src/ggml-cuda/cross-entropy-loss.cu +189 -0
  184. package/cpp/llama.cpp/ggml/src/ggml-cuda/cross-entropy-loss.cuh +7 -0
  185. package/cpp/llama.cpp/ggml/src/ggml-cuda/dequantize.cuh +103 -0
  186. package/cpp/llama.cpp/ggml/src/ggml-cuda/diagmask.cu +40 -0
  187. package/cpp/llama.cpp/ggml/src/ggml-cuda/diagmask.cuh +5 -0
  188. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-common.cuh +873 -0
  189. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-mma-f16.cuh +1269 -0
  190. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f16.cu +357 -0
  191. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f16.cuh +3 -0
  192. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f32.cu +365 -0
  193. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f32.cuh +3 -0
  194. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-vec-f16.cuh +437 -0
  195. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-vec-f32.cuh +428 -0
  196. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-wmma-f16.cu +634 -0
  197. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +3 -0
  198. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn.cu +345 -0
  199. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn.cuh +3 -0
  200. package/cpp/llama.cpp/ggml/src/ggml-cuda/getrows.cu +275 -0
  201. package/cpp/llama.cpp/ggml/src/ggml-cuda/getrows.cuh +15 -0
  202. package/cpp/llama.cpp/ggml/src/ggml-cuda/ggml-cuda.cu +3501 -0
  203. package/cpp/llama.cpp/ggml/src/ggml-cuda/gla.cu +93 -0
  204. package/cpp/llama.cpp/ggml/src/ggml-cuda/gla.cuh +3 -0
  205. package/cpp/llama.cpp/ggml/src/ggml-cuda/im2col.cu +103 -0
  206. package/cpp/llama.cpp/ggml/src/ggml-cuda/im2col.cuh +5 -0
  207. package/cpp/llama.cpp/ggml/src/ggml-cuda/mma.cuh +396 -0
  208. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmq.cu +322 -0
  209. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmq.cuh +3217 -0
  210. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmv.cu +336 -0
  211. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmv.cuh +12 -0
  212. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmvq.cu +595 -0
  213. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmvq.cuh +12 -0
  214. package/cpp/llama.cpp/ggml/src/ggml-cuda/norm.cu +458 -0
  215. package/cpp/llama.cpp/ggml/src/ggml-cuda/norm.cuh +11 -0
  216. package/cpp/llama.cpp/ggml/src/ggml-cuda/opt-step-adamw.cu +78 -0
  217. package/cpp/llama.cpp/ggml/src/ggml-cuda/opt-step-adamw.cuh +5 -0
  218. package/cpp/llama.cpp/ggml/src/ggml-cuda/out-prod.cu +68 -0
  219. package/cpp/llama.cpp/ggml/src/ggml-cuda/out-prod.cuh +3 -0
  220. package/cpp/llama.cpp/ggml/src/ggml-cuda/pad.cu +49 -0
  221. package/cpp/llama.cpp/ggml/src/ggml-cuda/pad.cuh +5 -0
  222. package/cpp/llama.cpp/ggml/src/ggml-cuda/pool2d.cu +94 -0
  223. package/cpp/llama.cpp/ggml/src/ggml-cuda/pool2d.cuh +5 -0
  224. package/cpp/llama.cpp/ggml/src/ggml-cuda/quantize.cu +189 -0
  225. package/cpp/llama.cpp/ggml/src/ggml-cuda/quantize.cuh +27 -0
  226. package/cpp/llama.cpp/ggml/src/ggml-cuda/rope.cu +456 -0
  227. package/cpp/llama.cpp/ggml/src/ggml-cuda/rope.cuh +7 -0
  228. package/cpp/llama.cpp/ggml/src/ggml-cuda/scale.cu +31 -0
  229. package/cpp/llama.cpp/ggml/src/ggml-cuda/scale.cuh +5 -0
  230. package/cpp/llama.cpp/ggml/src/ggml-cuda/softmax.cu +283 -0
  231. package/cpp/llama.cpp/ggml/src/ggml-cuda/softmax.cuh +7 -0
  232. package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu +148 -0
  233. package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
  234. package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-scan.cu +153 -0
  235. package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
  236. package/cpp/llama.cpp/ggml/src/ggml-cuda/sum.cu +45 -0
  237. package/cpp/llama.cpp/ggml/src/ggml-cuda/sum.cuh +5 -0
  238. package/cpp/llama.cpp/ggml/src/ggml-cuda/sumrows.cu +39 -0
  239. package/cpp/llama.cpp/ggml/src/ggml-cuda/sumrows.cuh +5 -0
  240. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu +5 -0
  241. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu +10 -0
  242. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu +10 -0
  243. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu +10 -0
  244. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +10 -0
  245. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu +5 -0
  246. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +10 -0
  247. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu +10 -0
  248. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu +10 -0
  249. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu +10 -0
  250. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu +5 -0
  251. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu +10 -0
  252. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +10 -0
  253. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu +10 -0
  254. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu +10 -0
  255. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu +10 -0
  256. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu +10 -0
  257. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +10 -0
  258. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu +10 -0
  259. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
  260. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
  261. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
  262. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
  263. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
  264. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
  265. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
  266. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
  267. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
  268. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
  269. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
  270. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
  271. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
  272. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
  273. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
  274. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
  275. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
  276. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
  277. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
  278. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
  279. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
  280. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
  281. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
  282. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
  283. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
  284. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
  285. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
  286. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
  287. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
  288. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
  289. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
  290. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
  291. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
  292. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
  293. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
  294. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
  295. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
  296. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
  297. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
  298. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
  299. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
  300. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
  301. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
  302. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
  303. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
  304. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
  305. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
  306. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
  307. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
  308. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
  309. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
  310. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
  311. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
  312. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
  313. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
  314. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
  315. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
  316. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
  317. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
  318. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
  319. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
  320. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
  321. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
  322. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
  323. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
  324. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
  325. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
  326. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
  327. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
  328. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
  329. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
  330. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
  331. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
  332. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
  333. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
  334. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
  335. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
  336. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
  337. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
  338. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
  339. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
  340. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
  341. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
  342. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
  343. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
  344. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
  345. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +78 -0
  346. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu +5 -0
  347. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu +5 -0
  348. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu +5 -0
  349. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu +5 -0
  350. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu +5 -0
  351. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu +5 -0
  352. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu +5 -0
  353. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu +5 -0
  354. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
  355. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
  356. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
  357. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
  358. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
  359. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
  360. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
  361. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
  362. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
  363. package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
  364. package/cpp/llama.cpp/ggml/src/ggml-cuda/tsembd.cu +47 -0
  365. package/cpp/llama.cpp/ggml/src/ggml-cuda/tsembd.cuh +5 -0
  366. package/cpp/llama.cpp/ggml/src/ggml-cuda/unary.cu +279 -0
  367. package/cpp/llama.cpp/ggml/src/ggml-cuda/unary.cuh +57 -0
  368. package/cpp/llama.cpp/ggml/src/ggml-cuda/upscale.cu +51 -0
  369. package/cpp/llama.cpp/ggml/src/ggml-cuda/upscale.cuh +5 -0
  370. package/cpp/llama.cpp/ggml/src/ggml-cuda/vecdotq.cuh +1135 -0
  371. package/cpp/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +15 -0
  372. package/cpp/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +243 -0
  373. package/cpp/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +140 -0
  374. package/cpp/llama.cpp/ggml/src/ggml-cuda/wkv.cu +199 -0
  375. package/cpp/llama.cpp/ggml/src/ggml-cuda/wkv.cuh +7 -0
  376. package/cpp/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +131 -0
  377. package/cpp/llama.cpp/ggml/src/ggml-impl.h +601 -0
  378. package/cpp/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
  379. package/cpp/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +2251 -0
  380. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/common.comp +112 -0
  381. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +58 -0
  382. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +25 -0
  383. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +52 -0
  384. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +52 -0
  385. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +52 -0
  386. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +52 -0
  387. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +30 -0
  388. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +22 -0
  389. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +17 -0
  390. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +31 -0
  391. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +31 -0
  392. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +38 -0
  393. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +39 -0
  394. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +44 -0
  395. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +52 -0
  396. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +69 -0
  397. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +51 -0
  398. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +33 -0
  399. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +35 -0
  400. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +140 -0
  401. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +106 -0
  402. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +73 -0
  403. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +52 -0
  404. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +28 -0
  405. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +84 -0
  406. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +21 -0
  407. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +53 -0
  408. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +52 -0
  409. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +52 -0
  410. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +52 -0
  411. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +52 -0
  412. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +19 -0
  413. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +23 -0
  414. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +22 -0
  415. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +72 -0
  416. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +71 -0
  417. package/cpp/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +120 -0
  418. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +618 -0
  419. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.m +5916 -0
  420. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.metal +6891 -0
  421. package/cpp/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +107 -0
  422. package/cpp/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +96 -0
  423. package/cpp/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +4966 -0
  424. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/add.cl +83 -0
  425. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/clamp.cl +20 -0
  426. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/cpy.cl +184 -0
  427. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/cvt.cl +118 -0
  428. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/diag_mask_inf.cl +58 -0
  429. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/embed_kernel.py +26 -0
  430. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/gelu.cl +62 -0
  431. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/gemv_noshuffle.cl +268 -0
  432. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general.cl +274 -0
  433. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/get_rows.cl +163 -0
  434. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/im2col_f16.cl +57 -0
  435. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/im2col_f32.cl +57 -0
  436. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul.cl +79 -0
  437. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl +139 -0
  438. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f16_f16.cl +118 -0
  439. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32.cl +118 -0
  440. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl +94 -0
  441. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl +84 -0
  442. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f32_f32.cl +118 -0
  443. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl +192 -0
  444. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl +307 -0
  445. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl +265 -0
  446. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl +272 -0
  447. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl +254 -0
  448. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q6_k.cl +190 -0
  449. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/norm.cl +81 -0
  450. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/relu.cl +16 -0
  451. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/rms_norm.cl +96 -0
  452. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/rope.cl +721 -0
  453. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/scale.cl +16 -0
  454. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/silu.cl +30 -0
  455. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +87 -0
  456. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +87 -0
  457. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_f16.cl +86 -0
  458. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_f32.cl +86 -0
  459. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/transpose.cl +84 -0
  460. package/cpp/llama.cpp/ggml/src/ggml-opt.cpp +854 -0
  461. package/cpp/llama.cpp/ggml/src/ggml-quants.c +5232 -0
  462. package/cpp/llama.cpp/ggml/src/ggml-quants.h +100 -0
  463. package/cpp/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
  464. package/cpp/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +1813 -0
  465. package/cpp/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +183 -0
  466. package/cpp/llama.cpp/ggml/src/ggml-sycl/backend.hpp +37 -0
  467. package/cpp/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +350 -0
  468. package/cpp/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +39 -0
  469. package/cpp/llama.cpp/ggml/src/ggml-sycl/common.cpp +83 -0
  470. package/cpp/llama.cpp/ggml/src/ggml-sycl/common.hpp +493 -0
  471. package/cpp/llama.cpp/ggml/src/ggml-sycl/concat.cpp +197 -0
  472. package/cpp/llama.cpp/ggml/src/ggml-sycl/concat.hpp +20 -0
  473. package/cpp/llama.cpp/ggml/src/ggml-sycl/conv.cpp +100 -0
  474. package/cpp/llama.cpp/ggml/src/ggml-sycl/conv.hpp +20 -0
  475. package/cpp/llama.cpp/ggml/src/ggml-sycl/convert.cpp +596 -0
  476. package/cpp/llama.cpp/ggml/src/ggml-sycl/convert.hpp +34 -0
  477. package/cpp/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +701 -0
  478. package/cpp/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +11 -0
  479. package/cpp/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +753 -0
  480. package/cpp/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +1154 -0
  481. package/cpp/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +27 -0
  482. package/cpp/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +2957 -0
  483. package/cpp/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +1559 -0
  484. package/cpp/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +75 -0
  485. package/cpp/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +70 -0
  486. package/cpp/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +311 -0
  487. package/cpp/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +20 -0
  488. package/cpp/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +4302 -0
  489. package/cpp/llama.cpp/ggml/src/ggml-sycl/gla.cpp +105 -0
  490. package/cpp/llama.cpp/ggml/src/ggml-sycl/gla.hpp +8 -0
  491. package/cpp/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +136 -0
  492. package/cpp/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +21 -0
  493. package/cpp/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +3030 -0
  494. package/cpp/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +33 -0
  495. package/cpp/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +1081 -0
  496. package/cpp/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +27 -0
  497. package/cpp/llama.cpp/ggml/src/ggml-sycl/norm.cpp +474 -0
  498. package/cpp/llama.cpp/ggml/src/ggml-sycl/norm.hpp +26 -0
  499. package/cpp/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +46 -0
  500. package/cpp/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +10 -0
  501. package/cpp/llama.cpp/ggml/src/ggml-sycl/presets.hpp +74 -0
  502. package/cpp/llama.cpp/ggml/src/ggml-sycl/quants.hpp +61 -0
  503. package/cpp/llama.cpp/ggml/src/ggml-sycl/rope.cpp +362 -0
  504. package/cpp/llama.cpp/ggml/src/ggml-sycl/rope.hpp +20 -0
  505. package/cpp/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +264 -0
  506. package/cpp/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +20 -0
  507. package/cpp/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +13 -0
  508. package/cpp/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +23 -0
  509. package/cpp/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +73 -0
  510. package/cpp/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +20 -0
  511. package/cpp/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +1189 -0
  512. package/cpp/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +305 -0
  513. package/cpp/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +10 -0
  514. package/cpp/llama.cpp/ggml/src/ggml-threading.cpp +12 -0
  515. package/cpp/llama.cpp/ggml/src/ggml-threading.h +14 -0
  516. package/cpp/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +202 -0
  517. package/cpp/llama.cpp/ggml/src/ggml-vulkan/cmake/host-toolchain.cmake.in +15 -0
  518. package/cpp/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +10502 -0
  519. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +22 -0
  520. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +29 -0
  521. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +29 -0
  522. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +51 -0
  523. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +69 -0
  524. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +17 -0
  525. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +41 -0
  526. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +49 -0
  527. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +105 -0
  528. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +23 -0
  529. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +51 -0
  530. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +242 -0
  531. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +17 -0
  532. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +31 -0
  533. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +20 -0
  534. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +462 -0
  535. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +699 -0
  536. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp +13 -0
  537. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +42 -0
  538. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +35 -0
  539. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +44 -0
  540. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +43 -0
  541. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +48 -0
  542. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +39 -0
  543. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +49 -0
  544. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +32 -0
  545. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +34 -0
  546. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +34 -0
  547. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +42 -0
  548. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +30 -0
  549. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +32 -0
  550. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +68 -0
  551. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +34 -0
  552. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +35 -0
  553. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +70 -0
  554. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +33 -0
  555. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +31 -0
  556. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +34 -0
  557. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +27 -0
  558. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +483 -0
  559. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +383 -0
  560. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +59 -0
  561. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +25 -0
  562. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +23 -0
  563. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +64 -0
  564. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp +9 -0
  565. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +76 -0
  566. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +33 -0
  567. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +41 -0
  568. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +66 -0
  569. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +100 -0
  570. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +41 -0
  571. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +22 -0
  572. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +27 -0
  573. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +48 -0
  574. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +169 -0
  575. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +118 -0
  576. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +82 -0
  577. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +79 -0
  578. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +90 -0
  579. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +87 -0
  580. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +87 -0
  581. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +90 -0
  582. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +88 -0
  583. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +118 -0
  584. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +154 -0
  585. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +130 -0
  586. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +132 -0
  587. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +136 -0
  588. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +167 -0
  589. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +130 -0
  590. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +868 -0
  591. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +441 -0
  592. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +442 -0
  593. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +99 -0
  594. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +44 -0
  595. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +42 -0
  596. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +28 -0
  597. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +74 -0
  598. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +77 -0
  599. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +21 -0
  600. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +26 -0
  601. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +37 -0
  602. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +52 -0
  603. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +55 -0
  604. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +58 -0
  605. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +60 -0
  606. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +43 -0
  607. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +43 -0
  608. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +47 -0
  609. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +24 -0
  610. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +20 -0
  611. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +22 -0
  612. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +26 -0
  613. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +17 -0
  614. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +173 -0
  615. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +50 -0
  616. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +17 -0
  617. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +29 -0
  618. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +37 -0
  619. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +20 -0
  620. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/test_bfloat16_support.comp +7 -0
  621. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp +7 -0
  622. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp +7 -0
  623. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/test_integer_dot_support.comp +7 -0
  624. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +41 -0
  625. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +1373 -0
  626. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +36 -0
  627. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +740 -0
  628. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp +87 -0
  629. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp +91 -0
  630. package/cpp/llama.cpp/ggml/src/ggml.c +6499 -0
  631. package/cpp/llama.cpp/ggml/src/gguf.cpp +1330 -0
  632. package/cpp/llama.cpp/gguf-py/LICENSE +21 -0
  633. package/cpp/llama.cpp/gguf-py/README.md +99 -0
  634. package/cpp/llama.cpp/gguf-py/examples/reader.py +49 -0
  635. package/cpp/llama.cpp/gguf-py/examples/writer.py +39 -0
  636. package/cpp/llama.cpp/gguf-py/gguf/__init__.py +9 -0
  637. package/cpp/llama.cpp/gguf-py/gguf/constants.py +2296 -0
  638. package/cpp/llama.cpp/gguf-py/gguf/gguf.py +15 -0
  639. package/cpp/llama.cpp/gguf-py/gguf/gguf_reader.py +367 -0
  640. package/cpp/llama.cpp/gguf-py/gguf/gguf_writer.py +1041 -0
  641. package/cpp/llama.cpp/gguf-py/gguf/lazy.py +223 -0
  642. package/cpp/llama.cpp/gguf-py/gguf/metadata.py +642 -0
  643. package/cpp/llama.cpp/gguf-py/gguf/py.typed +0 -0
  644. package/cpp/llama.cpp/gguf-py/gguf/quants.py +1269 -0
  645. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_convert_endian.py +182 -0
  646. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_dump.py +454 -0
  647. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_editor_gui.py +1610 -0
  648. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_hash.py +102 -0
  649. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_new_metadata.py +207 -0
  650. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_set_metadata.py +95 -0
  651. package/cpp/llama.cpp/gguf-py/gguf/tensor_mapping.py +1172 -0
  652. package/cpp/llama.cpp/gguf-py/gguf/utility.py +264 -0
  653. package/cpp/llama.cpp/gguf-py/gguf/vocab.py +492 -0
  654. package/cpp/llama.cpp/gguf-py/pyproject.toml +43 -0
  655. package/cpp/llama.cpp/gguf-py/tests/__init__.py +1 -0
  656. package/cpp/llama.cpp/gguf-py/tests/test_metadata.py +238 -0
  657. package/cpp/llama.cpp/gguf-py/tests/test_quants.py +238 -0
  658. package/cpp/llama.cpp/grammars/README.md +382 -0
  659. package/cpp/llama.cpp/grammars/arithmetic.gbnf +6 -0
  660. package/cpp/llama.cpp/grammars/c.gbnf +42 -0
  661. package/cpp/llama.cpp/grammars/chess.gbnf +13 -0
  662. package/cpp/llama.cpp/grammars/english.gbnf +6 -0
  663. package/cpp/llama.cpp/grammars/japanese.gbnf +7 -0
  664. package/cpp/llama.cpp/grammars/json.gbnf +25 -0
  665. package/cpp/llama.cpp/grammars/json_arr.gbnf +34 -0
  666. package/cpp/llama.cpp/grammars/list.gbnf +4 -0
  667. package/cpp/llama.cpp/include/llama-cpp.h +30 -0
  668. package/cpp/llama.cpp/include/llama.h +1440 -0
  669. package/cpp/llama.cpp/licenses/LICENSE-curl +9 -0
  670. package/cpp/llama.cpp/licenses/LICENSE-httplib +21 -0
  671. package/cpp/llama.cpp/licenses/LICENSE-jsonhpp +21 -0
  672. package/cpp/llama.cpp/licenses/LICENSE-linenoise +26 -0
  673. package/cpp/llama.cpp/media/llama0-banner.png +0 -0
  674. package/cpp/llama.cpp/media/llama0-logo.png +0 -0
  675. package/cpp/llama.cpp/media/llama1-banner.png +0 -0
  676. package/cpp/llama.cpp/media/llama1-logo.png +0 -0
  677. package/cpp/llama.cpp/media/llama1-logo.svg +34 -0
  678. package/cpp/llama.cpp/media/matmul.png +0 -0
  679. package/cpp/llama.cpp/media/matmul.svg +1238 -0
  680. package/cpp/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  681. package/cpp/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  682. package/cpp/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  683. package/cpp/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +112 -0
  684. package/cpp/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +46 -0
  685. package/cpp/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +112 -0
  686. package/cpp/llama.cpp/models/ggml-vocab-chameleon.gguf.out +46 -0
  687. package/cpp/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  688. package/cpp/llama.cpp/models/ggml-vocab-command-r.gguf.inp +112 -0
  689. package/cpp/llama.cpp/models/ggml-vocab-command-r.gguf.out +46 -0
  690. package/cpp/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  691. package/cpp/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +112 -0
  692. package/cpp/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +46 -0
  693. package/cpp/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  694. package/cpp/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +112 -0
  695. package/cpp/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +46 -0
  696. package/cpp/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +112 -0
  697. package/cpp/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +46 -0
  698. package/cpp/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  699. package/cpp/llama.cpp/models/ggml-vocab-falcon.gguf.inp +112 -0
  700. package/cpp/llama.cpp/models/ggml-vocab-falcon.gguf.out +46 -0
  701. package/cpp/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  702. package/cpp/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +112 -0
  703. package/cpp/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +46 -0
  704. package/cpp/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +112 -0
  705. package/cpp/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +46 -0
  706. package/cpp/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  707. package/cpp/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  708. package/cpp/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +112 -0
  709. package/cpp/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +46 -0
  710. package/cpp/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  711. package/cpp/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +112 -0
  712. package/cpp/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +46 -0
  713. package/cpp/llama.cpp/models/ggml-vocab-llama4.gguf.inp +112 -0
  714. package/cpp/llama.cpp/models/ggml-vocab-llama4.gguf.out +46 -0
  715. package/cpp/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  716. package/cpp/llama.cpp/models/ggml-vocab-mpt.gguf.inp +112 -0
  717. package/cpp/llama.cpp/models/ggml-vocab-mpt.gguf.out +46 -0
  718. package/cpp/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  719. package/cpp/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +112 -0
  720. package/cpp/llama.cpp/models/ggml-vocab-phi-3.gguf.out +46 -0
  721. package/cpp/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +112 -0
  722. package/cpp/llama.cpp/models/ggml-vocab-pixtral.gguf.out +46 -0
  723. package/cpp/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  724. package/cpp/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +112 -0
  725. package/cpp/llama.cpp/models/ggml-vocab-qwen2.gguf.out +46 -0
  726. package/cpp/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  727. package/cpp/llama.cpp/models/ggml-vocab-refact.gguf.inp +112 -0
  728. package/cpp/llama.cpp/models/ggml-vocab-refact.gguf.out +46 -0
  729. package/cpp/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +112 -0
  730. package/cpp/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +46 -0
  731. package/cpp/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  732. package/cpp/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +112 -0
  733. package/cpp/llama.cpp/models/ggml-vocab-starcoder.gguf.out +46 -0
  734. package/cpp/llama.cpp/models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja +202 -0
  735. package/cpp/llama.cpp/models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja +156 -0
  736. package/cpp/llama.cpp/models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja +152 -0
  737. package/cpp/llama.cpp/models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja +152 -0
  738. package/cpp/llama.cpp/models/templates/Qwen-Qwen2.5-7B-Instruct.jinja +54 -0
  739. package/cpp/llama.cpp/models/templates/README.md +22 -0
  740. package/cpp/llama.cpp/models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja +1 -0
  741. package/cpp/llama.cpp/models/templates/deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja +1 -0
  742. package/cpp/llama.cpp/models/templates/fireworks-ai-llama-3-firefunction-v2.jinja +57 -0
  743. package/cpp/llama.cpp/models/templates/google-gemma-2-2b-it.jinja +4 -0
  744. package/cpp/llama.cpp/models/templates/llama-cpp-deepseek-r1.jinja +76 -0
  745. package/cpp/llama.cpp/models/templates/meetkai-functionary-medium-v3.1.jinja +58 -0
  746. package/cpp/llama.cpp/models/templates/meetkai-functionary-medium-v3.2.jinja +287 -0
  747. package/cpp/llama.cpp/models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja +109 -0
  748. package/cpp/llama.cpp/models/templates/meta-llama-Llama-3.2-3B-Instruct.jinja +93 -0
  749. package/cpp/llama.cpp/models/templates/meta-llama-Llama-3.3-70B-Instruct.jinja +109 -0
  750. package/cpp/llama.cpp/models/templates/microsoft-Phi-3.5-mini-instruct.jinja +8 -0
  751. package/cpp/llama.cpp/models/templates/mistralai-Mistral-Nemo-Instruct-2407.jinja +87 -0
  752. package/cpp/llama.cpp/mypy.ini +7 -0
  753. package/cpp/llama.cpp/pocs/CMakeLists.txt +14 -0
  754. package/cpp/llama.cpp/pocs/vdot/CMakeLists.txt +9 -0
  755. package/cpp/llama.cpp/pocs/vdot/q8dot.cpp +173 -0
  756. package/cpp/llama.cpp/pocs/vdot/vdot.cpp +311 -0
  757. package/cpp/llama.cpp/poetry.lock +1197 -0
  758. package/cpp/llama.cpp/prompts/LLM-questions.txt +49 -0
  759. package/cpp/llama.cpp/prompts/alpaca.txt +1 -0
  760. package/cpp/llama.cpp/prompts/assistant.txt +31 -0
  761. package/cpp/llama.cpp/prompts/chat-with-baichuan.txt +4 -0
  762. package/cpp/llama.cpp/prompts/chat-with-bob.txt +7 -0
  763. package/cpp/llama.cpp/prompts/chat-with-qwen.txt +1 -0
  764. package/cpp/llama.cpp/prompts/chat-with-vicuna-v0.txt +7 -0
  765. package/cpp/llama.cpp/prompts/chat-with-vicuna-v1.txt +7 -0
  766. package/cpp/llama.cpp/prompts/chat.txt +28 -0
  767. package/cpp/llama.cpp/prompts/dan-modified.txt +1 -0
  768. package/cpp/llama.cpp/prompts/dan.txt +1 -0
  769. package/cpp/llama.cpp/prompts/mnemonics.txt +93 -0
  770. package/cpp/llama.cpp/prompts/parallel-questions.txt +43 -0
  771. package/cpp/llama.cpp/prompts/reason-act.txt +18 -0
  772. package/cpp/llama.cpp/pyproject.toml +45 -0
  773. package/cpp/llama.cpp/pyrightconfig.json +22 -0
  774. package/cpp/llama.cpp/requirements/requirements-all.txt +15 -0
  775. package/cpp/llama.cpp/requirements/requirements-compare-llama-bench.txt +2 -0
  776. package/cpp/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +3 -0
  777. package/cpp/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +3 -0
  778. package/cpp/llama.cpp/requirements/requirements-convert_legacy_llama.txt +5 -0
  779. package/cpp/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +1 -0
  780. package/cpp/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +2 -0
  781. package/cpp/llama.cpp/requirements/requirements-gguf_editor_gui.txt +3 -0
  782. package/cpp/llama.cpp/requirements/requirements-pydantic.txt +3 -0
  783. package/cpp/llama.cpp/requirements/requirements-test-tokenizer-random.txt +1 -0
  784. package/cpp/llama.cpp/requirements/requirements-tool_bench.txt +12 -0
  785. package/cpp/llama.cpp/requirements.txt +13 -0
  786. package/cpp/llama.cpp/src/CMakeLists.txt +45 -0
  787. package/cpp/llama.cpp/src/llama-adapter.cpp +388 -0
  788. package/cpp/llama.cpp/src/llama-adapter.h +76 -0
  789. package/cpp/llama.cpp/src/llama-arch.cpp +1743 -0
  790. package/cpp/llama.cpp/src/llama-arch.h +437 -0
  791. package/cpp/llama.cpp/src/llama-batch.cpp +372 -0
  792. package/cpp/llama.cpp/src/llama-batch.h +89 -0
  793. package/cpp/llama.cpp/src/llama-chat.cpp +663 -0
  794. package/cpp/llama.cpp/src/llama-chat.h +58 -0
  795. package/cpp/llama.cpp/src/llama-context.cpp +2459 -0
  796. package/cpp/llama.cpp/src/llama-context.h +246 -0
  797. package/cpp/llama.cpp/src/llama-cparams.cpp +1 -0
  798. package/cpp/llama.cpp/src/llama-cparams.h +39 -0
  799. package/cpp/llama.cpp/src/llama-grammar.cpp +1219 -0
  800. package/cpp/llama.cpp/src/llama-grammar.h +173 -0
  801. package/cpp/llama.cpp/src/llama-graph.cpp +1713 -0
  802. package/cpp/llama.cpp/src/llama-graph.h +595 -0
  803. package/cpp/llama.cpp/src/llama-hparams.cpp +79 -0
  804. package/cpp/llama.cpp/src/llama-hparams.h +161 -0
  805. package/cpp/llama.cpp/src/llama-impl.cpp +167 -0
  806. package/cpp/llama.cpp/src/llama-impl.h +61 -0
  807. package/cpp/llama.cpp/src/llama-io.cpp +15 -0
  808. package/cpp/llama.cpp/src/llama-io.h +35 -0
  809. package/cpp/llama.cpp/src/llama-kv-cache.cpp +2486 -0
  810. package/cpp/llama.cpp/src/llama-kv-cache.h +405 -0
  811. package/cpp/llama.cpp/src/llama-memory.cpp +1 -0
  812. package/cpp/llama.cpp/src/llama-memory.h +31 -0
  813. package/cpp/llama.cpp/src/llama-mmap.cpp +600 -0
  814. package/cpp/llama.cpp/src/llama-mmap.h +68 -0
  815. package/cpp/llama.cpp/src/llama-model-loader.cpp +1133 -0
  816. package/cpp/llama.cpp/src/llama-model-loader.h +169 -0
  817. package/cpp/llama.cpp/src/llama-model.cpp +13453 -0
  818. package/cpp/llama.cpp/src/llama-model.h +420 -0
  819. package/cpp/llama.cpp/src/llama-quant.cpp +964 -0
  820. package/cpp/llama.cpp/src/llama-quant.h +1 -0
  821. package/cpp/llama.cpp/src/llama-sampling.cpp +2575 -0
  822. package/cpp/llama.cpp/src/llama-sampling.h +32 -0
  823. package/cpp/llama.cpp/src/llama-vocab.cpp +3313 -0
  824. package/cpp/llama.cpp/src/llama-vocab.h +125 -0
  825. package/cpp/llama.cpp/src/llama.cpp +340 -0
  826. package/cpp/llama.cpp/src/unicode-data.cpp +7034 -0
  827. package/cpp/llama.cpp/src/unicode-data.h +20 -0
  828. package/cpp/llama.cpp/src/unicode.cpp +849 -0
  829. package/cpp/llama.cpp/src/unicode.h +66 -0
  830. package/cpp/rn-completion.cpp +431 -0
  831. package/cpp/rn-llama.hpp +60 -0
  832. package/cpp/rn-utils.hpp +331 -0
  833. package/ios/OnLoad.mm +22 -0
  834. package/ios/generated/RNLlamaCppSpec/RNLlamaCppSpec-generated.mm +64 -0
  835. package/ios/generated/RNLlamaCppSpec/RNLlamaCppSpec.h +251 -0
  836. package/ios/generated/RNLlamaCppSpecJSI-generated.cpp +42 -0
  837. package/ios/generated/RNLlamaCppSpecJSI.h +336 -0
  838. package/ios/include/chat.h +135 -0
  839. package/ios/include/common/base64.hpp +392 -0
  840. package/ios/include/common/json.hpp +24766 -0
  841. package/ios/include/common/minja/chat-template.hpp +537 -0
  842. package/ios/include/common/minja/minja.hpp +2941 -0
  843. package/ios/include/common.h +668 -0
  844. package/ios/include/json-schema-to-grammar.h +21 -0
  845. package/ios/include/llama-cpp.h +30 -0
  846. package/ios/include/llama.h +1440 -0
  847. package/ios/include/log.h +103 -0
  848. package/ios/include/ngram-cache.h +101 -0
  849. package/ios/include/sampling.h +107 -0
  850. package/ios/include/speculative.h +28 -0
  851. package/ios/libs/llama.xcframework/Info.plist +135 -0
  852. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
  853. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  854. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4492 -0
  855. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-alloc.h +76 -0
  856. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-backend.h +354 -0
  857. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-blas.h +25 -0
  858. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-cpu.h +143 -0
  859. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-metal.h +66 -0
  860. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml.h +2192 -0
  861. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/gguf.h +202 -0
  862. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/llama.h +1440 -0
  863. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Info.plist +36 -0
  864. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Modules/module.modulemap +17 -0
  865. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/llama +0 -0
  866. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
  867. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  868. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4513 -0
  869. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3440 -0
  870. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-alloc.h +76 -0
  871. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +354 -0
  872. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-blas.h +25 -0
  873. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-cpu.h +143 -0
  874. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-metal.h +66 -0
  875. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +2192 -0
  876. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/gguf.h +202 -0
  877. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/llama.h +1440 -0
  878. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Info.plist +36 -0
  879. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Modules/module.modulemap +17 -0
  880. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/llama +0 -0
  881. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
  882. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  883. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4513 -0
  884. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3442 -0
  885. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-alloc.h +76 -0
  886. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-backend.h +354 -0
  887. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-blas.h +25 -0
  888. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-cpu.h +143 -0
  889. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-metal.h +66 -0
  890. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml.h +2192 -0
  891. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/gguf.h +202 -0
  892. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/llama.h +1440 -0
  893. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Modules/module.modulemap +17 -0
  894. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Resources/Info.plist +32 -0
  895. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-alloc.h +76 -0
  896. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-backend.h +354 -0
  897. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-blas.h +25 -0
  898. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-cpu.h +143 -0
  899. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-metal.h +66 -0
  900. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml.h +2192 -0
  901. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/gguf.h +202 -0
  902. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/llama.h +1440 -0
  903. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Modules/module.modulemap +17 -0
  904. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Resources/Info.plist +32 -0
  905. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/llama +0 -0
  906. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-alloc.h +76 -0
  907. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-backend.h +354 -0
  908. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-blas.h +25 -0
  909. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-cpu.h +143 -0
  910. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-metal.h +66 -0
  911. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml.h +2192 -0
  912. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/gguf.h +202 -0
  913. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/llama.h +1440 -0
  914. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Modules/module.modulemap +17 -0
  915. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Resources/Info.plist +32 -0
  916. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/llama +0 -0
  917. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/llama +0 -0
  918. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
  919. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  920. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4492 -0
  921. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-alloc.h +76 -0
  922. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-backend.h +354 -0
  923. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-blas.h +25 -0
  924. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-cpu.h +143 -0
  925. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-metal.h +66 -0
  926. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml.h +2192 -0
  927. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/gguf.h +202 -0
  928. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/llama.h +1440 -0
  929. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Info.plist +35 -0
  930. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Modules/module.modulemap +17 -0
  931. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/llama +0 -0
  932. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
  933. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  934. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4513 -0
  935. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3440 -0
  936. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-alloc.h +76 -0
  937. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +354 -0
  938. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-blas.h +25 -0
  939. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-cpu.h +143 -0
  940. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-metal.h +66 -0
  941. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +2192 -0
  942. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/gguf.h +202 -0
  943. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/llama.h +1440 -0
  944. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Info.plist +35 -0
  945. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Modules/module.modulemap +17 -0
  946. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/llama +0 -0
  947. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
  948. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  949. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4528 -0
  950. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-alloc.h +76 -0
  951. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-backend.h +354 -0
  952. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-blas.h +25 -0
  953. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-cpu.h +143 -0
  954. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-metal.h +66 -0
  955. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml.h +2192 -0
  956. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/gguf.h +202 -0
  957. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/llama.h +1440 -0
  958. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Info.plist +32 -0
  959. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Modules/module.modulemap +17 -0
  960. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/llama +0 -0
  961. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
  962. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  963. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4549 -0
  964. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3470 -0
  965. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-alloc.h +76 -0
  966. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +354 -0
  967. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-blas.h +25 -0
  968. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-cpu.h +143 -0
  969. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-metal.h +66 -0
  970. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +2192 -0
  971. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/gguf.h +202 -0
  972. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/llama.h +1440 -0
  973. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Info.plist +32 -0
  974. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Modules/module.modulemap +17 -0
  975. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/llama +0 -0
  976. package/lib/module/NativeRNLlamaCpp.js +35 -0
  977. package/lib/module/NativeRNLlamaCpp.js.map +1 -0
  978. package/lib/module/index.js +20 -0
  979. package/lib/module/index.js.map +1 -0
  980. package/lib/module/package.json +1 -0
  981. package/lib/typescript/package.json +1 -0
  982. package/lib/typescript/src/NativeRNLlamaCpp.d.ts +222 -0
  983. package/lib/typescript/src/NativeRNLlamaCpp.d.ts.map +1 -0
  984. package/lib/typescript/src/index.d.ts +5 -0
  985. package/lib/typescript/src/index.d.ts.map +1 -0
  986. package/package.json +161 -0
  987. package/react-native.config.js +15 -0
  988. package/src/NativeRNLlamaCpp.ts +282 -0
  989. package/src/index.tsx +54 -0
package/cpp/llama.cpp/src/llama-arch.cpp ADDED
@@ -0,0 +1,1743 @@
+ #include "llama-arch.h"
+
+ #include "llama-impl.h"
+
+ #include <map>
+
+ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
+     { LLM_ARCH_LLAMA, "llama" },
+     { LLM_ARCH_LLAMA4, "llama4" },
+     { LLM_ARCH_DECI, "deci" },
+     { LLM_ARCH_FALCON, "falcon" },
+     { LLM_ARCH_GROK, "grok" },
+     { LLM_ARCH_GPT2, "gpt2" },
+     { LLM_ARCH_GPTJ, "gptj" },
+     { LLM_ARCH_GPTNEOX, "gptneox" },
+     { LLM_ARCH_MPT, "mpt" },
+     { LLM_ARCH_BAICHUAN, "baichuan" },
+     { LLM_ARCH_STARCODER, "starcoder" },
+     { LLM_ARCH_REFACT, "refact" },
+     { LLM_ARCH_BERT, "bert" },
+     { LLM_ARCH_NOMIC_BERT, "nomic-bert" },
+     { LLM_ARCH_NOMIC_BERT_MOE, "nomic-bert-moe" },
+     { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" },
+     { LLM_ARCH_BLOOM, "bloom" },
+     { LLM_ARCH_STABLELM, "stablelm" },
+     { LLM_ARCH_QWEN, "qwen" },
+     { LLM_ARCH_QWEN2, "qwen2" },
+     { LLM_ARCH_QWEN2MOE, "qwen2moe" },
+     { LLM_ARCH_QWEN2VL, "qwen2vl" },
+     { LLM_ARCH_QWEN3, "qwen3" },
+     { LLM_ARCH_QWEN3MOE, "qwen3moe" },
+     { LLM_ARCH_PHI2, "phi2" },
+     { LLM_ARCH_PHI3, "phi3" },
+     { LLM_ARCH_PHIMOE, "phimoe" },
+     { LLM_ARCH_PLAMO, "plamo" },
+     { LLM_ARCH_CODESHELL, "codeshell" },
+     { LLM_ARCH_ORION, "orion" },
+     { LLM_ARCH_INTERNLM2, "internlm2" },
+     { LLM_ARCH_MINICPM, "minicpm" },
+     { LLM_ARCH_MINICPM3, "minicpm3" },
+     { LLM_ARCH_GEMMA, "gemma" },
+     { LLM_ARCH_GEMMA2, "gemma2" },
+     { LLM_ARCH_GEMMA3, "gemma3" },
+     { LLM_ARCH_STARCODER2, "starcoder2" },
+     { LLM_ARCH_MAMBA, "mamba" },
+     { LLM_ARCH_XVERSE, "xverse" },
+     { LLM_ARCH_COMMAND_R, "command-r" },
+     { LLM_ARCH_COHERE2, "cohere2" },
+     { LLM_ARCH_DBRX, "dbrx" },
+     { LLM_ARCH_OLMO, "olmo" },
+     { LLM_ARCH_OLMO2, "olmo2" },
+     { LLM_ARCH_OLMOE, "olmoe" },
+     { LLM_ARCH_OPENELM, "openelm" },
+     { LLM_ARCH_ARCTIC, "arctic" },
+     { LLM_ARCH_DEEPSEEK, "deepseek" },
+     { LLM_ARCH_DEEPSEEK2, "deepseek2" },
+     { LLM_ARCH_CHATGLM, "chatglm" },
+     { LLM_ARCH_GLM4, "glm4" },
+     { LLM_ARCH_BITNET, "bitnet" },
+     { LLM_ARCH_T5, "t5" },
+     { LLM_ARCH_T5ENCODER, "t5encoder" },
+     { LLM_ARCH_JAIS, "jais" },
+     { LLM_ARCH_NEMOTRON, "nemotron" },
+     { LLM_ARCH_EXAONE, "exaone" },
+     { LLM_ARCH_RWKV6, "rwkv6" },
+     { LLM_ARCH_RWKV6QWEN2, "rwkv6qwen2" },
+     { LLM_ARCH_RWKV7, "rwkv7" },
+     { LLM_ARCH_ARWKV7, "arwkv7" },
+     { LLM_ARCH_GRANITE, "granite" },
+     { LLM_ARCH_GRANITE_MOE, "granitemoe" },
+     { LLM_ARCH_CHAMELEON, "chameleon" },
+     { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
+     { LLM_ARCH_PLM, "plm" },
+     { LLM_ARCH_BAILINGMOE, "bailingmoe" },
+     { LLM_ARCH_UNKNOWN, "(unknown)" },
+ };
+
+ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
+     { LLM_KV_GENERAL_TYPE, "general.type" },
+     { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
+     { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
+     { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
+     { LLM_KV_GENERAL_FILE_TYPE, "general.file_type" },
+     { LLM_KV_GENERAL_NAME, "general.name" },
+     { LLM_KV_GENERAL_AUTHOR, "general.author" },
+     { LLM_KV_GENERAL_VERSION, "general.version" },
+     { LLM_KV_GENERAL_URL, "general.url" },
+     { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
+     { LLM_KV_GENERAL_LICENSE, "general.license" },
+     { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
+     { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },
+
+     { LLM_KV_VOCAB_SIZE, "%s.vocab_size" },
+     { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
+     { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
+     { LLM_KV_FEATURES_LENGTH, "%s.features_length" },
+     { LLM_KV_BLOCK_COUNT, "%s.block_count" },
+     { LLM_KV_LEADING_DENSE_BLOCK_COUNT, "%s.leading_dense_block_count" },
+     { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
+     { LLM_KV_EXPERT_FEED_FORWARD_LENGTH, "%s.expert_feed_forward_length" },
+     { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" },
+     { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
+     { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
+     { LLM_KV_EXPERT_COUNT, "%s.expert_count" },
+     { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" },
+     { LLM_KV_EXPERT_SHARED_COUNT, "%s.expert_shared_count" },
+     { LLM_KV_EXPERT_WEIGHTS_SCALE, "%s.expert_weights_scale" },
+     { LLM_KV_EXPERT_WEIGHTS_NORM, "%s.expert_weights_norm" },
+     { LLM_KV_EXPERT_GATING_FUNC, "%s.expert_gating_func" },
+     { LLM_KV_MOE_EVERY_N_LAYERS, "%s.moe_every_n_layers" },
+     { LLM_KV_POOLING_TYPE, "%s.pooling_type" },
+     { LLM_KV_LOGIT_SCALE, "%s.logit_scale" },
+     { LLM_KV_DECODER_START_TOKEN_ID, "%s.decoder_start_token_id" },
+     { LLM_KV_ATTN_LOGIT_SOFTCAPPING, "%s.attn_logit_softcapping" },
+     { LLM_KV_FINAL_LOGIT_SOFTCAPPING, "%s.final_logit_softcapping" },
+     { LLM_KV_SWIN_NORM, "%s.swin_norm" },
+     { LLM_KV_RESCALE_EVERY_N_LAYERS, "%s.rescale_every_n_layers" },
+     { LLM_KV_TIME_MIX_EXTRA_DIM, "%s.time_mix_extra_dim" },
+     { LLM_KV_TIME_DECAY_EXTRA_DIM, "%s.time_decay_extra_dim" },
+     { LLM_KV_RESIDUAL_SCALE, "%s.residual_scale" },
+     { LLM_KV_EMBEDDING_SCALE, "%s.embedding_scale" },
+     { LLM_KV_TOKEN_SHIFT_COUNT, "%s.token_shift_count" },
+     { LLM_KV_INTERLEAVE_MOE_LAYER_STEP, "%s.interleave_moe_layer_step" },
+
+     { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
+     { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
+     { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
+     { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
+     { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" },
+     { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" },
+     { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
+     { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
+     { LLM_KV_ATTENTION_GROUPNORM_EPS, "%s.attention.group_norm_epsilon" },
+     { LLM_KV_ATTENTION_GROUPNORM_GROUPS, "%s.attention.group_norm_groups" },
+     { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" },
+     { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" },
+     { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" },
+     { LLM_KV_ATTENTION_DECAY_LORA_RANK, "%s.attention.decay_lora_rank" },
+     { LLM_KV_ATTENTION_ICLR_LORA_RANK, "%s.attention.iclr_lora_rank" },
+     { LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, "%s.attention.value_residual_mix_lora_rank" },
+     { LLM_KV_ATTENTION_GATE_LORA_RANK, "%s.attention.gate_lora_rank" },
+     { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" },
+     { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" },
+     { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" },
+     { LLM_KV_ATTENTION_KEY_LENGTH_MLA, "%s.attention.key_length_mla" },
+     { LLM_KV_ATTENTION_VALUE_LENGTH_MLA, "%s.attention.value_length_mla" },
+
+     { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
+     { LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" },
+     { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
+     { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
+     { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
+     { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
+     { LLM_KV_ROPE_SCALING_ATTN_FACTOR, "%s.rope.scaling.attn_factor" },
+     { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
+     { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },
+     { LLM_KV_ROPE_SCALING_YARN_LOG_MUL, "%s.rope.scaling.yarn_log_multiplier" },
+
+     { LLM_KV_SPLIT_NO, "split.no" },
+     { LLM_KV_SPLIT_COUNT, "split.count" },
+     { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" },
+
+     { LLM_KV_SSM_CONV_KERNEL, "%s.ssm.conv_kernel" },
+     { LLM_KV_SSM_INNER_SIZE, "%s.ssm.inner_size" },
+     { LLM_KV_SSM_STATE_SIZE, "%s.ssm.state_size" },
+     { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
+     { LLM_KV_SSM_DT_B_C_RMS, "%s.ssm.dt_b_c_rms" },
+
+     { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" },
+
+     { LLM_KV_POSNET_EMBEDDING_LENGTH, "%s.posnet.embedding_length" },
+     { LLM_KV_POSNET_BLOCK_COUNT, "%s.posnet.block_count" },
+
+     { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" },
+     { LLM_KV_CONVNEXT_BLOCK_COUNT, "%s.convnext.block_count" },
+
+     { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
+     { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" },
+     { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
+     { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
+     { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, "tokenizer.ggml.token_type_count" },
+     { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
+     { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
+     { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
+     { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
+     { LLM_KV_TOKENIZER_EOT_ID, "tokenizer.ggml.eot_token_id" },
+     { LLM_KV_TOKENIZER_EOM_ID, "tokenizer.ggml.eom_token_id" },
+     { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
+     { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
+     { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
+     { LLM_KV_TOKENIZER_CLS_ID, "tokenizer.ggml.cls_token_id" },
+     { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" },
+     { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
+     { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
+     { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" },
+     { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" },
+     { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" },
+     { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
+     { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
+     { LLM_KV_TOKENIZER_CHAT_TEMPLATE, "tokenizer.chat_template" },
+     { LLM_KV_TOKENIZER_CHAT_TEMPLATE_N, "tokenizer.chat_template.%s" },
+     { LLM_KV_TOKENIZER_FIM_PRE_ID, "tokenizer.ggml.fim_pre_token_id" },
+     { LLM_KV_TOKENIZER_FIM_SUF_ID, "tokenizer.ggml.fim_suf_token_id" },
+     { LLM_KV_TOKENIZER_FIM_MID_ID, "tokenizer.ggml.fim_mid_token_id" },
+     { LLM_KV_TOKENIZER_FIM_PAD_ID, "tokenizer.ggml.fim_pad_token_id" },
+     { LLM_KV_TOKENIZER_FIM_REP_ID, "tokenizer.ggml.fim_rep_token_id" },
+     { LLM_KV_TOKENIZER_FIM_SEP_ID, "tokenizer.ggml.fim_sep_token_id" },
+
+     { LLM_KV_ADAPTER_TYPE, "adapter.type" },
+     { LLM_KV_ADAPTER_LORA_ALPHA, "adapter.lora.alpha" },
+
+     // deprecated
+     { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" },
+     { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" },
+     { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
+ };
+
+ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
+     {
+         LLM_ARCH_LLAMA,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
+             { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
+             { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
+             { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+             { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+         },
+     },
+     {
+         LLM_ARCH_LLAMA4,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
+             { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
+             { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
+             { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+             { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+             { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
+             { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
+             { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
+         },
+     },
+     {
+         LLM_ARCH_DECI,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
+             { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
+             { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
+             { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+             { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+         },
+     },
+     {
+         LLM_ARCH_BAICHUAN,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_FALCON,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_GROK,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
+             { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
+             { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
+             { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+             { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+             { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
+             { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
+         },
+     },
+     {
+         LLM_ARCH_GPT2,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_POS_EMBD, "position_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+         },
+     },
+     {
+         LLM_ARCH_GPTJ,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+         },
+     },
+     {
+         LLM_ARCH_GPTNEOX,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_MPT,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
+             { LLM_TENSOR_POS_EMBD, "position_embd" },
+             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+             { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+         },
+     },
+     {
+         LLM_ARCH_STARCODER,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_POS_EMBD, "position_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+         },
+     },
+     {
+         LLM_ARCH_REFACT,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_BERT,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+             { LLM_TENSOR_TOKEN_TYPES, "token_types" },
+             { LLM_TENSOR_POS_EMBD, "position_embd" },
+             { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_CLS, "cls" },
+             { LLM_TENSOR_CLS_OUT, "cls.output" },
+         },
+     },
+     {
+         LLM_ARCH_NOMIC_BERT,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+             { LLM_TENSOR_TOKEN_TYPES, "token_types" },
+             { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_NOMIC_BERT_MOE,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+             { LLM_TENSOR_TOKEN_TYPES, "token_types" },
+             { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+         },
+     },
+     {
+         LLM_ARCH_JINA_BERT_V2,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+             { LLM_TENSOR_TOKEN_TYPES, "token_types" },
+             { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
+             { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_CLS, "cls" },
+         },
+     },
+     {
+         LLM_ARCH_BLOOM,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+         },
+     },
+     {
+         LLM_ARCH_STABLELM,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+             { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+         },
+     },
+     {
+         LLM_ARCH_QWEN,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_QWEN2,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_QWEN2VL,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_QWEN2MOE,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+             { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+             { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
+             { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
+             { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
+             { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
+         },
+     },
+     {
+         LLM_ARCH_QWEN3,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_QWEN3MOE,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+             { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+         },
+     },
+     {
+         LLM_ARCH_PHI2,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_PHI3,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
+             { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_PHIMOE,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
+             { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+             { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+         },
+     },
+     {
+         LLM_ARCH_PLAMO,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_CODESHELL,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_ORION,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_INTERNLM2,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_MINICPM,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+             { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
+             { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+             { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
+             { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
+             { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
+         },
+     },
+     {
+         LLM_ARCH_MINICPM3,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
+             { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" },
+             { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" },
+             { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
+             { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
+             { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+         },
+     },
+     {
+         LLM_ARCH_GEMMA,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+         },
+     },
+     {
+         LLM_ARCH_GEMMA2,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+             { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+             { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
+             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+             { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
+         },
+     },
+     {
+         LLM_ARCH_GEMMA3,
+         {
+             { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+             { LLM_TENSOR_OUTPUT, "output" },
+             { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+             { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
883
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
884
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
885
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
886
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
887
+ { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
888
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
889
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
890
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
891
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
892
+ { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
893
+ },
894
+ },
895
+ {
896
+ LLM_ARCH_STARCODER2,
897
+ {
898
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
899
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
900
+ { LLM_TENSOR_OUTPUT, "output" },
901
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
902
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
903
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
904
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
905
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
906
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
907
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
908
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
909
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
910
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
911
+ },
912
+ },
913
+ {
914
+ LLM_ARCH_MAMBA,
915
+ {
916
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
917
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
918
+ { LLM_TENSOR_OUTPUT, "output" },
919
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
920
+ { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
921
+ { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
922
+ { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" },
923
+ { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
924
+ { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
925
+ { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
926
+ { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
927
+ },
928
+ },
929
+ {
930
+ LLM_ARCH_XVERSE,
931
+ {
932
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
933
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
934
+ { LLM_TENSOR_OUTPUT, "output" },
935
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
936
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
937
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
938
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
939
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
940
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
941
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
942
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
943
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
944
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
945
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
946
+ },
947
+ },
948
+ {
949
+ LLM_ARCH_COMMAND_R,
950
+ {
951
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
952
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
953
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
954
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
955
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
956
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
957
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
958
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
959
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
960
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
961
+ { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
962
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
963
+ },
964
+ },
965
+ {
966
+ LLM_ARCH_COHERE2,
967
+ {
968
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
969
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
970
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
971
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
972
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
973
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
974
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
975
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
976
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
977
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
978
+ },
979
+ },
980
+ {
981
+ LLM_ARCH_DBRX,
982
+ {
983
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
984
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
985
+ { LLM_TENSOR_OUTPUT, "output" },
986
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
987
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
988
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
989
+ { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
990
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
991
+ { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
992
+ { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
993
+ { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
994
+ },
995
+ },
996
+ {
997
+ LLM_ARCH_OLMO,
998
+ {
999
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1000
+ { LLM_TENSOR_OUTPUT, "output" },
1001
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1002
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1003
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1004
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1005
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1006
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1007
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1008
+ },
1009
+ },
1010
+ {
1011
+ LLM_ARCH_OLMO2,
1012
+ {
1013
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1014
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1015
+ { LLM_TENSOR_OUTPUT, "output" },
1016
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1017
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1018
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1019
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1020
+ { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1021
+ { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1022
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1023
+ { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1024
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1025
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1026
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1027
+ },
1028
+ },
1029
+ {
1030
+ LLM_ARCH_OLMOE,
1031
+ {
1032
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1033
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1034
+ { LLM_TENSOR_OUTPUT, "output" },
1035
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1036
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1037
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1038
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1039
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1040
+ { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1041
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1042
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1043
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1044
+ { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1045
+ { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1046
+ { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1047
+ },
1048
+ },
1049
+ {
1050
+ LLM_ARCH_OPENELM,
1051
+ {
1052
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1053
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1054
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1055
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
1056
+ { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1057
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1058
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1059
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1060
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1061
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1062
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1063
+ },
1064
+ },
1065
+ {
1066
+ LLM_ARCH_ARCTIC,
1067
+ {
1068
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1069
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1070
+ { LLM_TENSOR_OUTPUT, "output" },
1071
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1072
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1073
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1074
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1075
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1076
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1077
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1078
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1079
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1080
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1081
+ { LLM_TENSOR_FFN_NORM_EXPS, "blk.%d.ffn_norm_exps" },
1082
+ { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1083
+ { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1084
+ { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1085
+ },
1086
+ },
1087
+ {
1088
+ LLM_ARCH_DEEPSEEK,
1089
+ {
1090
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1091
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1092
+ { LLM_TENSOR_OUTPUT, "output" },
1093
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1094
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1095
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1096
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1097
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1098
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1099
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1100
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1101
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1102
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1103
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1104
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1105
+ { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1106
+ { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1107
+ { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1108
+ { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
1109
+ { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
1110
+ { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
1111
+ { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
1112
+ },
1113
+ },
1114
+ {
1115
+ LLM_ARCH_DEEPSEEK2,
1116
+ {
1117
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1118
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1119
+ { LLM_TENSOR_OUTPUT, "output" },
1120
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1121
+ { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" },
1122
+ { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
1123
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1124
+ { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" },
1125
+ { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
1126
+ { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
1127
+ { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
1128
+ { LLM_TENSOR_ATTN_K_B, "blk.%d.attn_k_b" },
1129
+ { LLM_TENSOR_ATTN_V_B, "blk.%d.attn_v_b" },
1130
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1131
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1132
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1133
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1134
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1135
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1136
+ { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1137
+ { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1138
+ { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1139
+ { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
1140
+ { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
1141
+ { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
1142
+ { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
1143
+ { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
1144
+ },
1145
+ },
1146
+ {
1147
+ LLM_ARCH_PLM,
1148
+ {
1149
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1150
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1151
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1152
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1153
+ { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
1154
+ { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
1155
+ { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
1156
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1157
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1158
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1159
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1160
+ },
1161
+ },
1162
+ {
1163
+ LLM_ARCH_CHATGLM,
1164
+ {
1165
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1166
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1167
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1168
+ { LLM_TENSOR_OUTPUT, "output" },
1169
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1170
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
1171
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1172
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1173
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1174
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1175
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1176
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1177
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1178
+ },
1179
+ },
1180
+ {
1181
+ LLM_ARCH_GLM4,
1182
+ {
1183
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1184
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1185
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1186
+ { LLM_TENSOR_OUTPUT, "output" },
1187
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1188
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1189
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1190
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1191
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1192
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1193
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1194
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1195
+ { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1196
+ { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1197
+ },
1198
+ },
1199
+ {
1200
+ LLM_ARCH_BITNET,
1201
+ {
1202
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1203
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1204
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1205
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1206
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1207
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1208
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1209
+ { LLM_TENSOR_ATTN_SUB_NORM, "blk.%d.attn_sub_norm" },
1210
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1211
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1212
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1213
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1214
+ { LLM_TENSOR_FFN_SUB_NORM, "blk.%d.ffn_sub_norm" },
1215
+ },
1216
+ },
1217
+ {
1218
+ LLM_ARCH_T5,
1219
+ {
1220
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1221
+ { LLM_TENSOR_OUTPUT, "output" },
1222
+ { LLM_TENSOR_DEC_OUTPUT_NORM, "dec.output_norm" },
1223
+ { LLM_TENSOR_DEC_ATTN_NORM, "dec.blk.%d.attn_norm" },
1224
+ { LLM_TENSOR_DEC_ATTN_Q, "dec.blk.%d.attn_q" },
1225
+ { LLM_TENSOR_DEC_ATTN_K, "dec.blk.%d.attn_k" },
1226
+ { LLM_TENSOR_DEC_ATTN_V, "dec.blk.%d.attn_v" },
1227
+ { LLM_TENSOR_DEC_ATTN_OUT, "dec.blk.%d.attn_o" },
1228
+ { LLM_TENSOR_DEC_ATTN_REL_B, "dec.blk.%d.attn_rel_b" },
1229
+ { LLM_TENSOR_DEC_CROSS_ATTN_NORM, "dec.blk.%d.cross_attn_norm" },
1230
+ { LLM_TENSOR_DEC_CROSS_ATTN_Q, "dec.blk.%d.cross_attn_q" },
1231
+ { LLM_TENSOR_DEC_CROSS_ATTN_K, "dec.blk.%d.cross_attn_k" },
1232
+ { LLM_TENSOR_DEC_CROSS_ATTN_V, "dec.blk.%d.cross_attn_v" },
1233
+ { LLM_TENSOR_DEC_CROSS_ATTN_OUT, "dec.blk.%d.cross_attn_o" },
1234
+ { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" },
1235
+ { LLM_TENSOR_DEC_FFN_NORM, "dec.blk.%d.ffn_norm" },
1236
+ { LLM_TENSOR_DEC_FFN_GATE, "dec.blk.%d.ffn_gate" },
1237
+ { LLM_TENSOR_DEC_FFN_DOWN, "dec.blk.%d.ffn_down" },
1238
+ { LLM_TENSOR_DEC_FFN_UP, "dec.blk.%d.ffn_up" },
1239
+ { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
1240
+ { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" },
1241
+ { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" },
1242
+ { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" },
1243
+ { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" },
1244
+ { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" },
1245
+ { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" },
1246
+ { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" },
1247
+ { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" },
1248
+ { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" },
1249
+ { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" },
1250
+ },
1251
+ },
1252
+ {
1253
+ LLM_ARCH_T5ENCODER,
1254
+ {
1255
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1256
+ { LLM_TENSOR_OUTPUT, "output" },
1257
+ { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
1258
+ { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" },
1259
+ { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" },
1260
+ { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" },
1261
+ { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" },
1262
+ { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" },
1263
+ { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" },
1264
+ { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" },
1265
+ { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" },
1266
+ { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" },
1267
+ { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" },
1268
+ },
1269
+ },
1270
+ {
1271
+ LLM_ARCH_JAIS,
1272
+ {
1273
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1274
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1275
+ { LLM_TENSOR_OUTPUT, "output" },
1276
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1277
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
1278
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1279
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1280
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1281
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1282
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1283
+ },
1284
+ },
1285
+ {
1286
+ LLM_ARCH_NEMOTRON,
1287
+ {
1288
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1289
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1290
+ { LLM_TENSOR_OUTPUT, "output" },
1291
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1292
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1293
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1294
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1295
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1296
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1297
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1298
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1299
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1300
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1301
+ },
1302
+ },
1303
+ {
1304
+ LLM_ARCH_EXAONE,
1305
+ {
1306
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1307
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1308
+ { LLM_TENSOR_OUTPUT, "output" },
1309
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1310
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1311
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1312
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1313
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1314
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1315
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1316
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1317
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1318
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1319
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1320
+ },
1321
+ },
1322
+ {
1323
+ LLM_ARCH_RWKV6,
1324
+ {
1325
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1326
+ { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
1327
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1328
+ { LLM_TENSOR_OUTPUT, "output" },
1329
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1330
+ { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
1331
+ { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
1332
+ { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
1333
+ { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" },
1334
+ { LLM_TENSOR_TIME_MIX_LERP_W, "blk.%d.time_mix_lerp_w" },
1335
+ { LLM_TENSOR_TIME_MIX_LERP_K, "blk.%d.time_mix_lerp_k" },
1336
+ { LLM_TENSOR_TIME_MIX_LERP_V, "blk.%d.time_mix_lerp_v" },
1337
+ { LLM_TENSOR_TIME_MIX_LERP_R, "blk.%d.time_mix_lerp_r" },
1338
+ { LLM_TENSOR_TIME_MIX_LERP_G, "blk.%d.time_mix_lerp_g" },
1339
+ { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
1340
+ { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" },
1341
+ { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" },
1342
+ { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" },
1343
+ { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" },
1344
+ { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
1345
+ { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
1346
+ { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
1347
+ { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" },
1348
+ { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
1349
+ { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
1350
+ { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" },
1351
+ { LLM_TENSOR_CHANNEL_MIX_LERP_R, "blk.%d.channel_mix_lerp_r" },
1352
+ { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" },
1353
+ { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" },
1354
+ { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" },
1355
+ },
1356
+ },
1357
+ {
1358
+ LLM_ARCH_RWKV6QWEN2,
1359
+ {
1360
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1361
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1362
+ { LLM_TENSOR_OUTPUT, "output" },
1363
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1364
+ { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
1365
+ { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
1366
+ { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" },
1367
+ { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
1368
+ { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" },
1369
+ { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" },
1370
+ { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" },
1371
+ { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" },
1372
+ { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
1373
+ { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
1374
+ { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
1375
+ { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" },
1376
+ { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
1377
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1378
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1379
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1380
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1381
+ },
1382
+ },
1383
+ {
1384
+ LLM_ARCH_RWKV7,
1385
+ {
1386
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1387
+ { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
1388
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1389
+ { LLM_TENSOR_OUTPUT, "output" },
1390
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1391
+ { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
1392
+ { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" },
1393
+ { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
1394
+ { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
1395
+ { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" },
1396
+ { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" },
1397
+ { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" },
1398
+ { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" },
1399
+ { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" },
1400
+ { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" },
1401
+ { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" },
1402
+ { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" },
1403
+ { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" },
1404
+ { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" },
1405
+ { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" },
1406
+ { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
1407
+ { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
1408
+ { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
1409
+ { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
1410
+ { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
1411
+ { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
1412
+ { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" },
1413
+ { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" },
1414
+ { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" },
1415
+ },
1416
+ },
1417
+ {
1418
+ LLM_ARCH_ARWKV7,
1419
+ {
1420
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1421
+ { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
1422
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1423
+ { LLM_TENSOR_OUTPUT, "output" },
1424
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1425
+ { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" },
1426
+ { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
1427
+ { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
1428
+ { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" },
1429
+ { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" },
1430
+ { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" },
1431
+ { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" },
1432
+ { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" },
1433
+ { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" },
1434
+ { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" },
1435
+ { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" },
1436
+ { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" },
1437
+ { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" },
1438
+ { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" },
1439
+ { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
1440
+ { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
1441
+ { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
1442
+ { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
1443
+ { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
1444
+ { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
1445
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1446
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1447
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1448
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1449
+ },
1450
+ },
1451
+ {
1452
+ LLM_ARCH_GRANITE,
1453
+ {
1454
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1455
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1456
+ { LLM_TENSOR_OUTPUT, "output" },
1457
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1458
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1459
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1460
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1461
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1462
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1463
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1464
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1465
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1466
+ },
1467
+ },
1468
+ {
1469
+ LLM_ARCH_GRANITE_MOE,
1470
+ {
1471
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1472
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1473
+ { LLM_TENSOR_OUTPUT, "output" },
1474
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1475
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1476
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1477
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1478
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1479
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1480
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1481
+ { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1482
+ { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1483
+ { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1484
+ },
1485
+ },
1486
+ {
1487
+ LLM_ARCH_CHAMELEON,
1488
+ {
1489
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1490
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1491
+ { LLM_TENSOR_OUTPUT, "output" },
1492
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1493
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1494
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1495
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1496
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1497
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1498
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1499
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1500
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1501
+ { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1502
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1503
+ },
1504
+ },
1505
+ {
1506
+ LLM_ARCH_WAVTOKENIZER_DEC,
1507
+ {
1508
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1509
+ { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
1510
+ { LLM_TENSOR_CONV1D, "conv1d" },
1511
+ { LLM_TENSOR_CONVNEXT_DW, "convnext.%d.dw" },
1512
+ { LLM_TENSOR_CONVNEXT_NORM, "convnext.%d.norm" },
1513
+ { LLM_TENSOR_CONVNEXT_PW1, "convnext.%d.pw1" },
1514
+ { LLM_TENSOR_CONVNEXT_PW2, "convnext.%d.pw2" },
1515
+ { LLM_TENSOR_CONVNEXT_GAMMA, "convnext.%d.gamma" },
1516
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1517
+ { LLM_TENSOR_OUTPUT, "output" },
1518
+ { LLM_TENSOR_POS_NET_CONV1, "posnet.%d.conv1" },
1519
+ { LLM_TENSOR_POS_NET_CONV2, "posnet.%d.conv2" },
1520
+ { LLM_TENSOR_POS_NET_NORM, "posnet.%d.norm" },
1521
+ { LLM_TENSOR_POS_NET_NORM1, "posnet.%d.norm1" },
1522
+ { LLM_TENSOR_POS_NET_NORM2, "posnet.%d.norm2" },
1523
+ { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" },
1524
+ { LLM_TENSOR_POS_NET_ATTN_Q, "posnet.%d.attn_q" },
1525
+ { LLM_TENSOR_POS_NET_ATTN_K, "posnet.%d.attn_k" },
1526
+ { LLM_TENSOR_POS_NET_ATTN_V, "posnet.%d.attn_v" },
1527
+ { LLM_TENSOR_POS_NET_ATTN_OUT, "posnet.%d.attn_output" },
1528
+ },
1529
+ },
1530
+ {
1531
+ LLM_ARCH_BAILINGMOE,
1532
+ {
1533
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1534
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1535
+ { LLM_TENSOR_OUTPUT, "output" },
1536
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1537
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1538
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1539
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1540
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1541
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1542
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1543
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1544
+ { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1545
+ { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1546
+ { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1547
+ { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
1548
+ { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
1549
+ { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
1550
+ { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
1551
+ },
1552
+ },
1553
+ {
1554
+ LLM_ARCH_UNKNOWN,
1555
+ {
1556
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1557
+ },
1558
+ },
1559
+ };
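For context on how the table above is consumed: each `"blk.%d.*"` entry is a printf-style template whose block index is substituted per layer to produce a concrete GGUF tensor name (the full mechanics are in `LLM_TN_IMPL::str()` further down). A minimal standalone sketch of that expansion; the `.weight` suffix here is an assumed example of the suffixes callers append, not taken from this file:

```cpp
// Minimal standalone sketch: expanding a "blk.%d.*" template from the table
// above into a per-layer tensor name. The ".weight" suffix is an assumption
// about typical caller usage (compare LLM_TN_IMPL::str() below).
#include <cstdio>
#include <string>

int main() {
    const char * templ = "blk.%d.attn_q"; // template taken from the table
    char buf[64];
    std::snprintf(buf, sizeof(buf), templ, 2); // substitute layer index 2
    std::string name = std::string(buf) + ".weight";
    std::printf("%s\n", name.c_str()); // prints: blk.2.attn_q.weight
    return 0;
}
```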
+
+ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
+     {LLM_TENSOR_TOKEN_EMBD, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+     {LLM_TENSOR_POS_EMBD, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+     {LLM_TENSOR_TOKEN_EMBD_NORM, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+     {LLM_TENSOR_TOKEN_TYPES, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+     {LLM_TENSOR_OUTPUT, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_CLS, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_CLS_OUT, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+     {LLM_TENSOR_DEC_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+     {LLM_TENSOR_ENC_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+     {LLM_TENSOR_ROPE_FREQS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+     {LLM_TENSOR_ROPE_FACTORS_LONG, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+     {LLM_TENSOR_ROPE_FACTORS_SHORT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+     {LLM_TENSOR_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_QKV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_DOWN_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_GATE_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_UP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_Q_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_Q_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_KV_A_MQA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_KV_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_K_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ATTN_V_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_CROSS_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_CROSS_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_CROSS_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_CROSS_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_DEC_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ENC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ENC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ENC_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ENC_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ENC_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ENC_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_ENC_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_GATE_INP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_GATE_INP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_SSM_IN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_SSM_X, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_SSM_DT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_SSM_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_A1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_A2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_V1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_V2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_G1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_G2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_DECAY_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_DECAY_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_KEY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_VALUE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_RECEPTANCE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_TIME_MIX_OUTPUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_CHANNEL_MIX_KEY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_CHANNEL_MIX_VALUE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_FFN_ACT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}},
+     {LLM_TENSOR_SSM_CONV1D, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
+     {LLM_TENSOR_SSM_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}},
+     {LLM_TENSOR_SSM_D, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_TIME_MIX_LERP_X, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_TIME_MIX_LN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_CHANNEL_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_CHANNEL_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_TIME_MIX_K_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_TIME_MIX_K_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_TIME_MIX_R_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_TIME_MIX_LERP_W, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_LERP_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_LERP_G, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_LERP_FUSED, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_DECAY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_W0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_A0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_V0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     {LLM_TENSOR_TIME_MIX_FIRST, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}},
+     {LLM_TENSOR_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ATTN_NORM_2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ATTN_OUT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ATTN_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_FFN_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_FFN_NORM_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ATTN_Q_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ATTN_K_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_LAYER_OUT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ATTN_Q_A_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ATTN_KV_A_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ATTN_SUB_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_FFN_SUB_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_DEC_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_DEC_CROSS_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_DEC_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ENC_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_ENC_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_DEC_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
+     {LLM_TENSOR_ENC_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
+     {LLM_TENSOR_FFN_DOWN_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+     {LLM_TENSOR_FFN_GATE_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+     {LLM_TENSOR_FFN_UP_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+     {LLM_TENSOR_FFN_EXP_PROBS_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+     // this tensor is loaded for T5, but never used
+     {LLM_TENSOR_DEC_CROSS_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
+     {LLM_TENSOR_CONV1D, {LLM_TENSOR_LAYER_INPUT, GGML_OP_IM2COL}},
+     {LLM_TENSOR_POS_NET_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_POS_NET_NORM1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_POS_NET_NORM2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_POS_NET_CONV1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+     {LLM_TENSOR_POS_NET_CONV2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+     {LLM_TENSOR_POS_NET_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_POS_NET_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_POS_NET_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_POS_NET_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_POS_NET_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_CONVNEXT_DW, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+     {LLM_TENSOR_CONVNEXT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+     {LLM_TENSOR_CONVNEXT_PW1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_CONVNEXT_PW2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+     {LLM_TENSOR_CONVNEXT_GAMMA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+ };
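Each entry in `LLM_TENSOR_INFOS` pairs a tensor with the layer class it belongs to (input, repeating per-block, or output) and the ggml op that consumes it. A self-contained sketch of the same lookup shape; the enums, keys, and table contents below are illustrative stand-ins, not the library's definitions:

```cpp
// Self-contained sketch of an LLM_TENSOR_INFOS-style lookup; everything here
// is a stand-in for illustration, not the library's types or values.
#include <cstdio>
#include <map>
#include <string>

enum layer_kind { LAYER_INPUT, LAYER_REPEATING, LAYER_OUTPUT };

struct tensor_info {
    layer_kind   layer; // where the tensor lives in the model
    const char * op;    // the op that consumes it (placeholder names)
};

int main() {
    const std::map<std::string, tensor_info> infos = {
        { "token_embd", { LAYER_INPUT,     "GET_ROWS" } },
        { "attn_q",     { LAYER_REPEATING, "MUL_MAT"  } },
        { "output",     { LAYER_OUTPUT,    "MUL_MAT"  } },
    };
    // .at() throws std::out_of_range for unknown tensors, mirroring how
    // llm_tensor_info_for() below defers to LLM_TENSOR_INFOS.at().
    const tensor_info & ti = infos.at("attn_q");
    std::printf("layer=%d op=%s\n", (int) ti.layer, ti.op);
    return 0;
}
```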
+
+ LLM_KV::LLM_KV(llm_arch arch, const char * suffix) : arch(arch), suffix(suffix) {}
+
+ std::string LLM_KV::operator()(llm_kv kv) const {
+     return suffix ? ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch), suffix)
+                   : ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
+ }
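`operator()` formats a metadata-key template with the architecture name and an optional suffix. A hedged sketch of the same mechanics, assuming the usual `"%s.<field>"` template shape of GGUF metadata keys (`make_key` and the template are illustrative, not this file's definitions):

```cpp
// Sketch of LLM_KV-style key construction. make_key() and the template shape
// are assumptions for illustration; the real templates live in LLM_KV_NAMES.
#include <cstdio>
#include <string>

static std::string make_key(const char * templ, const char * arch, const char * suffix) {
    char buf[128];
    if (suffix != nullptr) {
        std::snprintf(buf, sizeof(buf), templ, arch, suffix);
    } else {
        std::snprintf(buf, sizeof(buf), templ, arch);
    }
    return buf;
}

int main() {
    // e.g. "%s.context_length" + "llama" -> "llama.context_length"
    std::printf("%s\n", make_key("%s.context_length", "llama", nullptr).c_str());
    return 0;
}
```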
+
+ std::string LLM_TN_IMPL::str() const {
+     if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
+         return "__missing__";
+     }
+
+     std::string name = ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid);
+
+     if (suffix != nullptr) {
+         name += ".";
+         name += suffix;
+     }
+
+     return name;
+ }
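Note that `str()` passes both `bid` and `xid` to every template even though most templates reference only one of them; trailing printf-style arguments that the format string does not consume are evaluated and ignored, which is what makes this safe. A standalone sketch of the resulting names, assuming `::format()` is snprintf-like (as in llama.cpp's common helpers):

```cpp
// Standalone sketch of what LLM_TN_IMPL::str() produces, under the assumption
// that ::format() behaves like snprintf into a std::string. Unreferenced
// trailing printf arguments are ignored per the C standard.
#include <cstdio>
#include <string>

static std::string tn(const char * templ, int bid, int xid, const char * suffix) {
    char buf[128];
    std::snprintf(buf, sizeof(buf), templ, bid, xid);
    std::string name = buf;
    if (suffix != nullptr) {
        name += ".";
        name += suffix;
    }
    return name;
}

int main() {
    std::printf("%s\n", tn("blk.%d.attn_q",      3, 0, "weight").c_str()); // blk.3.attn_q.weight
    std::printf("%s\n", tn("blk.%d.ffn_gate.%d", 3, 7, "weight").c_str()); // blk.3.ffn_gate.7.weight
    return 0;
}
```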
+
+ const char * llm_arch_name(llm_arch arch) {
+     auto it = LLM_ARCH_NAMES.find(arch);
+     if (it == LLM_ARCH_NAMES.end()) {
+         return "unknown";
+     }
+     return it->second;
+ }
+
+ llm_arch llm_arch_from_string(const std::string & name) {
+     for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
+         if (kv.second == name) {
+             return kv.first;
+         }
+     }
+
+     return LLM_ARCH_UNKNOWN;
+ }
+
+ const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) {
+     return LLM_TENSOR_INFOS.at(tensor);
+ }
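Together these helpers give a string round-trip for architectures: names not found in the table map to `LLM_ARCH_UNKNOWN`, which `llm_arch_name` renders as `"unknown"`. A minimal sketch of that behavior with a stand-in table (the enum and map below are placeholders for `LLM_ARCH_NAMES`):

```cpp
// Minimal sketch of the arch-name round-trip provided by llm_arch_name() and
// llm_arch_from_string(); the enum and table are stand-ins, not the real ones.
#include <cstdio>
#include <map>
#include <string>

enum arch { ARCH_LLAMA, ARCH_UNKNOWN };

static const std::map<arch, const char *> ARCH_NAMES = { { ARCH_LLAMA, "llama" } };

static const char * arch_name(arch a) {
    auto it = ARCH_NAMES.find(a);
    return it == ARCH_NAMES.end() ? "unknown" : it->second;
}

static arch arch_from_string(const std::string & name) {
    for (const auto & kv : ARCH_NAMES) {
        if (kv.second == name) {
            return kv.first;
        }
    }
    return ARCH_UNKNOWN;
}

int main() {
    std::printf("%s\n", arch_name(arch_from_string("llama")));   // llama
    std::printf("%s\n", arch_name(arch_from_string("mystery"))); // unknown
    return 0;
}
```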