cui-llama.rn 1.4.4 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (216)
  1. package/android/src/main/CMakeLists.txt +9 -2
  2. package/android/src/main/jni.cpp +54 -34
  3. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  11. package/cpp/binary-ops.cpp +158 -0
  12. package/cpp/binary-ops.h +16 -0
  13. package/cpp/chat.cpp +1769 -1085
  14. package/cpp/chat.h +143 -0
  15. package/cpp/common.cpp +1562 -1996
  16. package/cpp/common.h +677 -744
  17. package/cpp/cpu-common.h +72 -0
  18. package/cpp/ggml-alloc.c +1039 -1030
  19. package/cpp/ggml-alloc.h +1 -1
  20. package/cpp/ggml-backend-impl.h +255 -255
  21. package/cpp/ggml-backend-reg.cpp +586 -582
  22. package/cpp/ggml-backend.cpp +2004 -2002
  23. package/cpp/ggml-backend.h +354 -354
  24. package/cpp/ggml-common.h +1857 -1851
  25. package/cpp/ggml-cpp.h +39 -39
  26. package/cpp/ggml-cpu-aarch64.cpp +5725 -4247
  27. package/cpp/ggml-cpu-aarch64.h +8 -8
  28. package/cpp/ggml-cpu-impl.h +512 -380
  29. package/cpp/ggml-cpu-quants.c +13026 -11517
  30. package/cpp/ggml-cpu-traits.cpp +36 -36
  31. package/cpp/ggml-cpu-traits.h +38 -38
  32. package/cpp/ggml-cpu.c +3438 -14485
  33. package/cpp/ggml-cpu.cpp +655 -633
  34. package/cpp/ggml-cpu.h +138 -135
  35. package/cpp/ggml-impl.h +594 -567
  36. package/cpp/ggml-metal-impl.h +312 -3
  37. package/cpp/ggml-metal.h +66 -66
  38. package/cpp/ggml-metal.m +5360 -5002
  39. package/cpp/ggml-opt.cpp +854 -854
  40. package/cpp/ggml-opt.h +216 -216
  41. package/cpp/ggml-quants.c +5238 -5238
  42. package/cpp/ggml-threading.h +14 -14
  43. package/cpp/ggml.c +6618 -6524
  44. package/cpp/ggml.h +2222 -2194
  45. package/cpp/gguf.cpp +1330 -1329
  46. package/cpp/gguf.h +202 -202
  47. package/cpp/json-schema-to-grammar.cpp +1024 -1025
  48. package/cpp/json-schema-to-grammar.h +21 -22
  49. package/cpp/json.hpp +24766 -24766
  50. package/cpp/llama-adapter.cpp +382 -347
  51. package/cpp/llama-adapter.h +76 -74
  52. package/cpp/llama-arch.cpp +1714 -1492
  53. package/cpp/llama-arch.h +428 -402
  54. package/cpp/llama-batch.cpp +368 -368
  55. package/cpp/llama-batch.h +88 -88
  56. package/cpp/llama-chat.cpp +640 -587
  57. package/cpp/llama-chat.h +56 -53
  58. package/cpp/llama-context.cpp +2831 -1775
  59. package/cpp/llama-context.h +265 -128
  60. package/cpp/llama-cparams.cpp +1 -1
  61. package/cpp/llama-cparams.h +38 -37
  62. package/cpp/llama-cpp.h +30 -30
  63. package/cpp/llama-grammar.cpp +1219 -1219
  64. package/cpp/llama-grammar.h +173 -164
  65. package/cpp/llama-graph.cpp +1695 -0
  66. package/cpp/llama-graph.h +592 -0
  67. package/cpp/llama-hparams.cpp +79 -71
  68. package/cpp/llama-hparams.h +156 -139
  69. package/cpp/llama-impl.cpp +167 -167
  70. package/cpp/llama-impl.h +61 -61
  71. package/cpp/llama-io.cpp +15 -0
  72. package/cpp/llama-io.h +35 -0
  73. package/cpp/llama-kv-cache.cpp +1380 -718
  74. package/cpp/llama-kv-cache.h +213 -218
  75. package/cpp/llama-memory.cpp +1 -0
  76. package/cpp/llama-memory.h +21 -0
  77. package/cpp/llama-mmap.cpp +600 -590
  78. package/cpp/llama-mmap.h +68 -68
  79. package/cpp/llama-model-loader.cpp +1129 -1124
  80. package/cpp/llama-model-loader.h +169 -167
  81. package/cpp/llama-model.cpp +13080 -4023
  82. package/cpp/llama-model.h +409 -370
  83. package/cpp/llama-sampling.cpp +2563 -2525
  84. package/cpp/llama-sampling.h +32 -32
  85. package/cpp/llama-vocab.cpp +3295 -3252
  86. package/cpp/llama-vocab.h +125 -125
  87. package/cpp/llama.cpp +351 -10137
  88. package/cpp/llama.h +1434 -1340
  89. package/cpp/log.cpp +427 -423
  90. package/cpp/log.h +132 -132
  91. package/cpp/{chat-template.hpp → minja/chat-template.hpp} +537 -529
  92. package/cpp/{minja.hpp → minja/minja.hpp} +2941 -2883
  93. package/cpp/ops.cpp +8723 -0
  94. package/cpp/ops.h +128 -0
  95. package/cpp/rn-llama.cpp +45 -71
  96. package/cpp/rn-llama.h +3 -3
  97. package/cpp/sampling.cpp +573 -532
  98. package/cpp/sgemm.cpp +3043 -2598
  99. package/cpp/sgemm.h +14 -14
  100. package/cpp/simd-mappings.h +888 -0
  101. package/cpp/speculative.cpp +278 -277
  102. package/cpp/speculative.h +28 -28
  103. package/cpp/unary-ops.cpp +186 -0
  104. package/cpp/unary-ops.h +28 -0
  105. package/cpp/vec.cpp +258 -0
  106. package/cpp/vec.h +802 -0
  107. package/ios/CMakeLists.txt +5 -2
  108. package/ios/RNLlama.mm +2 -2
  109. package/ios/RNLlamaContext.mm +40 -24
  110. package/package.json +1 -1
  111. package/src/NativeRNLlama.ts +6 -4
  112. package/src/index.ts +3 -1
  113. package/android/src/main/build-arm64/CMakeCache.txt +0 -429
  114. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCCompiler.cmake +0 -81
  115. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCXXCompiler.cmake +0 -101
  116. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_C.bin +0 -0
  117. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_CXX.bin +0 -0
  118. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeSystem.cmake +0 -15
  119. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.c +0 -904
  120. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.o +0 -0
  121. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.cpp +0 -919
  122. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.o +0 -0
  123. package/android/src/main/build-arm64/CMakeFiles/CMakeConfigureLog.yaml +0 -431
  124. package/android/src/main/build-arm64/CMakeFiles/CMakeDirectoryInformation.cmake +0 -16
  125. package/android/src/main/build-arm64/CMakeFiles/Makefile.cmake +0 -165
  126. package/android/src/main/build-arm64/CMakeFiles/Makefile2 +0 -297
  127. package/android/src/main/build-arm64/CMakeFiles/Progress/1 +0 -1
  128. package/android/src/main/build-arm64/CMakeFiles/Progress/2 +0 -1
  129. package/android/src/main/build-arm64/CMakeFiles/Progress/3 +0 -1
  130. package/android/src/main/build-arm64/CMakeFiles/Progress/4 +0 -1
  131. package/android/src/main/build-arm64/CMakeFiles/Progress/5 +0 -1
  132. package/android/src/main/build-arm64/CMakeFiles/Progress/6 +0 -1
  133. package/android/src/main/build-arm64/CMakeFiles/Progress/count.txt +0 -1
  134. package/android/src/main/build-arm64/CMakeFiles/TargetDirectories.txt +0 -8
  135. package/android/src/main/build-arm64/CMakeFiles/cmake.check_cache +0 -1
  136. package/android/src/main/build-arm64/CMakeFiles/progress.marks +0 -1
  137. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o +0 -0
  138. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o.d +0 -58
  139. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o +0 -0
  140. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o.d +0 -756
  141. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o +0 -0
  142. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o.d +0 -709
  143. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o +0 -0
  144. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o.d +0 -714
  145. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o +0 -0
  146. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o.d +0 -62
  147. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o +0 -0
  148. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o.d +0 -708
  149. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o +0 -0
  150. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o.d +0 -113
  151. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o +0 -0
  152. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o.d +0 -713
  153. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o +0 -0
  154. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o.d +0 -763
  155. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o +0 -0
  156. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o.d +0 -61
  157. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o +0 -0
  158. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o.d +0 -707
  159. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o +0 -0
  160. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o.d +0 -104
  161. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o +0 -0
  162. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o.d +0 -714
  163. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o +0 -0
  164. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o.d +0 -723
  165. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/DependInfo.cmake +0 -62
  166. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/build.make +0 -722
  167. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/cmake_clean.cmake +0 -89
  168. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.make +0 -2
  169. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.ts +0 -2
  170. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/depend.make +0 -2
  171. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/flags.make +0 -17
  172. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/progress.make +0 -41
  173. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/DependInfo.cmake +0 -62
  174. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/build.make +0 -722
  175. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/cmake_clean.cmake +0 -89
  176. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.make +0 -2
  177. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.ts +0 -2
  178. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/depend.make +0 -2
  179. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/flags.make +0 -17
  180. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/progress.make +0 -41
  181. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/DependInfo.cmake +0 -62
  182. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/build.make +0 -722
  183. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/cmake_clean.cmake +0 -89
  184. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.make +0 -2
  185. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.ts +0 -2
  186. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/depend.make +0 -2
  187. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/flags.make +0 -17
  188. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/progress.make +0 -41
  189. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/DependInfo.cmake +0 -62
  190. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/build.make +0 -722
  191. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/cmake_clean.cmake +0 -89
  192. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.make +0 -2
  193. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.ts +0 -2
  194. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/depend.make +0 -2
  195. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/flags.make +0 -17
  196. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/progress.make +0 -41
  197. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/DependInfo.cmake +0 -62
  198. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/build.make +0 -722
  199. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/cmake_clean.cmake +0 -89
  200. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.make +0 -2
  201. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.ts +0 -2
  202. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/depend.make +0 -2
  203. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/flags.make +0 -17
  204. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/progress.make +0 -41
  205. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/DependInfo.cmake +0 -62
  206. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/build.make +0 -722
  207. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/cmake_clean.cmake +0 -89
  208. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.make +0 -2
  209. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.ts +0 -2
  210. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/depend.make +0 -2
  211. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/flags.make +0 -17
  212. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/progress.make +0 -41
  213. package/android/src/main/build-arm64/Makefile +0 -1862
  214. package/android/src/main/build-arm64/cmake_install.cmake +0 -66
  215. package/cpp/chat.hpp +0 -55
  216. package/cpp/rn-llama.hpp +0 -913
package/cpp/llama-graph.h (new file)
@@ -0,0 +1,592 @@
+ #pragma once
+
+ #include "llama-arch.h"
+ #include "llama-hparams.h"
+ #include "llama-adapter.h"
+
+ #include <cstdint>
+ #include <vector>
+ #include <memory>
+ #include <set>
+ #include <functional>
+
+ struct lm_ggml_cgraph;
+ struct lm_ggml_context;
+ struct lm_ggml_tensor;
+
+ struct llama_ubatch;
+ struct llama_cparams;
+
+ class llama_memory_i;
+ class llama_kv_cache_unified;
+
+ // certain models (typically multi-modal) can produce different types of graphs
+ enum llm_graph_type {
+     LLM_GRAPH_TYPE_DEFAULT,
+     LLM_GRAPH_TYPE_ENCODER,
+     LLM_GRAPH_TYPE_DECODER,
+ };
+
+ enum llm_ffn_op_type {
+     LLM_FFN_SILU,
+     LLM_FFN_GELU,
+     LLM_FFN_RELU,
+     LLM_FFN_RELU_SQR,
+     LLM_FFN_SWIGLU,
+ };
+
+ enum llm_ffn_gate_type {
+     LLM_FFN_SEQ,
+     LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
+ };
+
+ enum llm_norm_type {
+     LLM_NORM,
+     LLM_NORM_RMS,
+     LLM_NORM_GROUP,
+ };
+
+ // TODO: tmp - need something better to pass the data from the encoder to the decoder
+ struct llama_cross {
+     // the output embeddings from the encoder as a ggml tensor
+     // TODO: this needs more work to be correct, for now copy the embeddings data to host memory
+     // ref: https://github.com/ggml-org/llama.cpp/pull/11213#discussion_r1969892524
+     //lm_ggml_tensor * t_embd = nullptr;
+
+     int64_t n_embd = 0;
+     int64_t n_enc = 0;
+
+     // embeddings data copied to host memory (tmp)
+     std::vector<float> v_embd;
+
+     // needed to construct the cross-attention mask in the decoder
+     std::vector<std::set<llama_seq_id>> seq_ids_enc;
+ };
+
+ //
+ // llm_graph_input
+ //
+
+ class llm_graph_input_i {
+ public:
+     virtual ~llm_graph_input_i() = default;
+
+     virtual void set_input(const llama_ubatch * ubatch) = 0;
+ };
+
+ using llm_graph_input_ptr = std::unique_ptr<llm_graph_input_i>;
+
+
+ class llm_graph_input_embd : public llm_graph_input_i {
+ public:
+     llm_graph_input_embd() = default;
+     virtual ~llm_graph_input_embd() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * tokens = nullptr; // I32 [n_batch]
+     lm_ggml_tensor * embd = nullptr; // F32 [n_embd, n_batch]
+ };
+
+ class llm_graph_input_pos : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos(int64_t n_pos_per_token) : n_pos_per_token(n_pos_per_token) {}
+     virtual ~llm_graph_input_pos() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos = nullptr; // I32 [n_batch]
+
+     const int64_t n_pos_per_token = 1;
+ };
+
+ // temperature tuning, used by llama4
+ class llm_graph_input_attn_temp : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_temp(int64_t n_pos_per_token, uint32_t n_attn_temp_floor_scale, float f_attn_temp_scale)
+         : n_pos_per_token(n_pos_per_token), n_attn_temp_floor_scale(n_attn_temp_floor_scale), f_attn_temp_scale(f_attn_temp_scale) {}
+     virtual ~llm_graph_input_attn_temp() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * attn_scale = nullptr; // F32 [n_batch]
+
+     const int64_t n_pos_per_token = 1;
+
+     const uint32_t n_attn_temp_floor_scale;
+     const float f_attn_temp_scale;
+ };
+
+ class llm_graph_input_pos_bucket : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos_bucket(const llama_hparams & hparams) : hparams(hparams) {}
+     virtual ~llm_graph_input_pos_bucket() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos_bucket = nullptr; // I32 [n_batch, n_batch]
+
+     const llama_hparams & hparams;
+ };
+
+ class llm_graph_input_pos_bucket_kv : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos_bucket_kv(
+             const llama_hparams & hparams,
+             const llama_kv_cache_unified * kv_self) : hparams(hparams), kv_self(kv_self) {}
+     virtual ~llm_graph_input_pos_bucket_kv() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos_bucket = nullptr; // I32 [n_kv, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_out_ids : public llm_graph_input_i {
+ public:
+     llm_graph_input_out_ids(
+             const llama_hparams & hparams,
+             const llama_cparams & cparams,
+             int32_t n_outputs) : hparams(hparams), cparams(cparams), n_outputs(n_outputs) {}
+     virtual ~llm_graph_input_out_ids() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * out_ids; // I32 [n_outputs]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+
+     const int32_t n_outputs;
+ };
+
+ class llm_graph_input_mean : public llm_graph_input_i {
+ public:
+     llm_graph_input_mean(const llama_cparams & cparams) : cparams(cparams) {}
+     virtual ~llm_graph_input_mean() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * mean; // F32 [n_batch, n_batch]
+
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_cls : public llm_graph_input_i {
+ public:
+     llm_graph_input_cls(const llama_cparams & cparams) : cparams(cparams) {}
+     virtual ~llm_graph_input_cls() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * cls; // I32 [n_batch]
+
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_s_copy : public llm_graph_input_i {
+ public:
+     llm_graph_input_s_copy(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
+     virtual ~llm_graph_input_s_copy() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * s_copy; // I32 [kv_size]
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_s_mask : public llm_graph_input_i {
+ public:
+     llm_graph_input_s_mask(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
+     virtual ~llm_graph_input_s_mask() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * s_mask; // F32 [1, n_kv]
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_cross_embd : public llm_graph_input_i {
+ public:
+     llm_graph_input_cross_embd(
+             const llama_cross * cross) : cross(cross) {}
+     virtual ~llm_graph_input_cross_embd() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * cross_embd; // F32 [n_embd, n_outputs_enc]
+
+     const llama_cross * cross;
+ };
+
+ class llm_graph_input_attn_no_cache : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_no_cache(const llama_hparams & hparams, const llama_cparams & cparams) :
+         hparams(hparams),
+         cparams(cparams) {
+     }
+     ~llm_graph_input_attn_no_cache() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask() const { return kq_mask_cnv; }
+
+     lm_ggml_tensor * kq_mask = nullptr; // F32 [n_tokens, n_batch]
+     lm_ggml_tensor * kq_mask_cnv = nullptr; // [n_tokens, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_attn_kv_unified : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_kv_unified(
+             const llama_hparams & hparams,
+             const llama_cparams & cparams,
+             const llama_kv_cache_unified * kv_self) :
+         hparams(hparams),
+         cparams(cparams),
+         kv_self(kv_self) {
+     }
+     ~llm_graph_input_attn_kv_unified() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; }
+     lm_ggml_tensor * get_kq_mask_swa() const { return self_kq_mask_swa_cnv; }
+
+     lm_ggml_tensor * self_kq_mask = nullptr; // F32 [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_swa = nullptr; // F32 [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_swa_cnv = nullptr; // [n_kv, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_attn_cross : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_cross(const llama_cross * cross) : cross(cross) {}
+     ~llm_graph_input_attn_cross() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask_cross() const { return cross_kq_mask_cnv; }
+
+     lm_ggml_tensor * cross_kq_mask = nullptr; // F32 [n_outputs_enc, n_batch]
+     lm_ggml_tensor * cross_kq_mask_cnv = nullptr; // F32 [n_outputs_enc, n_batch]
+
+     const llama_cross * cross = nullptr;
+ };
+
+ //
+ // llm_graph_result
+ //
+
+ // these objects deliver the result from the graph build process back to the llama_context
+ // note that the input tensors created for the graph are referenced here - the goal is to be able to populate their
+ // specific data, by calling the set_inputs() method
+ // along with the input tensors, the object also provides commonly used outputs tensors, such as logits, embeddings, etc.
+ // these are used by the llama_context to extact the relevant data, based on the compute parameters
+
+ class llm_graph_result_i {
+ public:
+     virtual ~llm_graph_result_i() = default;
+
+     virtual lm_ggml_tensor * get_logits() = 0;
+     virtual lm_ggml_tensor * get_embd() = 0;
+     virtual lm_ggml_tensor * get_embd_pooled() = 0;
+
+     virtual void set_inputs(const llama_ubatch * ubatch) = 0;
+ };
+
+ using llm_graph_result_ptr = std::unique_ptr<llm_graph_result_i>;
+
+
+ class llm_graph_result : public llm_graph_result_i {
+ public:
+     virtual ~llm_graph_result() = default;
+
+     lm_ggml_tensor * get_logits() override { return t_logits; }
+     lm_ggml_tensor * get_embd() override { return t_embd; }
+     lm_ggml_tensor * get_embd_pooled() override { return t_embd_pooled; }
+
+     void set_inputs(const llama_ubatch * ubatch) override {
+         for (auto & input : inputs) {
+             input->set_input(ubatch);
+         }
+     }
+
+     llm_graph_input_i * add_input(llm_graph_input_ptr input) {
+         inputs.emplace_back(std::move(input));
+         return inputs.back().get();
+     }
+
+     // important graph nodes
+     lm_ggml_tensor * t_logits = nullptr;
+     lm_ggml_tensor * t_embd = nullptr;
+     lm_ggml_tensor * t_embd_pooled = nullptr;
+
+     std::vector<llm_graph_input_ptr> inputs;
+ };
+
+ //
+ // llm_graph_context
+ //
+
+ // callback that allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
+ using llm_graph_cb = std::function<void(const llama_ubatch & ubatch, lm_ggml_tensor * cur, const char * name, int il)>;
+
+ struct llm_graph_params {
+     lm_ggml_context * ctx;
+
+     const llm_arch arch;
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+     const llama_ubatch & ubatch;
+
+     lm_ggml_backend_sched * sched;
+     lm_ggml_backend * backend_cpu;
+
+     const llama_adapter_cvec * cvec;
+     const llama_adapter_loras * loras;
+     const llama_memory_i * memory;
+     const llama_cross * cross;
+
+     int32_t n_outputs;
+
+     const llm_graph_cb & cb;
+ };
+
+ struct llm_graph_context {
+     const llm_arch arch;
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+     const llama_ubatch & ubatch;
+
+     const int64_t n_embd;
+     const int64_t n_layer;
+     const int64_t n_rot;
+     const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
+     const int64_t n_ctx_per_seq;
+     const int64_t n_head;
+     const int64_t n_head_kv;
+     const int64_t n_embd_head_k;
+     const int64_t n_embd_k_gqa;
+     const int64_t n_embd_head_v;
+     const int64_t n_embd_v_gqa;
+     const int64_t n_expert;
+     const int64_t n_expert_used;
+
+     const float freq_base;
+     const float freq_scale;
+     const float ext_factor;
+     const float attn_factor;
+     const float beta_fast;
+     const float beta_slow;
+     const float norm_eps;
+     const float norm_rms_eps;
+
+     const int32_t n_tokens;
+     const int32_t n_outputs;
+     const int32_t n_ctx_orig; // yarn
+
+     const enum llama_pooling_type pooling_type;
+     const enum llama_rope_type rope_type;
+
+     lm_ggml_context * ctx0 = nullptr;
+
+     lm_ggml_backend_sched * sched;
+
+     lm_ggml_backend * backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove?
+
+     const llama_adapter_cvec * cvec;
+     const llama_adapter_loras * loras;
+     const llama_memory_i * memory;
+     const llama_cross * cross;
+
+     const llm_graph_cb & cb_func;
+
+     std::unique_ptr<llm_graph_result> res;
+
+     llm_graph_context(const llm_graph_params & params);
+
+     int64_t n_pos_per_token() const;
+
+     void cb(lm_ggml_tensor * cur, const char * name, int il) const;
+
+     //
+     // common
+     //
+
+     lm_ggml_tensor * build_cvec(
+             lm_ggml_tensor * cur,
+             int il) const;
+
+     // do mat_mul, while optionally apply lora
+     lm_ggml_tensor * build_lora_mm(
+             lm_ggml_tensor * w,
+             lm_ggml_tensor * cur) const;
+
+     // do mat_mul_id, while optionally apply lora
+     lm_ggml_tensor * build_lora_mm_id(
+             lm_ggml_tensor * w, // lm_ggml_tensor * as
+             lm_ggml_tensor * cur, // lm_ggml_tensor * b
+             lm_ggml_tensor * ids) const;
+
+     lm_ggml_tensor * build_norm(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * mw,
+             lm_ggml_tensor * mb,
+             llm_norm_type type,
+             int il) const;
+
+     lm_ggml_tensor * build_ffn(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * up,
+             lm_ggml_tensor * up_b,
+             lm_ggml_tensor * up_s,
+             lm_ggml_tensor * gate,
+             lm_ggml_tensor * gate_b,
+             lm_ggml_tensor * gate_s,
+             lm_ggml_tensor * down,
+             lm_ggml_tensor * down_b,
+             lm_ggml_tensor * down_s,
+             lm_ggml_tensor * act_scales,
+             llm_ffn_op_type type_op,
+             llm_ffn_gate_type type_gate,
+             int il) const;
+
+     lm_ggml_tensor * build_moe_ffn(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * gate_inp,
+             lm_ggml_tensor * up_exps,
+             lm_ggml_tensor * gate_exps,
+             lm_ggml_tensor * down_exps,
+             lm_ggml_tensor * exp_probs_b,
+             int64_t n_expert,
+             int64_t n_expert_used,
+             llm_ffn_op_type type_op,
+             bool norm_w,
+             bool scale_w,
+             float w_scale,
+             llama_expert_gating_func_type gating_op,
+             int il) const;
+
+     //
+     // inputs
+     //
+
+     lm_ggml_tensor * build_inp_embd(lm_ggml_tensor * tok_embd) const;
+     lm_ggml_tensor * build_inp_pos() const;
+     lm_ggml_tensor * build_inp_attn_scale() const;
+     lm_ggml_tensor * build_inp_out_ids() const;
+     lm_ggml_tensor * build_inp_mean() const;
+     lm_ggml_tensor * build_inp_cls() const;
+     lm_ggml_tensor * build_inp_s_copy() const;
+     lm_ggml_tensor * build_inp_s_mask() const;
+
+     lm_ggml_tensor * build_inp_cross_embd() const;
+     lm_ggml_tensor * build_inp_pos_bucket_enc() const;
+     lm_ggml_tensor * build_inp_pos_bucket_dec() const;
+     lm_ggml_tensor * build_pos_bias(lm_ggml_tensor * pos_bucket, lm_ggml_tensor * attn_rel_b) const;
+
+     //
+     // attention
+     //
+
+     lm_ggml_tensor * build_attn_mha(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * q, // [n_embd_head_q, n_tokens, n_head_q]
+             lm_ggml_tensor * k, // [n_embd_head_k, n_tokens, n_head_k]
+             lm_ggml_tensor * v, // [n_embd_head_v, n_tokens, n_head_v] (v_trans == false)
+             lm_ggml_tensor * kq_b,
+             lm_ggml_tensor * kq_mask,
+             bool v_trans,
+             float kq_scale) const;
+
+     llm_graph_input_attn_no_cache * build_attn_inp_no_cache() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_no_cache * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     llm_graph_input_attn_kv_unified * build_attn_inp_kv_unified() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_kv_unified * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     llm_graph_input_attn_cross * build_attn_inp_cross() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_cross * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     //
+     // recurrent
+     //
+
+     lm_ggml_tensor * build_copy_mask_state(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * s,
+             lm_ggml_tensor * state_copy,
+             lm_ggml_tensor * state_mask,
+             int32_t n_state,
+             int32_t n_seqs) const;
+
+     lm_ggml_tensor * build_rwkv_token_shift_load(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * state_copy,
+             lm_ggml_tensor * state_mask,
+             const llama_ubatch & ubatch,
+             int il) const;
+
+     lm_ggml_tensor * build_rwkv_token_shift_store(
+             lm_ggml_tensor * token_shift,
+             const llama_ubatch & ubatch,
+             int il) const;
+
+     //
+     // pooling
+     //
+
+     void build_pooling(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * cls,
+             lm_ggml_tensor * cls_b,
+             lm_ggml_tensor * cls_out,
+             lm_ggml_tensor * cls_out_b) const;
+ };
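
Note on how the pieces of the new llama-graph.h fit together: each model input (token ids, positions, KV masks, etc.) is wrapped in an llm_graph_input_i object that is registered on the llm_graph_result while the graph is built, and llama_context later populates all of them in one pass by calling set_inputs() with the current ubatch. The sketch below is illustrative only and mirrors just that registration/dispatch pattern: llama_ubatch is a simplified stand-in with only a token count, and my_graph_input_example is a hypothetical input class, not one declared in the header above.

// standalone C++ sketch of the llm_graph_input_i / llm_graph_result pattern
#include <cstdint>
#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

struct llama_ubatch { int32_t n_tokens = 0; }; // simplified stand-in, not the real type

// mirrors llm_graph_input_i / llm_graph_input_ptr from the header above
class llm_graph_input_i {
public:
    virtual ~llm_graph_input_i() = default;
    virtual void set_input(const llama_ubatch * ubatch) = 0;
};

using llm_graph_input_ptr = std::unique_ptr<llm_graph_input_i>;

// hypothetical input: only reports how many tokens the ubatch carries
class my_graph_input_example : public llm_graph_input_i {
public:
    void set_input(const llama_ubatch * ubatch) override {
        std::printf("populating input data for %d tokens\n", (int) ubatch->n_tokens);
    }
};

// mirrors the add_input()/set_inputs() portion of llm_graph_result
class llm_graph_result {
public:
    llm_graph_input_i * add_input(llm_graph_input_ptr input) {
        inputs.emplace_back(std::move(input));
        return inputs.back().get();
    }

    void set_inputs(const llama_ubatch * ubatch) {
        for (auto & input : inputs) {
            input->set_input(ubatch);
        }
    }

    std::vector<llm_graph_input_ptr> inputs;
};

int main() {
    llm_graph_result res;

    // graph build time: an input object is created and registered alongside its tensors
    res.add_input(std::make_unique<my_graph_input_example>());

    // decode time: the context hands the current ubatch to every registered input
    llama_ubatch ubatch;
    ubatch.n_tokens = 8;
    res.set_inputs(&ubatch);

    return 0;
}

Keeping every input behind the common llm_graph_input_i interface is what lets llm_graph_result::set_inputs() remain a single loop regardless of which model architecture built the graph.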