cui-llama.rn 1.6.1 → 1.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. package/android/src/main/CMakeLists.txt +6 -0
  2. package/android/src/main/java/com/rnllama/LlamaContext.java +51 -14
  3. package/android/src/main/java/com/rnllama/RNLlama.java +158 -6
  4. package/android/src/main/jni.cpp +153 -14
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  13. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
  14. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
  15. package/cpp/chat.cpp +128 -106
  16. package/cpp/chat.h +2 -0
  17. package/cpp/common.cpp +38 -76
  18. package/cpp/common.h +23 -19
  19. package/cpp/ggml-backend.cpp +9 -5
  20. package/cpp/ggml-backend.h +4 -4
  21. package/cpp/ggml-cpu/ggml-cpu-aarch64.cpp +0 -2
  22. package/cpp/ggml-cpu/ggml-cpu-quants.c +306 -6
  23. package/cpp/ggml-cpu/ggml-cpu.c +5 -13
  24. package/cpp/ggml-cpu/ggml-cpu.cpp +29 -16
  25. package/cpp/ggml-cpu/ops.cpp +107 -13
  26. package/cpp/ggml-cpu/vec.cpp +0 -6
  27. package/cpp/ggml-cpu/vec.h +16 -0
  28. package/cpp/ggml-llama-sim.metallib +0 -0
  29. package/cpp/ggml-llama.metallib +0 -0
  30. package/cpp/ggml-metal-impl.h +36 -11
  31. package/cpp/ggml-metal.m +321 -132
  32. package/cpp/ggml-opt.cpp +373 -190
  33. package/cpp/ggml-opt.h +49 -28
  34. package/cpp/ggml-quants.c +0 -6
  35. package/cpp/ggml.c +93 -38
  36. package/cpp/ggml.h +21 -7
  37. package/cpp/gguf.cpp +33 -33
  38. package/cpp/llama-adapter.cpp +6 -0
  39. package/cpp/llama-arch.cpp +3 -0
  40. package/cpp/llama-batch.cpp +3 -1
  41. package/cpp/llama-chat.cpp +8 -6
  42. package/cpp/llama-chat.h +1 -0
  43. package/cpp/llama-context.cpp +349 -135
  44. package/cpp/llama-context.h +30 -3
  45. package/cpp/llama-cparams.h +1 -0
  46. package/cpp/llama-graph.cpp +150 -234
  47. package/cpp/llama-graph.h +52 -7
  48. package/cpp/llama-hparams.cpp +17 -1
  49. package/cpp/llama-hparams.h +34 -5
  50. package/cpp/llama-kv-cache.cpp +662 -321
  51. package/cpp/llama-kv-cache.h +203 -93
  52. package/cpp/llama-memory.h +3 -2
  53. package/cpp/llama-model-loader.cpp +24 -15
  54. package/cpp/llama-model-saver.cpp +281 -0
  55. package/cpp/llama-model-saver.h +37 -0
  56. package/cpp/llama-model.cpp +536 -132
  57. package/cpp/llama-model.h +7 -1
  58. package/cpp/llama-sampling.cpp +18 -6
  59. package/cpp/llama-vocab.cpp +46 -8
  60. package/cpp/llama-vocab.h +6 -0
  61. package/cpp/llama.cpp +14 -0
  62. package/cpp/llama.h +72 -131
  63. package/cpp/minja/chat-template.hpp +9 -5
  64. package/cpp/minja/minja.hpp +69 -36
  65. package/cpp/rn-llama.cpp +611 -47
  66. package/cpp/rn-llama.h +33 -3
  67. package/cpp/sampling.cpp +57 -50
  68. package/cpp/tools/mtmd/clip-impl.h +462 -0
  69. package/cpp/tools/mtmd/clip.cpp +4024 -0
  70. package/cpp/tools/mtmd/clip.h +101 -0
  71. package/cpp/tools/mtmd/miniaudio.h +93468 -0
  72. package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
  73. package/cpp/tools/mtmd/mtmd-audio.h +62 -0
  74. package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
  75. package/cpp/tools/mtmd/mtmd.cpp +942 -0
  76. package/cpp/tools/mtmd/mtmd.h +362 -0
  77. package/cpp/tools/mtmd/stb_image.h +7988 -0
  78. package/ios/CMakeLists.txt +7 -0
  79. package/ios/RNLlama.mm +77 -3
  80. package/ios/RNLlamaContext.h +5 -1
  81. package/ios/RNLlamaContext.mm +105 -10
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +23 -19
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  85. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  86. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  87. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +21 -7
  88. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  89. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +30 -3
  90. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  91. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
  92. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
  93. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  94. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
  95. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  96. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +7 -1
  97. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  98. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +72 -131
  99. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  100. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  101. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
  102. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  103. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  104. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  105. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  106. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
  107. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  108. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  109. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  110. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
  111. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  112. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
  113. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  114. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
  115. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
  116. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  117. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
  118. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  119. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
  120. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  121. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
  122. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  123. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  124. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
  125. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  126. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  127. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  128. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  129. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
  130. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +23 -19
  131. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  132. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  133. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  134. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +21 -7
  135. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  136. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +30 -3
  137. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  138. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
  139. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
  140. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  141. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
  142. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  143. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +7 -1
  144. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  145. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +72 -131
  146. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  147. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  148. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
  149. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  150. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  151. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  152. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  153. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
  154. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  155. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  156. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  157. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
  158. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  159. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
  160. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  161. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
  162. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
  163. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  164. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
  165. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  166. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
  167. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  168. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
  169. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  170. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  171. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
  172. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  173. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  174. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  175. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  176. package/jest/mock.js +33 -7
  177. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  178. package/lib/commonjs/index.js +153 -21
  179. package/lib/commonjs/index.js.map +1 -1
  180. package/lib/module/NativeRNLlama.js.map +1 -1
  181. package/lib/module/index.js +152 -20
  182. package/lib/module/index.js.map +1 -1
  183. package/lib/typescript/NativeRNLlama.d.ts +50 -4
  184. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  185. package/lib/typescript/index.d.ts +72 -6
  186. package/lib/typescript/index.d.ts.map +1 -1
  187. package/package.json +1 -1
  188. package/src/NativeRNLlama.ts +67 -4
  189. package/src/index.ts +212 -38
  190. package/lib/commonjs/chat.js +0 -37
  191. package/lib/commonjs/chat.js.map +0 -1
  192. package/lib/module/chat.js +0 -33
  193. package/lib/module/chat.js.map +0 -1
  194. package/lib/typescript/chat.d.ts +0 -10
  195. package/lib/typescript/chat.d.ts.map +0 -1
  196. package/src/chat.ts +0 -44
--- a/package/cpp/ggml-opt.h
+++ b/package/cpp/ggml-opt.h
@@ -37,13 +37,16 @@ extern "C" {
     // ====== Dataset ======
 
     LM_GGML_API lm_ggml_opt_dataset_t lm_ggml_opt_dataset_init(
-            int64_t ne_datapoint, // number of elements per datapoint
-            int64_t ne_label,     // number of elements per label
-            int64_t ndata,        // total number of datapoints/labels
-            int64_t ndata_shard); // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied)
+            enum lm_ggml_type type_data,  // the type for the internal data tensor
+            enum lm_ggml_type type_label, // the type for the internal labels tensor
+            int64_t ne_datapoint,         // number of elements per datapoint
+            int64_t ne_label,             // number of elements per label
+            int64_t ndata,                // total number of datapoints/labels
+            int64_t ndata_shard);         // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied)
     LM_GGML_API void lm_ggml_opt_dataset_free(lm_ggml_opt_dataset_t dataset);
 
     // get underlying tensors that store the data
+    LM_GGML_API int64_t lm_ggml_opt_dataset_ndata (lm_ggml_opt_dataset_t dataset);
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_dataset_data  (lm_ggml_opt_dataset_t dataset); // shape = [ne_datapoint, ndata]
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_dataset_labels(lm_ggml_opt_dataset_t dataset); // shape = [nd_label, ndata]
 
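
The dataset constructor now takes explicit ggml types for the internal data and label tensors, and the datapoint count can be queried back with lm_ggml_opt_dataset_ndata. A minimal sketch of the new call, assuming F32 data and labels (all sizes are illustrative, not taken from this package):

```cpp
#include "ggml-opt.h"

// hypothetical dataset: 64 floats per datapoint, 10 floats per label
lm_ggml_opt_dataset_t dataset = lm_ggml_opt_dataset_init(
        LM_GGML_TYPE_F32,        // type_data: the internal data tensor is now explicitly typed
        LM_GGML_TYPE_F32,        // type_label
        /*ne_datapoint =*/ 64,
        /*ne_label     =*/ 10,
        /*ndata        =*/ 1000,
        /*ndata_shard  =*/ 100); // shuffling/copy granularity

const int64_t ndata = lm_ggml_opt_dataset_ndata(dataset); // 1000
(void) ndata;
lm_ggml_opt_dataset_free(dataset);
```
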
@@ -56,13 +59,19 @@ extern "C" {
             struct lm_ggml_tensor * data_batch,   // shape = [ne_datapoint, ndata_batch]
             struct lm_ggml_tensor * labels_batch, // shape = [ne_label, ndata_batch]
             int64_t                 ibatch);
+    LM_GGML_API void lm_ggml_opt_dataset_get_batch_host(
+            lm_ggml_opt_dataset_t dataset,
+            void                * data_batch,
+            size_t                nb_data_batch,
+            void                * labels_batch,
+            int64_t               ibatch);
 
     // ====== Model / Context ======
 
     enum lm_ggml_opt_build_type {
-        LM_GGML_OPT_BUILD_TYPE_FORWARD,
-        LM_GGML_OPT_BUILD_TYPE_GRAD,
-        LM_GGML_OPT_BUILD_TYPE_OPT,
+        LM_GGML_OPT_BUILD_TYPE_FORWARD = 10,
+        LM_GGML_OPT_BUILD_TYPE_GRAD    = 20,
+        LM_GGML_OPT_BUILD_TYPE_OPT     = 30,
     };
 
     // parameters that control which optimizer is used and how said optimizer tries to find the minimal loss
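
The new get_batch_host variant copies a batch into plain host buffers instead of ggml tensors. A sketch continuing the F32 dataset above (the batch size is illustrative; nb_data_batch is the size of the data buffer in bytes):

```cpp
#include <vector>

const int64_t ndata_batch = 32;              // hypothetical batch size
std::vector<float> data(64 * ndata_batch);   // ne_datapoint * ndata_batch
std::vector<float> labels(10 * ndata_batch); // ne_label * ndata_batch

lm_ggml_opt_dataset_get_batch_host(
        dataset,
        data.data(), data.size() * sizeof(float), // nb_data_batch in bytes
        labels.data(),
        /*ibatch =*/ 0);
```
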
@@ -81,20 +90,22 @@ extern "C" {
     // userdata can be used to pass arbitrary data
     typedef struct lm_ggml_opt_optimizer_params (*lm_ggml_opt_get_optimizer_params)(void * userdata);
 
-    // returns the default optimizer params (constant)
+    // returns the default optimizer params (constant, hard-coded values)
     // userdata is not used
     LM_GGML_API struct lm_ggml_opt_optimizer_params lm_ggml_opt_get_default_optimizer_params(void * userdata);
 
+    // casts userdata to lm_ggml_opt_optimizer_params and returns it
+    LM_GGML_API struct lm_ggml_opt_optimizer_params lm_ggml_opt_get_constant_optimizer_params(void * userdata);
+
     // parameters for initializing a new optimization context
     struct lm_ggml_opt_params {
         lm_ggml_backend_sched_t backend_sched; // defines which backends are used to construct the compute graphs
 
-        struct lm_ggml_context * ctx_compute; // created in user code, holds non-static tensors
-
-        // the forward graph is defined by inputs and outputs
-        // those tensors and all tensors inbetween are not intended to be reusable between multiple optimization contexts
-        struct lm_ggml_tensor * inputs;
-        struct lm_ggml_tensor * outputs;
+        // by default the forward graph needs to be reconstructed for each eval
+        // if ctx_compute, inputs, and outputs are set the graphs are instead allocated statically
+        struct lm_ggml_context * ctx_compute;
+        struct lm_ggml_tensor  * inputs;
+        struct lm_ggml_tensor  * outputs;
 
         enum lm_ggml_opt_loss_type  loss_type;
         enum lm_ggml_opt_build_type build_type;
@@ -107,12 +118,9 @@ extern "C" {
 
     // get parameters for an optimization context with defaults set where possible
     // parameters for which no sensible defaults exist are supplied as arguments to this function
-    LM_GGML_API lm_ggml_opt_params lm_ggml_opt_default_params(
-            lm_ggml_backend_sched_t      backend_sched,
-            struct lm_ggml_context     * ctx_compute,
-            struct lm_ggml_tensor      * inputs,
-            struct lm_ggml_tensor      * outputs,
-            enum lm_ggml_opt_loss_type   loss_type);
+    LM_GGML_API struct lm_ggml_opt_params lm_ggml_opt_default_params(
+            lm_ggml_backend_sched_t    backend_sched,
+            enum lm_ggml_opt_loss_type loss_type);
 
     LM_GGML_API lm_ggml_opt_context_t lm_ggml_opt_init(struct lm_ggml_opt_params params);
     LM_GGML_API void lm_ggml_opt_free(lm_ggml_opt_context_t opt_ctx);
@@ -120,7 +128,10 @@ extern "C" {
     // set gradients to zero, initilize loss, and optionally reset the optimizer
     LM_GGML_API void lm_ggml_opt_reset(lm_ggml_opt_context_t opt_ctx, bool optimizer);
 
+    LM_GGML_API bool lm_ggml_opt_static_graphs(lm_ggml_opt_context_t opt_ctx); // whether the graphs are allocated_statically
+
     // get underlying tensors that store data
+    // if not using static graphs these pointers become invalid with the next call to lm_ggml_opt_alloc
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_inputs( lm_ggml_opt_context_t opt_ctx); // forward graph input tensor
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_outputs(lm_ggml_opt_context_t opt_ctx); // forward graph output tensor
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_labels( lm_ggml_opt_context_t opt_ctx); // labels to compare outputs against
@@ -128,11 +139,12 @@ extern "C" {
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_pred(    lm_ggml_opt_context_t opt_ctx); // predictions made by outputs
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_ncorrect(lm_ggml_opt_context_t opt_ctx); // number of matching predictions between outputs and labels
 
+    // get the gradient accumulator for a node from the forward graph
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_grad_acc(lm_ggml_opt_context_t opt_ctx, struct lm_ggml_tensor * node);
 
     // ====== Optimization Result ======
 
-    LM_GGML_API lm_ggml_opt_result_t lm_ggml_opt_result_init();
+    LM_GGML_API lm_ggml_opt_result_t lm_ggml_opt_result_init(void);
     LM_GGML_API void lm_ggml_opt_result_free(lm_ggml_opt_result_t result);
     LM_GGML_API void lm_ggml_opt_result_reset(lm_ggml_opt_result_t result);
 
@@ -144,11 +156,20 @@ extern "C" {
 
     // ====== Computation ======
 
-    // do forward pass, increment result if not NULL
-    LM_GGML_API void lm_ggml_opt_forward(lm_ggml_opt_context_t opt_ctx, lm_ggml_opt_result_t result);
+    // if not using static graphs, this function must be called prior to lm_ggml_opt_alloc
+    LM_GGML_API void lm_ggml_opt_prepare_alloc(
+            lm_ggml_opt_context_t    opt_ctx,
+            struct lm_ggml_context * ctx_compute,
+            struct lm_ggml_cgraph  * gf,
+            struct lm_ggml_tensor  * inputs,
+            struct lm_ggml_tensor  * outputs);
+
+    // allocate the next graph for evaluation, either forward or forward + backward
+    // must be called exactly once prior to calling lm_ggml_opt_eval
+    LM_GGML_API void lm_ggml_opt_alloc(lm_ggml_opt_context_t opt_ctx, bool backward);
 
-    // do forward pass, increment result if not NULL, do backward pass
-    LM_GGML_API void lm_ggml_opt_forward_backward(lm_ggml_opt_context_t opt_ctx, lm_ggml_opt_result_t result);
+    // do forward pass, increment result if not NULL, do backward pass if allocated
+    LM_GGML_API void lm_ggml_opt_eval(lm_ggml_opt_context_t opt_ctx, lm_ggml_opt_result_t result);
 
     // ############################################################################
     // ## The high-level functions start here. They do not depend on any private ##
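
The old forward / forward_backward entry points are replaced by an explicit allocate-then-evaluate protocol. A rough sketch of the new loop, assuming an optimization context `opt_ctx` that was initialized with static graphs (so lm_ggml_opt_prepare_alloc is not required):

```cpp
lm_ggml_opt_result_t result = lm_ggml_opt_result_init();

// training step: allocate forward + backward, then evaluate
lm_ggml_opt_alloc(opt_ctx, /*backward =*/ true);
// ... fill lm_ggml_opt_inputs(opt_ctx) and lm_ggml_opt_labels(opt_ctx) with the current batch ...
lm_ggml_opt_eval(opt_ctx, result); // forward pass, result accumulation, backward pass

// evaluation step: forward only, no gradients
lm_ggml_opt_alloc(opt_ctx, /*backward =*/ false);
lm_ggml_opt_eval(opt_ctx, result);

lm_ggml_opt_result_free(result);
```

Without static graphs, each iteration additionally builds the forward graph in user code and hands it over via lm_ggml_opt_prepare_alloc before calling lm_ggml_opt_alloc.
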
@@ -200,9 +221,9 @@ extern "C" {
     // fit model defined by inputs and outputs to dataset
     LM_GGML_API void lm_ggml_opt_fit(
             lm_ggml_backend_sched_t          backend_sched, // backend scheduler for constructing the compute graphs
-            lm_ggml_context                * ctx_compute,   // context with temporarily allocated tensors to calculate the outputs
-            lm_ggml_tensor                 * inputs,        // input tensor with shape [ne_datapoint, ndata_batch]
-            lm_ggml_tensor                 * outputs,       // output tensor, must have shape [ne_label, ndata_batch] if labels are used
+            struct lm_ggml_context         * ctx_compute,   // context with temporarily allocated tensors to calculate the outputs
+            struct lm_ggml_tensor          * inputs,        // input tensor with shape [ne_datapoint, ndata_batch]
+            struct lm_ggml_tensor          * outputs,       // output tensor, must have shape [ne_label, ndata_batch] if labels are used
             lm_ggml_opt_dataset_t            dataset,       // dataset with data and optionally also labels
             enum lm_ggml_opt_loss_type       loss_type,     // loss to minimize
             lm_ggml_opt_get_optimizer_params get_opt_pars,  // callback to get optimizer params, userdata is pointer to epoch (of type int64_t)
--- a/package/cpp/ggml.h
+++ b/package/cpp/ggml.h
@@ -537,6 +537,7 @@ extern "C" {
         LM_GGML_UNARY_OP_HARDSWISH,
         LM_GGML_UNARY_OP_HARDSIGMOID,
         LM_GGML_UNARY_OP_EXP,
+        LM_GGML_UNARY_OP_GELU_ERF,
 
         LM_GGML_UNARY_OP_COUNT,
     };
@@ -674,11 +675,15 @@ extern "C" {
     LM_GGML_API bool lm_ggml_is_3d        (const struct lm_ggml_tensor * tensor);
    LM_GGML_API int  lm_ggml_n_dims       (const struct lm_ggml_tensor * tensor); // returns 1 for scalars
 
+    // returns whether the tensor elements can be iterated over with a flattened index (no gaps, no permutation)
     LM_GGML_API bool lm_ggml_is_contiguous  (const struct lm_ggml_tensor * tensor);
     LM_GGML_API bool lm_ggml_is_contiguous_0(const struct lm_ggml_tensor * tensor); // same as lm_ggml_is_contiguous()
     LM_GGML_API bool lm_ggml_is_contiguous_1(const struct lm_ggml_tensor * tensor); // contiguous for dims >= 1
     LM_GGML_API bool lm_ggml_is_contiguous_2(const struct lm_ggml_tensor * tensor); // contiguous for dims >= 2
 
+    // returns whether the tensor elements are allocated as one contiguous block of memory (no gaps, but permutation ok)
+    LM_GGML_API bool lm_ggml_is_contiguously_allocated(const struct lm_ggml_tensor * tensor);
+
     // true for tensor that is stored in memory as CxWxHxN and has been permuted to WxHxCxN
     LM_GGML_API bool lm_ggml_is_contiguous_channels(const struct lm_ggml_tensor * tensor);
 
@@ -765,7 +770,7 @@ extern "C" {
     // Tensor flags
     LM_GGML_API void lm_ggml_set_input(struct lm_ggml_tensor * tensor);
     LM_GGML_API void lm_ggml_set_output(struct lm_ggml_tensor * tensor);
-    LM_GGML_API void lm_ggml_set_param(struct lm_ggml_context * ctx, struct lm_ggml_tensor * tensor);
+    LM_GGML_API void lm_ggml_set_param(struct lm_ggml_tensor * tensor);
     LM_GGML_API void lm_ggml_set_loss(struct lm_ggml_tensor * tensor);
 
     //
@@ -935,7 +940,7 @@ extern "C" {
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_repeat_back(
             struct lm_ggml_context * ctx,
             struct lm_ggml_tensor  * a,
-            struct lm_ggml_tensor  * b);
+            struct lm_ggml_tensor  * b); // sum up values that are adjacent in dims > 0 instead of repeated with same stride
 
     // concat a and b along dim
     // used in stable-diffusion
@@ -1021,6 +1026,16 @@ extern "C" {
             struct lm_ggml_context * ctx,
             struct lm_ggml_tensor  * a);
 
+    // GELU using erf (error function) when possible
+    // some backends may fallback to approximation based on Abramowitz and Stegun formula
+    LM_GGML_API struct lm_ggml_tensor * lm_ggml_gelu_erf(
+            struct lm_ggml_context * ctx,
+            struct lm_ggml_tensor  * a);
+
+    LM_GGML_API struct lm_ggml_tensor * lm_ggml_gelu_erf_inplace(
+            struct lm_ggml_context * ctx,
+            struct lm_ggml_tensor  * a);
+
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_gelu_quick(
             struct lm_ggml_context * ctx,
             struct lm_ggml_tensor  * a);
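
lm_ggml_gelu_erf computes the exact GELU, 0.5 * x * (1 + erf(x / sqrt(2))), whereas the existing lm_ggml_gelu may use a tanh-based approximation on some backends. A sketch of building the new node (the context and tensor size are illustrative):

```cpp
// assumes an existing struct lm_ggml_context * ctx
struct lm_ggml_tensor * x = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_F32, 1024);
struct lm_ggml_tensor * y = lm_ggml_gelu_erf(ctx, x); // exact-GELU graph node
```
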
@@ -2046,15 +2061,14 @@ extern "C" {
 
     LM_GGML_API void lm_ggml_build_forward_expand(struct lm_ggml_cgraph * cgraph, struct lm_ggml_tensor * tensor);
     LM_GGML_API void lm_ggml_build_backward_expand(
-        struct lm_ggml_context * ctx_static,  // context for static gradients (loss + gradient accumulation)
-        struct lm_ggml_context * ctx_compute, // context for gradient computation
-        struct lm_ggml_cgraph  * cgraph,
-        bool accumulate); // whether or not gradients should be accumulated, requires static allocation of tensors in ctx_static
+        struct lm_ggml_context *  ctx, // context for gradient computation
+        struct lm_ggml_cgraph  *  cgraph,
+        struct lm_ggml_tensor  ** grad_accs);
 
     // graph allocation in a context
     LM_GGML_API struct lm_ggml_cgraph * lm_ggml_new_graph       (struct lm_ggml_context * ctx); // size = LM_GGML_DEFAULT_GRAPH_SIZE, grads = false
     LM_GGML_API struct lm_ggml_cgraph * lm_ggml_new_graph_custom(struct lm_ggml_context * ctx, size_t size, bool grads);
-    LM_GGML_API struct lm_ggml_cgraph * lm_ggml_graph_dup       (struct lm_ggml_context * ctx, struct lm_ggml_cgraph * cgraph);
+    LM_GGML_API struct lm_ggml_cgraph * lm_ggml_graph_dup       (struct lm_ggml_context * ctx, struct lm_ggml_cgraph * cgraph, bool force_grads);
     LM_GGML_API void                    lm_ggml_graph_cpy       (struct lm_ggml_cgraph * src, struct lm_ggml_cgraph * dst);
     LM_GGML_API void                    lm_ggml_graph_reset     (struct lm_ggml_cgraph * cgraph); // set regular grads + optimizer momenta to 0, set loss grad to 1
     LM_GGML_API void                    lm_ggml_graph_clear     (struct lm_ggml_cgraph * cgraph);
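
Together with the lm_ggml_set_param change above (no more context argument), backward-graph construction now takes an optional array of gradient accumulators instead of the old (ctx_static, accumulate) pair. A sketch under the assumption that a NULL grad_accs means no separate accumulators are used (not verified against this package):

```cpp
// assumes ctx, a parameter tensor w, and a scalar loss tensor
lm_ggml_set_param(w); // new signature: the context argument is gone

struct lm_ggml_cgraph * gf = lm_ggml_new_graph_custom(ctx, LM_GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true);
lm_ggml_build_forward_expand(gf, loss);

// duplicate the forward graph, forcing gradient bookkeeping in the copy
struct lm_ggml_cgraph * gb = lm_ggml_graph_dup(ctx, gf, /*force_grads =*/ true);
lm_ggml_build_backward_expand(ctx, gb, /*grad_accs =*/ NULL);
```
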
--- a/package/cpp/llama-chat.h
+++ b/package/cpp/llama-chat.h
@@ -14,6 +14,7 @@ enum llm_chat_template {
     LLM_CHAT_TEMPLATE_MISTRAL_V3,
     LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
     LLM_CHAT_TEMPLATE_MISTRAL_V7,
+    LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN,
     LLM_CHAT_TEMPLATE_PHI_3,
     LLM_CHAT_TEMPLATE_PHI_4,
     LLM_CHAT_TEMPLATE_FALCON_3,
--- a/package/cpp/llama-context.h
+++ b/package/cpp/llama-context.h
@@ -7,6 +7,7 @@
 #include "llama-adapter.h"
 
 #include "ggml-cpp.h"
+#include "ggml-opt.h"
 
 #include <map>
 #include <vector>
@@ -133,6 +134,32 @@ struct llama_context {
     llama_perf_context_data perf_get_data() const;
     void perf_reset();
 
+    //
+    // training
+    //
+
+    void opt_init(struct llama_model * model, struct llama_opt_params lopt_params);
+
+    void opt_epoch(
+            lm_ggml_opt_dataset_t      dataset,
+            lm_ggml_opt_result_t       result_train,
+            lm_ggml_opt_result_t       result_eval,
+            int64_t                    idata_split,
+            lm_ggml_opt_epoch_callback callback_train,
+            lm_ggml_opt_epoch_callback callback_eval);
+
+    void opt_epoch_iter(
+            lm_ggml_opt_dataset_t            dataset,
+            lm_ggml_opt_result_t             result,
+            const std::vector<llama_token> & tokens,
+            const std::vector<llama_token> & labels_sparse,
+            llama_batch                    & batch,
+            lm_ggml_opt_epoch_callback       callback,
+            bool                             train,
+            int64_t                          idata_in_loop,
+            int64_t                          ndata_in_loop,
+            int64_t                          t_loop_start);
+
 private:
     //
     // output
@@ -187,9 +214,6 @@ private:
 
     std::unique_ptr<llama_memory_i> memory;
 
-    // TODO: remove
-    bool logits_all = false;
-
     // decode output (2-dimensional array: [n_outputs][n_vocab])
     size_t  logits_size = 0; // capacity (of floats) for logits
     float * logits      = nullptr;
@@ -215,6 +239,9 @@ private:
 
     lm_ggml_context_ptr ctx_compute;
 
+    // training
+    lm_ggml_opt_context_t opt_ctx = nullptr;
+
     lm_ggml_threadpool_t threadpool       = nullptr;
     lm_ggml_threadpool_t threadpool_batch = nullptr;
 
--- a/package/cpp/llama-cparams.h
+++ b/package/cpp/llama-cparams.h
@@ -30,6 +30,7 @@ struct llama_cparams {
     bool flash_attn;
     bool no_perf;
     bool warmup;
+    bool op_offload;
 
     enum llama_pooling_type pooling_type;
 
--- a/package/cpp/llama-graph.h
+++ b/package/cpp/llama-graph.h
@@ -19,6 +19,7 @@ struct llama_cparams;
 
 class llama_memory_i;
 class llama_kv_cache_unified;
+class llama_kv_cache_unified_iswa;
 class llama_kv_cache_recurrent;
 
 // certain models (typically multi-modal) can produce different types of graphs
@@ -255,6 +256,31 @@ public:
 
     void set_input(const llama_ubatch * ubatch) override;
 
+    lm_ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; }
+
+    lm_ggml_tensor * self_kq_mask     = nullptr; // F32 [n_kv, n_batch]
+    lm_ggml_tensor * self_kq_mask_cnv = nullptr; //     [n_kv, n_batch]
+
+    const llama_hparams & hparams;
+    const llama_cparams & cparams;
+
+    const llama_kv_cache_unified * kv_self;
+};
+
+class llm_graph_input_attn_kv_unified_iswa : public llm_graph_input_i {
+public:
+    llm_graph_input_attn_kv_unified_iswa(
+            const llama_hparams & hparams,
+            const llama_cparams & cparams,
+            const llama_kv_cache_unified_iswa * kv_self) :
+        hparams(hparams),
+        cparams(cparams),
+        kv_self(kv_self) {
+    }
+    ~llm_graph_input_attn_kv_unified_iswa() = default;
+
+    void set_input(const llama_ubatch * ubatch) override;
+
     lm_ggml_tensor * get_kq_mask()     const { return self_kq_mask_cnv; }
     lm_ggml_tensor * get_kq_mask_swa() const { return self_kq_mask_swa_cnv; }
 
@@ -266,7 +292,7 @@ public:
     const llama_hparams & hparams;
     const llama_cparams & cparams;
 
-    const llama_kv_cache_unified * kv_self;
+    const llama_kv_cache_unified_iswa * kv_self;
 };
 
 class llm_graph_input_attn_cross : public llm_graph_input_i {
@@ -298,6 +324,7 @@ class llm_graph_result_i {
 public:
     virtual ~llm_graph_result_i() = default;
 
+    virtual lm_ggml_tensor * get_tokens()      = 0;
     virtual lm_ggml_tensor * get_logits()      = 0;
     virtual lm_ggml_tensor * get_embd()        = 0;
     virtual lm_ggml_tensor * get_embd_pooled() = 0;
@@ -312,6 +339,7 @@ class llm_graph_result : public llm_graph_result_i {
 public:
     virtual ~llm_graph_result() = default;
 
+    lm_ggml_tensor * get_tokens()      override { return t_tokens; }
     lm_ggml_tensor * get_logits()      override { return t_logits; }
     lm_ggml_tensor * get_embd()        override { return t_embd; }
     lm_ggml_tensor * get_embd_pooled() override { return t_embd_pooled; }
@@ -328,6 +356,7 @@ public:
     }
 
     // important graph nodes
+    lm_ggml_tensor * t_tokens      = nullptr;
     lm_ggml_tensor * t_logits      = nullptr;
     lm_ggml_tensor * t_embd        = nullptr;
     lm_ggml_tensor * t_embd_pooled = nullptr;
@@ -375,7 +404,6 @@ struct llm_graph_context {
     const int64_t n_layer;
     const int64_t n_rot;
     const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
-    const int64_t n_ctx_per_seq;
     const int64_t n_head;
     const int64_t n_head_kv;
     const int64_t n_embd_head_k;
@@ -504,13 +532,12 @@ struct llm_graph_context {
 
     lm_ggml_tensor * build_attn_mha(
             lm_ggml_cgraph * gf,
-            lm_ggml_tensor * q,       // [n_embd_head_q, n_tokens, n_head_q]
-            lm_ggml_tensor * k,       // [n_embd_head_k, n_tokens, n_head_k]
-            lm_ggml_tensor * v,       // [n_embd_head_v, n_tokens, n_head_v] (v_trans == false)
+            lm_ggml_tensor * q,       // [n_embd_head_q, n_head_q, n_tokens]
+            lm_ggml_tensor * k,       // [n_embd_head_k, n_head_k, n_tokens]
+            lm_ggml_tensor * v,       // [n_embd_head_v, n_head_v, n_tokens] (v_trans == false)
             lm_ggml_tensor * kq_b,
             lm_ggml_tensor * kq_mask,
-            lm_ggml_tensor * v_mla,   // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
-            bool             v_trans,
+            lm_ggml_tensor * v_mla,   // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
             float            kq_scale) const;
 
     llm_graph_input_attn_no_cache * build_attn_inp_no_cache() const;
@@ -543,6 +570,21 @@ struct llm_graph_context {
             float             kq_scale,
             int               il) const;
 
+    llm_graph_input_attn_kv_unified_iswa * build_attn_inp_kv_unified_iswa() const;
+
+    lm_ggml_tensor * build_attn(
+            llm_graph_input_attn_kv_unified_iswa * inp,
+            lm_ggml_cgraph * gf,
+            lm_ggml_tensor * wo,
+            lm_ggml_tensor * wo_b,
+            lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+            lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+            lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+            lm_ggml_tensor * kq_b,
+            lm_ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
+            float            kq_scale,
+            int              il) const;
+
     llm_graph_input_attn_cross * build_attn_inp_cross() const;
 
     lm_ggml_tensor * build_attn(
@@ -593,3 +635,6 @@ struct llm_graph_context {
             lm_ggml_tensor * cls_out,
             lm_ggml_tensor * cls_out_b) const;
 };
+
+// TODO: better name
+int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional);
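
llama_relative_position_bucket follows the T5 bucketing scheme: nearby offsets each get their own bucket, while larger offsets share log-spaced buckets up to a maximum distance. A self-contained sketch of that scheme (the max_distance constant is illustrative; the package's implementation may differ in details):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

static int32_t relative_position_bucket(int32_t x, int32_t y, int32_t n_buckets, bool bidirectional) {
    const int32_t max_distance = 128; // illustrative cap on the modeled distance
    int32_t bucket = 0;
    int32_t rel    = x - y;
    if (bidirectional) {
        n_buckets /= 2;                  // half the buckets per direction
        bucket += (rel > 0) * n_buckets; // second half encodes positive offsets
        rel = std::abs(rel);
    } else {
        rel = -std::min(rel, 0);         // causal: only past positions matter
    }
    const int32_t max_exact = n_buckets / 2;
    if (rel < max_exact) {
        return bucket + rel;             // one bucket per small offset
    }
    // log-spaced buckets between max_exact and max_distance
    int32_t large = max_exact + (int32_t)(std::log((float) rel / max_exact) /
                                          std::log((float) max_distance / max_exact) *
                                          (n_buckets - max_exact));
    return bucket + std::min(large, n_buckets - 1);
}
```
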
--- a/package/cpp/llama-hparams.h
+++ b/package/cpp/llama-hparams.h
@@ -14,6 +14,12 @@ enum llama_expert_gating_func_type {
     LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2,
 };
 
+enum llama_swa_type {
+    LLAMA_SWA_TYPE_NONE     = 0,
+    LLAMA_SWA_TYPE_STANDARD = 1,
+    LLAMA_SWA_TYPE_CHUNKED  = 2,
+};
+
 struct llama_hparams_posnet {
     uint32_t n_embd;
     uint32_t n_layer;
@@ -35,8 +41,6 @@ struct llama_hparams {
     uint32_t n_embd_features = 0;
     uint32_t n_layer;
     uint32_t n_rot;
-    uint32_t n_swa = 0;         // sliding window attention (SWA)
-    uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention
     uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
     uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
     uint32_t n_expert = 0;
@@ -96,6 +100,15 @@ struct llama_hparams {
 
     std::array<int, 4> rope_sections;
 
+    // Sliding Window Attention (SWA)
+    llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
+    // the size of the sliding window (0 - no SWA)
+    uint32_t n_swa = 0;
+    // if swa_layers[il] == true, then layer il is SWA
+    // if swa_layers[il] == false, then layer il is dense (i.e. non-SWA)
+    // by default, all layers are dense
+    std::array<bool, LLAMA_MAX_LAYERS> swa_layers;
+
     // for State Space Models
     uint32_t ssm_d_conv  = 0;
     uint32_t ssm_d_inner = 0;
@@ -116,11 +129,10 @@ struct llama_hparams {
     bool causal_attn   = true;
     bool use_alibi     = false;
     bool attn_soft_cap = false;
+    bool use_kq_norm   = true;
 
+    // llama4
     uint32_t n_moe_layer_step = 0;
-    bool     use_kq_norm      = true;
-    uint32_t n_attn_chunk     = 0;
-    // values below seems to be fixed on llama4
     uint32_t n_no_rope_layer_step    = 4;
     uint32_t n_attn_temp_floor_scale = 8192;
     float    f_attn_temp_scale       = 0.1;
@@ -133,6 +145,23 @@ struct llama_hparams {
     enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
     enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
 
+    // this value n_pattern means that every nth layer is dense (i.e. non-SWA)
+    // note that if n_pattern == 0, all layers are SWA
+    // if n_pattern == 1, all layers are dense
+    // example: n_pattern = 3
+    //   il == 0: swa
+    //   il == 1: swa
+    //   il == 2: dense
+    //   il == 3: swa
+    //   il == 4: swa
+    //   il == 5: dense
+    //   il == 6: swa
+    //   etc ...
+    void set_swa_pattern(uint32_t n_pattern);
+
+    // return true if one of the layers is SWA
+    bool is_swa_any() const;
+
     uint32_t n_head(uint32_t il = 0) const;
 
     uint32_t n_head_kv(uint32_t il = 0) const;
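
The comment block above pins down the semantics of set_swa_pattern completely, so a plausible implementation is a one-liner per layer. A sketch inferred from the documented behavior (not copied from the package):

```cpp
void llama_hparams::set_swa_pattern(uint32_t n_pattern) {
    // every n_pattern-th layer is dense; n_pattern == 0 -> all SWA, n_pattern == 1 -> all dense
    for (uint32_t il = 0; il < n_layer; ++il) {
        swa_layers[il] = n_pattern == 0 || (il % n_pattern < n_pattern - 1);
    }
}

bool llama_hparams::is_swa_any() const {
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (swa_layers[il]) {
            return true;
        }
    }
    return false;
}
```
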