llama-cpp-capacitor 0.0.6 → 0.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. package/android/src/main/CMakeLists.txt +9 -9
  2. package/cpp/LICENSE +21 -0
  3. package/cpp/README.md +4 -0
  4. package/cpp/anyascii.c +22223 -0
  5. package/cpp/anyascii.h +42 -0
  6. package/cpp/chat-parser.cpp +393 -0
  7. package/cpp/chat-parser.h +120 -0
  8. package/cpp/chat.cpp +2315 -0
  9. package/cpp/chat.h +221 -0
  10. package/cpp/common.cpp +1619 -0
  11. package/cpp/common.h +744 -0
  12. package/cpp/ggml-alloc.c +1028 -0
  13. package/cpp/ggml-alloc.h +76 -0
  14. package/cpp/ggml-backend-impl.h +255 -0
  15. package/cpp/ggml-backend-reg.cpp +600 -0
  16. package/cpp/ggml-backend.cpp +2118 -0
  17. package/cpp/ggml-backend.h +354 -0
  18. package/cpp/ggml-common.h +1878 -0
  19. package/cpp/ggml-cpp.h +39 -0
  20. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  21. package/cpp/ggml-cpu/amx/amx.h +8 -0
  22. package/cpp/ggml-cpu/amx/common.h +91 -0
  23. package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
  24. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  25. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  26. package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
  27. package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
  28. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  29. package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
  30. package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
  31. package/cpp/ggml-cpu/arch-fallback.h +215 -0
  32. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  33. package/cpp/ggml-cpu/binary-ops.h +16 -0
  34. package/cpp/ggml-cpu/common.h +73 -0
  35. package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
  36. package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
  37. package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
  38. package/cpp/ggml-cpu/ops.cpp +10587 -0
  39. package/cpp/ggml-cpu/ops.h +114 -0
  40. package/cpp/ggml-cpu/quants.c +1193 -0
  41. package/cpp/ggml-cpu/quants.h +97 -0
  42. package/cpp/ggml-cpu/repack.cpp +1982 -0
  43. package/cpp/ggml-cpu/repack.h +120 -0
  44. package/cpp/ggml-cpu/simd-mappings.h +1184 -0
  45. package/cpp/ggml-cpu/traits.cpp +36 -0
  46. package/cpp/ggml-cpu/traits.h +38 -0
  47. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  48. package/cpp/ggml-cpu/unary-ops.h +28 -0
  49. package/cpp/ggml-cpu/vec.cpp +348 -0
  50. package/cpp/ggml-cpu/vec.h +1121 -0
  51. package/cpp/ggml-cpu.h +145 -0
  52. package/cpp/ggml-impl.h +622 -0
  53. package/cpp/ggml-metal-impl.h +688 -0
  54. package/cpp/ggml-metal.h +66 -0
  55. package/cpp/ggml-metal.m +6833 -0
  56. package/cpp/ggml-opt.cpp +1093 -0
  57. package/cpp/ggml-opt.h +256 -0
  58. package/cpp/ggml-quants.c +5324 -0
  59. package/cpp/ggml-quants.h +106 -0
  60. package/cpp/ggml-threading.cpp +12 -0
  61. package/cpp/ggml-threading.h +14 -0
  62. package/cpp/ggml.c +7108 -0
  63. package/cpp/ggml.h +2492 -0
  64. package/cpp/gguf.cpp +1358 -0
  65. package/cpp/gguf.h +202 -0
  66. package/cpp/json-partial.cpp +256 -0
  67. package/cpp/json-partial.h +38 -0
  68. package/cpp/json-schema-to-grammar.cpp +985 -0
  69. package/cpp/json-schema-to-grammar.h +21 -0
  70. package/cpp/llama-adapter.cpp +388 -0
  71. package/cpp/llama-adapter.h +76 -0
  72. package/cpp/llama-arch.cpp +2355 -0
  73. package/cpp/llama-arch.h +499 -0
  74. package/cpp/llama-batch.cpp +875 -0
  75. package/cpp/llama-batch.h +160 -0
  76. package/cpp/llama-chat.cpp +783 -0
  77. package/cpp/llama-chat.h +65 -0
  78. package/cpp/llama-context.cpp +2748 -0
  79. package/cpp/llama-context.h +306 -0
  80. package/cpp/llama-cparams.cpp +5 -0
  81. package/cpp/llama-cparams.h +41 -0
  82. package/cpp/llama-cpp.h +30 -0
  83. package/cpp/llama-grammar.cpp +1229 -0
  84. package/cpp/llama-grammar.h +173 -0
  85. package/cpp/llama-graph.cpp +1891 -0
  86. package/cpp/llama-graph.h +810 -0
  87. package/cpp/llama-hparams.cpp +180 -0
  88. package/cpp/llama-hparams.h +233 -0
  89. package/cpp/llama-impl.cpp +167 -0
  90. package/cpp/llama-impl.h +61 -0
  91. package/cpp/llama-io.cpp +15 -0
  92. package/cpp/llama-io.h +35 -0
  93. package/cpp/llama-kv-cache-iswa.cpp +318 -0
  94. package/cpp/llama-kv-cache-iswa.h +135 -0
  95. package/cpp/llama-kv-cache.cpp +2059 -0
  96. package/cpp/llama-kv-cache.h +374 -0
  97. package/cpp/llama-kv-cells.h +491 -0
  98. package/cpp/llama-memory-hybrid.cpp +258 -0
  99. package/cpp/llama-memory-hybrid.h +137 -0
  100. package/cpp/llama-memory-recurrent.cpp +1146 -0
  101. package/cpp/llama-memory-recurrent.h +179 -0
  102. package/cpp/llama-memory.cpp +59 -0
  103. package/cpp/llama-memory.h +119 -0
  104. package/cpp/llama-mmap.cpp +600 -0
  105. package/cpp/llama-mmap.h +68 -0
  106. package/cpp/llama-model-loader.cpp +1164 -0
  107. package/cpp/llama-model-loader.h +170 -0
  108. package/cpp/llama-model-saver.cpp +282 -0
  109. package/cpp/llama-model-saver.h +37 -0
  110. package/cpp/llama-model.cpp +19042 -0
  111. package/cpp/llama-model.h +491 -0
  112. package/cpp/llama-sampling.cpp +2575 -0
  113. package/cpp/llama-sampling.h +32 -0
  114. package/cpp/llama-vocab.cpp +3792 -0
  115. package/cpp/llama-vocab.h +176 -0
  116. package/cpp/llama.cpp +358 -0
  117. package/cpp/llama.h +1373 -0
  118. package/cpp/log.cpp +427 -0
  119. package/cpp/log.h +103 -0
  120. package/cpp/minja/chat-template.hpp +550 -0
  121. package/cpp/minja/minja.hpp +3009 -0
  122. package/cpp/nlohmann/json.hpp +25526 -0
  123. package/cpp/nlohmann/json_fwd.hpp +187 -0
  124. package/cpp/regex-partial.cpp +204 -0
  125. package/cpp/regex-partial.h +56 -0
  126. package/cpp/rn-completion.cpp +681 -0
  127. package/cpp/rn-completion.h +116 -0
  128. package/cpp/rn-llama.cpp +345 -0
  129. package/cpp/rn-llama.h +149 -0
  130. package/cpp/rn-mtmd.hpp +602 -0
  131. package/cpp/rn-tts.cpp +591 -0
  132. package/cpp/rn-tts.h +59 -0
  133. package/cpp/sampling.cpp +579 -0
  134. package/cpp/sampling.h +107 -0
  135. package/cpp/tools/mtmd/clip-impl.h +473 -0
  136. package/cpp/tools/mtmd/clip.cpp +4322 -0
  137. package/cpp/tools/mtmd/clip.h +106 -0
  138. package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
  139. package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
  140. package/cpp/tools/mtmd/mtmd-audio.h +47 -0
  141. package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
  142. package/cpp/tools/mtmd/mtmd-helper.h +91 -0
  143. package/cpp/tools/mtmd/mtmd.cpp +1066 -0
  144. package/cpp/tools/mtmd/mtmd.h +298 -0
  145. package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
  146. package/cpp/unicode-data.cpp +7034 -0
  147. package/cpp/unicode-data.h +20 -0
  148. package/cpp/unicode.cpp +1061 -0
  149. package/cpp/unicode.h +68 -0
  150. package/package.json +2 -1
package/cpp/llama-kv-cache.cpp
@@ -0,0 +1,2059 @@
1
+ #include "llama-kv-cache.h"
2
+
3
+ #include "llama-impl.h"
4
+ #include "llama-io.h"
5
+ #include "llama-model.h"
6
+ #include "llama-context.h"
7
+
8
+ #include <algorithm>
9
+ #include <cassert>
10
+ #include <cmath>
11
+ #include <limits>
12
+ #include <map>
13
+ #include <stdexcept>
14
+
15
+ //
16
+ // llama_kv_cache
17
+ //
18
+
19
+ llama_kv_cache::llama_kv_cache(
20
+ const llama_model & model,
21
+ lm_ggml_type type_k,
22
+ lm_ggml_type type_v,
23
+ bool v_trans,
24
+ bool offload,
25
+ bool unified,
26
+ uint32_t kv_size,
27
+ uint32_t n_seq_max,
28
+ uint32_t n_pad,
29
+ uint32_t n_swa,
30
+ llama_swa_type swa_type,
31
+ const layer_filter_cb & filter,
32
+ const layer_reuse_cb & reuse) :
33
+ model(model), hparams(model.hparams), v_trans(v_trans),
34
+ n_seq_max(n_seq_max), n_stream(unified ? 1 : n_seq_max), n_pad(n_pad), n_swa(n_swa), swa_type(swa_type) {
35
+
36
+ LM_GGML_ASSERT(kv_size % n_pad == 0);
37
+
38
+ const uint32_t n_layer_kv = hparams.n_layer_kv();
39
+
40
+ // create a context for each buffer type
41
+ std::map<lm_ggml_backend_buffer_type_t, lm_ggml_context *> ctx_map;
42
+ auto ctx_for_buft = [&](lm_ggml_backend_buffer_type_t buft) -> lm_ggml_context * {
43
+ auto it = ctx_map.find(buft);
44
+ if (it == ctx_map.end()) {
45
+ lm_ggml_init_params params = {
46
+ /*.mem_size =*/ size_t(2u*(1 + n_stream)*n_layer_kv*lm_ggml_tensor_overhead()),
47
+ /*.mem_buffer =*/ NULL,
48
+ /*.no_alloc =*/ true,
49
+ };
50
+
51
+ lm_ggml_context * ctx = lm_ggml_init(params);
52
+ if (!ctx) {
53
+ return nullptr;
54
+ }
55
+
56
+ ctx_map[buft] = ctx;
57
+ ctxs.emplace_back(ctx);
58
+
59
+ return ctx;
60
+ }
61
+
62
+ return it->second;
63
+ };
64
+
65
+ LM_GGML_ASSERT(n_stream == 1 || n_stream == n_seq_max);
66
+
67
+ v_heads.resize(n_stream);
68
+ for (uint32_t s = 0; s < n_stream; ++s) {
69
+ v_heads[s] = 0;
70
+ }
71
+
72
+ v_cells.resize(n_stream);
73
+ for (uint32_t s = 0; s < n_stream; ++s) {
74
+ v_cells[s].resize(kv_size);
75
+ }
76
+
77
+ // by default, all sequence ids are mapped to the 0th stream
78
+ seq_to_stream.resize(LLAMA_MAX_SEQ, 0);
79
+
80
+ if (n_stream > 1) {
81
+ seq_to_stream.resize(n_stream, 0);
82
+ for (uint32_t s = 0; s < n_stream; ++s) {
83
+ seq_to_stream[s] = s;
84
+ }
85
+ }
86
+
87
+ // [TAG_V_CACHE_VARIABLE]
88
+ if (v_trans && hparams.is_n_embd_v_gqa_variable()) {
89
+ LLAMA_LOG_WARN("%s: the V embeddings have different sizes across layers and FA is not enabled - padding V cache to %d\n",
90
+ __func__, hparams.n_embd_v_gqa_max());
91
+ }
92
+
93
+ for (uint32_t il = 0; il < hparams.n_layer; il++) {
94
+ if (!hparams.has_kv(il)) {
95
+ LLAMA_LOG_DEBUG("%s: layer %3d: does not have KV cache\n", __func__, il);
96
+ continue;
97
+ }
98
+
99
+ if (filter && !filter(il)) {
100
+ LLAMA_LOG_DEBUG("%s: layer %3d: filtered\n", __func__, il);
101
+ continue;
102
+ }
103
+
104
+ // [TAG_V_CACHE_VARIABLE]
105
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
106
+ const uint32_t n_embd_v_gqa = !v_trans ? hparams.n_embd_v_gqa(il) : hparams.n_embd_v_gqa_max();
107
+
108
+ const char * dev_name = "CPU";
109
+
110
+ lm_ggml_backend_buffer_type_t buft = lm_ggml_backend_cpu_buffer_type();
111
+
112
+ if (offload) {
113
+ auto * dev = model.dev_layer(il);
114
+ buft = lm_ggml_backend_dev_buffer_type(dev);
115
+
116
+ dev_name = lm_ggml_backend_dev_name(dev);
117
+ }
118
+
119
+ LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, il, dev_name);
120
+
121
+ lm_ggml_context * ctx = ctx_for_buft(buft);
122
+ if (!ctx) {
123
+ throw std::runtime_error("failed to create ggml context for kv cache");
124
+ }
125
+
126
+ lm_ggml_tensor * k;
127
+ lm_ggml_tensor * v;
128
+
129
+ k = lm_ggml_new_tensor_3d(ctx, type_k, n_embd_k_gqa, kv_size, n_stream);
130
+ v = lm_ggml_new_tensor_3d(ctx, type_v, n_embd_v_gqa, kv_size, n_stream);
131
+
132
+ lm_ggml_format_name(k, "cache_k_l%d", il);
133
+ lm_ggml_format_name(v, "cache_v_l%d", il);
134
+
135
+ std::vector<lm_ggml_tensor *> k_stream;
136
+ std::vector<lm_ggml_tensor *> v_stream;
137
+
138
+ for (uint32_t s = 0; s < n_stream; ++s) {
139
+ k_stream.push_back(lm_ggml_view_2d(ctx, k, n_embd_k_gqa, kv_size, k->nb[1], s*k->nb[2]));
140
+ v_stream.push_back(lm_ggml_view_2d(ctx, v, n_embd_v_gqa, kv_size, v->nb[1], s*v->nb[2]));
141
+ }
142
+
143
+ map_layer_ids[il] = layers.size();
144
+
145
+ layers.push_back({ il, k, v, k_stream, v_stream, });
146
+ }
147
+
148
+ if (reuse) {
149
+ LLAMA_LOG_DEBUG("%s: reusing layers:\n", __func__);
150
+
151
+ for (uint32_t il = 0; il < hparams.n_layer; il++) {
152
+ const int32_t il_reuse = reuse(il);
153
+
154
+ if (il_reuse < 0) {
155
+ LLAMA_LOG_DEBUG("%s: - layer %3d: no reuse\n", __func__, il);
156
+ continue;
157
+ }
158
+
159
+ if (filter && !filter(il)) {
160
+ LLAMA_LOG_DEBUG("%s: - layer %3d: filtered\n", __func__, il);
161
+ continue;
162
+ }
163
+
164
+ LM_GGML_ASSERT(map_layer_ids.find(il_reuse) != map_layer_ids.end());
165
+
166
+ map_layer_ids[il] = map_layer_ids[il_reuse];
167
+
168
+ LLAMA_LOG_DEBUG("%s: - layer %3d: reuse layer %d, is_swa = %d\n", __func__, il, il_reuse, hparams.is_swa(il));
169
+ }
170
+ }
171
+
172
+ // allocate tensors and initialize the buffers to avoid NaNs in the padding
173
+ for (auto it : ctx_map) {
174
+ auto * buft = it.first;
175
+ auto * ctx = it.second;
176
+
177
+ lm_ggml_backend_buffer_t buf = lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
178
+ if (!buf) {
179
+ throw std::runtime_error("failed to allocate buffer for kv cache");
180
+ }
181
+
182
+ LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, lm_ggml_backend_buffer_name(buf), lm_ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
183
+
184
+ lm_ggml_backend_buffer_clear(buf, 0);
185
+ bufs.emplace_back(buf);
186
+ }
187
+
188
+ {
189
+ const size_t memory_size_k = size_k_bytes();
190
+ const size_t memory_size_v = size_v_bytes();
191
+
192
+ LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u/%u seqs), K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
193
+ (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), kv_size, (int) layers.size(), n_seq_max, n_stream,
194
+ lm_ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
195
+ lm_ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
196
+ }
197
+
198
+ const char * LLAMA_KV_CACHE_DEBUG = getenv("LLAMA_KV_CACHE_DEBUG");
199
+ debug = LLAMA_KV_CACHE_DEBUG ? atoi(LLAMA_KV_CACHE_DEBUG) : 0;
200
+
201
+ const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS");
202
+ supports_set_rows = LLAMA_SET_ROWS ? atoi(LLAMA_SET_ROWS) != 0 : supports_set_rows;
203
+
204
+ if (!supports_set_rows) {
205
+ // ref: https://github.com/ggml-org/llama.cpp/pull/14363
206
+ LM_GGML_ASSERT(unified && "cannot use non-unified KV cache without lm_ggml_set_rows() support");
207
+ }
208
+
209
+ if (!supports_set_rows) {
210
+ LLAMA_LOG_WARN("%s: LLAMA_SET_ROWS=0, using old lm_ggml_cpy() method for backwards compatibility\n", __func__);
211
+ }
212
+ }
213
+
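Not part of the package diff: a minimal construction sketch for orientation, assuming a loaded `model` and purely illustrative settings (fp16 K/V types, 4096 cells, one unified stream). The real call sites and values live elsewhere in the library; the parameter order follows the constructor above.

// illustrative sketch (not package code): a unified, single-sequence cache with fp16 K/V
llama_kv_cache kv(
    model,                   // const llama_model &
    LM_GGML_TYPE_F16,        // type_k (assumed fp16 constant)
    LM_GGML_TYPE_F16,        // type_v
    /*v_trans   =*/ true,    // V stored transposed (flash attention off)
    /*offload   =*/ true,    // allocate each layer's buffer on that layer's device
    /*unified   =*/ true,    // one stream shared by all sequences
    /*kv_size   =*/ 4096,    // must be a multiple of n_pad (asserted above)
    /*n_seq_max =*/ 1,
    /*n_pad     =*/ 32,
    /*n_swa     =*/ 0,       // no sliding window
    LLAMA_SWA_TYPE_NONE,
    /*filter    =*/ nullptr, // keep every layer that has KV
    /*reuse     =*/ nullptr);// no cross-layer KV reuse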
214
+ void llama_kv_cache::clear(bool data) {
215
+ for (uint32_t s = 0; s < n_stream; ++s) {
216
+ v_cells[s].reset();
217
+ v_heads[s] = 0;
218
+ }
219
+
220
+ if (data) {
221
+ for (auto & buf : bufs) {
222
+ lm_ggml_backend_buffer_clear(buf.get(), 0);
223
+ }
224
+ }
225
+ }
226
+
227
+ bool llama_kv_cache::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
228
+ LM_GGML_ASSERT(seq_id == -1 || (seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()));
229
+
230
+ if (p0 < 0) {
231
+ p0 = 0;
232
+ }
233
+
234
+ if (p1 < 0) {
235
+ p1 = std::numeric_limits<llama_pos>::max();
236
+ }
237
+
238
+ if (seq_id >= 0) {
239
+ auto & cells = v_cells[seq_to_stream[seq_id]];
240
+ auto & head = v_heads[seq_to_stream[seq_id]];
241
+
242
+ uint32_t new_head = cells.size();
243
+
244
+ for (uint32_t i = 0; i < cells.size(); ++i) {
245
+ if (!cells.pos_in(i, p0, p1)) {
246
+ continue;
247
+ }
248
+
249
+ if (cells.seq_has(i, seq_id) && cells.seq_rm(i, seq_id)) {
250
+ if (new_head == cells.size()) {
251
+ new_head = i;
252
+ }
253
+ }
254
+ }
255
+
256
+ // If we freed up a slot, set head to it so searching can start there.
257
+ if (new_head != cells.size() && new_head < head) {
258
+ head = new_head;
259
+ }
260
+ } else {
261
+ // match any sequence
262
+ for (uint32_t s = 0; s < n_stream; ++s) {
263
+ auto & cells = v_cells[s];
264
+ auto & head = v_heads[s];
265
+
266
+ uint32_t new_head = cells.size();
267
+
268
+ for (uint32_t i = 0; i < cells.size(); ++i) {
269
+ if (!cells.pos_in(i, p0, p1)) {
270
+ continue;
271
+ }
272
+
273
+ cells.rm(i);
274
+
275
+ if (new_head == cells.size()) {
276
+ new_head = i;
277
+ }
278
+ }
279
+
280
+ // If we freed up a slot, set head to it so searching can start there.
281
+ if (new_head != cells.size() && new_head < head) {
282
+ head = new_head;
283
+ }
284
+ }
285
+ }
286
+
287
+ return true;
288
+ }
289
+
290
+ void llama_kv_cache::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
291
+ LM_GGML_ASSERT(seq_id_src >= 0 && (size_t) seq_id_src < seq_to_stream.size());
292
+ LM_GGML_ASSERT(seq_id_dst >= 0 && (size_t) seq_id_dst < seq_to_stream.size());
293
+
294
+ const auto s0 = seq_to_stream[seq_id_src];
295
+ const auto s1 = seq_to_stream[seq_id_dst];
296
+
297
+ if (s0 == s1) {
298
+ // since both sequences are in the same stream, no data copy is necessary
299
+ // we just have to update the cells' metadata
300
+
301
+ auto & cells = v_cells[s0];
302
+
303
+ if (seq_id_src == seq_id_dst) {
304
+ return;
305
+ }
306
+
307
+ if (p0 < 0) {
308
+ p0 = 0;
309
+ }
310
+
311
+ if (p1 < 0) {
312
+ p1 = std::numeric_limits<llama_pos>::max();
313
+ }
314
+
315
+ for (uint32_t i = 0; i < cells.size(); ++i) {
316
+ if (!cells.pos_in(i, p0, p1)) {
317
+ continue;
318
+ }
319
+
320
+ if (cells.seq_has(i, seq_id_src)) {
321
+ cells.seq_add(i, seq_id_dst);
322
+ }
323
+ }
324
+
325
+ return;
326
+ }
327
+
328
+ // cross-stream sequence copies require copying the actual buffer data
329
+
330
+ bool is_full = true;
331
+
332
+ if (p0 > 0 && p0 + 1 < (int) get_size()) {
333
+ is_full = false;
334
+ }
335
+
336
+ if (p1 > 0 && p1 + 1 < (int) get_size()) {
337
+ is_full = false;
338
+ }
339
+
340
+ LM_GGML_ASSERT(is_full && "seq_cp() is only supported for full KV buffers");
341
+
342
+ // enqueue the copy operation - the buffer copy will be performed during the next update
343
+ sc_info.ssrc.push_back(s0);
344
+ sc_info.sdst.push_back(s1);
345
+
346
+ v_cells[s1].reset();
347
+ for (uint32_t i = 0; i < v_cells[s0].size(); ++i) {
348
+ if (v_cells[s0].seq_has(i, seq_id_src)) {
349
+ llama_pos pos = v_cells[s0].pos_get(i);
350
+ llama_pos shift = v_cells[s0].get_shift(i);
351
+
352
+ if (shift != 0) {
353
+ pos -= shift;
354
+ assert(pos >= 0);
355
+ }
356
+
357
+ v_cells[s1].pos_set(i, pos);
358
+ v_cells[s1].seq_add(i, seq_id_dst);
359
+
360
+ if (shift != 0) {
361
+ v_cells[s1].pos_add(i, shift);
362
+ }
363
+ }
364
+ }
365
+
366
+ v_heads[s1] = v_heads[s0];
367
+
368
+ //for (uint32_t s = 0; s < n_stream; ++s) {
369
+ // LLAMA_LOG_WARN("%s: seq %d: min = %d, max = %d\n", __func__, s, v_cells[s].seq_pos_min(s), v_cells[s].seq_pos_max(s));
370
+ //}
371
+ }
372
+
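A hedged caller-side sketch of the two paths above (not package code): copying a shared prompt from sequence 0 to sequence 1. With a unified cache both sequences live in the same stream, so only cell metadata is touched; with per-sequence streams the call must cover the full range and the K/V buffer copy is deferred to the next update().

// illustrative sketch (not package code)
kv.seq_cp(/*seq_id_src=*/0, /*seq_id_dst=*/1, /*p0=*/-1, /*p1=*/-1);
// same stream      -> cells simply gain seq_id 1, no tensor data moves
// different stream -> the copy is queued in sc_info and performed during update()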
373
+ void llama_kv_cache::seq_keep(llama_seq_id seq_id) {
374
+ LM_GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
375
+
376
+ auto & cells = v_cells[seq_to_stream[seq_id]];
377
+ auto & head = v_heads[seq_to_stream[seq_id]];
378
+
379
+ uint32_t new_head = cells.size();
380
+
381
+ for (uint32_t i = 0; i < cells.size(); ++i) {
382
+ if (cells.seq_keep(i, seq_id)) {
383
+ if (new_head == cells.size()) {
384
+ new_head = i;
385
+ }
386
+ }
387
+ }
388
+
389
+ // If we freed up a slot, set head to it so searching can start there.
390
+ if (new_head != cells.size() && new_head < head) {
391
+ head = new_head;
392
+ }
393
+ }
394
+
395
+ void llama_kv_cache::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
396
+ LM_GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
397
+
398
+ auto & cells = v_cells[seq_to_stream[seq_id]];
399
+ auto & head = v_heads[seq_to_stream[seq_id]];
400
+
401
+ if (shift == 0) {
402
+ return;
403
+ }
404
+
405
+ uint32_t new_head = cells.size();
406
+
407
+ if (p0 < 0) {
408
+ p0 = 0;
409
+ }
410
+
411
+ if (p1 < 0) {
412
+ p1 = std::numeric_limits<llama_pos>::max();
413
+ }
414
+
415
+ // If there is no range then return early to avoid looping over all cells.
416
+ if (p0 == p1) {
417
+ return;
418
+ }
419
+
420
+ for (uint32_t i = 0; i < cells.size(); ++i) {
421
+ if (!cells.pos_in(i, p0, p1)) {
422
+ continue;
423
+ }
424
+
425
+ if (cells.seq_has(i, seq_id)) {
426
+ if (cells.pos_add(i, shift)) {
427
+ if (new_head == cells.size()) {
428
+ new_head = i;
429
+ }
430
+ }
431
+ }
432
+ }
433
+
434
+ // If we freed up a slot, set head to it so searching can start there.
435
+ // Otherwise we just start the next search from the beginning.
436
+ head = new_head != cells.size() ? new_head : 0;
437
+ }
438
+
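seq_add() only records a per-cell shift; the actual RoPE re-rotation is applied later by update()/build_graph_shift(). A common caller-side pattern (assumed here, with made-up n_keep/n_discard values) is context shifting, sketched below.

// illustrative sketch (not package code): drop n_discard old tokens after an n_keep prefix,
// then slide the remaining positions back so generation can continue
const llama_pos n_keep    = 128;
const llama_pos n_discard = 256;

kv.seq_rm (0, n_keep, n_keep + n_discard);          // forget the discarded span
kv.seq_add(0, n_keep + n_discard, -1, -n_discard);  // shift the remainder by -n_discard
// the K rotation itself happens on the next init_update()/update() pass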
439
+ void llama_kv_cache::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
440
+ LM_GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
441
+
442
+ auto & cells = v_cells[seq_to_stream[seq_id]];
443
+
444
+ if (d == 1) {
445
+ return;
446
+ }
447
+
448
+ if (p0 < 0) {
449
+ p0 = 0;
450
+ }
451
+
452
+ if (p1 < 0) {
453
+ p1 = std::numeric_limits<llama_pos>::max();
454
+ }
455
+
456
+ // If there is no range then return early to avoid looping over the cache.
457
+ if (p0 == p1) {
458
+ return;
459
+ }
460
+
461
+ for (uint32_t i = 0; i < cells.size(); ++i) {
462
+ if (!cells.pos_in(i, p0, p1)) {
463
+ continue;
464
+ }
465
+
466
+ if (cells.seq_has(i, seq_id)) {
467
+ cells.pos_div(i, d);
468
+ }
469
+ }
470
+ }
471
+
472
+ llama_pos llama_kv_cache::seq_pos_min(llama_seq_id seq_id) const {
473
+ LM_GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
474
+
475
+ const auto & cells = v_cells[seq_to_stream[seq_id]];
476
+
477
+ return cells.seq_pos_min(seq_id);
478
+ }
479
+
480
+ llama_pos llama_kv_cache::seq_pos_max(llama_seq_id seq_id) const {
481
+ LM_GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
482
+
483
+ const auto & cells = v_cells[seq_to_stream[seq_id]];
484
+
485
+ return cells.seq_pos_max(seq_id);
486
+ }
487
+
488
+ llama_memory_context_ptr llama_kv_cache::init_batch(
489
+ llama_batch_allocr & balloc,
490
+ uint32_t n_ubatch,
491
+ bool embd_all) {
492
+ LM_GGML_UNUSED(embd_all);
493
+
494
+ do {
495
+ balloc.split_reset();
496
+
497
+ std::vector<llama_ubatch> ubatches;
498
+ while (true) {
499
+ auto ubatch = n_stream == 1 ? balloc.split_simple(n_ubatch) : balloc.split_equal(n_ubatch, true);
500
+
501
+ if (ubatch.n_tokens == 0) {
502
+ break;
503
+ }
504
+
505
+ ubatches.push_back(std::move(ubatch)); // NOLINT
506
+ }
507
+
508
+ if (balloc.get_n_used() < balloc.get_n_tokens()) {
509
+ // failed to find a suitable split
510
+ break;
511
+ }
512
+
513
+ auto sinfos = prepare(ubatches);
514
+ if (sinfos.empty()) {
515
+ break;
516
+ }
517
+
518
+ return std::make_unique<llama_kv_cache_context>(
519
+ this, std::move(sinfos), std::move(ubatches));
520
+ } while (false);
521
+
522
+ return std::make_unique<llama_kv_cache_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
523
+ }
524
+
525
+ llama_memory_context_ptr llama_kv_cache::init_full() {
526
+ return std::make_unique<llama_kv_cache_context>(this);
527
+ }
528
+
529
+ llama_memory_context_ptr llama_kv_cache::init_update(llama_context * lctx, bool optimize) {
530
+ LM_GGML_UNUSED(optimize);
531
+
532
+ bool do_shift = get_has_shift();
533
+
534
+ return std::make_unique<llama_kv_cache_context>(this, lctx, do_shift, std::move(sc_info));
535
+ }
536
+
537
+ llama_kv_cache::slot_info_vec_t llama_kv_cache::prepare(const std::vector<llama_ubatch> & ubatches) {
538
+ llama_kv_cache::slot_info_vec_t res;
539
+
540
+ struct state_t {
541
+ slot_info sinfo; // slot info for the ubatch
542
+
543
+ std::vector<uint32_t> v_heads_old; // old positions of the heads, before placing the ubatch
544
+
545
+ std::vector<llama_kv_cells> v_cells; // copy of the old cells, before placing the ubatch
546
+ };
547
+
548
+ // remember the old state of the cells so we can restore it in the end
549
+ std::vector<state_t> states;
550
+
551
+ bool success = true;
552
+
553
+ for (const auto & ubatch : ubatches) {
554
+ // non-continuous slots require support for lm_ggml_set_rows()
555
+ const bool cont = supports_set_rows ? false : true;
556
+
557
+ // only find a suitable slot for the ubatch. don't modify the cells yet
558
+ const auto sinfo_new = find_slot(ubatch, cont);
559
+ if (sinfo_new.empty()) {
560
+ success = false;
561
+ break;
562
+ }
563
+
564
+ // remember the position that we found
565
+ res.push_back(sinfo_new);
566
+
567
+ // store the old state of the cells in the recovery stack
568
+ {
569
+ state_t state = { sinfo_new, v_heads, {} };
570
+
571
+ for (uint32_t s = 0; s < sinfo_new.n_stream(); ++s) {
572
+ auto & cells = v_cells[sinfo_new.strm[s]];
573
+
574
+ state.v_cells.push_back(cells.cp(sinfo_new.idxs[s]));
575
+ }
576
+
577
+ states.push_back(std::move(state));
578
+ }
579
+
580
+ // now emplace the ubatch
581
+ apply_ubatch(sinfo_new, ubatch);
582
+ }
583
+
584
+ LM_GGML_ASSERT(!states.empty() || !success);
585
+
586
+ // iterate backwards and restore the cells to their original state
587
+ for (auto it = states.rbegin(); it != states.rend(); ++it) {
588
+ const auto & sinfo = it->sinfo;
589
+
590
+ for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
591
+ auto & cells = v_cells[sinfo.strm[s]];
592
+ auto & head = v_heads[sinfo.strm[s]];
593
+
594
+ cells.set(sinfo.idxs[s], it->v_cells[s]);
595
+ head = it->v_heads_old[s];
596
+ }
597
+ }
598
+
599
+ if (!success) {
600
+ return {};
601
+ }
602
+
603
+ return res;
604
+ }
605
+
606
+ bool llama_kv_cache::update(llama_context * lctx, bool do_shift, const stream_copy_info & sc_info) {
607
+ bool updated = false;
608
+
609
+ auto * sched = lctx->get_sched();
610
+
611
+ if (!sc_info.empty()) {
612
+ assert(n_stream > 1 && "stream copy should never happen with a single stream");
613
+
614
+ llama_synchronize(lctx);
615
+
616
+ const size_t n_copy = sc_info.ssrc.size();
617
+
618
+ for (size_t i = 0; i < n_copy; ++i) {
619
+ const auto ssrc = sc_info.ssrc[i];
620
+ const auto sdst = sc_info.sdst[i];
621
+
622
+ assert(ssrc < n_stream);
623
+ assert(sdst < n_stream);
624
+
625
+ LLAMA_LOG_DEBUG("%s: copying KV buffer: stream %d to stream %d\n", __func__, ssrc, sdst);
626
+
627
+ assert(ssrc != sdst);
628
+
629
+ for (uint32_t il = 0; il < layers.size(); ++il) {
630
+ const auto & layer = layers[il];
631
+
632
+ lm_ggml_backend_tensor_copy(layer.k_stream[ssrc], layer.k_stream[sdst]);
633
+ lm_ggml_backend_tensor_copy(layer.v_stream[ssrc], layer.v_stream[sdst]);
634
+ }
635
+ }
636
+ }
637
+
638
+ if (do_shift) {
639
+ if (!get_can_shift()) {
640
+ LM_GGML_ABORT("The current KV cache / model configuration does not support K-shift");
641
+ }
642
+
643
+ LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);
644
+
645
+ // apply K-shift if needed
646
+ if (hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
647
+ lm_ggml_backend_sched_reset(sched);
648
+
649
+ auto * res = lctx->get_gf_res_reserve();
650
+
651
+ res->reset();
652
+
653
+ auto * gf = build_graph_shift(res, lctx);
654
+ if (!lm_ggml_backend_sched_alloc_graph(sched, gf)) {
655
+ LLAMA_LOG_ERROR("%s: failed to allocate compute graph for K-shift\n", __func__);
656
+ return updated;
657
+ }
658
+
659
+ res->set_inputs(nullptr);
660
+
661
+ if (lctx->graph_compute(gf, false) != LM_GGML_STATUS_SUCCESS) {
662
+ LLAMA_LOG_ERROR("%s: failed to compute K-shift\n", __func__);
663
+ return updated;
664
+ }
665
+
666
+ updated = true;
667
+ }
668
+
669
+ for (uint32_t s = 0; s < n_stream; ++s) {
670
+ auto & cells = v_cells[s];
671
+
672
+ cells.reset_shift();
673
+ }
674
+ }
675
+
676
+ return updated;
677
+ }
678
+
679
+ llama_kv_cache::slot_info llama_kv_cache::find_slot(const llama_ubatch & ubatch, bool cont) const {
680
+
681
+ if (debug > 0) {
682
+ for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
683
+ const auto seq_id = ubatch.seq_id_unq[s];
684
+ const auto stream_id = seq_to_stream[seq_id];
685
+ const auto & cells = v_cells[stream_id];
686
+ const uint32_t head_cur = v_heads[stream_id];
687
+
688
+ LLAMA_LOG_DEBUG("%s: stream[%d], n = %5d, used = %5d, head = %5d, size = %5d, n_swa = %5d\n",
689
+ __func__, stream_id, cells.used_max_p1(), cells.get_used(), head_cur, get_size(), n_swa);
690
+
691
+ if ((debug == 2 && n_swa > 0) || debug > 2) {
692
+ std::string ss;
693
+ for (uint32_t i = 0; i < cells.size(); ++i) {
694
+ if (cells.is_empty(i)) {
695
+ ss += '.';
696
+ } else {
697
+ assert(cells.seq_count(i) >= 1);
698
+
699
+ if (cells.seq_count(i) == 1) {
700
+ ss += std::to_string(cells.seq_get(i));
701
+ } else {
702
+ ss += 'M';
703
+ }
704
+ }
705
+ if (i%256 == 255) {
706
+ ss += " *";
707
+ ss += '\n';
708
+ }
709
+ }
710
+ LLAMA_LOG_DEBUG("\n%s\n", ss.c_str());
711
+ }
712
+
713
+ if ((debug == 2 && n_swa > 0) || debug > 2) {
714
+ std::string ss;
715
+ for (uint32_t i = 0; i < cells.size(); ++i) {
716
+ std::string cur;
717
+ if (cells.is_empty(i)) {
718
+ cur = '.';
719
+ } else {
720
+ cur = std::to_string(cells.pos_get(i));
721
+ }
722
+ const int n = cur.size();
723
+ for (int j = 0; j < 5 - n; ++j) {
724
+ cur += ' ';
725
+ }
726
+ ss += cur;
727
+ if (i%256 == 255) {
728
+ ss += " *";
729
+ }
730
+ if (i%64 == 63) {
731
+ ss += '\n';
732
+ }
733
+ }
734
+ LLAMA_LOG_DEBUG("\n%s\n", ss.c_str());
735
+ }
736
+
737
+ for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
738
+ if (cells.seq_pos_min(s) < 0) {
739
+ continue;
740
+ }
741
+
742
+ LLAMA_LOG_DEBUG("%s: stream[%d] min[%d] = %5d, max[%d] = %5d\n", __func__, stream_id, s, cells.seq_pos_min(s), s, cells.seq_pos_max(s));
743
+ }
744
+ }
745
+ }
746
+
747
+ uint32_t n_tokens = ubatch.n_tokens;
748
+ uint32_t n_seqs = 1;
749
+
750
+ if (n_stream > 1) {
751
+ LM_GGML_ASSERT(n_tokens % ubatch.n_seqs_unq == 0);
752
+
753
+ n_seqs = ubatch.n_seqs_unq;
754
+ n_tokens = n_tokens / n_seqs;
755
+ }
756
+
757
+ slot_info res = {
758
+ /*.s0 =*/ LLAMA_MAX_SEQ,
759
+ /*.s1 =*/ 0,
760
+ /*.strm =*/ { },
761
+ /*.idxs =*/ { },
762
+ };
763
+
764
+ res.resize(n_seqs);
765
+
766
+ for (uint32_t s = 0; s < n_seqs; ++s) {
767
+ const auto seq_id = ubatch.seq_id_unq[s];
768
+
769
+ if (n_stream > 1) {
770
+ LM_GGML_ASSERT(ubatch.n_seq_id[s*n_tokens] == 1);
771
+ LM_GGML_ASSERT(ubatch.seq_id [s*n_tokens][0] == seq_id);
772
+ }
773
+
774
+ res.s0 = std::min<llama_seq_id>(res.s0, seq_to_stream[seq_id]);
775
+ res.s1 = std::max<llama_seq_id>(res.s1, seq_to_stream[seq_id]);
776
+
777
+ res.strm[s] = seq_to_stream[seq_id];
778
+ res.idxs[s].reserve(n_tokens);
779
+
780
+ const auto & cells = v_cells[seq_to_stream[seq_id]];
781
+
782
+ uint32_t head_cur = v_heads[seq_to_stream[seq_id]];
783
+
784
+ // if we have enough unused cells before the current head ->
785
+ // better to start searching from the beginning of the cache, hoping to fill it
786
+ if (head_cur > cells.get_used() + 2*n_tokens) {
787
+ head_cur = 0;
788
+ }
789
+
790
+ if (n_tokens > cells.size()) {
791
+ LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %u\n", __func__, n_tokens, cells.size());
792
+ return { };
793
+ }
794
+
795
+ uint32_t n_tested = 0;
796
+
797
+ // for continuous slots, we test that all tokens in the ubatch fit, starting from the current head
798
+ // for non-continuous slots, we test the tokens one by one
799
+ const uint32_t n_test = cont ? n_tokens : 1;
800
+
801
+ while (true) {
802
+ if (head_cur + n_test > cells.size()) {
803
+ n_tested += cells.size() - head_cur;
804
+ head_cur = 0;
805
+ continue;
806
+ }
807
+
808
+ for (uint32_t i = 0; i < n_test; i++) {
809
+ const auto idx = head_cur;
810
+
811
+ head_cur++;
812
+ n_tested++;
813
+
814
+ //const llama_pos pos = ubatch.pos[i];
815
+ //const llama_seq_id seq_id = ubatch.seq_id[i][0];
816
+
817
+ // can we use this cell? either:
818
+ // - the cell is empty
819
+ // - the cell is occupied only by one sequence:
820
+ // - (disabled) mask causally, if the sequence is the same as the one we are inserting
821
+ // - mask SWA, using current max pos for that sequence in the cache
822
+ // always insert in the cell with minimum pos
823
+ bool can_use = cells.is_empty(idx);
824
+
825
+ if (!can_use && cells.seq_count(idx) == 1) {
826
+ const llama_pos pos_cell = cells.pos_get(idx);
827
+
828
+ // (disabled) causal mask
829
+ // note: it's better to purge any "future" tokens beforehand
830
+ //if (cells.seq_has(idx, seq_id)) {
831
+ // can_use = pos_cell >= pos;
832
+ //}
833
+
834
+ if (!can_use) {
835
+ const llama_seq_id seq_id_cell = cells.seq_get(idx);
836
+
837
+ // SWA mask
838
+ if (is_masked_swa(pos_cell, cells.seq_pos_max(seq_id_cell) + 1)) {
839
+ can_use = true;
840
+ }
841
+ }
842
+ }
843
+
844
+ if (can_use) {
845
+ res.idxs[s].push_back(idx);
846
+ } else {
847
+ if (cont) {
848
+ break;
849
+ }
850
+ }
851
+ }
852
+
853
+ if (res.idxs[s].size() == n_tokens) {
854
+ break;
855
+ }
856
+
857
+ if (cont) {
858
+ res.idxs[s].clear();
859
+ }
860
+
861
+ if (n_tested >= cells.size()) {
862
+ //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
863
+ return { };
864
+ }
865
+ }
866
+
867
+ // we didn't find a suitable slot - return empty result
868
+ if (res.idxs[s].size() < n_tokens) {
869
+ return { };
870
+ }
871
+ }
872
+
873
+ assert(res.s1 >= res.s0);
874
+
875
+ return res;
876
+ }
877
+
878
+ void llama_kv_cache::apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch) {
879
+ // keep track of the max sequence position that we would overwrite with this ubatch
880
+ // for a non-SWA cache, this will always stay empty
881
+ llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ];
882
+ for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
883
+ seq_pos_max_rm[s] = -1;
884
+ }
885
+
886
+ assert(ubatch.n_tokens == sinfo.n_stream()*sinfo.size());
887
+
888
+ for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
889
+ for (uint32_t ii = 0; ii < sinfo.size(); ++ii) {
890
+ const uint32_t i = s*sinfo.size() + ii;
891
+
892
+ auto & cells = v_cells[sinfo.strm[s]];
893
+
894
+ const auto idx = sinfo.idxs[s][ii];
895
+
896
+ if (!cells.is_empty(idx)) {
897
+ assert(cells.seq_count(idx) == 1);
898
+
899
+ const llama_seq_id seq_id = cells.seq_get(idx);
900
+ const llama_pos pos = cells.pos_get(idx);
901
+
902
+ seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos);
903
+
904
+ cells.rm(idx);
905
+ }
906
+
907
+ cells.pos_set(idx, ubatch.pos[i]);
908
+
909
+ for (int32_t s = 0; s < ubatch.n_seq_id[i]; s++) {
910
+ cells.seq_add(idx, ubatch.seq_id[i][s]);
911
+ }
912
+ }
913
+ }
914
+
915
+ // note: we want to preserve the invariant that all positions between [pos_min, pos_max] for each sequence
916
+ // will be present in the cache. so we have to purge any position which is less than those we would overwrite
917
+ // ref: https://github.com/ggml-org/llama.cpp/pull/13746#issuecomment-2916057092
918
+ for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
919
+ if (seq_pos_max_rm[s] == -1) {
920
+ continue;
921
+ }
922
+
923
+ LM_GGML_ASSERT(s < seq_to_stream.size());
924
+
925
+ auto & cells = v_cells[seq_to_stream[s]];
926
+
927
+ if (cells.seq_pos_min(s) <= seq_pos_max_rm[s]) {
928
+ LLAMA_LOG_DEBUG("%s: purging positions [%d, %d] of sequence %d from KV cache\n",
929
+ __func__, cells.seq_pos_min(s), seq_pos_max_rm[s], s);
930
+
931
+ seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1);
932
+ }
933
+ }
934
+
935
+ // move the head to the end of the slot
936
+ for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
937
+ auto & head = v_heads[sinfo.strm[s]];
938
+
939
+ head = sinfo.idxs[s].back() + 1;
940
+ }
941
+ }
942
+
943
+ bool llama_kv_cache::get_can_shift() const {
944
+ return true;
945
+ }
946
+
947
+ uint32_t llama_kv_cache::get_size() const {
948
+ const auto & cells = v_cells[seq_to_stream[0]];
949
+
950
+ return cells.size();
951
+ }
952
+
953
+ uint32_t llama_kv_cache::get_n_stream() const {
954
+ return n_stream;
955
+ }
956
+
957
+ bool llama_kv_cache::get_has_shift() const {
958
+ bool result = false;
959
+
960
+ for (uint32_t s = 0; s < n_stream; ++s) {
961
+ result |= v_cells[s].get_has_shift();
962
+ }
963
+
964
+ return result;
965
+ }
966
+
967
+ uint32_t llama_kv_cache::get_n_kv() const {
968
+ uint32_t result = 0;
969
+
970
+ for (uint32_t s = 0; s < n_stream; ++s) {
971
+ const auto & cells = v_cells[s];
972
+
973
+ result = std::max(std::min(cells.size(), std::max(n_pad, LM_GGML_PAD(cells.used_max_p1(), n_pad))), result);
974
+ }
975
+
976
+ return result;
977
+ }
978
+
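get_n_kv() pads the highest used cell index up to a multiple of n_pad so the graph shapes stay stable as the cache fills. A self-contained numeric check of that formula follows (pad_up mirrors the idea of LM_GGML_PAD; the values are illustrative only).

// illustrative sketch (not package code): n_kv = min(size, max(n_pad, pad_up(used_max_p1, n_pad)))
#include <algorithm>
#include <cassert>
#include <cstdint>

static uint32_t pad_up(uint32_t x, uint32_t p) { return ((x + p - 1) / p) * p; } // same idea as LM_GGML_PAD

int main() {
    const uint32_t kv_size = 4096, n_pad = 32;

    assert(std::min(kv_size, std::max(n_pad, pad_up(   0u, n_pad))) ==   32); // empty cache still reports n_pad
    assert(std::min(kv_size, std::max(n_pad, pad_up( 100u, n_pad))) ==  128); // 100 cells used -> rounded up to 128
    assert(std::min(kv_size, std::max(n_pad, pad_up(4090u, n_pad))) == 4096); // clamped to the cache size
    return 0;
}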
979
+ bool llama_kv_cache::get_supports_set_rows() const {
980
+ return supports_set_rows;
981
+ }
982
+
983
+ lm_ggml_tensor * llama_kv_cache::get_k(lm_ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const {
984
+ const int32_t ikv = map_layer_ids.at(il);
985
+
986
+ auto * k = layers[ikv].k;
987
+
988
+ const uint64_t kv_size = get_size();
989
+ const uint64_t n_embd_k_gqa = k->ne[0];
990
+
991
+ assert(n_embd_k_gqa == hparams.n_embd_k_gqa(il));
992
+
993
+ const uint32_t ns = sinfo.s1 - sinfo.s0 + 1;
994
+
995
+ return lm_ggml_view_4d(ctx, k,
996
+ hparams.n_embd_head_k, hparams.n_head_kv(il), n_kv, ns,
997
+ lm_ggml_row_size(k->type, hparams.n_embd_head_k),
998
+ lm_ggml_row_size(k->type, n_embd_k_gqa),
999
+ lm_ggml_row_size(k->type, n_embd_k_gqa*kv_size),
1000
+ lm_ggml_row_size(k->type, n_embd_k_gqa*kv_size)*sinfo.s0);
1001
+ }
1002
+
1003
+ lm_ggml_tensor * llama_kv_cache::get_v(lm_ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const {
1004
+ const int32_t ikv = map_layer_ids.at(il);
1005
+
1006
+ auto * v = layers[ikv].v;
1007
+
1008
+ const uint64_t kv_size = get_size();
1009
+ const uint64_t n_embd_v_gqa = v->ne[0];
1010
+
1011
+ // [TAG_V_CACHE_VARIABLE]
1012
+ assert(n_embd_v_gqa >= hparams.n_embd_v_gqa(il));
1013
+
1014
+ const uint32_t ns = sinfo.s1 - sinfo.s0 + 1;
1015
+
1016
+ if (!v_trans) {
1017
+ // note: v->nb[1] <= v->nb[2]
1018
+ return lm_ggml_view_4d(ctx, v,
1019
+ hparams.n_embd_head_v, hparams.n_head_kv(il), n_kv, ns,
1020
+ lm_ggml_row_size(v->type, hparams.n_embd_head_v), // v->nb[1]
1021
+ lm_ggml_row_size(v->type, n_embd_v_gqa), // v->nb[2]
1022
+ lm_ggml_row_size(v->type, n_embd_v_gqa*kv_size), // v->nb[3]
1023
+ lm_ggml_row_size(v->type, n_embd_v_gqa*kv_size)*sinfo.s0);
1024
+ }
1025
+
1026
+ // note: v->nb[1] > v->nb[2]
1027
+ return lm_ggml_view_4d(ctx, v,
1028
+ n_kv, hparams.n_head_kv(il), hparams.n_embd_head_v, ns,
1029
+ lm_ggml_row_size(v->type, kv_size*hparams.n_embd_head_v), // v->nb[1]
1030
+ lm_ggml_row_size(v->type, kv_size), // v->nb[2]
1031
+ lm_ggml_row_size(v->type, kv_size*n_embd_v_gqa), // v->nb[3]
1032
+ lm_ggml_row_size(v->type, kv_size*n_embd_v_gqa)*sinfo.s0);
1033
+ }
1034
+
1035
+ lm_ggml_tensor * llama_kv_cache::cpy_k(lm_ggml_context * ctx, lm_ggml_tensor * k_cur, lm_ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const {
1036
+ const int32_t ikv = map_layer_ids.at(il);
1037
+
1038
+ auto * k = layers[ikv].k;
1039
+
1040
+ const int64_t n_embd_k_gqa = k->ne[0];
1041
+ const int64_t n_tokens = k_cur->ne[2];
1042
+
1043
+ k_cur = lm_ggml_reshape_2d(ctx, k_cur, k->ne[0], n_tokens);
1044
+
1045
+ if (k_idxs && supports_set_rows) {
1046
+ if (k->ne[2] > 1) {
1047
+ k = lm_ggml_reshape_2d(ctx, k, k->ne[0], k->ne[1]*k->ne[2]);
1048
+ }
1049
+
1050
+ return lm_ggml_set_rows(ctx, k, k_cur, k_idxs);
1051
+ }
1052
+
1053
+ // TODO: fallback to old lm_ggml_cpy() method for backwards compatibility
1054
+ // will be removed when lm_ggml_set_rows() is adopted by all backends
1055
+
1056
+ LM_GGML_ASSERT(n_stream == 1 && "n_stream > 1 not supported without LLAMA_SET_ROWS");
1057
+
1058
+ lm_ggml_tensor * k_view = lm_ggml_view_1d(ctx, k,
1059
+ n_tokens*n_embd_k_gqa,
1060
+ lm_ggml_row_size(k->type, n_embd_k_gqa)*sinfo.head());
1061
+
1062
+ return lm_ggml_cpy(ctx, k_cur, k_view);
1063
+ }
1064
+
1065
+ lm_ggml_tensor * llama_kv_cache::cpy_v(lm_ggml_context * ctx, lm_ggml_tensor * v_cur, lm_ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const {
1066
+ const int32_t ikv = map_layer_ids.at(il);
1067
+
1068
+ auto * v = layers[ikv].v;
1069
+
1070
+ const int64_t n_embd_v_gqa = v_cur->ne[0]*v_cur->ne[1];
1071
+ const int64_t n_tokens = v_cur->ne[2];
1072
+
1073
+ v_cur = lm_ggml_reshape_2d(ctx, v_cur, n_embd_v_gqa, n_tokens);
1074
+
1075
+ if (v_idxs && supports_set_rows) {
1076
+ if (!v_trans) {
1077
+ if (v->ne[2] > 1) {
1078
+ v = lm_ggml_reshape_2d(ctx, v, v->ne[0], v->ne[1]*v->ne[2]);
1079
+ }
1080
+
1081
+ return lm_ggml_set_rows(ctx, v, v_cur, v_idxs);
1082
+ }
1083
+
1084
+ // [TAG_V_CACHE_VARIABLE]
1085
+ if (n_embd_v_gqa < v->ne[0]) {
1086
+ v_cur = lm_ggml_pad(ctx, v_cur, v->ne[0] - n_embd_v_gqa, 0, 0, 0);
1087
+ }
1088
+
1089
+ // the row becomes a single element
1090
+ lm_ggml_tensor * v_view = lm_ggml_reshape_2d(ctx, v, 1, v->ne[0]*v->ne[1]*v->ne[2]);
1091
+
1092
+ v_cur = lm_ggml_reshape_2d(ctx, v_cur, 1, v_cur->ne[0]*v_cur->ne[1]);
1093
+
1094
+ return lm_ggml_set_rows(ctx, v_view, v_cur, v_idxs);
1095
+ }
1096
+
1097
+ // TODO: fallback to old lm_ggml_cpy() method for backwards compatibility
1098
+ // will be removed when lm_ggml_set_rows() is adopted by all backends
1099
+
1100
+ LM_GGML_ASSERT(n_stream == 1 && "n_stream > 1 not supported without LLAMA_SET_ROWS");
1101
+
1102
+ lm_ggml_tensor * v_view = nullptr;
1103
+
1104
+ if (!v_trans) {
1105
+ v_view = lm_ggml_view_1d(ctx, v,
1106
+ n_tokens*n_embd_v_gqa,
1107
+ lm_ggml_row_size(v->type, n_embd_v_gqa)*sinfo.head());
1108
+ } else {
1109
+ v_cur = lm_ggml_transpose(ctx, v_cur);
1110
+
1111
+ v_view = lm_ggml_view_2d(ctx, v, n_tokens, n_embd_v_gqa,
1112
+ (v->ne[1] )*lm_ggml_element_size(v),
1113
+ (sinfo.head())*lm_ggml_element_size(v));
1114
+ }
1115
+
1116
+ return lm_ggml_cpy(ctx, v_cur, v_view);
1117
+ }
1118
+
1119
+ lm_ggml_tensor * llama_kv_cache::build_input_k_idxs(lm_ggml_context * ctx, const llama_ubatch & ubatch) const {
1120
+ const uint32_t n_tokens = ubatch.n_tokens;
1121
+
1122
+ lm_ggml_tensor * k_idxs = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_I64, n_tokens);
1123
+
1124
+ lm_ggml_set_input(k_idxs);
1125
+
1126
+ return k_idxs;
1127
+ }
1128
+
1129
+ lm_ggml_tensor * llama_kv_cache::build_input_v_idxs(lm_ggml_context * ctx, const llama_ubatch & ubatch) const {
1130
+ const uint32_t n_tokens = ubatch.n_tokens;
1131
+
1132
+ lm_ggml_tensor * v_idxs;
1133
+
1134
+ if (!v_trans) {
1135
+ v_idxs = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_I64, n_tokens);
1136
+ } else {
1137
+ v_idxs = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_I64, n_tokens*hparams.n_embd_v_gqa_max());
1138
+ }
1139
+
1140
+ lm_ggml_set_input(v_idxs);
1141
+
1142
+ return v_idxs;
1143
+ }
1144
+
1145
+ void llama_kv_cache::set_input_k_idxs(lm_ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const {
1146
+ if (!supports_set_rows) {
1147
+ return;
1148
+ }
1149
+
1150
+ const uint32_t n_tokens = ubatch->n_tokens;
1151
+ LM_GGML_ASSERT(n_tokens == (int64_t) sinfo.size()*sinfo.n_stream());
1152
+
1153
+ LM_GGML_ASSERT(lm_ggml_backend_buffer_is_host(dst->buffer));
1154
+ int64_t * data = (int64_t *) dst->data;
1155
+
1156
+ for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
1157
+ const int64_t offs = sinfo.strm[s]*get_size();
1158
+
1159
+ for (uint32_t i = 0; i < sinfo.size(); ++i) {
1160
+ data[s*sinfo.size() + i] = offs + sinfo.idxs[s][i];
1161
+ }
1162
+ }
1163
+ }
1164
+
1165
+ void llama_kv_cache::set_input_v_idxs(lm_ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const {
1166
+ if (!supports_set_rows) {
1167
+ return;
1168
+ }
1169
+
1170
+ const uint32_t n_tokens = ubatch->n_tokens;
1171
+ LM_GGML_ASSERT(n_tokens == (int64_t) sinfo.size()*sinfo.n_stream());
1172
+
1173
+ LM_GGML_ASSERT(lm_ggml_backend_buffer_is_host(dst->buffer));
1174
+ int64_t * data = (int64_t *) dst->data;
1175
+
1176
+ if (!v_trans) {
1177
+ for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
1178
+ const int64_t offs = sinfo.strm[s]*get_size();
1179
+
1180
+ for (uint32_t i = 0; i < sinfo.size(); ++i) {
1181
+ data[s*sinfo.size() + i] = offs + sinfo.idxs[s][i];
1182
+ }
1183
+ }
1184
+ } else {
1185
+ // note: the V cache is transposed when not using flash attention
1186
+ const int64_t kv_size = get_size();
1187
+
1188
+ const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa_max();
1189
+
1190
+ for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
1191
+ const int64_t offs = sinfo.strm[s]*kv_size*n_embd_v_gqa;
1192
+
1193
+ for (uint32_t i = 0; i < sinfo.size(); ++i) {
1194
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
1195
+ data[s*sinfo.size()*n_embd_v_gqa + i*n_embd_v_gqa + j] = offs + j*kv_size + sinfo.idxs[s][i];
1196
+ }
1197
+ }
1198
+ }
1199
+ }
1200
+ }
1201
+
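For the transposed V layout above, each destination "row" is a single element, so the index written for channel j of the token placed in cell cell_idx of stream strm is offs + j*kv_size + cell_idx. A tiny self-contained check of that formula (the sizes are made up for illustration):

// illustrative sketch (not package code): index math for the transposed V cache
#include <cassert>
#include <cstdint>

int main() {
    const int64_t kv_size      = 4096;  // cells per stream
    const int64_t n_embd_v_gqa = 1024;  // padded V width
    const int64_t strm         = 1;     // stream of this sequence

    const int64_t offs     = strm * kv_size * n_embd_v_gqa;
    const int64_t cell_idx = 7;         // slot chosen by find_slot()
    const int64_t j        = 3;         // embedding channel

    // channel-major: all kv_size values of channel j are contiguous
    assert(offs + j*kv_size + cell_idx == 4206599);
    return 0;
}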
1202
+ void llama_kv_cache::set_input_k_shift(lm_ggml_tensor * dst) const {
1203
+ LM_GGML_ASSERT(lm_ggml_backend_buffer_is_host(dst->buffer));
1204
+
1205
+ int32_t * data = (int32_t *) dst->data;
1206
+
1207
+ for (uint32_t s = 0; s < n_stream; ++s) {
1208
+ const auto & cells = v_cells[s];
1209
+
1210
+ for (uint32_t i = 0; i < cells.size(); ++i) {
1211
+ data[s*cells.size() + i] = cells.is_empty(i) ? 0 : cells.get_shift(i);
1212
+ }
1213
+ }
1214
+ }
1215
+
1216
+ void llama_kv_cache::set_input_kq_mask(lm_ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
1217
+ const uint32_t n_tokens = ubatch->n_tokens;
1218
+
1219
+ LM_GGML_ASSERT(lm_ggml_backend_buffer_is_host(dst->buffer));
1220
+ float * data = (float *) dst->data;
1221
+
1222
+ const int64_t n_kv = dst->ne[0];
1223
+ const int64_t n_stream = dst->ne[3]; // num streams in the current ubatch
1224
+
1225
+ LM_GGML_ASSERT(n_tokens%n_stream == 0);
1226
+
1227
+ // n_tps == n_tokens_per_stream
1228
+ const int64_t n_tps = n_tokens/n_stream;
1229
+ const int64_t n_tps_pad = LM_GGML_PAD(n_tps, LM_GGML_KQ_MASK_PAD);
1230
+
1231
+ std::fill(data, data + lm_ggml_nelements(dst), -INFINITY);
1232
+
1233
+ // Use only the previous KV cells of the correct sequence for each token of the ubatch.
1234
+ // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
1235
+ // Example with a cache of 10 tokens, 2 tokens populated in cache and 3 tokens in batch:
1236
+ // Causal mask:
1237
+ // xxx-------
1238
+ // xxxx------
1239
+ // xxxxx-----
1240
+ // Non-causal mask:
1241
+ // xxxxx-----
1242
+ // xxxxx-----
1243
+ // xxxxx-----
1244
+ // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615
1245
+ // TODO: optimize this section
1246
+ for (uint32_t h = 0; h < 1; ++h) {
1247
+ for (uint32_t s = 0; s < n_stream; ++s) {
1248
+ for (uint32_t ii = 0; ii < n_tps; ++ii) {
1249
+ const uint32_t i = s*n_tps + ii;
1250
+
1251
+ const llama_seq_id seq_id = ubatch->seq_id[i][0];
1252
+
1253
+ const auto & cells = v_cells[seq_to_stream[seq_id]];
1254
+
1255
+ const llama_pos p1 = ubatch->pos[i];
1256
+
1257
+ const uint64_t idst = n_kv*(h*n_stream*n_tps_pad + s*n_tps_pad + ii);
1258
+
1259
+ for (uint32_t j = 0; j < n_kv; ++j) {
1260
+ if (cells.is_empty(j)) {
1261
+ continue;
1262
+ }
1263
+
1264
+ // mask the token if not the same sequence
1265
+ if (!cells.seq_has(j, seq_id)) {
1266
+ continue;
1267
+ }
1268
+
1269
+ const llama_pos p0 = cells.pos_get(j);
1270
+
1271
+ // mask future tokens
1272
+ if (causal_attn && p0 > p1) {
1273
+ continue;
1274
+ }
1275
+
1276
+ // apply SWA if any
1277
+ if (is_masked_swa(p0, p1)) {
1278
+ continue;
1279
+ }
1280
+
1281
+ data[idst + j] = hparams.use_alibi ? -std::abs(p0 - p1) : 0.0f;
1282
+ }
1283
+ }
1284
+ }
1285
+ }
1286
+ }
1287
+
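A compact restatement of the per-entry rule applied above (illustrative, not package code): entries start at -INFINITY and stay there for empty cells; otherwise a cell is visible only if it belongs to the token's sequence, is not in the future under causal attention, and is not masked by SWA, in which case the entry becomes 0, or the negative distance when ALiBi is used.

// illustrative sketch (not package code): the per-entry rule used by set_input_kq_mask()
#include <cmath>
#include <cstdint>
#include <cstdlib>

static float kq_mask_value(bool same_seq, bool causal_attn, bool use_alibi,
                           int32_t p0 /*cell pos*/, int32_t p1 /*token pos*/, bool masked_swa) {
    if (!same_seq)              return -INFINITY; // cell belongs to another sequence
    if (causal_attn && p0 > p1) return -INFINITY; // future token under causal attention
    if (masked_swa)             return -INFINITY; // outside the sliding window
    return use_alibi ? -std::abs(p0 - p1) : 0.0f; // ALiBi uses the (negative) distance
}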
1288
+ void llama_kv_cache::set_input_pos_bucket(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const {
1289
+ const int64_t n_tokens = ubatch->n_tokens;
1290
+
1291
+ LM_GGML_ASSERT(n_stream == 1 && "TODO: support multiple streams");
1292
+ const auto & cells = v_cells[0];
1293
+
1294
+ LM_GGML_ASSERT(lm_ggml_backend_buffer_is_host(dst->buffer));
1295
+ LM_GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing
1296
+
1297
+ int32_t * data = (int32_t *) dst->data;
1298
+
1299
+ const int32_t n_kv = dst->ne[0];
1300
+
1301
+ for (int h = 0; h < 1; ++h) {
1302
+ for (int i = 0; i < n_tokens; ++i) {
1303
+ for (int j = 0; j < n_kv; ++j) {
1304
+ // the position when the cell is empty is irrelevant - it will be masked out later in the attention
1305
+ const llama_pos p0 = cells.is_empty(j) ? -1 : cells.pos_get(j);
1306
+
1307
+ data[h*(n_kv*n_tokens) + i*n_kv + j] = llama_relative_position_bucket(p0, ubatch->pos[i], hparams.n_rel_attn_bkts, false);
1308
+ }
1309
+ }
1310
+ }
1311
+ }
1312
+
1313
+ size_t llama_kv_cache::total_size() const {
1314
+ size_t size = 0;
1315
+
1316
+ for (const auto & buf : bufs) {
1317
+ size += lm_ggml_backend_buffer_get_size(buf.get());
1318
+ }
1319
+
1320
+ return size;
1321
+ }
1322
+
1323
+ size_t llama_kv_cache::size_k_bytes() const {
1324
+ size_t size_k_bytes = 0;
1325
+
1326
+ for (const auto & layer : layers) {
1327
+ size_k_bytes += lm_ggml_nbytes(layer.k);
1328
+ }
1329
+
1330
+ return size_k_bytes;
1331
+ }
1332
+
1333
+ size_t llama_kv_cache::size_v_bytes() const {
1334
+ size_t size_v_bytes = 0;
1335
+
1336
+ for (const auto & layer : layers) {
1337
+ size_v_bytes += lm_ggml_nbytes(layer.v);
1338
+ }
1339
+
1340
+ return size_v_bytes;
1341
+ }
1342
+
1343
+ lm_ggml_tensor * llama_kv_cache::build_rope_shift(
1344
+ const llama_cparams & cparams,
1345
+ lm_ggml_context * ctx,
1346
+ lm_ggml_tensor * cur,
1347
+ lm_ggml_tensor * shift,
1348
+ lm_ggml_tensor * factors,
1349
+ float freq_base,
1350
+ float freq_scale) const {
1351
+ const auto & n_ctx_orig = cparams.n_ctx_orig_yarn;
1352
+
1353
+ const auto & yarn_ext_factor = cparams.yarn_ext_factor;
1354
+ const auto & yarn_beta_fast = cparams.yarn_beta_fast;
1355
+ const auto & yarn_beta_slow = cparams.yarn_beta_slow;
1356
+
1357
+ const auto & n_rot = hparams.n_rot;
1358
+ const auto & rope_type = hparams.rope_type == LLAMA_ROPE_TYPE_MROPE
1359
+ // @ngxson : this is a workaround
1360
+ // for M-RoPE, we want to rotate the whole vector when doing KV shift
1361
+ // a normal RoPE should work, we just need to use the correct ordering
1362
+ // ref: https://github.com/ggml-org/llama.cpp/pull/13870
1363
+ ? LLAMA_ROPE_TYPE_NEOX
1364
+ : hparams.rope_type;
1365
+
1366
+ // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly.
1367
+ // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
1368
+ const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2
1369
+ ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale))
1370
+ : cparams.yarn_attn_factor;
1371
+
1372
+ lm_ggml_tensor * tmp;
1373
+
1374
+ if (lm_ggml_is_quantized(cur->type)) {
1375
+ // dequantize to f32 -> RoPE -> quantize back
1376
+ tmp = lm_ggml_cast(ctx, cur, LM_GGML_TYPE_F32);
1377
+
1378
+ tmp = lm_ggml_rope_ext(ctx, tmp,
1379
+ shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
1380
+ yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
1381
+
1382
+ tmp = lm_ggml_cpy(ctx, tmp, cur);
1383
+ } else {
1384
+ // we rotate only the first n_rot dimensions
1385
+ tmp = lm_ggml_rope_ext_inplace(ctx, cur,
1386
+ shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
1387
+ yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
1388
+ }
1389
+
1390
+ return tmp;
1391
+ }
1392
+
1393
+ class llm_graph_input_k_shift : public llm_graph_input_i {
1394
+ public:
1395
+ llm_graph_input_k_shift(const llama_kv_cache * kv_self) : kv_self(kv_self) {}
1396
+ virtual ~llm_graph_input_k_shift() = default;
1397
+
1398
+ void set_input(const llama_ubatch * ubatch) override;
1399
+
1400
+ lm_ggml_tensor * k_shift; // I32 [kv_size*n_stream]
1401
+
1402
+ const llama_kv_cache * kv_self;
1403
+ };
1404
+
1405
+ void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) {
1406
+ LM_GGML_UNUSED(ubatch);
1407
+
1408
+ if (k_shift) {
1409
+ kv_self->set_input_k_shift(k_shift);
1410
+ }
1411
+ }
1412
+
1413
+ lm_ggml_cgraph * llama_kv_cache::build_graph_shift(llm_graph_result * res, llama_context * lctx) const {
1414
+ auto * ctx = res->get_ctx();
1415
+ auto * gf = res->get_gf();
1416
+
1417
+ const auto & n_embd_head_k = hparams.n_embd_head_k;
1418
+ //const auto & n_embd_head_v = hparams.n_embd_head_v;
1419
+
1420
+ auto inp = std::make_unique<llm_graph_input_k_shift>(this);
1421
+
1422
+ inp->k_shift = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_I32, (int64_t) get_size()*n_stream);
1423
+ lm_ggml_set_input(inp->k_shift);
1424
+
1425
+ const auto & cparams = lctx->get_cparams();
1426
+
1427
+ for (const auto & layer : layers) {
1428
+ const uint32_t il = layer.il;
1429
+
1430
+ const int64_t n_head_kv = hparams.n_head_kv(il);
1431
+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
1432
+
1433
+ const float freq_base_l = model.get_rope_freq_base (cparams, il);
1434
+ const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
1435
+
1436
+ lm_ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
1437
+
1438
+ lm_ggml_tensor * k =
1439
+ lm_ggml_view_3d(ctx, layer.k,
1440
+ n_embd_head_k, n_head_kv, get_size()*n_stream,
1441
+ lm_ggml_row_size(layer.k->type, n_embd_head_k),
1442
+ lm_ggml_row_size(layer.k->type, n_embd_k_gqa),
1443
+ 0);
1444
+
1445
+ lm_ggml_tensor * cur = build_rope_shift(cparams, ctx, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l);
1446
+
1447
+ lm_ggml_build_forward_expand(gf, cur);
1448
+ }
1449
+
1450
+ res->add_input(std::move(inp));
1451
+
1452
+ return gf;
1453
+ }
1454
+
1455
+ bool llama_kv_cache::is_masked_swa(llama_pos p0, llama_pos p1) const {
1456
+ assert(p0 >= 0 && p1 >= 0);
1457
+
1458
+ switch (swa_type) {
1459
+ case LLAMA_SWA_TYPE_NONE:
1460
+ {
1461
+ } break;
1462
+ case LLAMA_SWA_TYPE_STANDARD:
1463
+ {
1464
+ if (p1 - p0 >= (int32_t) n_swa) {
1465
+ return true;
1466
+ }
1467
+ } break;
1468
+ case LLAMA_SWA_TYPE_CHUNKED:
1469
+ {
1470
+ const llama_pos pos_chunk_start = (p1 / n_swa) * n_swa;
1471
+
1472
+ if (p0 < pos_chunk_start) {
1473
+ return true;
1474
+ }
1475
+ } break;
1476
+ }
1477
+
1478
+ return false;
1479
+ }
1480
+
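A self-contained illustration of the two window types above (values made up): with n_swa = 4 and a query at position p1 = 10, the standard window keeps cells with p1 - p0 < 4 (positions 7..10), while the chunked window keeps everything from the start of the current chunk, (10/4)*4 = 8 (positions 8..10).

// illustrative sketch (not package code): standard vs chunked SWA masking, n_swa = 4, p1 = 10
#include <cassert>

int main() {
    const int n_swa = 4, p1 = 10;

    auto masked_standard = [&](int p0) { return p1 - p0 >= n_swa; };
    auto masked_chunked  = [&](int p0) { return p0 < (p1 / n_swa) * n_swa; };

    assert( masked_standard(6) && !masked_standard(7)); // standard window: p0 in [7, 10] visible
    assert( masked_chunked (7) && !masked_chunked (8)); // chunked window:  p0 in [8, 10] visible
    return 0;
}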
1481
+ void llama_kv_cache::state_write(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) const {
1482
+ LM_GGML_UNUSED(flags);
1483
+
1484
+ io.write(&n_stream, sizeof(n_stream));
1485
+
1486
+ for (uint32_t s = 0; s < n_stream; ++s) {
1487
+ cell_ranges_t cr { s, {} };
1488
+
1489
+ uint32_t cell_count = 0;
1490
+
1491
+ const auto & cells = v_cells[s];
1492
+
1493
+ // Count the number of cells with the specified seq_id
1494
+ // Find all the ranges of cells with this seq id (or all, when -1)
1495
+ uint32_t cell_range_begin = cells.size();
1496
+
1497
+ for (uint32_t i = 0; i < cells.size(); ++i) {
1498
+ if (!cells.is_empty(i) && (seq_id == -1 || cells.seq_has(i, seq_id))) {
1499
+ ++cell_count;
1500
+ if (cell_range_begin == cells.size()) {
1501
+ cell_range_begin = i;
1502
+ }
1503
+ } else {
1504
+ if (cell_range_begin != cells.size()) {
1505
+ cr.data.emplace_back(cell_range_begin, i);
1506
+ cell_range_begin = cells.size();
1507
+ }
1508
+ }
1509
+ }
1510
+
1511
+ if (cell_range_begin != cells.size()) {
1512
+ cr.data.emplace_back(cell_range_begin, cells.size());
1513
+ }
1514
+
1515
+ // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
1516
+ uint32_t cell_count_check = 0;
1517
+ for (const auto & range : cr.data) {
1518
+ cell_count_check += range.second - range.first;
1519
+ }
1520
+ LM_GGML_ASSERT(cell_count == cell_count_check);
1521
+
1522
+ io.write(&cell_count, sizeof(cell_count));
1523
+
1524
+ // skip empty streams
1525
+ if (cell_count == 0) {
1526
+ continue;
1527
+ }
1528
+
1529
+ state_write_meta(io, cr, seq_id);
1530
+ state_write_data(io, cr);
1531
+ }
1532
+ }
1533
+
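For reference, a summary of the byte layout produced by state_write() above, as assembled by state_write_meta() and state_write_data() further below (derived from this file only):

// layout summary (illustrative comment, not package code)
// whole cache:        uint32 n_stream
// per stream:         uint32 cell_count                 (0 -> stream skipped)
//   meta, per cell:   llama_pos pos, uint32 n_seq_id, then n_seq_id x llama_seq_id
//   data:             uint32 v_trans, uint32 n_layer
//     per layer (K):  int32 type, uint64 row_size, then one row per stored cell
//     per layer (V):  !v_trans: int32 type, uint64 row_size, then one row per cell
//                      v_trans: int32 type, uint32 elem_size, uint32 n_embd_v_gqa,
//                               then per channel, per cell range: range_size elements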
1534
+ void llama_kv_cache::state_read(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
1535
+ LM_GGML_UNUSED(flags);
1536
+
1537
+ LM_GGML_ASSERT(seq_id == -1 || (seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()));
1538
+
1539
+ uint32_t n_stream_cur;
1540
+ io.read_to(&n_stream_cur, sizeof(n_stream_cur));
1541
+ if (n_stream_cur != n_stream) {
1542
+ throw std::runtime_error("n_stream mismatch");
1543
+ }
1544
+
1545
+ for (uint32_t s = 0; s < n_stream; ++s) {
1546
+ uint32_t cell_count;
1547
+ io.read_to(&cell_count, sizeof(cell_count));
1548
+
1549
+ if (cell_count == 0) {
1550
+ continue;
1551
+ }
1552
+
1553
+ const uint32_t strm = seq_id == -1 ? s : seq_to_stream[seq_id];
1554
+
1555
+ bool res = true;
1556
+ res = res && state_read_meta(io, strm, cell_count, seq_id);
1557
+ res = res && state_read_data(io, strm, cell_count);
1558
+
1559
+ if (!res) {
1560
+ if (seq_id == -1) {
1561
+ clear(true);
1562
+ } else {
1563
+ seq_rm(seq_id, -1, -1);
1564
+ }
1565
+ throw std::runtime_error("failed to restore kv cache");
1566
+ }
1567
+ }
1568
+ }
1569
+
1570
+ void llama_kv_cache::state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id) const {
+     const auto & cells = v_cells[cr.strm];
+
+     for (const auto & range : cr.data) {
+         for (uint32_t i = range.first; i < range.second; ++i) {
+             std::vector<llama_seq_id> seq_ids;
+
+             for (llama_seq_id cur = 0; cur < (int) n_seq_max; ++cur) {
+                 if (cur == seq_id || seq_id == -1) {
+                     if (cells.seq_has(i, cur)) {
+                         seq_ids.push_back(cur);
+                     }
+                 }
+             }
+
+             const llama_pos pos = cells.pos_get(i);
+             const uint32_t n_seq_id = seq_ids.size();
+
+             io.write(&pos, sizeof(pos));
+             io.write(&n_seq_id, sizeof(n_seq_id));
+
+             for (const auto & seq_id : seq_ids) {
+                 io.write(&seq_id, sizeof(seq_id));
+             }
+         }
+     }
+ }
+
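+ // Data section written per stream by state_write_data (sketch based on the writes below):
+ //   [uint32 v_trans][uint32 n_layer]
+ //   per layer: [int32 k_type][uint64 k_row_size][K rows for every saved cell range]
+ //   then, if V is not transposed:
+ //     per layer: [int32 v_type][uint64 v_row_size][V rows for every saved cell range]
+ //   otherwise (transposed V):
+ //     per layer: [int32 v_type][uint32 v_elem_size][uint32 n_embd_v_gqa][per-row element slices]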
+ void llama_kv_cache::state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const {
+     const auto & cells = v_cells[cr.strm];
+
+     const uint32_t v_trans = this->v_trans ? 1 : 0;
+     const uint32_t n_layer = layers.size();
+
+     io.write(&v_trans, sizeof(v_trans));
+     io.write(&n_layer, sizeof(n_layer));
+
+     std::vector<uint8_t> tmp_buf;
+
+     // Iterate and write all the keys first, each row is a cell
+     // Get whole range at a time
+     for (const auto & layer : layers) {
+         const uint32_t il = layer.il;
+
+         const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
+
+         auto * k = layer.k_stream[cr.strm];
+
+         // Write key type
+         const int32_t k_type_i = (int32_t) k->type;
+         io.write(&k_type_i, sizeof(k_type_i));
+
+         // Write row size of key
+         const uint64_t k_size_row = lm_ggml_row_size(k->type, n_embd_k_gqa);
+         io.write(&k_size_row, sizeof(k_size_row));
+
+         // Write out each range of cells, k_size_row bytes per cell, directly from the key tensor
+         for (const auto & range : cr.data) {
+             const size_t range_size = range.second - range.first;
+             const size_t buf_size = range_size * k_size_row;
+             io.write_tensor(k, range.first * k_size_row, buf_size);
+         }
+     }
+
+     if (!v_trans) {
+         for (const auto & layer : layers) {
+             const uint32_t il = layer.il;
+
+             const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
+
+             auto * v = layer.v_stream[cr.strm];
+
+             // Write value type
+             const int32_t v_type_i = (int32_t) v->type;
+             io.write(&v_type_i, sizeof(v_type_i));
+
+             // Write row size of value
+             const uint64_t v_size_row = lm_ggml_row_size(v->type, n_embd_v_gqa);
+             io.write(&v_size_row, sizeof(v_size_row));
+
+             // Write out each range of cells, v_size_row bytes per cell, directly from the value tensor
+             for (const auto & range : cr.data) {
+                 const size_t range_size = range.second - range.first;
+                 const size_t buf_size = range_size * v_size_row;
+                 io.write_tensor(v, range.first * v_size_row, buf_size);
+             }
+         }
+     } else {
+         // When v is transposed, we also need the element size and get the element ranges from each row
+         const uint32_t kv_size = cells.size();
+
+         for (const auto & layer : layers) {
+             const uint32_t il = layer.il;
+
+             const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
+
+             auto * v = layer.v_stream[cr.strm];
+
+             // Write value type
+             const int32_t v_type_i = (int32_t) v->type;
+             io.write(&v_type_i, sizeof(v_type_i));
+
+             // Write element size
+             const uint32_t v_size_el = lm_ggml_type_size(v->type);
+             io.write(&v_size_el, sizeof(v_size_el));
+
+             // Write GQA embedding size
+             io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
+
+             // For each row, we get the element values of each cell
+             for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
+                 // Write out each range of cells, v_size_el bytes per cell, directly from the value tensor
+                 for (const auto & range : cr.data) {
+                     const size_t range_size = range.second - range.first;
+                     const size_t src_offset = (range.first + j * kv_size) * v_size_el;
+                     const size_t buf_size = range_size * v_size_el;
+                     io.write_tensor(v, src_offset, buf_size);
+                 }
+             }
+         }
+     }
+ }
+
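+ // state_read_meta supports two restore modes (see the branches below):
+ //   dest_seq_id != -1: the saved cells are rebuilt as a ubatch and placed via find_slot()/apply_ubatch(),
+ //                      with the stream head set to the start of the allocated slot;
+ //   dest_seq_id == -1: the whole cache is cleared and cells are restored in place starting at index 0.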
+ bool llama_kv_cache::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, llama_seq_id dest_seq_id) {
+     auto & cells = v_cells[strm];
+     auto & head = v_heads[strm];
+
+     if (dest_seq_id != -1) {
+         // single sequence
+         seq_rm(dest_seq_id, -1, -1);
+
+         llama_batch_allocr balloc(hparams.n_pos_per_embd());
+
+         llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);
+
+         ubatch.seq_id_unq[0] = dest_seq_id;
+
+         for (uint32_t i = 0; i < cell_count; ++i) {
+             llama_pos pos;
+             uint32_t n_seq_id;
+
+             io.read_to(&pos, sizeof(pos));
+             io.read_to(&n_seq_id, sizeof(n_seq_id));
+
+             if (n_seq_id != 1) {
+                 LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
+                 return false;
+             }
+
+             // read the sequence id, but directly discard it - we will use dest_seq_id instead
+             {
+                 llama_seq_id seq_id;
+                 io.read_to(&seq_id, sizeof(seq_id));
+             }
+
+             ubatch.pos[i] = pos;
+             ubatch.n_seq_id[i] = n_seq_id;
+             ubatch.seq_id[i] = &dest_seq_id;
+         }
+
+         const auto sinfo = find_slot(ubatch, true);
+         if (sinfo.empty()) {
+             LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
+             return false;
+         }
+
+         apply_ubatch(sinfo, ubatch);
+
+         const auto head_cur = sinfo.head();
+
+         // keep the head at the old position because we will read the KV data into it in state_read_data()
+         head = head_cur;
+
+         LLAMA_LOG_DEBUG("%s: head_cur = %d, head = %d, cell_count = %d, dest_seq_id = %d\n", __func__, head_cur, head, cell_count, dest_seq_id);
+
+         // DEBUG CHECK: head_cur should be our first cell, head_cur + cell_count - 1 should be our last cell (verify seq_id and pos values)
+         // Assume that this is one contiguous block of cells
+         LM_GGML_ASSERT(head_cur + cell_count <= cells.size());
+         LM_GGML_ASSERT(cells.pos_get(head_cur) == ubatch.pos[0]);
+         LM_GGML_ASSERT(cells.pos_get(head_cur + cell_count - 1) == ubatch.pos[cell_count - 1]);
+         LM_GGML_ASSERT(cells.seq_has(head_cur, dest_seq_id));
+         LM_GGML_ASSERT(cells.seq_has(head_cur + cell_count - 1, dest_seq_id));
+     } else {
+         // whole KV cache restore
+
+         if (cell_count > cells.size()) {
+             LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
+             return false;
+         }
+
+         clear(true);
+
+         for (uint32_t i = 0; i < cell_count; ++i) {
+             llama_pos pos;
+             uint32_t n_seq_id;
+
+             io.read_to(&pos, sizeof(pos));
+             io.read_to(&n_seq_id, sizeof(n_seq_id));
+
+             cells.pos_set(i, pos);
+
+             for (uint32_t j = 0; j < n_seq_id; ++j) {
+                 llama_seq_id seq_id;
+                 io.read_to(&seq_id, sizeof(seq_id));
+
+                 if (seq_id < 0 || (uint32_t) seq_id >= n_seq_max) {
+                     LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, n_seq_max);
+                     return false;
+                 }
+
+                 cells.seq_add(i, seq_id);
+             }
+         }
+
+         head = 0;
+     }
+
+     return true;
+ }
+
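+ // state_read_data validates the stored tensor type, row/element size and (for transposed V)
+ // the GQA embedding size against the current model before copying anything, so an incompatible
+ // snapshot fails cleanly instead of corrupting the cache. The copies land at offset `head`,
+ // which state_read_meta positioned above.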
+ bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count) {
+     auto & cells = v_cells[strm];
+     auto & head = v_heads[strm];
+
+     uint32_t v_trans;
+     uint32_t n_layer;
+
+     io.read_to(&v_trans, sizeof(v_trans));
+     io.read_to(&n_layer, sizeof(n_layer));
+
+     if (n_layer != layers.size()) {
+         LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, (uint32_t) layers.size());
+         return false;
+     }
+
+     if (cell_count > cells.size()) {
+         LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, cells.size());
+         return false;
+     }
+
+     if (this->v_trans != (bool) v_trans) {
+         LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
+         return false;
+     }
+
+     // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
+     for (const auto & layer : layers) {
+         const uint32_t il = layer.il;
+
+         const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
+
+         auto * k = layer.k_stream[strm];
+
+         // Read type of key
+         int32_t k_type_i_ref;
+         io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
+         const int32_t k_type_i = (int32_t) k->type;
+         if (k_type_i != k_type_i_ref) {
+             LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
+             return false;
+         }
+
+         // Read row size of key
+         uint64_t k_size_row_ref;
+         io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
+         const size_t k_size_row = lm_ggml_row_size(k->type, n_embd_k_gqa);
+         if (k_size_row != k_size_row_ref) {
+             LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
+             return false;
+         }
+
+         if (cell_count) {
+             // Read and set the keys for the whole cell range
+             lm_ggml_backend_tensor_set(k, io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
+         }
+     }
+
+     if (!this->v_trans) {
+         for (const auto & layer : layers) {
+             const uint32_t il = layer.il;
+
+             const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
+
+             auto * v = layer.v_stream[strm];
+
+             // Read type of value
+             int32_t v_type_i_ref;
+             io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
+             const int32_t v_type_i = (int32_t) v->type;
+             if (v_type_i != v_type_i_ref) {
+                 LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
+                 return false;
+             }
+
+             // Read row size of value
+             uint64_t v_size_row_ref;
+             io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
+             const size_t v_size_row = lm_ggml_row_size(v->type, n_embd_v_gqa);
+             if (v_size_row != v_size_row_ref) {
+                 LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
+                 return false;
+             }
+
+             if (cell_count) {
+                 // Read and set the values for the whole cell range
+                 lm_ggml_backend_tensor_set(v, io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
+             }
+         }
+     } else {
+         // For each layer, read the values for each cell (transposed)
+         for (const auto & layer : layers) {
+             const uint32_t il = layer.il;
+
+             const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
+
+             auto * v = layer.v_stream[strm];
+
+             // Read type of value
+             int32_t v_type_i_ref;
+             io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
+             const int32_t v_type_i = (int32_t) v->type;
+             if (v_type_i != v_type_i_ref) {
+                 LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
+                 return false;
+             }
+
+             // Read element size of value
+             uint32_t v_size_el_ref;
+             io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
+             const size_t v_size_el = lm_ggml_type_size(v->type);
+             if (v_size_el != v_size_el_ref) {
+                 LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
+                 return false;
+             }
+
+             // Read GQA embedding size
+             uint32_t n_embd_v_gqa_ref;
+             io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
+             if (n_embd_v_gqa != n_embd_v_gqa_ref) {
+                 LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
+                 return false;
+             }
+
+             if (cell_count) {
+                 // For each row in the transposed matrix, read the values for the whole cell range
+                 for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
+                     const size_t dst_offset = (head + j * cells.size()) * v_size_el;
+                     lm_ggml_backend_tensor_set(v, io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
+                 }
+             }
+         }
+     }
+
+     return true;
+ }
+
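+ // llama_kv_cache_context is the per-operation view over the cache: next()/apply() walk the
+ // prepared ubatches and slot infos, while the get_*/cpy_*/set_input_* methods below simply
+ // forward to the owning llama_kv_cache with the current slot info.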
+ //
+ // llama_kv_cache_context
+ //
+
+ llama_kv_cache_context::llama_kv_cache_context(llama_memory_status status) : status(status) {}
+
+ llama_kv_cache_context::llama_kv_cache_context(
+         llama_kv_cache * kv) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv) {
+     n_kv = kv->get_size();
+
+     const uint32_t n_stream = kv->get_n_stream();
+
+     // create a dummy slot info - the actual data is irrelevant. we just need to build the graph
+     sinfos.resize(1);
+     sinfos[0].s0 = 0;
+     sinfos[0].s1 = n_stream - 1;
+     sinfos[0].idxs.resize(n_stream);
+     for (uint32_t s = 0; s < n_stream; ++s) {
+         sinfos[0].strm.push_back(s);
+         sinfos[0].idxs[s].resize(1, 0);
+     }
+ }
+
+ llama_kv_cache_context::llama_kv_cache_context(
+         llama_kv_cache * kv,
+         llama_context * lctx,
+         bool do_shift,
+         stream_copy_info sc_info) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), lctx(lctx), do_shift(do_shift), sc_info(std::move(sc_info)) {
+     if (!do_shift && this->sc_info.empty()) {
+         status = LLAMA_MEMORY_STATUS_NO_UPDATE;
+     }
+ }
+
+ llama_kv_cache_context::llama_kv_cache_context(
+         llama_kv_cache * kv,
+         llama_kv_cache::slot_info_vec_t sinfos,
+         std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), sinfos(std::move(sinfos)), ubatches(std::move(ubatches)) {
+ }
+
+ llama_kv_cache_context::~llama_kv_cache_context() = default;
+
+ bool llama_kv_cache_context::next() {
+     assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+     if (++i_cur >= ubatches.size()) {
+         return false;
+     }
+
+     return true;
+ }
+
+ bool llama_kv_cache_context::apply() {
+     assert(!llama_memory_status_is_fail(status));
+
+     // no ubatches -> this is a KV cache update
+     if (ubatches.empty()) {
+         kv->update(lctx, do_shift, sc_info);
+
+         return true;
+     }
+
+     kv->apply_ubatch(sinfos[i_cur], ubatches[i_cur]);
+
+     n_kv = kv->get_n_kv();
+
+     return true;
+ }
+
+ llama_memory_status llama_kv_cache_context::get_status() const {
+     return status;
+ }
+
+ const llama_ubatch & llama_kv_cache_context::get_ubatch() const {
+     assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+     return ubatches[i_cur];
+ }
+
+ uint32_t llama_kv_cache_context::get_n_kv() const {
+     return n_kv;
+ }
+
+ bool llama_kv_cache_context::get_supports_set_rows() const {
+     return kv->get_supports_set_rows();
+ }
+
+ lm_ggml_tensor * llama_kv_cache_context::get_k(lm_ggml_context * ctx, int32_t il) const {
+     return kv->get_k(ctx, il, n_kv, sinfos[i_cur]);
+ }
+
+ lm_ggml_tensor * llama_kv_cache_context::get_v(lm_ggml_context * ctx, int32_t il) const {
+     return kv->get_v(ctx, il, n_kv, sinfos[i_cur]);
+ }
+
+ lm_ggml_tensor * llama_kv_cache_context::cpy_k(lm_ggml_context * ctx, lm_ggml_tensor * k_cur, lm_ggml_tensor * k_idxs, int32_t il) const {
+     return kv->cpy_k(ctx, k_cur, k_idxs, il, sinfos[i_cur]);
+ }
+
+ lm_ggml_tensor * llama_kv_cache_context::cpy_v(lm_ggml_context * ctx, lm_ggml_tensor * v_cur, lm_ggml_tensor * v_idxs, int32_t il) const {
+     return kv->cpy_v(ctx, v_cur, v_idxs, il, sinfos[i_cur]);
+ }
+
+ lm_ggml_tensor * llama_kv_cache_context::build_input_k_idxs(lm_ggml_context * ctx, const llama_ubatch & ubatch) const {
+     return kv->build_input_k_idxs(ctx, ubatch);
+ }
+
+ lm_ggml_tensor * llama_kv_cache_context::build_input_v_idxs(lm_ggml_context * ctx, const llama_ubatch & ubatch) const {
+     return kv->build_input_v_idxs(ctx, ubatch);
+ }
+
+ void llama_kv_cache_context::set_input_k_shift(lm_ggml_tensor * dst) const {
+     kv->set_input_k_shift(dst);
+ }
+
+ void llama_kv_cache_context::set_input_k_idxs(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const {
+     kv->set_input_k_idxs(dst, ubatch, sinfos[i_cur]);
+ }
+
+ void llama_kv_cache_context::set_input_v_idxs(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const {
+     kv->set_input_v_idxs(dst, ubatch, sinfos[i_cur]);
+ }
+
+ void llama_kv_cache_context::set_input_kq_mask(lm_ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
+     kv->set_input_kq_mask(dst, ubatch, causal_attn);
+ }
+
+ void llama_kv_cache_context::set_input_pos_bucket(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const {
+     kv->set_input_pos_bucket(dst, ubatch);
+ }
+
+ uint32_t llama_kv_cache::get_padding(const llama_cparams & cparams) {
+     // the FA kernels require padding to avoid extra runtime boundary checks
+     return cparams.flash_attn ? 256u : 32u;
+ }
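+ // Round-trip sketch (assumption, not part of this file): state_write()/state_read() are typically
+ // reached through the public llama.h sequence-state API, roughly:
+ //   std::vector<uint8_t> buf(llama_state_seq_get_size(ctx, seq_id));
+ //   llama_state_seq_get_data(ctx, buf.data(), buf.size(), seq_id);   // serializes via state_write()
+ //   llama_state_seq_set_data(ctx, buf.data(), buf.size(), seq_id);   // restores via state_read()
+ // Exact entry points may differ in this package; treat the calls above as illustrative only.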