@fugood/llama.node 1.0.0-beta.5 → 1.0.0-beta.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. package/lib/binding.ts +3 -1
  2. package/lib/index.js +2 -0
  3. package/lib/index.ts +3 -1
  4. package/package.json +14 -14
  5. package/scripts/llama.cpp.patch +27 -26
  6. package/src/EmbeddingWorker.cpp +1 -1
  7. package/src/LlamaCompletionWorker.cpp +28 -7
  8. package/src/LlamaCompletionWorker.h +4 -0
  9. package/src/LlamaContext.cpp +14 -17
  10. package/src/common.hpp +7 -6
  11. package/src/llama.cpp/CMakeLists.txt +15 -4
  12. package/src/llama.cpp/common/CMakeLists.txt +15 -24
  13. package/src/llama.cpp/common/arg.cpp +172 -110
  14. package/src/llama.cpp/common/chat-parser.cpp +385 -0
  15. package/src/llama.cpp/common/chat-parser.h +120 -0
  16. package/src/llama.cpp/common/chat.cpp +726 -596
  17. package/src/llama.cpp/common/chat.h +74 -8
  18. package/src/llama.cpp/common/common.cpp +56 -38
  19. package/src/llama.cpp/common/common.h +9 -3
  20. package/src/llama.cpp/common/json-partial.cpp +256 -0
  21. package/src/llama.cpp/common/json-partial.h +38 -0
  22. package/src/llama.cpp/common/json-schema-to-grammar.cpp +2 -1
  23. package/src/llama.cpp/common/json-schema-to-grammar.h +4 -4
  24. package/src/llama.cpp/common/sampling.cpp +7 -8
  25. package/src/llama.cpp/common/speculative.cpp +6 -4
  26. package/src/llama.cpp/ggml/CMakeLists.txt +48 -3
  27. package/src/llama.cpp/ggml/include/ggml.h +22 -3
  28. package/src/llama.cpp/ggml/src/CMakeLists.txt +81 -22
  29. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +131 -49
  30. package/src/llama.cpp/ggml/src/ggml-cpu/amx/amx.cpp +1 -1
  31. package/src/llama.cpp/ggml/src/ggml-cpu/amx/mmq.cpp +1 -1
  32. package/src/llama.cpp/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  33. package/src/llama.cpp/ggml/src/ggml-cpu/arch/arm/quants.c +4113 -0
  34. package/src/llama.cpp/ggml/src/ggml-cpu/arch/arm/repack.cpp +2162 -0
  35. package/src/llama.cpp/ggml/src/ggml-cpu/arch/loongarch/quants.c +2638 -0
  36. package/src/llama.cpp/ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp +82 -0
  37. package/src/llama.cpp/ggml/src/ggml-cpu/arch/powerpc/quants.c +2731 -0
  38. package/src/llama.cpp/ggml/src/ggml-cpu/arch/riscv/quants.c +2068 -0
  39. package/src/llama.cpp/ggml/src/ggml-cpu/arch/riscv/repack.cpp +396 -0
  40. package/src/llama.cpp/ggml/src/ggml-cpu/arch/s390/quants.c +1299 -0
  41. package/src/llama.cpp/ggml/src/ggml-cpu/arch/wasm/quants.c +1480 -0
  42. package/src/llama.cpp/ggml/src/ggml-cpu/arch/x86/quants.c +4310 -0
  43. package/src/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-aarch64.cpp → arch/x86/repack.cpp} +59 -3206
  44. package/src/llama.cpp/ggml/src/ggml-cpu/arch-fallback.h +184 -0
  45. package/src/llama.cpp/ggml/src/ggml-cpu/common.h +1 -1
  46. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +12 -13
  47. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +64 -88
  48. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +8 -8
  49. package/src/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-hbm.cpp → hbm.cpp} +1 -1
  50. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +1 -1
  51. package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +56 -7
  52. package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.h +5 -0
  53. package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +282 -100
  54. package/src/llama.cpp/ggml/src/ggml-cpu/ops.h +1 -0
  55. package/src/llama.cpp/ggml/src/ggml-cpu/quants.c +1157 -0
  56. package/src/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-quants.h → quants.h} +26 -0
  57. package/src/llama.cpp/ggml/src/ggml-cpu/repack.cpp +1570 -0
  58. package/src/llama.cpp/ggml/src/ggml-cpu/repack.h +98 -0
  59. package/src/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +119 -5
  60. package/src/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-traits.cpp → traits.cpp} +1 -1
  61. package/src/llama.cpp/ggml/src/ggml-cpu/vec.cpp +85 -16
  62. package/src/llama.cpp/ggml/src/ggml-cpu/vec.h +204 -49
  63. package/src/llama.cpp/include/llama.h +145 -40
  64. package/src/llama.cpp/src/CMakeLists.txt +5 -1
  65. package/src/llama.cpp/src/llama-arch.cpp +99 -3
  66. package/src/llama.cpp/src/llama-arch.h +10 -1
  67. package/src/llama.cpp/src/llama-batch.cpp +728 -272
  68. package/src/llama.cpp/src/llama-batch.h +112 -54
  69. package/src/llama.cpp/src/llama-chat.cpp +19 -2
  70. package/src/llama.cpp/src/llama-chat.h +1 -0
  71. package/src/llama.cpp/src/llama-context.cpp +525 -339
  72. package/src/llama.cpp/src/llama-context.h +38 -17
  73. package/src/llama.cpp/src/llama-cparams.cpp +4 -0
  74. package/src/llama.cpp/src/llama-cparams.h +2 -0
  75. package/src/llama.cpp/src/llama-grammar.cpp +12 -2
  76. package/src/llama.cpp/src/llama-graph.cpp +413 -353
  77. package/src/llama.cpp/src/llama-graph.h +112 -56
  78. package/src/llama.cpp/src/llama-hparams.cpp +10 -2
  79. package/src/llama.cpp/src/llama-hparams.h +13 -2
  80. package/src/llama.cpp/src/llama-kv-cache-unified-iswa.cpp +279 -0
  81. package/src/llama.cpp/src/llama-kv-cache-unified-iswa.h +128 -0
  82. package/src/llama.cpp/src/llama-kv-cache-unified.cpp +1815 -0
  83. package/src/llama.cpp/src/llama-kv-cache-unified.h +303 -0
  84. package/src/llama.cpp/src/llama-kv-cells.h +415 -0
  85. package/src/llama.cpp/src/llama-memory-hybrid.cpp +246 -0
  86. package/src/llama.cpp/src/llama-memory-hybrid.h +138 -0
  87. package/src/llama.cpp/src/llama-memory-recurrent.cpp +1112 -0
  88. package/src/llama.cpp/src/llama-memory-recurrent.h +183 -0
  89. package/src/llama.cpp/src/llama-memory.cpp +41 -0
  90. package/src/llama.cpp/src/llama-memory.h +86 -5
  91. package/src/llama.cpp/src/llama-mmap.cpp +1 -1
  92. package/src/llama.cpp/src/llama-model-loader.cpp +42 -17
  93. package/src/llama.cpp/src/llama-model-saver.cpp +1 -0
  94. package/src/llama.cpp/src/llama-model.cpp +1137 -528
  95. package/src/llama.cpp/src/llama-model.h +4 -0
  96. package/src/llama.cpp/src/llama-quant.cpp +2 -1
  97. package/src/llama.cpp/src/llama-sampling.cpp +2 -2
  98. package/src/llama.cpp/src/llama-vocab.cpp +69 -32
  99. package/src/llama.cpp/src/llama-vocab.h +1 -0
  100. package/src/llama.cpp/src/llama.cpp +11 -7
  101. package/src/llama.cpp/src/unicode.cpp +5 -0
  102. package/src/tts_utils.h +1 -1
  103. package/src/llama.cpp/common/json.hpp +0 -24766
  104. package/src/llama.cpp/common/minja/chat-template.hpp +0 -541
  105. package/src/llama.cpp/common/minja/minja.hpp +0 -2974
  106. package/src/llama.cpp/common/stb_image.h +0 -7988
  107. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +0 -8
  108. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +0 -13326
  109. package/src/llama.cpp/src/llama-kv-cache.cpp +0 -2827
  110. package/src/llama.cpp/src/llama-kv-cache.h +0 -515
  111. package/src/llama.cpp/ggml/src/ggml-cpu/{cpu-feats-x86.cpp → arch/x86/cpu-feats.cpp} +0 -0
  112. package/src/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-hbm.h → hbm.h} +0 -0
  113. package/src/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-traits.h → traits.h} +0 -0
@@ -0,0 +1,246 @@
+ #include "llama-memory-hybrid.h"
+
+ #include "llama-impl.h"
+ #include "llama-model.h"
+ #include "llama-context.h"
+
+ //
+ // llama_memory_hybrid
+ //
+
+ llama_memory_hybrid::llama_memory_hybrid(
+     const llama_model & model,
+     /* attn */
+     ggml_type type_k,
+     ggml_type type_v,
+     bool v_trans,
+     uint32_t kv_size,
+     uint32_t n_pad,
+     uint32_t n_swa,
+     llama_swa_type swa_type,
+     /* recurrent */
+     ggml_type type_r,
+     ggml_type type_s,
+     uint32_t rs_size,
+     /* common */
+     uint32_t n_seq_max,
+     bool offload,
+     /* layer filters */
+     layer_filter_cb && filter_attn,
+     layer_filter_cb && filter_recr) :
+     hparams(model.hparams),
+     mem_attn(new llama_kv_cache_unified(
+         model,
+         filter_attn == nullptr ?
+             [&](int32_t il) { return !hparams.is_recurrent(il); }
+             : filter_attn,
+         type_k,
+         type_v,
+         v_trans,
+         offload,
+         kv_size,
+         n_seq_max,
+         n_pad,
+         n_swa,
+         swa_type
+     )),
+     mem_recr(new llama_memory_recurrent(
+         model,
+         filter_recr == nullptr ?
+             [&](int32_t il) { return hparams.is_recurrent(il); }
+             : filter_recr,
+         type_r,
+         type_s,
+         offload,
+         rs_size,
+         n_seq_max
+     )) {}
+
+ llama_memory_context_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
+     do {
+         balloc.split_reset();
+
+         // follow the recurrent pattern for creating the ubatch splits
+         std::vector<llama_ubatch> ubatches;
+
+         while (true) {
+             llama_ubatch ubatch;
+
+             if (embd_all) {
+                 // if all tokens are output, split by sequence
+                 ubatch = balloc.split_seq(n_ubatch);
+             } else {
+                 ubatch = balloc.split_equal(n_ubatch);
+             }
+
+             if (ubatch.n_tokens == 0) {
+                 break;
+             }
+
+             ubatches.push_back(std::move(ubatch)); // NOLINT
+         }
+
+         // prepare the recurrent batches first
+         if (!mem_recr->prepare(ubatches)) {
+             // TODO: will the recurrent cache be in an undefined context at this point?
+             LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__);
+             return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+         }
+
+         // prepare the attention cache
+         auto heads_attn = mem_attn->prepare(ubatches);
+         if (heads_attn.empty()) {
+             LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__);
+             return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+         }
+
+         return std::make_unique<llama_memory_hybrid_context>(
+             this, std::move(heads_attn), std::move(ubatches));
+     } while(false);
+
+     return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+ }
+
+ llama_memory_context_ptr llama_memory_hybrid::init_full() {
+     return std::make_unique<llama_memory_hybrid_context>(this);
+ }
+
+ llama_memory_context_ptr llama_memory_hybrid::init_update(llama_context * lctx, bool optimize) {
+     return std::make_unique<llama_memory_hybrid_context>(this, lctx, optimize);
+ }
+
+ bool llama_memory_hybrid::get_can_shift() const {
+     // Shifting is trivially supported for recurrent
+     return mem_attn->get_can_shift();
+ }
+
+ void llama_memory_hybrid::clear(bool data) {
+     mem_attn->clear(data);
+     mem_recr->clear(data);
+ }
+
+ bool llama_memory_hybrid::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
+     // Try removing from the recurrent cache first since it may fail. If it does
+     // fail, the cache will not have been mutated.
+     if (!mem_recr->seq_rm(seq_id, p0, p1)) {
+         return false;
+     }
+     return mem_attn->seq_rm(seq_id, p0, p1);
+ }
+
+ void llama_memory_hybrid::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
+     mem_attn->seq_cp(seq_id_src, seq_id_dst, p0, p1);
+     mem_recr->seq_cp(seq_id_src, seq_id_dst, p0, p1);
+ }
+
+ void llama_memory_hybrid::seq_keep(llama_seq_id seq_id) {
+     mem_attn->seq_keep(seq_id);
+     mem_recr->seq_keep(seq_id);
+ }
+
+ void llama_memory_hybrid::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
+     mem_attn->seq_add(seq_id, p0, p1, shift);
+     mem_recr->seq_add(seq_id, p0, p1, shift);
+ }
+
+ void llama_memory_hybrid::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
+     mem_attn->seq_div(seq_id, p0, p1, d);
+     mem_recr->seq_div(seq_id, p0, p1, d);
+ }
+
+ llama_pos llama_memory_hybrid::seq_pos_min(llama_seq_id seq_id) const {
+     // the min of the total cache is the max of the two caches' min values
+     return std::max(mem_attn->seq_pos_min(seq_id), mem_recr->seq_pos_min(seq_id));
+ }
+
+ llama_pos llama_memory_hybrid::seq_pos_max(llama_seq_id seq_id) const {
+     // the max of the total cache is the min of the two caches' max values
+     return std::min(mem_attn->seq_pos_max(seq_id), mem_recr->seq_pos_max(seq_id));
+ }
+
+ void llama_memory_hybrid::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
+     mem_attn->state_write(io, seq_id);
+     mem_recr->state_write(io, seq_id);
+ }
+
+ void llama_memory_hybrid::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
+     mem_attn->state_read(io, seq_id);
+     mem_recr->state_read(io, seq_id);
+ }
+
+ llama_kv_cache_unified * llama_memory_hybrid::get_mem_attn() const {
+     return mem_attn.get();
+ }
+
+ llama_memory_recurrent * llama_memory_hybrid::get_mem_recr() const {
+     return mem_recr.get();
+ }
+
+ llama_memory_hybrid_context::llama_memory_hybrid_context(llama_memory_status status) : status(status) {}
+
+ llama_memory_hybrid_context::llama_memory_hybrid_context(llama_memory_hybrid * mem) :
+     ctx_attn(mem->get_mem_attn()->init_full()),
+     ctx_recr(mem->get_mem_recr()->init_full()),
+     status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) {
+ }
+
+ llama_memory_hybrid_context::llama_memory_hybrid_context(
+     llama_memory_hybrid * mem,
+     llama_context * lctx,
+     bool optimize) :
+     ctx_attn(mem->get_mem_attn()->init_update(lctx, optimize)),
+     ctx_recr(mem->get_mem_recr()->init_update(lctx, optimize)),
+     status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) {
+ }
+
+ llama_memory_hybrid_context::llama_memory_hybrid_context(
+     llama_memory_hybrid * mem,
+     std::vector<uint32_t> heads_attn,
+     std::vector<llama_ubatch> ubatches) :
+     ubatches(std::move(ubatches)),
+     // note: here we copy the ubatches. not sure if this is ideal
+     ctx_attn(new llama_kv_cache_unified_context(mem->get_mem_attn(), std::move(heads_attn), this->ubatches)),
+     ctx_recr(new llama_memory_recurrent_context(mem->get_mem_recr(), this->ubatches)),
+     status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) {
+ }
+
+ bool llama_memory_hybrid_context::next() {
+     assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+     ctx_attn->next();
+     ctx_recr->next();
+
+     if (++i_next >= ubatches.size()) {
+         return false;
+     }
+
+     return true;
+ }
+
+ bool llama_memory_hybrid_context::apply() {
+     assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+     bool res = true;
+
+     res = res & ctx_attn->apply();
+     res = res & ctx_recr->apply();
+
+     return res;
+ }
+
+ llama_memory_status llama_memory_hybrid_context::get_status() const {
+     return status;
+ }
+
+ const llama_ubatch & llama_memory_hybrid_context::get_ubatch() const {
+     assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+     return ubatches[i_next];
+ }
+
+ const llama_kv_cache_unified_context * llama_memory_hybrid_context::get_attn() const {
+     return static_cast<const llama_kv_cache_unified_context *>(ctx_attn.get());
+ }
+
+ const llama_memory_recurrent_context * llama_memory_hybrid_context::get_recr() const {
+     return static_cast<const llama_memory_recurrent_context *>(ctx_recr.get());
+ }
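
Note: the position-range logic in seq_pos_min/seq_pos_max above (max of the mins, min of the maxes) amounts to intersecting the ranges held by the two child caches. A minimal, self-contained sketch of that reasoning, using hypothetical stand-in types rather than the real llama.cpp classes:

// Sketch only (toy_cache / toy_hybrid are invented for illustration): the hybrid
// cache can only serve positions that *both* children still hold, so its range is
// the intersection of theirs, i.e. max-of-mins and min-of-maxes.
#include <algorithm>
#include <cassert>
#include <cstdint>

using llama_pos = int32_t;

struct toy_cache {
    llama_pos pos_min;
    llama_pos pos_max;
};

struct toy_hybrid {
    toy_cache attn;
    toy_cache recr;

    llama_pos seq_pos_min() const { return std::max(attn.pos_min, recr.pos_min); }
    llama_pos seq_pos_max() const { return std::min(attn.pos_max, recr.pos_max); }
};

int main() {
    // attention layers hold positions [0, 100], recurrent layers [10, 80]:
    // only [10, 80] is present in both, so that is what the hybrid reports.
    toy_hybrid mem = { /*attn=*/{0, 100}, /*recr=*/{10, 80} };
    assert(mem.seq_pos_min() == 10);
    assert(mem.seq_pos_max() == 80);
    return 0;
}
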
@@ -0,0 +1,138 @@
+ #pragma once
+
+ #include "llama-batch.h"
+ #include "llama-graph.h"
+ #include "llama-kv-cache-unified.h"
+ #include "llama-memory.h"
+ #include "llama-memory-recurrent.h"
+
+ #include <memory>
+ #include <vector>
+
+ //
+ // llama_memory_hybrid
+ //
+
+ // utilizes instances of llama_memory_recurrent and llama_kv_cache_unified to
+ // support models where each layer may be either attention-based or recurrent
+
+ class llama_memory_hybrid : public llama_memory_i {
+ public:
+
+     // this callback is used to filter out layers that should not be included in the cache
+     using layer_filter_cb = std::function<bool(int32_t il)>;
+
+     llama_memory_hybrid(
+         const llama_model & model,
+         /* attn */
+         ggml_type type_k,
+         ggml_type type_v,
+         bool v_trans,
+         uint32_t kv_size,
+         uint32_t n_pad,
+         uint32_t n_swa,
+         llama_swa_type swa_type,
+         /* recurrent */
+         ggml_type type_r,
+         ggml_type type_s,
+         uint32_t rs_size,
+         /* common */
+         uint32_t n_seq_max,
+         bool offload,
+         /* layer filters */
+         layer_filter_cb && filter_attn = nullptr,
+         layer_filter_cb && filter_recr = nullptr);
+
+     ~llama_memory_hybrid() = default;
+
+     //
+     // llama_memory_i
+     //
+
+     llama_memory_context_ptr init_batch(
+         llama_batch_allocr & balloc,
+         uint32_t n_ubatch,
+         bool embd_all) override;
+
+     llama_memory_context_ptr init_full() override;
+
+     llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;
+
+     bool get_can_shift() const override;
+
+     void clear(bool data) override;
+
+     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+     void seq_keep(llama_seq_id seq_id) override;
+     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+     llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+     // state write/load
+
+     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+     void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+     //
+     // llama_memory_hybrid specific API
+     //
+
+     llama_kv_cache_unified * get_mem_attn() const;
+     llama_memory_recurrent * get_mem_recr() const;
+
+ private:
+     const llama_hparams & hparams;
+
+     const std::unique_ptr<llama_kv_cache_unified> mem_attn;
+     const std::unique_ptr<llama_memory_recurrent> mem_recr;
+ };
+
+ class llama_memory_hybrid_context : public llama_memory_context_i {
+ public:
+     // init failure
+     explicit llama_memory_hybrid_context(llama_memory_status status);
+
+     // init full
+     explicit llama_memory_hybrid_context(llama_memory_hybrid * mem);
+
+     // init update
+     explicit llama_memory_hybrid_context(
+         llama_memory_hybrid * mem,
+         llama_context * lctx,
+         bool optimize);
+
+     // init success
+     llama_memory_hybrid_context(
+         llama_memory_hybrid * mem,
+         std::vector<uint32_t> heads_attn,
+         std::vector<llama_ubatch> ubatches);
+
+     ~llama_memory_hybrid_context() = default;
+
+     bool next() override;
+     bool apply() override;
+
+     llama_memory_status get_status() const override;
+     const llama_ubatch & get_ubatch() const override;
+
+     //
+     // llama_memory_hybrid_context
+     //
+
+     const llama_kv_cache_unified_context * get_attn() const;
+     const llama_memory_recurrent_context * get_recr() const;
+
+ private:
+     // the index of the next ubatch to process
+     size_t i_next = 0;
+
+     std::vector<llama_ubatch> ubatches;
+
+     const llama_memory_context_ptr ctx_attn;
+     const llama_memory_context_ptr ctx_recr;
+
+     const llama_memory_status status;
+ };
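
Note: the default layer filters in the constructor above are complementary predicates over is_recurrent(il), so every layer is routed to exactly one of the two child caches (attention layers to the unified KV cache, recurrent layers to the recurrent cache). A self-contained sketch of that partitioning, using hypothetical stand-ins rather than the real llama.cpp model types:

// Sketch only (the is_recurrent vector stands in for llama_hparams::is_recurrent):
// two complementary layer_filter_cb predicates split the layers between caches.
#include <cstdint>
#include <cstdio>
#include <functional>
#include <vector>

using layer_filter_cb = std::function<bool(int32_t il)>;

int main() {
    // pretend layers 1 and 3 are recurrent (e.g. an attention/SSM hybrid model)
    std::vector<bool> is_recurrent = { false, true, false, true, false };

    layer_filter_cb filter_attn = [&](int32_t il) { return !is_recurrent[il]; };
    layer_filter_cb filter_recr = [&](int32_t il) { return  is_recurrent[il]; };

    for (int32_t il = 0; il < (int32_t) is_recurrent.size(); ++il) {
        // each layer satisfies exactly one of the two predicates
        std::printf("layer %d -> %s cache\n", (int) il,
                    filter_attn(il) ? "attention (unified KV)" : "recurrent");
    }
    return 0;
}
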