@fugood/llama.node 0.3.14 → 0.3.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/package.json +1 -1
- package/src/llama.cpp/.github/workflows/build.yml +30 -1
- package/src/llama.cpp/CMakeLists.txt +9 -1
- package/src/llama.cpp/cmake/common.cmake +2 -0
- package/src/llama.cpp/common/arg.cpp +20 -2
- package/src/llama.cpp/common/common.cpp +6 -3
- package/src/llama.cpp/common/speculative.cpp +4 -4
- package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +2 -2
- package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +1 -1
- package/src/llama.cpp/examples/embedding/embedding.cpp +1 -1
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +2 -2
- package/src/llama.cpp/examples/imatrix/imatrix.cpp +1 -1
- package/src/llama.cpp/examples/infill/infill.cpp +2 -2
- package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +2 -2
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +4 -4
- package/src/llama.cpp/examples/llava/gemma3-cli.cpp +1 -1
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +6 -6
- package/src/llama.cpp/examples/lookup/lookup.cpp +1 -1
- package/src/llama.cpp/examples/main/main.cpp +6 -6
- package/src/llama.cpp/examples/parallel/parallel.cpp +5 -5
- package/src/llama.cpp/examples/passkey/passkey.cpp +14 -14
- package/src/llama.cpp/examples/perplexity/perplexity.cpp +6 -6
- package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +2 -2
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +1 -1
- package/src/llama.cpp/examples/run/run.cpp +91 -46
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +2 -2
- package/src/llama.cpp/examples/server/server.cpp +37 -15
- package/src/llama.cpp/examples/server/utils.hpp +3 -1
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +2 -2
- package/src/llama.cpp/examples/speculative/speculative.cpp +14 -14
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +1 -1
- package/src/llama.cpp/examples/tts/tts.cpp +20 -9
- package/src/llama.cpp/ggml/CMakeLists.txt +1 -0
- package/src/llama.cpp/ggml/cmake/common.cmake +26 -0
- package/src/llama.cpp/ggml/include/ggml.h +24 -0
- package/src/llama.cpp/ggml/src/CMakeLists.txt +10 -28
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +6 -2
- package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -5
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +15 -7
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +1493 -12
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +150 -1
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +284 -29
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +2 -1
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +3 -1
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +7 -0
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -4
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +95 -22
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +35 -12
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +93 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +12 -13
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +40 -40
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +12 -43
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +1 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +109 -40
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +19 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +114 -6
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +6 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +305 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +10 -0
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +398 -158
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -4
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +7 -2
- package/src/llama.cpp/ggml/src/ggml.c +85 -2
- package/src/llama.cpp/include/llama.h +86 -22
- package/src/llama.cpp/src/CMakeLists.txt +5 -2
- package/src/llama.cpp/src/llama-adapter.cpp +19 -20
- package/src/llama.cpp/src/llama-adapter.h +11 -9
- package/src/llama.cpp/src/llama-arch.cpp +103 -16
- package/src/llama.cpp/src/llama-arch.h +18 -0
- package/src/llama.cpp/src/llama-batch.h +2 -2
- package/src/llama.cpp/src/llama-context.cpp +2253 -1222
- package/src/llama.cpp/src/llama-context.h +214 -77
- package/src/llama.cpp/src/llama-cparams.h +1 -0
- package/src/llama.cpp/src/llama-graph.cpp +1662 -0
- package/src/llama.cpp/src/llama-graph.h +574 -0
- package/src/llama.cpp/src/llama-hparams.cpp +8 -0
- package/src/llama.cpp/src/llama-hparams.h +9 -0
- package/src/llama.cpp/src/llama-io.cpp +15 -0
- package/src/llama.cpp/src/llama-io.h +35 -0
- package/src/llama.cpp/src/llama-kv-cache.cpp +1006 -291
- package/src/llama.cpp/src/llama-kv-cache.h +178 -110
- package/src/llama.cpp/src/llama-memory.cpp +1 -0
- package/src/llama.cpp/src/llama-memory.h +21 -0
- package/src/llama.cpp/src/llama-model.cpp +8244 -173
- package/src/llama.cpp/src/llama-model.h +34 -1
- package/src/llama.cpp/src/llama-quant.cpp +10 -1
- package/src/llama.cpp/src/llama.cpp +51 -9984
- package/src/llama.cpp/tests/test-backend-ops.cpp +145 -23
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.cpp +0 -143
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.hpp +0 -9
package/src/llama.cpp/src/llama-kv-cache.h

@@ -1,12 +1,29 @@
 #pragma once
 
 #include "llama.h"
+#include "llama-io.h"
+#include "llama-memory.h"
 
 #include "ggml-cpp.h"
 
+#include <functional>
 #include <set>
 #include <vector>
-
+
+struct llama_cparams;
+struct llama_hparams;
+struct llama_ubatch;
+
+struct llama_kv_cache : public llama_memory_i {
+    using llama_memory_i::llama_memory_i;
+
+    virtual int32_t get_n_tokens() const = 0;
+    virtual uint32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
+
+    virtual bool get_can_shift() const = 0;
+
+    bool get_can_edit() const override { return get_can_shift(); }
+};
 
 struct llama_kv_cell {
     llama_pos pos = -1;
@@ -29,55 +46,6 @@ struct llama_kv_cell {
     }
 };
 
-// ring-buffer of cached KV data
-struct llama_kv_cache {
-    bool has_shift = false;
-    bool do_defrag = false;
-    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
-    bool v_trans   = true;  // the value tensor is transposed
-    bool can_shift = false;
-
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
-
-    // computed before each graph build
-    uint32_t n = 0;
-
-    ggml_type type_k = GGML_TYPE_F16;
-    ggml_type type_v = GGML_TYPE_F16;
-
-    std::vector<llama_kv_cell> cells;
-
-    std::vector<struct ggml_tensor *> k_l; // per layer
-    std::vector<struct ggml_tensor *> v_l;
-
-    std::vector<ggml_context_ptr>        ctxs;
-    std::vector<ggml_backend_buffer_ptr> bufs;
-
-    size_t total_size() const {
-        size_t size = 0;
-        for (const auto & buf : bufs) {
-            size += ggml_backend_buffer_get_size(buf.get());
-        }
-
-        return size;
-    }
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos max_pos() const {
-        llama_pos max_pos = -1;
-        for (const auto & cell : cells) {
-            max_pos = std::max(max_pos, cell.pos);
-        }
-
-        return max_pos;
-    }
-};
-
 // a structure holds information about the slot found in llama_kv_cache_find_slot
 struct llama_kv_cache_slot_info {
     std::pair<uint32_t, uint32_t> boundaries; // slot boundaries [begin, end)
@@ -89,82 +57,131 @@ struct llama_kv_cache_slot_info {
     operator bool() const { return found; }
 };
 
-//
-// kv cache helpers
-//
-bool llama_kv_cache_init(
-        struct llama_kv_cache & cache,
-            const llama_model & model,
+// ring-buffer of cached KV data
+// TODO: pimpl
+// TODO: add notion of max sequences
+class llama_kv_cache_unified : public llama_kv_cache {
+public:
+    // can be used to query data from the model if needed
+    struct callbacks {
+        std::function<ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
+    };
+
+    llama_kv_cache_unified(
+            const llama_hparams & hparams,
+            callbacks cbs);
+
+    virtual ~llama_kv_cache_unified() = default;
+
+    // TODO: become constructor
+    bool init(
+            const llama_model & model, // TODO: do not reference the model
           const llama_cparams & cparams,
                     ggml_type   type_k,
                     ggml_type   type_v,
                      uint32_t   kv_size,
                          bool   offload);
 
-// find an empty slot of size "n_tokens" in the cache
-// updates the cache head
-// returns a structure holding information about the slot found
-// Note: On success, it's important that cache.head points
-// to the first cell of the slot.
-struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
-           struct llama_kv_cache & cache,
-       const struct llama_ubatch & batch);
+    int32_t get_n_tokens() const override;
+    uint32_t get_used_cells() const override;
 
-// find how many cells are currently in use
-uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache);
+    size_t total_size() const;
 
-void llama_kv_cache_clear(struct llama_kv_cache & cache);
+    // TODO: better data structures to reduce the cost of this operation
+    llama_pos pos_max() const;
 
-bool llama_kv_cache_seq_rm(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1);
+    void clear() override;
+    void defrag() override;
 
-void llama_kv_cache_seq_cp(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id_src,
-                 llama_seq_id   seq_id_dst,
-                    llama_pos   p0,
-                    llama_pos   p1);
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
-void llama_kv_cache_seq_keep(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id);
+    llama_pos seq_pos_max(llama_seq_id seq_id) override;
 
-void llama_kv_cache_seq_add(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1,
-                    llama_pos   delta);
+    bool get_can_shift() const override;
 
-void llama_kv_cache_seq_div(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1,
-                          int   d);
+    // find an empty slot of size "n_tokens" in the cache
+    // updates the cache head
+    // returns a structure holding information about the slot found
+    // Note: On success, it's important that cache.head points
+    // to the first cell of the slot.
+    llama_kv_cache_slot_info find_slot(const llama_ubatch & batch);
 
-llama_pos llama_kv_cache_seq_pos_max(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id);
+    // TODO: maybe not needed
+    uint32_t get_padding(const llama_cparams & cparams) const;
 
-void llama_kv_cache_defrag(struct llama_kv_cache & cache);
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
 
-int32_t llama_kv_cache_n_tokens(const struct llama_kv_cache & kv);
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
 
-int32_t llama_kv_cache_used_cells(const struct llama_kv_cache & kv);
+    // defrag
 
-bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv);
+    struct {
+        std::vector<uint32_t> ids;
+    } defrag_info;
 
-//
-// kv cache view
-//
+    // return true if cells have been moved
+    bool defrag_prepare(int32_t n_max_nodes);
+
+    // state save/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1);
 
-struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max);
+    // members
 
-void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv);
+    const llama_hparams & hparams;
+
+    callbacks cbs;
+
+    bool has_shift = false;
+    bool do_defrag = false;
+
+    // TODO: remove this and implement llama_kv_cache_recurrent instead
+    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
+
+    bool v_trans   = true;  // the value tensor is transposed
+    bool can_shift = false;
+
+    // Note: The value of head isn't only used to optimize searching
+    // for a free KV slot. llama_decode_impl also uses it, so it
+    // cannot be freely changed after a slot has been allocated.
+    uint32_t head = 0;
+    uint32_t size = 0;
+    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    std::vector<llama_kv_cell> cells;
+
+    std::vector<ggml_tensor *> k_l; // per layer
+    std::vector<ggml_tensor *> v_l;
+
+private:
+    ggml_type type_k = GGML_TYPE_F16;
+    ggml_type type_v = GGML_TYPE_F16;
+
+    std::vector<ggml_context_ptr>        ctxs;
+    std::vector<ggml_backend_buffer_ptr> bufs;
+
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
+
+// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
+//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
+//public:
+//    using llama_kv_cache_unified::llama_kv_cache_unified;
+//};
 
 //
 // kv cache restore
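The hunk above replaces the old plain struct plus free-function API with a small hierarchy: an abstract llama_kv_cache (now deriving from the new llama_memory_i interface) and llama_kv_cache_unified as the concrete ring-buffer implementation. A minimal sketch of what a migrated call site could look like, built only from the declarations above; hparams, model, and cparams here are placeholder objects, not part of this diff:

    // construct the unified cache and initialize its buffers
    llama_kv_cache_unified kv(hparams, { /*get_rope_factors =*/ nullptr });
    kv.init(model, cparams, GGML_TYPE_F16, GGML_TYPE_F16, /*kv_size =*/ 4096, /*offload =*/ true);

    // what used to be llama_kv_cache_seq_rm(cache, seq_id, p0, p1) is now a member call:
    kv.seq_rm(/*seq_id =*/ 0, /*p0 =*/ 128, /*p1 =*/ -1);

    // the same object is also usable through the abstract interface:
    llama_kv_cache * mem = &kv;
    llama_pos p_max = mem->seq_pos_max(0);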
@@ -184,13 +201,15 @@ struct llama_kv_slot_restorer {
 
     bool do_restore = false;
 
-    explicit llama_kv_slot_restorer(const struct llama_kv_cache & cache) {
+    llama_kv_cache_unified & cache;
+
+    explicit llama_kv_slot_restorer(llama_kv_cache_unified & cache) : cache(cache) {
         old_state.head = cache.head;
         old_state.n = cache.n;
     }
 
     // saves a slot information for future restoration
-    void save(const struct llama_kv_cache_slot_info & slot) {
+    void save(const llama_kv_cache_slot_info & slot) {
         if (slot) {
             do_restore = true;
             if (slot.boundaries.first != slot.boundaries.second) {
@@ -201,19 +220,68 @@ struct llama_kv_slot_restorer {
 
     // must be explicitly called to restore the kv_cache state
    // and rollback changes from all llama_kv_cache_find_slot calls
-    void restore(struct llama_kv_cache & cache) {
+    void restore() {
         if (do_restore) {
             cache.head = old_state.head;
             cache.n = old_state.n;
 
             if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased
-                llama_kv_cache_seq_rm(cache, -1, -1, -1);
+                cache.seq_rm(-1, -1, -1);
             } else {
                 for (auto & slot : slot_boundaries) {
-                    llama_kv_cache_seq_rm(cache, -1, slot.first, slot.second);
+                    cache.seq_rm(-1, slot.first, slot.second);
                 }
             }
         }
     }
 };
 
+// TODO: maybe become part of the public llama_kv_cache in the future
+int32_t llama_kv_cache_n_tokens(const llama_kv_cache * kv);
+
+int32_t llama_kv_cache_used_cells(const llama_kv_cache * kv);
+
+void llama_kv_cache_clear(llama_kv_cache * kv);
+
+bool llama_kv_cache_seq_rm(
+        llama_kv_cache * kv,
+          llama_seq_id   seq_id,
+             llama_pos   p0,
+             llama_pos   p1);
+
+void llama_kv_cache_seq_cp(
+        llama_kv_cache * kv,
+          llama_seq_id   seq_id_src,
+          llama_seq_id   seq_id_dst,
+             llama_pos   p0,
+             llama_pos   p1);
+
+void llama_kv_cache_seq_keep(llama_kv_cache * kv, llama_seq_id seq_id);
+
+void llama_kv_cache_seq_add(
+        llama_kv_cache * kv,
+          llama_seq_id   seq_id,
+             llama_pos   p0,
+             llama_pos   p1,
+             llama_pos   delta);
+
+void llama_kv_cache_seq_div(
+        llama_kv_cache * kv,
+          llama_seq_id   seq_id,
+             llama_pos   p0,
+             llama_pos   p1,
+                   int   d);
+
+llama_pos llama_kv_cache_seq_pos_max(llama_kv_cache * kv, llama_seq_id seq_id);
+
+void llama_kv_cache_defrag(llama_kv_cache * kv);
+
+bool llama_kv_cache_can_shift(const llama_kv_cache * kv);
+
+//
+// kv cache view
+//
+
+llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
+
+void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
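llama_kv_slot_restorer now binds to the cache at construction and rolls back through the member API instead of the removed free functions. A sketch of the save/restore pattern this enables around decoding, assuming a live llama_kv_cache_unified kv and a prepared llama_ubatch ubatch (both placeholders):

    llama_kv_slot_restorer restorer(kv);    // snapshots kv.head and kv.n

    const auto slot = kv.find_slot(ubatch); // llama_kv_cache_slot_info
    if (slot) {
        restorer.save(slot);                // remember the [begin, end) range to undo

        // ... build and compute the graph; if that fails:
        restorer.restore();                 // rewinds head/n and seq_rm()s the saved ranges
    }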
package/src/llama.cpp/src/llama-memory.cpp

@@ -0,0 +1 @@
+#include "llama-memory.h"
package/src/llama.cpp/src/llama-memory.h

@@ -0,0 +1,21 @@
+#pragma once
+
+#include "llama.h"
+
+// general concept of LLM memory
+// the KV cache is a type of LLM memory, but there can be other types
+class llama_memory_i {
+public:
+    virtual void clear() = 0;
+    virtual void defrag() = 0;
+
+    virtual bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_keep(llama_seq_id seq_id) = 0;
+    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
+    virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
+
+    virtual llama_pos seq_pos_max(llama_seq_id seq_id) = 0;
+
+    virtual bool get_can_edit() const = 0;
+};
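The new llama-memory.h holds the abstraction the refactor is built around: the KV cache becomes one implementation of a generic memory interface, so callers can manipulate sequences without knowing the storage layout. A minimal illustrative implementer (hypothetical, not part of the diff) shows the contract a memory type has to satisfy:

    #include "llama-memory.h"

    // a no-op memory: accepts every sequence operation but stores nothing
    class null_memory : public llama_memory_i {
    public:
        void clear()  override {}
        void defrag() override {}

        bool seq_rm  (llama_seq_id, llama_pos, llama_pos) override { return true; }
        void seq_cp  (llama_seq_id, llama_seq_id, llama_pos, llama_pos) override {}
        void seq_keep(llama_seq_id) override {}
        void seq_add (llama_seq_id, llama_pos, llama_pos, llama_pos) override {}
        void seq_div (llama_seq_id, llama_pos, llama_pos, int) override {}

        llama_pos seq_pos_max(llama_seq_id) override { return -1; } // no positions stored

        bool get_can_edit() const override { return false; }
    };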