@fugood/llama.node 0.3.17 → 0.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +3 -1
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/lib/binding.ts +39 -2
- package/lib/index.js +132 -1
- package/lib/index.ts +203 -3
- package/package.json +2 -1
- package/src/EmbeddingWorker.cpp +1 -1
- package/src/LlamaCompletionWorker.cpp +366 -19
- package/src/LlamaCompletionWorker.h +30 -10
- package/src/LlamaContext.cpp +213 -5
- package/src/LlamaContext.h +12 -0
- package/src/common.hpp +15 -0
- package/src/llama.cpp/.github/workflows/build-linux-cross.yml +133 -24
- package/src/llama.cpp/.github/workflows/build.yml +41 -762
- package/src/llama.cpp/.github/workflows/docker.yml +5 -2
- package/src/llama.cpp/.github/workflows/release.yml +716 -0
- package/src/llama.cpp/.github/workflows/server.yml +12 -12
- package/src/llama.cpp/CMakeLists.txt +5 -17
- package/src/llama.cpp/cmake/build-info.cmake +8 -2
- package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -6
- package/src/llama.cpp/common/CMakeLists.txt +31 -3
- package/src/llama.cpp/common/arg.cpp +48 -29
- package/src/llama.cpp/common/chat.cpp +128 -106
- package/src/llama.cpp/common/chat.h +2 -0
- package/src/llama.cpp/common/common.cpp +37 -1
- package/src/llama.cpp/common/common.h +18 -9
- package/src/llama.cpp/common/llguidance.cpp +1 -0
- package/src/llama.cpp/common/minja/chat-template.hpp +9 -5
- package/src/llama.cpp/common/minja/minja.hpp +69 -36
- package/src/llama.cpp/common/regex-partial.cpp +204 -0
- package/src/llama.cpp/common/regex-partial.h +56 -0
- package/src/llama.cpp/common/sampling.cpp +57 -50
- package/src/llama.cpp/examples/CMakeLists.txt +2 -23
- package/src/llama.cpp/examples/embedding/embedding.cpp +2 -11
- package/src/llama.cpp/examples/parallel/parallel.cpp +86 -14
- package/src/llama.cpp/examples/training/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/training/finetune.cpp +96 -0
- package/src/llama.cpp/ggml/CMakeLists.txt +27 -0
- package/src/llama.cpp/ggml/include/ggml-backend.h +4 -4
- package/src/llama.cpp/ggml/include/ggml-cpp.h +1 -1
- package/src/llama.cpp/ggml/include/ggml-opt.h +47 -28
- package/src/llama.cpp/ggml/include/ggml.h +10 -7
- package/src/llama.cpp/ggml/src/CMakeLists.txt +1 -1
- package/src/llama.cpp/ggml/src/ggml-alloc.c +4 -1
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +9 -5
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +20 -13
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +0 -2
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +306 -6
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +4 -13
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +29 -16
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +88 -5
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +47 -12
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +264 -69
- package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +501 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +0 -13
- package/src/llama.cpp/ggml/src/ggml-cpu/vec.cpp +0 -6
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +23 -4
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +36 -11
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -2
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +368 -190
- package/src/llama.cpp/ggml/src/ggml-quants.c +0 -6
- package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +41 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +29 -23
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +9 -8
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +121 -232
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +7 -15
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +72 -25
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +14 -7
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +59 -21
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +7 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -23
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +37 -8
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +338 -166
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +185 -89
- package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +83 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +128 -53
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +81 -70
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +657 -193
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +20 -0
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +123 -29
- package/src/llama.cpp/ggml/src/ggml.c +29 -20
- package/src/llama.cpp/ggml/src/gguf.cpp +33 -33
- package/src/llama.cpp/include/llama.h +52 -11
- package/src/llama.cpp/requirements/requirements-all.txt +3 -3
- package/src/llama.cpp/scripts/xxd.cmake +1 -1
- package/src/llama.cpp/src/CMakeLists.txt +1 -0
- package/src/llama.cpp/src/llama-adapter.cpp +6 -0
- package/src/llama.cpp/src/llama-arch.cpp +3 -0
- package/src/llama.cpp/src/llama-batch.cpp +5 -1
- package/src/llama.cpp/src/llama-batch.h +2 -1
- package/src/llama.cpp/src/llama-chat.cpp +17 -7
- package/src/llama.cpp/src/llama-chat.h +1 -0
- package/src/llama.cpp/src/llama-context.cpp +389 -501
- package/src/llama.cpp/src/llama-context.h +44 -32
- package/src/llama.cpp/src/llama-cparams.h +1 -0
- package/src/llama.cpp/src/llama-graph.cpp +20 -38
- package/src/llama.cpp/src/llama-graph.h +12 -8
- package/src/llama.cpp/src/llama-kv-cache.cpp +1503 -389
- package/src/llama.cpp/src/llama-kv-cache.h +271 -85
- package/src/llama.cpp/src/llama-memory.h +11 -1
- package/src/llama.cpp/src/llama-model-loader.cpp +24 -15
- package/src/llama.cpp/src/llama-model-saver.cpp +281 -0
- package/src/llama.cpp/src/llama-model-saver.h +37 -0
- package/src/llama.cpp/src/llama-model.cpp +316 -69
- package/src/llama.cpp/src/llama-model.h +8 -1
- package/src/llama.cpp/src/llama-quant.cpp +15 -13
- package/src/llama.cpp/src/llama-sampling.cpp +18 -6
- package/src/llama.cpp/src/llama-vocab.cpp +42 -4
- package/src/llama.cpp/src/llama-vocab.h +6 -0
- package/src/llama.cpp/src/llama.cpp +14 -0
- package/src/llama.cpp/tests/CMakeLists.txt +10 -2
- package/src/llama.cpp/tests/test-backend-ops.cpp +107 -47
- package/src/llama.cpp/tests/test-chat-template.cpp +10 -11
- package/src/llama.cpp/tests/test-chat.cpp +3 -1
- package/src/llama.cpp/tests/test-mtmd-c-api.c +63 -0
- package/src/llama.cpp/tests/test-opt.cpp +33 -21
- package/src/llama.cpp/tests/test-regex-partial.cpp +288 -0
- package/src/llama.cpp/tests/test-sampling.cpp +1 -1
- package/src/llama.cpp/tools/CMakeLists.txt +39 -0
- package/src/llama.cpp/{examples → tools}/batched-bench/batched-bench.cpp +2 -2
- package/src/llama.cpp/{examples → tools}/imatrix/imatrix.cpp +11 -9
- package/src/llama.cpp/{examples → tools}/llama-bench/llama-bench.cpp +495 -348
- package/src/llama.cpp/{examples → tools}/main/main.cpp +6 -9
- package/src/llama.cpp/{examples/llava → tools/mtmd}/CMakeLists.txt +1 -35
- package/src/llama.cpp/{examples/llava → tools/mtmd}/clip-impl.h +25 -5
- package/src/llama.cpp/{examples/llava → tools/mtmd}/clip.cpp +1440 -1349
- package/src/llama.cpp/tools/mtmd/clip.h +99 -0
- package/src/llama.cpp/{examples/llava → tools/mtmd}/mtmd-cli.cpp +70 -44
- package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +310 -0
- package/src/llama.cpp/{examples/llava → tools/mtmd}/mtmd.cpp +251 -281
- package/src/llama.cpp/tools/mtmd/mtmd.h +331 -0
- package/src/llama.cpp/{examples → tools}/perplexity/perplexity.cpp +4 -2
- package/src/llama.cpp/{examples → tools}/quantize/quantize.cpp +13 -76
- package/src/llama.cpp/{examples → tools}/rpc/rpc-server.cpp +70 -74
- package/src/llama.cpp/{examples → tools}/run/run.cpp +18 -4
- package/src/llama.cpp/{examples → tools}/server/CMakeLists.txt +2 -1
- package/src/llama.cpp/{examples → tools}/server/server.cpp +291 -76
- package/src/llama.cpp/{examples → tools}/server/utils.hpp +377 -5
- package/src/llama.cpp/cmake/arm64-windows-msvc.cmake +0 -6
- package/src/llama.cpp/examples/infill/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/infill/infill.cpp +0 -590
- package/src/llama.cpp/examples/llava/android/build_64.sh +0 -8
- package/src/llama.cpp/examples/llava/clip-quantize-cli.cpp +0 -59
- package/src/llama.cpp/examples/llava/clip.h +0 -135
- package/src/llama.cpp/examples/llava/llava.cpp +0 -586
- package/src/llama.cpp/examples/llava/llava.h +0 -49
- package/src/llama.cpp/examples/llava/mtmd.h +0 -168
- package/src/llama.cpp/examples/llava/qwen2vl-test.cpp +0 -636
- /package/src/llama.cpp/{examples → tools}/batched-bench/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/completions.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/cvector-generator.cpp +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/mean.hpp +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/negative.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/pca.hpp +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/positive.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/export-lora/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/export-lora/export-lora.cpp +0 -0
- /package/src/llama.cpp/{examples → tools}/gguf-split/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/gguf-split/gguf-split.cpp +0 -0
- /package/src/llama.cpp/{examples → tools}/imatrix/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/llama-bench/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/main/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples/llava → tools/mtmd}/deprecation-warning.cpp +0 -0
- /package/src/llama.cpp/{examples/llava → tools/mtmd}/requirements.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/perplexity/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/quantize/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/rpc/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/run/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/run/linenoise.cpp/linenoise.cpp +0 -0
- /package/src/llama.cpp/{examples → tools}/run/linenoise.cpp/linenoise.h +0 -0
- /package/src/llama.cpp/{examples → tools}/server/bench/requirements.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/server/httplib.h +0 -0
- /package/src/llama.cpp/{examples → tools}/server/tests/requirements.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/tokenize/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/tokenize/tokenize.cpp +0 -0
- /package/src/llama.cpp/{examples → tools}/tts/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/tts/tts.cpp +0 -0
package/src/llama.cpp/src/llama-kv-cache.h

@@ -2,32 +2,72 @@
 
 #include "llama.h"
 #include "llama-io.h"
+#include "llama-graph.h"
 #include "llama-memory.h"
 
 #include "ggml-cpp.h"
 
-#include <functional>
 #include <set>
 #include <vector>
 
 struct llama_cparams;
 struct llama_hparams;
 struct llama_ubatch;
+struct llama_sbatch;
+struct llama_model;
+struct llama_context;
 
 struct llama_kv_cache : public llama_memory_i {
-
+    virtual ~llama_kv_cache() = default;
 
-
-    virtual void
+    // call if batch processing fails - restores the cache state
+    virtual void restore() = 0;
 
-
-    virtual
+    // call after successful batch processing - clears any pending state
+    virtual void commit() = 0;
 
-
+    // process any pending defrag/shift/etc. operations
+    // optionally call once before processing a new batch
+    virtual bool update(llama_context & lctx) = 0;
+
+    // schedule a defrag if the fragmentation threshold is exceeded. otherwise, do nothing
+    virtual void defrag_sched(float thold) = 0;
+
+    // simulate full cache, used for allocating worst-case compute buffers
+    virtual void set_full() = 0;
+
+    //
+    // batch processing
+    //
+
+    virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;
+
+    // different KV caches require different batch splitting strategies
+    virtual llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const = 0;
+
+    // find an empty slot of size "n_tokens" in the cache
+    virtual bool find_slot(const llama_ubatch & batch) = 0;
+
+    // getters
+    virtual int32_t get_n_tokens() const = 0;
+    virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
+    virtual llama_pos get_pos_max() const = 0;
+    virtual bool get_can_shift() const = 0;
 
     bool get_can_edit() const override { return get_can_shift(); }
+
+    //
+    // state write/read
+    //
+
+    virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0;
+    virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0;
 };
 
+//
+// llama_kv_cache_guard
+//
+
 struct llama_kv_cache_guard {
     llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {}
 
@@ -43,65 +83,50 @@ private:
     llama_kv_cache * kv;
 };
 
-
-
-
-        int32_t src = -1; // used by recurrent state models to copy states
-        int32_t tail = -1;
+//
+// llama_kv_cache_unified
+//
 
-
+// TODO: add notion of max sequences
+class llama_kv_cache_unified : public llama_kv_cache {
+public:
+    struct kv_cell {
+        llama_pos pos = -1;
+        llama_pos delta = 0;
 
-
-            return seq_id.find(id) != seq_id.end();
-        }
+        std::set<llama_seq_id> seq_id;
 
-
-
-
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
 
-
-
-
-    };
+        bool is_empty() const {
+            return seq_id.empty();
+        }
 
-
-
-
-class llama_kv_cache_unified : public llama_kv_cache {
-public:
-    // can be used to query data from the model if needed
-    struct callbacks {
-        std::function<ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
     };
 
-
-            const llama_hparams & hparams,
-            callbacks cbs);
-
-    virtual ~llama_kv_cache_unified() = default;
+    static uint32_t get_padding(const llama_cparams & cparams);
 
-
-
-            const llama_model & model, // TODO: do not reference the model
-            const llama_cparams & cparams,
+    llama_kv_cache_unified(
+            const llama_model & model,
             ggml_type type_k,
             ggml_type type_v,
+            bool v_trans,
+            bool offload,
            uint32_t kv_size,
-
-
-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
+            uint32_t padding);
 
-
+    ~llama_kv_cache_unified() = default;
 
-    //
-
+    //
+    // llama_memory_i
+    //
 
     void clear() override;
-    void defrag() override;
-
-    virtual void restore() override;
-    virtual void commit() override;
 
     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
@@ -111,25 +136,73 @@ public:
 
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
-
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
-    // find an empty slot of size "n_tokens" in the cache
     // updates the cache head
     // Note: On success, it's important that cache.head points
     // to the first cell of the slot.
-    bool find_slot(const llama_ubatch & batch);
+    bool find_slot(const llama_ubatch & batch) override;
 
-
-
+    int32_t get_n_tokens() const override;
+    int32_t get_used_cells() const override;
 
-    //
-
+    // TODO: better data structures to reduce the cost of this operation
+    llama_pos get_pos_max() const override;
 
-
-    size_t size_v_bytes() const;
+    bool get_can_shift() const override;
 
-    //
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    std::vector<kv_cell> cells;
+
+    std::vector<ggml_tensor *> k_l; // per layer
+    std::vector<ggml_tensor *> v_l;
+
+private:
+    const llama_model & model;
+    const llama_hparams & hparams;
+
+    bool has_shift = false;
+    bool do_defrag = false;
+
+    bool v_trans = true; // the value tensor is transposed
+    bool can_shift = false;
+
+    // required padding
+    uint32_t padding = 1;
+
+    ggml_type type_k = GGML_TYPE_F16;
+    ggml_type type_v = GGML_TYPE_F16;
 
+    std::vector<ggml_context_ptr> ctxs;
+    std::vector<ggml_backend_buffer_ptr> bufs;
+
+    // defrag
     struct {
         std::vector<uint32_t> ids;
     } defrag_info;
@@ -138,7 +211,6 @@ public:
     bool defrag_prepare(int32_t n_max_nodes);
 
     // commit/restore cache
-
     struct slot_range {
         uint32_t c0 = 0; // note: these are cell indices, not sequence positions
         uint32_t c1 = 0;
@@ -149,48 +221,167 @@ public:
         std::vector<slot_range> ranges;
     } pending;
 
-    //
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
 
-
-    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1);
+    size_t total_size() const;
 
-
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
 
-
+    ggml_tensor * build_rope_shift(
+            const llama_cparams & cparams,
+            ggml_context * ctx,
+            ggml_tensor * cur,
+            ggml_tensor * shift,
+            ggml_tensor * factors,
+            float freq_base,
+            float freq_scale) const;
+
+    llm_graph_result_ptr build_graph_shift(
+            const llama_cparams & cparams,
+            ggml_context * ctx,
+            ggml_cgraph * gf) const;
+
+    llm_graph_result_ptr build_graph_defrag(
+            const llama_cparams & cparams,
+            ggml_context * ctx,
+            ggml_cgraph * gf) const;
 
-
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
 
-    bool
-    bool
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
+
+//
+// llama_kv_cache_recurrent
+//
 
-
-
+class llama_kv_cache_recurrent : public llama_kv_cache {
+public:
+    struct kv_cell {
+        llama_pos pos = -1;
+        int32_t src = -1; // used to copy states
+        int32_t tail = -1;
 
-
-
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
+
+    llama_kv_cache_recurrent(
+            const llama_model & model,
+            ggml_type type_k,
+            ggml_type type_v,
+            bool offload,
+            uint32_t kv_size);
+
+    ~llama_kv_cache_recurrent() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    void clear() override;
+
+    bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
 
-
-
-
-
-
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & lctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    bool find_slot(const llama_ubatch & batch) override;
+
+    int32_t get_n_tokens() const override;
+    int32_t get_used_cells() const override;
+
+    // TODO: better data structures to reduce the cost of this operation
+    llama_pos get_pos_max() const override;
+
+    bool get_can_shift() const override;
+
+    // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
+    int32_t s_copy(int i) const;
+    float s_mask(int i) const;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
     uint32_t used = 0; // used cells (i.e. at least one seq_id)
 
     // computed before each graph build
     uint32_t n = 0;
 
-    std::vector<
+    std::vector<kv_cell> cells;
 
     std::vector<ggml_tensor *> k_l; // per layer
     std::vector<ggml_tensor *> v_l;
 
 private:
+    //const llama_model & model;
+    const llama_hparams & hparams;
+
+    // commit/restore cache
+    // TODO: rework for recurrent cache
+    struct slot_range {
+        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
+        uint32_t c1 = 0;
+    };
+
+    // pending cell updates that are not yet committed
+    struct {
+        std::vector<slot_range> ranges;
+    } pending;
+
     ggml_type type_k = GGML_TYPE_F16;
     ggml_type type_v = GGML_TYPE_F16;
 
     std::vector<ggml_context_ptr> ctxs;
     std::vector<ggml_backend_buffer_ptr> bufs;
 
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t total_size() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
 
@@ -198,11 +389,6 @@ private:
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
 
-// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
-//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
-//public:
-//    using llama_kv_cache_unified::llama_kv_cache_unified;
-//};
 
 //
 // kv cache view
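
The restore()/commit() pair added to llama_kv_cache, together with llama_kv_cache_guard, gives batch processing a transactional shape: cells claimed by find_slot() stay pending until commit(), and restore() rolls them back when decoding fails. A minimal usage sketch (assumptions: the guard calls restore() from its destructor unless commit() was reached, and llama_sbatch exposes an n_tokens counter; neither detail is shown in the hunks above):

    // sketch only - not part of the diff
    static bool decode_with_rollback(llama_kv_cache & kv, const llama_batch & batch, uint32_t n_ubatch) {
        llama_kv_cache_guard guard(&kv);   // assumed: destructor calls kv->restore() unless committed

        llama_sbatch sbatch = kv.sbatch_init(batch, /*logits_all =*/ false);

        while (sbatch.n_tokens > 0) {      // assumed member of llama_sbatch
            llama_ubatch ubatch = kv.ubatch_next(sbatch, n_ubatch, /*embd_pooled =*/ false);

            if (!kv.find_slot(ubatch)) {
                return false;              // guard goes out of scope -> restore()
            }

            // ... build and evaluate the graph for this ubatch ...
        }

        guard.commit();                    // success: keep the claimed cells
        return true;
    }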
package/src/llama.cpp/src/llama-memory.h

@@ -2,12 +2,22 @@
 
 #include "llama.h"
 
+struct llama_memory_params {
+    // kv cache
+    ggml_type type_k;
+    ggml_type type_v;
+
+    // parameters for other types of memory
+    // ...
+};
+
 // general concept of LLM memory
 // the KV cache is a type of LLM memory, but there can be other types
 class llama_memory_i {
 public:
+    virtual ~llama_memory_i() = default;
+
     virtual void clear() = 0;
-    virtual void defrag() = 0;
 
     virtual bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
     virtual void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
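
For orientation, a hypothetical call site tying the pieces above together: llama_kv_cache_unified::get_padding() and the new constructor come from the llama-kv-cache.h hunks, the type_k/type_v choice mirrors llama_memory_params, while the helper name, the GGML_PAD rounding and the flag values are illustrative assumptions only:

    // sketch only - not part of the diff
    static std::unique_ptr<llama_kv_cache> make_unified_cache(
            const llama_model & model,
            const llama_cparams & cparams,
            const llama_memory_params & mparams) {
        const uint32_t padding = llama_kv_cache_unified::get_padding(cparams);
        const uint32_t kv_size = GGML_PAD(cparams.n_ctx, padding); // assumed: round the context size up to the padding

        return std::make_unique<llama_kv_cache_unified>(
                model,
                mparams.type_k,        // e.g. GGML_TYPE_F16
                mparams.type_v,
                /*v_trans =*/ true,    // illustrative; the real choice depends on the attention path
                /*offload =*/ true,
                kv_size,
                padding);
    }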
package/src/llama.cpp/src/llama-model-loader.cpp

@@ -301,12 +301,12 @@ namespace GGUFMeta {
         GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
 
     switch (arr_info.gt) {
-        case
-        case GGUF_TYPE_INT32: GGML_ASSERT(
-
-
+        case GGUF_TYPE_UINT32:
+        case GGUF_TYPE_INT32: GGML_ASSERT((std::is_same<T, int32_t>::value) ||
+                                          (std::is_same<T, uint32_t>::value)); break;
+        case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
         default:
-            throw std::runtime_error(format("%s is not a float32
+            throw std::runtime_error(format("%s is not a float32/uint32/int32 array", key.c_str()));
     }
 
     result.resize(arr_info.length);

@@ -330,12 +330,12 @@ namespace GGUFMeta {
         GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
 
     switch (arr_info.gt) {
-        case
-        case GGUF_TYPE_INT32: GGML_ASSERT(
-
-
+        case GGUF_TYPE_UINT32:
+        case GGUF_TYPE_INT32: GGML_ASSERT((std::is_same<T, int32_t>::value) ||
+                                          (std::is_same<T, uint32_t>::value)); break;
+        case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
         default:
-            throw std::runtime_error(format("%s is not a float32
+            throw std::runtime_error(format("%s is not a float32/uint32/int32 array", key.c_str()));
     }
 
     if (arr_info.length > N_MAX) {
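
The practical effect of the widened switch is that array-valued GGUF metadata stored as uint32 can now be read through the same templated path that previously accepted only int32 and float32 arrays. A hypothetical caller (the key string is invented and the get_arr name/signature is assumed from the surrounding loader code, not from this hunk):

    // sketch only - not part of the diff
    std::vector<uint32_t> values;
    if (ml.get_arr("example.uint32_array_key", values, /*required =*/ false)) {
        // before this change, a GGUF_TYPE_UINT32 array here tripped the GGML_ASSERT
    }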
@@ -469,7 +469,7 @@ llama_model_loader::llama_model_loader(
 
     meta.reset(gguf_init_from_file(fname.c_str(), params));
     if (!meta) {
-        throw std::runtime_error(format("%s: failed to load model from %s
+        throw std::runtime_error(format("%s: failed to load model from %s", __func__, fname.c_str()));
     }
 
     get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
@@ -528,7 +528,7 @@ llama_model_loader::llama_model_loader(
         };
         gguf_context_ptr ctx_gguf { gguf_init_from_file(fname_split, split_params) };
         if (!ctx_gguf) {
-            throw std::runtime_error(format("%s: failed to load GGUF split from %s
+            throw std::runtime_error(format("%s: failed to load GGUF split from %s", __func__, fname_split));
         }
 
         // check idx
@@ -822,9 +822,18 @@ void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps
     mappings.reserve(files.size());
     mmaps_used.reserve(files.size());
     for (const auto & file : files) {
-
-
-
+        bool is_numa = false;
+
+        auto * dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+        if (dev) {
+            auto * reg = ggml_backend_dev_backend_reg(dev);
+            auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
+            if (is_numa_fn) {
+                is_numa = is_numa_fn();
+            }
+        }
+
+        std::unique_ptr<llama_mmap> mapping = std::make_unique<llama_mmap>(file.get(), prefetch ? -1 : 0, is_numa);
         mmaps_used.emplace_back(mapping->size(), 0);
         if (mlock_mmaps) {
             std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
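
Read as a standalone helper, the lookup added above resolves the CPU backend's NUMA probe through the backend registry at runtime instead of calling it directly, so the loader has no hard link-time dependency on that symbol. A sketch using only the calls that appear in the hunk (the helper name and its placement are illustrative):

    // sketch only - not part of the diff
    static bool cpu_backend_is_numa() {
        bool is_numa = false;

        auto * dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
        if (dev) {
            auto * reg = ggml_backend_dev_backend_reg(dev);
            auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
            if (is_numa_fn) {
                is_numa = is_numa_fn();
            }
        }

        return is_numa;
    }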