@fugood/llama.node 0.3.6 → 0.3.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +17 -2
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/lib/binding.ts +3 -1
- package/lib/index.js +16 -1
- package/lib/index.ts +16 -0
- package/package.json +1 -1
- package/src/EmbeddingWorker.cpp +4 -3
- package/src/LlamaCompletionWorker.cpp +4 -2
- package/src/LlamaContext.cpp +61 -6
- package/src/LlamaContext.h +1 -0
- package/src/common.hpp +6 -11
- package/src/llama.cpp/.github/workflows/build.yml +19 -17
- package/src/llama.cpp/.github/workflows/docker.yml +77 -30
- package/src/llama.cpp/.github/workflows/editorconfig.yml +3 -1
- package/src/llama.cpp/.github/workflows/server.yml +22 -3
- package/src/llama.cpp/CMakeLists.txt +49 -24
- package/src/llama.cpp/common/arg.cpp +82 -26
- package/src/llama.cpp/common/arg.h +3 -0
- package/src/llama.cpp/common/common.cpp +192 -72
- package/src/llama.cpp/common/common.h +51 -18
- package/src/llama.cpp/common/ngram-cache.cpp +12 -12
- package/src/llama.cpp/common/ngram-cache.h +2 -2
- package/src/llama.cpp/common/sampling.cpp +11 -6
- package/src/llama.cpp/common/speculative.cpp +18 -15
- package/src/llama.cpp/docs/build.md +2 -0
- package/src/llama.cpp/examples/batched/batched.cpp +9 -7
- package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +3 -3
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +10 -8
- package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +11 -8
- package/src/llama.cpp/examples/cvector-generator/mean.hpp +1 -1
- package/src/llama.cpp/examples/cvector-generator/pca.hpp +1 -1
- package/src/llama.cpp/examples/embedding/embedding.cpp +8 -7
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +7 -6
- package/src/llama.cpp/examples/export-lora/export-lora.cpp +8 -7
- package/src/llama.cpp/examples/gguf/gguf.cpp +10 -6
- package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +1 -0
- package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +8 -7
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +13 -10
- package/src/llama.cpp/examples/imatrix/imatrix.cpp +13 -12
- package/src/llama.cpp/examples/infill/infill.cpp +23 -24
- package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +44 -13
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +11 -6
- package/src/llama.cpp/examples/llava/clip.cpp +4 -2
- package/src/llama.cpp/examples/llava/llava-cli.cpp +9 -6
- package/src/llama.cpp/examples/llava/llava.cpp +2 -2
- package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +8 -4
- package/src/llama.cpp/examples/llava/qwen2vl-cli.cpp +11 -8
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +6 -7
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +4 -9
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +3 -7
- package/src/llama.cpp/examples/lookup/lookup.cpp +5 -6
- package/src/llama.cpp/examples/main/main.cpp +51 -29
- package/src/llama.cpp/examples/parallel/parallel.cpp +5 -6
- package/src/llama.cpp/examples/passkey/passkey.cpp +7 -5
- package/src/llama.cpp/examples/perplexity/perplexity.cpp +37 -23
- package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +12 -14
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +8 -8
- package/src/llama.cpp/examples/rpc/rpc-server.cpp +12 -0
- package/src/llama.cpp/examples/run/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/run/linenoise.cpp/linenoise.cpp +1351 -0
- package/src/llama.cpp/examples/run/linenoise.cpp/linenoise.h +114 -0
- package/src/llama.cpp/examples/run/run.cpp +175 -61
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +4 -25
- package/src/llama.cpp/examples/server/CMakeLists.txt +1 -0
- package/src/llama.cpp/examples/server/httplib.h +1295 -409
- package/src/llama.cpp/examples/server/server.cpp +387 -181
- package/src/llama.cpp/examples/server/tests/requirements.txt +1 -0
- package/src/llama.cpp/examples/server/utils.hpp +170 -58
- package/src/llama.cpp/examples/simple/simple.cpp +9 -8
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +16 -12
- package/src/llama.cpp/examples/speculative/speculative.cpp +22 -23
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +8 -12
- package/src/llama.cpp/examples/tokenize/tokenize.cpp +17 -5
- package/src/llama.cpp/examples/tts/tts.cpp +64 -23
- package/src/llama.cpp/ggml/CMakeLists.txt +5 -21
- package/src/llama.cpp/ggml/include/ggml-backend.h +2 -0
- package/src/llama.cpp/ggml/include/ggml-cpp.h +1 -0
- package/src/llama.cpp/ggml/include/ggml.h +36 -145
- package/src/llama.cpp/ggml/include/gguf.h +202 -0
- package/src/llama.cpp/ggml/src/CMakeLists.txt +6 -3
- package/src/llama.cpp/ggml/src/ggml-alloc.c +5 -0
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -1
- package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +79 -49
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +5 -2
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +33 -23
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +57 -72
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +87 -2
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +335 -66
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +10 -2
- package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +1090 -378
- package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.h +2 -2
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +1 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +3 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +3 -0
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +3 -1
- package/src/llama.cpp/ggml/src/ggml-impl.h +11 -16
- package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +16 -0
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +6 -6
- package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +154 -35
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +1 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +9 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +18 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +3 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +1 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +3 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +1 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +40 -95
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +48 -48
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +24 -24
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +238 -164
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +105 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +8 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +3 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +1 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +3 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +1 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.cpp +7 -5
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.hpp +1 -2
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +74 -4
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +314 -116
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +4 -2
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +9 -3
- package/src/llama.cpp/ggml/src/ggml.c +117 -1327
- package/src/llama.cpp/ggml/src/gguf.cpp +1329 -0
- package/src/llama.cpp/include/llama-cpp.h +6 -1
- package/src/llama.cpp/include/llama.h +138 -75
- package/src/llama.cpp/src/CMakeLists.txt +13 -1
- package/src/llama.cpp/src/llama-adapter.cpp +347 -0
- package/src/llama.cpp/src/llama-adapter.h +74 -0
- package/src/llama.cpp/src/llama-arch.cpp +1487 -0
- package/src/llama.cpp/src/llama-arch.h +400 -0
- package/src/llama.cpp/src/llama-batch.cpp +368 -0
- package/src/llama.cpp/src/llama-batch.h +88 -0
- package/src/llama.cpp/src/llama-chat.cpp +578 -0
- package/src/llama.cpp/src/llama-chat.h +52 -0
- package/src/llama.cpp/src/llama-context.cpp +1775 -0
- package/src/llama.cpp/src/llama-context.h +128 -0
- package/src/llama.cpp/src/llama-cparams.cpp +1 -0
- package/src/llama.cpp/src/llama-cparams.h +37 -0
- package/src/llama.cpp/src/llama-grammar.cpp +5 -4
- package/src/llama.cpp/src/llama-grammar.h +3 -1
- package/src/llama.cpp/src/llama-hparams.cpp +71 -0
- package/src/llama.cpp/src/llama-hparams.h +139 -0
- package/src/llama.cpp/src/llama-impl.cpp +167 -0
- package/src/llama.cpp/src/llama-impl.h +16 -136
- package/src/llama.cpp/src/llama-kv-cache.cpp +718 -0
- package/src/llama.cpp/src/llama-kv-cache.h +218 -0
- package/src/llama.cpp/src/llama-mmap.cpp +589 -0
- package/src/llama.cpp/src/llama-mmap.h +67 -0
- package/src/llama.cpp/src/llama-model-loader.cpp +1124 -0
- package/src/llama.cpp/src/llama-model-loader.h +167 -0
- package/src/llama.cpp/src/llama-model.cpp +3953 -0
- package/src/llama.cpp/src/llama-model.h +370 -0
- package/src/llama.cpp/src/llama-quant.cpp +934 -0
- package/src/llama.cpp/src/llama-quant.h +1 -0
- package/src/llama.cpp/src/llama-sampling.cpp +147 -32
- package/src/llama.cpp/src/llama-sampling.h +3 -19
- package/src/llama.cpp/src/llama-vocab.cpp +1832 -575
- package/src/llama.cpp/src/llama-vocab.h +97 -142
- package/src/llama.cpp/src/llama.cpp +7160 -20314
- package/src/llama.cpp/src/unicode.cpp +8 -3
- package/src/llama.cpp/tests/CMakeLists.txt +2 -0
- package/src/llama.cpp/tests/test-autorelease.cpp +3 -3
- package/src/llama.cpp/tests/test-backend-ops.cpp +370 -59
- package/src/llama.cpp/tests/test-chat-template.cpp +162 -125
- package/src/llama.cpp/tests/test-gguf.cpp +222 -187
- package/src/llama.cpp/tests/test-model-load-cancel.cpp +1 -1
- package/src/llama.cpp/tests/test-sampling.cpp +0 -1
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +4 -4
- package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +9 -7
- package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +8 -6
package/src/llama.cpp/src/llama-adapter.cpp
@@ -0,0 +1,347 @@
+#include "llama-adapter.h"
+
+#include "llama-impl.h"
+#include "llama-mmap.h"
+#include "llama-model.h"
+
+#include <algorithm>
+#include <map>
+#include <cassert>
+#include <stdexcept>
+
+// vec
+
+struct ggml_tensor * llama_adapter_cvec::tensor_for(int il) const {
+    if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
+        return nullptr;
+    }
+
+    return tensors[il];
+}
+
+struct ggml_tensor * llama_adapter_cvec::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
+    ggml_tensor * layer_dir = tensor_for(il);
+    if (layer_dir != nullptr) {
+        cur = ggml_add(ctx, cur, layer_dir);
+    }
+
+    return cur;
+}
+
+bool llama_adapter_cvec::init(const llama_model & model) {
+    const auto & hparams = model.hparams;
+
+    GGML_ASSERT(tensors.empty());
+    GGML_ASSERT(ctxs.empty());
+    GGML_ASSERT(bufs.empty());
+
+    // create a context for each buffer type
+    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
+    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
+        auto it = ctx_map.find(buft);
+        if (it == ctx_map.end()) {
+            struct ggml_init_params params = {
+                /*.mem_size   =*/ hparams.n_layer*ggml_tensor_overhead(),
+                /*.mem_buffer =*/ NULL,
+                /*.no_alloc   =*/ true,
+            };
+
+            ggml_context * ctx = ggml_init(params);
+            if (!ctx) {
+                return nullptr;
+            }
+
+            ctx_map[buft] = ctx;
+            ctxs.emplace_back(ctx);
+
+            return ctx;
+        }
+
+        return it->second;
+    };
+
+    // make tensors
+    tensors.reserve(hparams.n_layer);
+    tensors.push_back(nullptr); // there's never a tensor for layer 0
+    for (size_t il = 1; il < hparams.n_layer; il++) {
+        ggml_backend_buffer_type_t buft = model.select_buft(il);
+        ggml_context * ctx = ctx_for_buft(buft);
+        if (!ctx) {
+            LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
+            return false;
+        }
+        ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
+        tensors.push_back(tensor);
+    }
+
+    // allocate tensors / buffers and zero
+    bufs.reserve(ctx_map.size());
+    for (auto it : ctx_map) {
+        ggml_backend_buffer_type_t buft = it.first;
+        ggml_context * ctx = it.second;
+        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
+        if (!buf) {
+            LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
+            return false;
+        }
+        ggml_backend_buffer_clear(buf, 0);
+        bufs.emplace_back(buf);
+    }
+
+    return true;
+}
+
+int32_t llama_adapter_cvec::apply(
+        const llama_model & model,
+        const float * data,
+        size_t len,
+        int32_t n_embd,
+        int32_t il_start,
+        int32_t il_end) {
+    const auto & hparams = model.hparams;
+
+    if (data == nullptr) {
+        // disable the current control vector (but leave allocated for later)
+        layer_start = -1;
+        layer_end   = -1;
+        return 0;
+    }
+
+    if (n_embd != (int) hparams.n_embd) {
+        LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
+        return 1;
+    }
+
+    if (tensors.empty()) {
+        if (!init(model)) {
+            return 1;
+        }
+    }
+
+    layer_start = il_start;
+    layer_end   = il_end;
+
+    for (size_t il = 1; il < hparams.n_layer; il++) {
+        assert(tensors[il] != nullptr);
+
+        const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
+        if (off + n_embd <= len) {
+            ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
+        }
+    }
+
+    return 0;
+}
+
+// lora
+
+llama_adapter_lora_weight * llama_adapter_lora::get_weight(struct ggml_tensor * w) {
+    const std::string name(w->name);
+
+    const auto pos = ab_map.find(name);
+    if (pos != ab_map.end()) {
+        return &pos->second;
+    }
+
+    return nullptr;
+}
+
+static void llama_adapter_lora_init_impl(struct llama_model & model, const char * path_lora, struct llama_adapter_lora & adapter) {
+    LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
+
+    ggml_context * ctx_init;
+    struct gguf_init_params meta_gguf_params = {
+        /* .no_alloc = */ true,
+        /* .ctx      = */ &ctx_init,
+    };
+
+    gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
+    if (!ctx_gguf) {
+        throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
+    }
+
+    ggml_context_ptr ctx { ctx_init };
+
+    // check metadata
+    {
+        auto get_kv_str = [&](const std::string & key) -> std::string {
+            int id = gguf_find_key(ctx_gguf.get(), key.c_str());
+            return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf.get(), id));
+        };
+        auto get_kv_f32 = [&](const std::string & key) -> float {
+            int id = gguf_find_key(ctx_gguf.get(), key.c_str());
+            return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf.get(), id);
+        };
+        LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
+
+        auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
+        if (general_type != "adapter") {
+            throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
+        }
+
+        auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
+        auto general_arch = llm_arch_from_string(general_arch_str);
+        if (general_arch != model.arch) {
+            throw std::runtime_error("model arch and LoRA arch mismatch");
+        }
+
+        auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE));
+        if (adapter_type != "lora") {
+            throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
+        }
+
+        adapter.alpha = get_kv_f32(llm_kv(LLM_KV_ADAPTER_LORA_ALPHA));
+    }
+
+    int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
+
+    // contexts for each buffer type
+    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
+    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
+        auto it = ctx_map.find(buft);
+        if (it == ctx_map.end()) {
+            // add a new context
+            struct ggml_init_params params = {
+                /*.mem_size   =*/ n_tensors*ggml_tensor_overhead(),
+                /*.mem_buffer =*/ NULL,
+                /*.no_alloc   =*/ true,
+            };
+            ggml_context * buft_ctx = ggml_init(params);
+            if (!buft_ctx) {
+                return nullptr;
+            }
+            ctx_map[buft] = buft_ctx;
+            adapter.ctxs.emplace_back(buft_ctx);
+            return buft_ctx;
+        };
+        return it->second;
+    };
+
+    // bundle lora_a and lora_b into pairs
+    std::map<std::string, llama_adapter_lora_weight> ab_map;
+    auto str_endswith = [](const std::string & str, const std::string & suffix) {
+        return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
+    };
+
+    for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) {
+        std::string name(cur->name);
+        if (str_endswith(name, ".lora_a")) {
+            replace_all(name, ".lora_a", "");
+            if (ab_map.find(name) == ab_map.end()) {
+                ab_map[name] = llama_adapter_lora_weight(cur, nullptr);
+            } else {
+                ab_map[name].a = cur;
+            }
+        } else if (str_endswith(name, ".lora_b")) {
+            replace_all(name, ".lora_b", "");
+            if (ab_map.find(name) == ab_map.end()) {
+                ab_map[name] = llama_adapter_lora_weight(nullptr, cur);
+            } else {
+                ab_map[name].b = cur;
+            }
+        } else if (str_endswith(name, "_norm.weight")) {
+            // TODO: add support for norm vector
+            // for now, we don't really care because most adapters still work fine without it
+            continue;
+        } else {
+            throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix");
+        }
+    }
+
+    // add tensors
+    for (auto & it : ab_map) {
+        const std::string & name = it.first;
+        llama_adapter_lora_weight & w = it.second;
+        bool is_token_embd = str_endswith(name, "token_embd.weight");
+
+        if (!w.a || !w.b) {
+            throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component");
+        }
+
+        // device buft and device ctx
+        const auto * model_tensor = model.get_tensor(name.c_str());
+        if (!model_tensor) {
+            throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)");
+        }
+
+        struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer));
+        // validate tensor shape
+        if (is_token_embd) {
+            // expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd()
+            if (model_tensor->ne[0] != w.b->ne[1] || model_tensor->ne[1] != w.a->ne[1]) {
+                throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
+            }
+        } else {
+            if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
+                throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
+            }
+            if (w.a->ne[1] != w.b->ne[0]) {
+                throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
+            }
+        }
+
+        // save tensor to adapter
+        struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
+        struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
+        ggml_set_name(tensor_a, w.a->name);
+        ggml_set_name(tensor_b, w.b->name);
+        adapter.ab_map[name] = llama_adapter_lora_weight(tensor_a, tensor_b);
+    }
+
+    // allocate tensors / buffers and zero
+    {
+        adapter.ctxs.reserve(ctx_map.size());
+        adapter.bufs.reserve(ctx_map.size());
+        for (auto & it : ctx_map) {
+            ggml_backend_buffer_type_t buft = it.first;
+            ggml_context * ctx_dev = it.second;
+            ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) };
+            if (!buf) {
+                throw std::runtime_error("failed to allocate buffer for lora adapter\n");
+            }
+            LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0);
+            adapter.bufs.emplace_back(std::move(buf));
+        }
+    }
+
+    // set tensor data
+    {
+        llama_file gguf_file(path_lora, "rb");
+        std::vector<uint8_t> read_buf;
+        auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) {
+            size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name));
+            size_t size = ggml_nbytes(orig);
+            read_buf.resize(size);
+            gguf_file.seek(offs, SEEK_SET);
+            gguf_file.read_raw(read_buf.data(), size);
+            ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
+        };
+        for (auto & it : adapter.ab_map) {
+            auto orig = ab_map[it.first];
+            auto dev  = it.second;
+            set_tensor(orig.a, dev.a);
+            set_tensor(orig.b, dev.b);
+        }
+    }
+
+    LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2);
+}
+
+struct llama_adapter_lora * llama_adapter_lora_init(struct llama_model * model, const char * path_lora) {
+    struct llama_adapter_lora * adapter = new llama_adapter_lora();
+
+    try {
+        llama_adapter_lora_init_impl(*model, path_lora, *adapter);
+        return adapter;
+    } catch (const std::exception & err) {
+        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
+
+        delete adapter;
+    }
+
+    return nullptr;
+}
+
+void llama_adapter_lora_free(struct llama_adapter_lora * adapter) {
+    delete adapter;
+}
package/src/llama.cpp/src/llama-adapter.h
@@ -0,0 +1,74 @@
+#pragma once
+
+#include "llama.h"
+
+#include "ggml-cpp.h"
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+// TODO: pimpl
+
+//
+// llama_adapter_cvec
+//
+
+struct llama_adapter_cvec {
+    struct ggml_tensor * tensor_for(int il) const;
+
+    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const;
+
+    int32_t apply(
+            const llama_model & model,
+            const float * data,
+            size_t len,
+            int32_t n_embd,
+            int32_t il_start,
+            int32_t il_end);
+
+private:
+    bool init(const llama_model & model);
+
+    int32_t layer_start = -1;
+    int32_t layer_end   = -1;
+
+    std::vector<ggml_context_ptr> ctxs;
+    std::vector<ggml_backend_buffer_ptr> bufs;
+
+    std::vector<struct ggml_tensor *> tensors; // per layer
+};
+
+//
+// llama_adapter_lora
+//
+
+struct llama_adapter_lora_weight {
+    struct ggml_tensor * a = nullptr;
+    struct ggml_tensor * b = nullptr;
+
+    // get actual scale based on rank and alpha
+    float get_scale(float alpha, float adapter_scale) const {
+        const float rank  = (float) b->ne[0];
+        const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
+        return scale;
+    }
+
+    llama_adapter_lora_weight() = default;
+    llama_adapter_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
+};
+
+struct llama_adapter_lora {
+    // map tensor name to lora_a_b
+    std::unordered_map<std::string, struct llama_adapter_lora_weight> ab_map;
+
+    std::vector<ggml_context_ptr> ctxs;
+    std::vector<ggml_backend_buffer_ptr> bufs;
+
+    float alpha;
+
+    llama_adapter_lora() = default;
+    ~llama_adapter_lora() = default;
+
+    llama_adapter_lora_weight * get_weight(struct ggml_tensor * w);
+};