llama-cpp-capacitor 0.0.6 → 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/CMakeLists.txt +9 -9
- package/cpp/LICENSE +21 -0
- package/cpp/README.md +4 -0
- package/cpp/anyascii.c +22223 -0
- package/cpp/anyascii.h +42 -0
- package/cpp/chat-parser.cpp +393 -0
- package/cpp/chat-parser.h +120 -0
- package/cpp/chat.cpp +2315 -0
- package/cpp/chat.h +221 -0
- package/cpp/common.cpp +1619 -0
- package/cpp/common.h +744 -0
- package/cpp/ggml-alloc.c +1028 -0
- package/cpp/ggml-alloc.h +76 -0
- package/cpp/ggml-backend-impl.h +255 -0
- package/cpp/ggml-backend-reg.cpp +600 -0
- package/cpp/ggml-backend.cpp +2118 -0
- package/cpp/ggml-backend.h +354 -0
- package/cpp/ggml-common.h +1878 -0
- package/cpp/ggml-cpp.h +39 -0
- package/cpp/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/ggml-cpu/amx/amx.h +8 -0
- package/cpp/ggml-cpu/amx/common.h +91 -0
- package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
- package/cpp/ggml-cpu/amx/mmq.h +10 -0
- package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
- package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
- package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
- package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
- package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
- package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
- package/cpp/ggml-cpu/arch-fallback.h +215 -0
- package/cpp/ggml-cpu/binary-ops.cpp +158 -0
- package/cpp/ggml-cpu/binary-ops.h +16 -0
- package/cpp/ggml-cpu/common.h +73 -0
- package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
- package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
- package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
- package/cpp/ggml-cpu/ops.cpp +10587 -0
- package/cpp/ggml-cpu/ops.h +114 -0
- package/cpp/ggml-cpu/quants.c +1193 -0
- package/cpp/ggml-cpu/quants.h +97 -0
- package/cpp/ggml-cpu/repack.cpp +1982 -0
- package/cpp/ggml-cpu/repack.h +120 -0
- package/cpp/ggml-cpu/simd-mappings.h +1184 -0
- package/cpp/ggml-cpu/traits.cpp +36 -0
- package/cpp/ggml-cpu/traits.h +38 -0
- package/cpp/ggml-cpu/unary-ops.cpp +186 -0
- package/cpp/ggml-cpu/unary-ops.h +28 -0
- package/cpp/ggml-cpu/vec.cpp +348 -0
- package/cpp/ggml-cpu/vec.h +1121 -0
- package/cpp/ggml-cpu.h +145 -0
- package/cpp/ggml-impl.h +622 -0
- package/cpp/ggml-metal-impl.h +688 -0
- package/cpp/ggml-metal.h +66 -0
- package/cpp/ggml-metal.m +6833 -0
- package/cpp/ggml-opt.cpp +1093 -0
- package/cpp/ggml-opt.h +256 -0
- package/cpp/ggml-quants.c +5324 -0
- package/cpp/ggml-quants.h +106 -0
- package/cpp/ggml-threading.cpp +12 -0
- package/cpp/ggml-threading.h +14 -0
- package/cpp/ggml.c +7108 -0
- package/cpp/ggml.h +2492 -0
- package/cpp/gguf.cpp +1358 -0
- package/cpp/gguf.h +202 -0
- package/cpp/json-partial.cpp +256 -0
- package/cpp/json-partial.h +38 -0
- package/cpp/json-schema-to-grammar.cpp +985 -0
- package/cpp/json-schema-to-grammar.h +21 -0
- package/cpp/llama-adapter.cpp +388 -0
- package/cpp/llama-adapter.h +76 -0
- package/cpp/llama-arch.cpp +2355 -0
- package/cpp/llama-arch.h +499 -0
- package/cpp/llama-batch.cpp +875 -0
- package/cpp/llama-batch.h +160 -0
- package/cpp/llama-chat.cpp +783 -0
- package/cpp/llama-chat.h +65 -0
- package/cpp/llama-context.cpp +2748 -0
- package/cpp/llama-context.h +306 -0
- package/cpp/llama-cparams.cpp +5 -0
- package/cpp/llama-cparams.h +41 -0
- package/cpp/llama-cpp.h +30 -0
- package/cpp/llama-grammar.cpp +1229 -0
- package/cpp/llama-grammar.h +173 -0
- package/cpp/llama-graph.cpp +1891 -0
- package/cpp/llama-graph.h +810 -0
- package/cpp/llama-hparams.cpp +180 -0
- package/cpp/llama-hparams.h +233 -0
- package/cpp/llama-impl.cpp +167 -0
- package/cpp/llama-impl.h +61 -0
- package/cpp/llama-io.cpp +15 -0
- package/cpp/llama-io.h +35 -0
- package/cpp/llama-kv-cache-iswa.cpp +318 -0
- package/cpp/llama-kv-cache-iswa.h +135 -0
- package/cpp/llama-kv-cache.cpp +2059 -0
- package/cpp/llama-kv-cache.h +374 -0
- package/cpp/llama-kv-cells.h +491 -0
- package/cpp/llama-memory-hybrid.cpp +258 -0
- package/cpp/llama-memory-hybrid.h +137 -0
- package/cpp/llama-memory-recurrent.cpp +1146 -0
- package/cpp/llama-memory-recurrent.h +179 -0
- package/cpp/llama-memory.cpp +59 -0
- package/cpp/llama-memory.h +119 -0
- package/cpp/llama-mmap.cpp +600 -0
- package/cpp/llama-mmap.h +68 -0
- package/cpp/llama-model-loader.cpp +1164 -0
- package/cpp/llama-model-loader.h +170 -0
- package/cpp/llama-model-saver.cpp +282 -0
- package/cpp/llama-model-saver.h +37 -0
- package/cpp/llama-model.cpp +19042 -0
- package/cpp/llama-model.h +491 -0
- package/cpp/llama-sampling.cpp +2575 -0
- package/cpp/llama-sampling.h +32 -0
- package/cpp/llama-vocab.cpp +3792 -0
- package/cpp/llama-vocab.h +176 -0
- package/cpp/llama.cpp +358 -0
- package/cpp/llama.h +1373 -0
- package/cpp/log.cpp +427 -0
- package/cpp/log.h +103 -0
- package/cpp/minja/chat-template.hpp +550 -0
- package/cpp/minja/minja.hpp +3009 -0
- package/cpp/nlohmann/json.hpp +25526 -0
- package/cpp/nlohmann/json_fwd.hpp +187 -0
- package/cpp/regex-partial.cpp +204 -0
- package/cpp/regex-partial.h +56 -0
- package/cpp/rn-completion.cpp +681 -0
- package/cpp/rn-completion.h +116 -0
- package/cpp/rn-llama.cpp +345 -0
- package/cpp/rn-llama.h +149 -0
- package/cpp/rn-mtmd.hpp +602 -0
- package/cpp/rn-tts.cpp +591 -0
- package/cpp/rn-tts.h +59 -0
- package/cpp/sampling.cpp +579 -0
- package/cpp/sampling.h +107 -0
- package/cpp/tools/mtmd/clip-impl.h +473 -0
- package/cpp/tools/mtmd/clip.cpp +4322 -0
- package/cpp/tools/mtmd/clip.h +106 -0
- package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
- package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
- package/cpp/tools/mtmd/mtmd-audio.h +47 -0
- package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
- package/cpp/tools/mtmd/mtmd-helper.h +91 -0
- package/cpp/tools/mtmd/mtmd.cpp +1066 -0
- package/cpp/tools/mtmd/mtmd.h +298 -0
- package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
- package/cpp/unicode-data.cpp +7034 -0
- package/cpp/unicode-data.h +20 -0
- package/cpp/unicode.cpp +1061 -0
- package/cpp/unicode.h +68 -0
- package/package.json +2 -1
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
#pragma once
|
|
2
|
+
|
|
3
|
+
#include "llama.h"
|
|
4
|
+
|
|
5
|
+
#include "llama-impl.h"
|
|
6
|
+
#include "llama-arch.h"
|
|
7
|
+
#include "llama-mmap.h"
|
|
8
|
+
|
|
9
|
+
#include "ggml-cpp.h"
|
|
10
|
+
|
|
11
|
+
#include <cstddef>
|
|
12
|
+
#include <map>
|
|
13
|
+
#include <stdexcept>
|
|
14
|
+
#include <unordered_map>
|
|
15
|
+
|
|
16
|
+
using llama_buf_map = std::unordered_map<uint32_t, lm_ggml_backend_buffer_t>;
|
|
17
|
+
|
|
18
|
+
// GGUF file format versions this loader understands.
enum llama_fver {
    LM_GGUF_FILE_VERSION_V1 = 1,
    LM_GGUF_FILE_VERSION_V2 = 2,
    LM_GGUF_FILE_VERSION_V3 = 3,
};

// Human-readable name for a GGUF file version (used for logging/diagnostics).
const char * llama_file_version_name(llama_fver version);
|
|
25
|
+
|
|
26
|
+
// Loads model metadata and tensor data from one or more GGUF files
// (the main file plus optional split files).
struct llama_model_loader {
    // Holds information on a model weight
    struct llama_tensor_weight {
        uint16_t idx; // source file index
        size_t offs; // tensor data offset in the original file

        lm_ggml_tensor * tensor;

        // Resolves the tensor by name inside the GGUF context and computes its
        // absolute data offset. Throws std::runtime_error if the tensor is
        // missing or its data does not lie fully inside the file.
        llama_tensor_weight(const llama_file * file, uint16_t idx, const struct lm_gguf_context * lm_gguf_ctx, lm_ggml_tensor * tensor) : idx(idx), tensor(tensor) {
            const int tensor_idx = lm_gguf_find_tensor(lm_gguf_ctx, lm_ggml_get_name(tensor));
            if (tensor_idx < 0) {
                throw std::runtime_error(format("tensor '%s' not found in the model", lm_ggml_get_name(tensor)));
            }

            offs = lm_gguf_get_data_offset(lm_gguf_ctx) + lm_gguf_get_tensor_offset(lm_gguf_ctx, tensor_idx);
            // first comparison detects size_t wrap-around of offs + nbytes,
            // second detects data extending past the end of the file
            if (offs + lm_ggml_nbytes(tensor) < offs || offs + lm_ggml_nbytes(tensor) > file->size()) {
                throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", lm_ggml_get_name(tensor)));
            }
        }
    };

    // custom comparator to sort weights more nicely by layer
    // (numeric layer index extracted from "blk.<n>." first, then lexicographic)
    struct weight_name_comparer {
        bool operator()(const std::string & a, const std::string & b) const {
            int a_layer = -1;
            int b_layer = -1;
            // names without a "blk.<n>." prefix keep layer == -1 and sort first
            sscanf(a.c_str(), "blk.%d.", &a_layer);
            sscanf(b.c_str(), "blk.%d.", &b_layer);
            if (a_layer != b_layer) {
                return a_layer < b_layer;
            }
            return a < b;
        }
    };

    // bit flags accepted by create_tensor()
    static const int TENSOR_NOT_REQUIRED = 1 << 0;
    static const int TENSOR_DUPLICATED   = 1 << 1;
    static const int TENSOR_SKIP         = 1 << 2;

    int n_kv      = 0; // number of key/value metadata entries
    int n_tensors = 0; // total tensors across all files
    int n_created = 0; // tensors created so far via create_tensor()

    uint64_t n_elements = 0;
    size_t   n_bytes    = 0;

    bool use_mmap = false;
    bool check_tensors; // set by the constructor

    llama_files files;
    llama_ftype ftype;
    llama_fver  fver;

    llama_mmaps mappings;

    // tensor name -> weight info, ordered by layer (see weight_name_comparer)
    std::map<std::string, llama_tensor_weight, weight_name_comparer> weights_map;
    std::unordered_map<std::string, llama_model_kv_override> kv_overrides;
    const llama_model_tensor_buft_override * tensor_buft_overrides;

    lm_gguf_context_ptr meta;
    std::vector<lm_ggml_context_ptr> contexts;

    std::string arch_name;
    LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);

    size_t size_done = 0; // bytes loaded so far (progress tracking)
    size_t size_data = 0; // total bytes to load
    std::vector<std::pair<size_t, size_t>> mmaps_used;

    llama_model_loader(
        const std::string & fname,
        std::vector<std::string> & splits, // optional, only need if the split does not follow naming scheme
        bool use_mmap,
        bool check_tensors,
        const llama_model_kv_override * param_overrides_p,
        const llama_model_tensor_buft_override * param_tensor_buft_overrides_p);

    // read the length of an array-valued KV entry into an integral `result`;
    // returns false when the key is absent and `required` is false
    template<typename T>
    typename std::enable_if<std::is_integral<T>::value, bool>::type
    get_arr_n(const std::string & key, T & result, bool required = true);

    template<typename T>
    typename std::enable_if<std::is_integral<T>::value, bool>::type
    get_arr_n(enum llm_kv kid, T & result, bool required = true);

    template<typename T>
    bool get_arr(const std::string & key, std::vector<T> & result, bool required = true);

    template<typename T, size_t N_MAX>
    bool get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required = true);

    template<typename T>
    bool get_arr(enum llm_kv kid, T & result, bool required = true);

    template<typename T>
    bool get_key(const std::string & key, T & result, bool required = true);

    template<typename T>
    bool get_key(enum llm_kv kid, T & result, bool required = true);

    // accept either a scalar or an array of length n for the same key
    template<typename T, size_t N_MAX>
    bool get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required = true);

    template<typename T>
    bool get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required = true);

    std::string get_arch_name() const;

    enum llm_arch get_arch() const;

    // returns nullptr when the weight is unknown
    const llama_tensor_weight * get_weight(const char * name) const;

    // like get_weight() but throws when the weight is unknown
    const llama_tensor_weight & require_weight(const char * name) const;

    struct lm_ggml_tensor * get_tensor_meta(const char * name) const;

    struct lm_ggml_tensor * require_tensor_meta(const std::string & name) const;

    const struct lm_ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const;

    struct lm_ggml_tensor * create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0);

    struct lm_ggml_tensor * create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true);

    void done_getting_tensors() const;

    void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr);

    void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const;

    // for backwards compatibility, does not support ggml-backend
    void load_data_for(struct lm_ggml_tensor * cur) const;

    // Returns false if cancelled by progress_callback
    bool load_all_data(
        struct lm_ggml_context * ctx,
        llama_buf_map & bufs,
        llama_mlocks * lmlocks,
        llama_progress_callback progress_callback,
        void * progress_callback_user_data);

    std::string ftype_name() const;

    void print_info() const;
};
|
|
@@ -0,0 +1,282 @@
|
|
|
1
|
+
#include "llama-model-saver.h"
|
|
2
|
+
|
|
3
|
+
#include "gguf.h"
|
|
4
|
+
|
|
5
|
+
#include "llama.h"
|
|
6
|
+
#include "llama-hparams.h"
|
|
7
|
+
#include "llama-model.h"
|
|
8
|
+
#include "llama-vocab.h"
|
|
9
|
+
|
|
10
|
+
#include <string>
|
|
11
|
+
|
|
12
|
+
// Binds the saver to `model` and creates an empty GGUF context to fill.
// Initializer order follows member declaration order (lm_gguf_ctx, model, llm_kv).
llama_model_saver::llama_model_saver(const struct llama_model & model)
    : lm_gguf_ctx(lm_gguf_init_empty()),
      model(model),
      llm_kv(model.arch) {
}
|
|
15
|
+
|
|
16
|
+
// Releases the GGUF context created in the constructor.
llama_model_saver::~llama_model_saver() {
    lm_gguf_free(lm_gguf_ctx);
}
|
|
19
|
+
|
|
20
|
+
// Stores a uint32 scalar under the architecture-specific name of `key`.
void llama_model_saver::add_kv(const enum llm_kv key, const uint32_t value) {
    const auto name = llm_kv(key);
    lm_gguf_set_val_u32(lm_gguf_ctx, name.c_str(), value);
}
|
|
23
|
+
|
|
24
|
+
// Stores an int32 scalar under the architecture-specific name of `key`.
void llama_model_saver::add_kv(const enum llm_kv key, const int32_t value) {
    const auto name = llm_kv(key);
    lm_gguf_set_val_i32(lm_gguf_ctx, name.c_str(), value);
}
|
|
27
|
+
|
|
28
|
+
// Stores a float32 scalar under the architecture-specific name of `key`.
void llama_model_saver::add_kv(const enum llm_kv key, const float value) {
    const auto name = llm_kv(key);
    lm_gguf_set_val_f32(lm_gguf_ctx, name.c_str(), value);
}
|
|
31
|
+
|
|
32
|
+
// Stores a boolean scalar under the architecture-specific name of `key`.
void llama_model_saver::add_kv(const enum llm_kv key, const bool value) {
    const auto name = llm_kv(key);
    lm_gguf_set_val_bool(lm_gguf_ctx, name.c_str(), value);
}
|
|
35
|
+
|
|
36
|
+
// Stores a string scalar under the architecture-specific name of `key`.
void llama_model_saver::add_kv(const enum llm_kv key, const char * value) {
    const auto name = llm_kv(key);
    lm_gguf_set_val_str(lm_gguf_ctx, name.c_str(), value);
}
|
|
39
|
+
|
|
40
|
+
// Deliberate abort stub: exists only so the generic container overload
// below compiles when Container::value_type is char (i.e. std::string);
// it must never actually be invoked.
[[noreturn]]
void llama_model_saver::add_kv(const enum llm_kv key, const char value) {
    LM_GGML_UNUSED(key);
    LM_GGML_UNUSED(value);
    LM_GGML_ABORT("fatal error"); // this should never be called, only needed to make the template below compile
}
|
|
46
|
+
|
|
47
|
+
template <typename Container>
|
|
48
|
+
void llama_model_saver::add_kv(const enum llm_kv key, const Container & value, const bool per_layer) {
|
|
49
|
+
const size_t n_values = per_layer ? size_t(model.hparams.n_layer) : value.size();
|
|
50
|
+
LM_GGML_ASSERT(n_values <= value.size());
|
|
51
|
+
|
|
52
|
+
if (n_values == 0) {
|
|
53
|
+
return;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
if (per_layer) {
|
|
57
|
+
bool all_values_the_same = true;
|
|
58
|
+
for (size_t i = 1; i < n_values; ++i) {
|
|
59
|
+
if (value[i] != value[0]) {
|
|
60
|
+
all_values_the_same = false;
|
|
61
|
+
break;
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
if (all_values_the_same) {
|
|
65
|
+
add_kv(key, value[0]);
|
|
66
|
+
return;
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
if (std::is_same<typename Container::value_type, uint8_t>::value) {
|
|
71
|
+
lm_gguf_set_arr_data(lm_gguf_ctx, llm_kv(key).c_str(), LM_GGUF_TYPE_UINT8, value.data(), n_values);
|
|
72
|
+
} else if (std::is_same<typename Container::value_type, int8_t>::value) {
|
|
73
|
+
lm_gguf_set_arr_data(lm_gguf_ctx, llm_kv(key).c_str(), LM_GGUF_TYPE_INT8, value.data(), n_values);
|
|
74
|
+
} else if (std::is_same<typename Container::value_type, uint32_t>::value) {
|
|
75
|
+
lm_gguf_set_arr_data(lm_gguf_ctx, llm_kv(key).c_str(), LM_GGUF_TYPE_UINT32, value.data(), n_values);
|
|
76
|
+
} else if (std::is_same<typename Container::value_type, int32_t>::value) {
|
|
77
|
+
lm_gguf_set_arr_data(lm_gguf_ctx, llm_kv(key).c_str(), LM_GGUF_TYPE_INT32, value.data(), n_values);
|
|
78
|
+
} else if (std::is_same<typename Container::value_type, float>::value) {
|
|
79
|
+
lm_gguf_set_arr_data(lm_gguf_ctx, llm_kv(key).c_str(), LM_GGUF_TYPE_FLOAT32, value.data(), n_values);
|
|
80
|
+
} else if (std::is_same<Container, std::string>::value) {
|
|
81
|
+
lm_gguf_set_val_str(lm_gguf_ctx, llm_kv(key).c_str(), reinterpret_cast<const char *>(value.data()));
|
|
82
|
+
} else {
|
|
83
|
+
LM_GGML_ABORT("fatal error");
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
// Stores a list of strings as a GGUF string-array KV entry.
// Builds a temporary view of C-string pointers; the pointed-to storage is
// owned by `value` and stays alive for the duration of the call.
void llama_model_saver::add_kv(const enum llm_kv key, const std::vector<std::string> & value) {
    std::vector<const char *> c_strs;
    c_strs.reserve(value.size());
    for (const std::string & s : value) {
        c_strs.push_back(s.c_str());
    }
    lm_gguf_set_arr_str(lm_gguf_ctx, llm_kv(key).c_str(), c_strs.data(), c_strs.size());
}
|
|
94
|
+
|
|
95
|
+
// Registers a tensor for serialization; silently ignores null tensors and
// tensors already present in the GGUF context.
void llama_model_saver::add_tensor(const struct lm_ggml_tensor * tensor) {
    if (!tensor) {
        return;
    }
    if (lm_gguf_find_tensor(lm_gguf_ctx, tensor->name) >= 0) {
        // duplicates are only expected for rope_freqs.weight (shared across layers)
        LM_GGML_ASSERT(std::string(tensor->name) == "rope_freqs.weight"); // FIXME
        return;
    }
    lm_gguf_add_tensor(lm_gguf_ctx, tensor);
}
|
|
105
|
+
|
|
106
|
+
// Copies the model's hyperparameters and vocabulary into GGUF KV entries.
// Commented-out add_kv lines mark metadata keys that are not yet supported.
void llama_model_saver::add_kv_from_model() {
    const llama_hparams & hparams = model.hparams;
    const llama_vocab & vocab = model.vocab;

    const int32_t n_vocab = vocab.n_tokens();
    std::vector<std::string> tokens(n_vocab);
    std::vector<float> scores(n_vocab);
    std::vector<int32_t> token_types(n_vocab);

    // flatten per-token data into parallel arrays, mapping attribute flags
    // back to the token-type enum stored in GGUF
    for (int32_t id = 0; id < n_vocab; ++id) {
        const llama_vocab::token_data & token_data = vocab.get_token_data(id);

        tokens[id] = token_data.text;
        scores[id] = token_data.score;

        switch(token_data.attr) {
            case LLAMA_TOKEN_ATTR_UNKNOWN:      token_types[id] = LLAMA_TOKEN_TYPE_UNKNOWN;      break;
            case LLAMA_TOKEN_ATTR_UNUSED:       token_types[id] = LLAMA_TOKEN_TYPE_UNUSED;       break;
            case LLAMA_TOKEN_ATTR_NORMAL:       token_types[id] = LLAMA_TOKEN_TYPE_NORMAL;       break;
            case LLAMA_TOKEN_ATTR_CONTROL:      token_types[id] = LLAMA_TOKEN_TYPE_CONTROL;      break;
            case LLAMA_TOKEN_ATTR_USER_DEFINED: token_types[id] = LLAMA_TOKEN_TYPE_USER_DEFINED; break;
            case LLAMA_TOKEN_ATTR_BYTE:         token_types[id] = LLAMA_TOKEN_TYPE_BYTE;         break;
            case LLAMA_TOKEN_ATTR_UNDEFINED:
            default:                            token_types[id] = LLAMA_TOKEN_TYPE_UNDEFINED;    break;
        }
    }

    // general metadata
    // add_kv(LLM_KV_GENERAL_TYPE, ???);
    add_kv(LLM_KV_GENERAL_ARCHITECTURE, model.arch_name());
    // add_kv(LLM_KV_GENERAL_QUANTIZATION_VERSION, ???);
    // add_kv(LLM_KV_GENERAL_ALIGNMENT, ???);
    add_kv(LLM_KV_GENERAL_NAME, model.name);
    // add_kv(LLM_KV_GENERAL_AUTHOR, ???);
    // add_kv(LLM_KV_GENERAL_VERSION, ???);
    // add_kv(LLM_KV_GENERAL_URL, ???);
    // add_kv(LLM_KV_GENERAL_DESCRIPTION, ???);
    // add_kv(LLM_KV_GENERAL_LICENSE, ???);
    // add_kv(LLM_KV_GENERAL_SOURCE_URL, ???);
    // add_kv(LLM_KV_GENERAL_SOURCE_HF_REPO, ???);

    // core hyperparameters (per-layer arrays are passed with per_layer=true)
    add_kv(LLM_KV_VOCAB_SIZE, vocab.n_tokens());
    add_kv(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
    add_kv(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
    add_kv(LLM_KV_BLOCK_COUNT, hparams.n_layer);
    add_kv(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
    add_kv(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, true);
    add_kv(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
    add_kv(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
    add_kv(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
    // add_kv(LLM_KV_TENSOR_DATA_LAYOUT, ???);
    add_kv(LLM_KV_EXPERT_COUNT, hparams.n_expert);
    add_kv(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used);
    add_kv(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
    add_kv(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
    add_kv(LLM_KV_POOLING_TYPE, uint32_t(hparams.pooling_type));
    add_kv(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
    add_kv(LLM_KV_DECODER_START_TOKEN_ID, hparams.dec_start_token_id);
    add_kv(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping);
    add_kv(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping);
    add_kv(LLM_KV_SWIN_NORM, hparams.swin_norm);
    add_kv(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers);
    add_kv(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
    add_kv(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
    add_kv(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
    add_kv(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);

    // attention parameters
    add_kv(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, true);
    add_kv(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, true);
    add_kv(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
    add_kv(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
    add_kv(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k);
    add_kv(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v);
    add_kv(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
    add_kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
    add_kv(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
    add_kv(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
    add_kv(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
    add_kv(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
    add_kv(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
    add_kv(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);

    // RoPE parameters; a training freq scale of 1.0 is stored as factor 0.0
    const float rope_scaling_factor = hparams.rope_freq_scale_train == 1.0f ? 0.0f : 1.0f/hparams.rope_freq_scale_train;

    add_kv(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot);
    add_kv(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train);
    // add_kv(LLM_KV_ROPE_SCALE_LINEAR, rope_scaling_factor); // old name
    add_kv(LLM_KV_ROPE_SCALING_TYPE, llama_rope_scaling_type_name(hparams.rope_scaling_type_train));
    add_kv(LLM_KV_ROPE_SCALING_FACTOR, rope_scaling_factor);
    add_kv(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor);
    add_kv(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn);
    add_kv(LLM_KV_ROPE_SCALING_FINETUNED, hparams.rope_finetuned);
    add_kv(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);

    // TODO: implement split file support
    // add_kv(LLM_KV_SPLIT_NO, ???);
    // add_kv(LLM_KV_SPLIT_COUNT, ???);
    // add_kv(LLM_KV_SPLIT_TENSORS_COUNT, ???);

    // state-space model (SSM) parameters
    add_kv(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
    add_kv(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
    add_kv(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
    add_kv(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
    add_kv(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms);

    add_kv(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);

    // tokenizer / vocabulary
    add_kv(LLM_KV_TOKENIZER_MODEL, vocab.get_tokenizer_model());
    add_kv(LLM_KV_TOKENIZER_PRE, vocab.get_tokenizer_pre());
    add_kv(LLM_KV_TOKENIZER_LIST, tokens);
    add_kv(LLM_KV_TOKENIZER_TOKEN_TYPE, token_types);
    add_kv(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, vocab.n_token_types());
    add_kv(LLM_KV_TOKENIZER_SCORES, scores);
    add_kv(LLM_KV_TOKENIZER_MERGES, vocab.get_bpe_merges());
    // FIXME llama_token is type i32 but when reading in a GGUF file u32 is expected, not an issue for writing though
    add_kv(LLM_KV_TOKENIZER_BOS_ID, uint32_t(vocab.token_bos()));
    add_kv(LLM_KV_TOKENIZER_EOS_ID, uint32_t(vocab.token_eos()));
    add_kv(LLM_KV_TOKENIZER_EOT_ID, uint32_t(vocab.token_eot()));
    add_kv(LLM_KV_TOKENIZER_EOM_ID, uint32_t(vocab.token_eom()));
    add_kv(LLM_KV_TOKENIZER_UNK_ID, uint32_t(vocab.token_unk()));
    add_kv(LLM_KV_TOKENIZER_SEP_ID, uint32_t(vocab.token_sep()));
    add_kv(LLM_KV_TOKENIZER_PAD_ID, uint32_t(vocab.token_pad()));
    // add_kv(LLM_KV_TOKENIZER_CLS_ID, uint32_t(vocab.token_bos())); // deprecated
    // add_kv(LLM_KV_TOKENIZER_MASK_ID, ???);
    add_kv(LLM_KV_TOKENIZER_ADD_BOS, vocab.get_add_bos());
    add_kv(LLM_KV_TOKENIZER_ADD_EOS, vocab.get_add_eos());
    add_kv(LLM_KV_TOKENIZER_ADD_SEP, vocab.get_add_sep());
    add_kv(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.get_add_space_prefix());
    add_kv(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.get_remove_extra_whitespaces());
    add_kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, vocab.get_precompiled_charsmap());
    // add_kv(LLM_KV_TOKENIZER_HF_JSON, ???);
    // add_kv(LLM_KV_TOKENIZER_RWKV, ???);
    add_kv(LLM_KV_TOKENIZER_FIM_PRE_ID, uint32_t(vocab.token_fim_pre()));
    add_kv(LLM_KV_TOKENIZER_FIM_SUF_ID, uint32_t(vocab.token_fim_suf()));
    add_kv(LLM_KV_TOKENIZER_FIM_MID_ID, uint32_t(vocab.token_fim_mid()));
    add_kv(LLM_KV_TOKENIZER_FIM_PAD_ID, uint32_t(vocab.token_fim_pad()));
    add_kv(LLM_KV_TOKENIZER_FIM_REP_ID, uint32_t(vocab.token_fim_rep()));
    add_kv(LLM_KV_TOKENIZER_FIM_SEP_ID, uint32_t(vocab.token_fim_sep()));

    // TODO: implement LoRA support
    // add_kv(LLM_KV_ADAPTER_TYPE, ???);
    // add_kv(LLM_KV_ADAPTER_LORA_ALPHA, ???);

    // deprecated
    // add_kv(LLM_KV_TOKENIZER_PREFIX_ID, ???);
    // add_kv(LLM_KV_TOKENIZER_SUFFIX_ID, ???);
    // add_kv(LLM_KV_TOKENIZER_MIDDLE_ID, ???);
}
|
|
253
|
+
|
|
254
|
+
// Registers all model tensors (non-layer tensors first, then every tensor
// pointer inside each layer struct) with the GGUF context.
void llama_model_saver::add_tensors_from_model() {
    if (std::string(model.output->name) != std::string(model.tok_embd->name)) {
        add_tensor(model.tok_embd); // some models use the same tensor for tok_embd and output
    }
    add_tensor(model.type_embd);
    add_tensor(model.pos_embd);
    add_tensor(model.tok_norm);
    add_tensor(model.tok_norm_b);
    add_tensor(model.output_norm);
    add_tensor(model.output_norm_b);
    add_tensor(model.output);
    add_tensor(model.output_b);
    add_tensor(model.output_norm_enc);
    add_tensor(model.cls);
    add_tensor(model.cls_b);
    add_tensor(model.cls_out);
    add_tensor(model.cls_out_b);

    for (const struct llama_layer & layer : model.layers) {
        // NOTE(review): this treats llama_layer as a flat array of
        // lm_ggml_tensor pointers — it assumes the struct contains ONLY such
        // pointer members, with no padding. Any non-pointer field added to
        // llama_layer would break this; verify against llama-model.h.
        for (size_t i = 0; i < sizeof(layer)/sizeof(struct lm_ggml_tensor *); ++i) {
            add_tensor(reinterpret_cast<const struct lm_ggml_tensor * const *>(&layer)[i]);
        }
    }
}
|
|
278
|
+
|
|
279
|
+
// Writes the accumulated GGUF context (metadata and tensor data) to disk.
void llama_model_saver::save(const std::string & path_model) {
    const bool only_meta = false; // write tensor data too, not just metadata
    lm_gguf_write_to_file(lm_gguf_ctx, path_model.c_str(), only_meta);
}
|
|
282
|
+
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
#pragma once
|
|
2
|
+
|
|
3
|
+
#include "llama.h"
|
|
4
|
+
#include "llama-arch.h"
|
|
5
|
+
|
|
6
|
+
#include <vector>
|
|
7
|
+
|
|
8
|
+
// Serializes an in-memory llama_model back into a GGUF file.
// Typical usage: construct, add_kv_from_model(), add_tensors_from_model(), save().
struct llama_model_saver {
    struct lm_gguf_context * lm_gguf_ctx = nullptr; // owned: created in ctor, freed in dtor
    const struct llama_model & model;               // model being saved (not owned)
    const struct LLM_KV llm_kv;                     // maps llm_kv enum keys to arch-specific names

    llama_model_saver(const struct llama_model & model);
    ~llama_model_saver();

    // scalar KV writers
    void add_kv(enum llm_kv key, uint32_t value);
    void add_kv(enum llm_kv key, int32_t value);
    void add_kv(enum llm_kv key, float value);
    void add_kv(enum llm_kv key, bool value);
    void add_kv(enum llm_kv key, const char * value);

    [[noreturn]]
    void add_kv(enum llm_kv key, char value); // needed to make the template below compile

    // array KV writer; with per_layer=true only n_layer entries are written,
    // collapsed to a scalar when they are all equal
    template <typename Container>
    void add_kv(enum llm_kv key, const Container & value, bool per_layer = false);

    void add_kv(enum llm_kv key, const std::vector<std::string> & value);

    // registers a tensor for serialization; null and duplicate tensors are ignored
    void add_tensor(const struct lm_ggml_tensor * tensor);

    void add_kv_from_model();

    void add_tensors_from_model();

    void save(const std::string & path_model);
};
|