cui-llama.rn 1.3.4 → 1.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/CMakeLists.txt +14 -8
- package/android/src/main/jni.cpp +38 -37
- package/cpp/common.cpp +50 -30
- package/cpp/common.h +32 -13
- package/cpp/ggml-alloc.c +0 -1
- package/cpp/ggml-backend-reg.cpp +79 -49
- package/cpp/ggml-backend.cpp +5 -2
- package/cpp/ggml-cpp.h +1 -0
- package/cpp/ggml-cpu-aarch64.cpp +57 -72
- package/cpp/ggml-cpu-quants.c +5 -1
- package/cpp/ggml-cpu.c +6 -6
- package/cpp/ggml-cpu.cpp +9 -0
- package/cpp/ggml-impl.h +11 -0
- package/cpp/ggml-metal.m +2 -2
- package/cpp/ggml.c +129 -1388
- package/cpp/ggml.h +29 -152
- package/cpp/gguf.cpp +1325 -0
- package/cpp/gguf.h +202 -0
- package/cpp/llama-adapter.cpp +346 -0
- package/cpp/llama-adapter.h +73 -0
- package/cpp/llama-arch.cpp +1434 -0
- package/cpp/llama-arch.h +395 -0
- package/cpp/llama-batch.cpp +368 -0
- package/cpp/llama-batch.h +88 -0
- package/cpp/llama-chat.cpp +567 -0
- package/cpp/llama-chat.h +51 -0
- package/cpp/llama-context.cpp +1771 -0
- package/cpp/llama-context.h +128 -0
- package/cpp/llama-cparams.cpp +1 -0
- package/cpp/llama-cparams.h +37 -0
- package/cpp/llama-cpp.h +30 -0
- package/cpp/llama-grammar.cpp +16 -15
- package/cpp/llama-grammar.h +5 -6
- package/cpp/llama-hparams.cpp +71 -0
- package/cpp/llama-hparams.h +140 -0
- package/cpp/llama-impl.cpp +167 -0
- package/cpp/llama-impl.h +16 -136
- package/cpp/llama-kv-cache.cpp +718 -0
- package/cpp/llama-kv-cache.h +218 -0
- package/cpp/llama-mmap.cpp +589 -0
- package/cpp/llama-mmap.h +67 -0
- package/cpp/llama-model-loader.cpp +1011 -0
- package/cpp/llama-model-loader.h +158 -0
- package/cpp/llama-model.cpp +2202 -0
- package/cpp/llama-model.h +391 -0
- package/cpp/llama-sampling.cpp +117 -4
- package/cpp/llama-vocab.cpp +26 -29
- package/cpp/llama-vocab.h +14 -2
- package/cpp/llama.cpp +8839 -19131
- package/cpp/llama.cpp.rej +23 -0
- package/cpp/llama.h +31 -9
- package/cpp/rn-llama.hpp +39 -37
- package/cpp/sgemm.cpp +1091 -378
- package/cpp/sgemm.h +2 -2
- package/cpp/unicode.cpp +6 -0
- package/package.json +1 -1
package/cpp/llama-model-loader.cpp
@@ -0,0 +1,1011 @@
+#include "llama-model-loader.h"
+
+#include "ggml.h"
+
+#include <array>
+#include <cinttypes>
+#include <cstring>
+#include <future>
+
+const char * llama_file_version_name(llama_fver version) {
+    switch (version) {
+        case LM_GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
+        case LM_GGUF_FILE_VERSION_V2: return "GGUF V2";
+        case LM_GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
+    }
+
+    return "unknown";
+}
+
+namespace GGUFMeta {
+    template <typename T, lm_gguf_type gt_, T (*gfun)(const lm_gguf_context *, const int64_t)>
+    struct GKV_Base_Type {
+        static constexpr lm_gguf_type gt = gt_;
+
+        static T getter(const lm_gguf_context * ctx, const int kid) {
+            return gfun(ctx, kid);
+        }
+    };
+
+    template<typename T> struct GKV_Base;
+
+    template<> struct GKV_Base<bool        >: GKV_Base_Type<bool,         LM_GGUF_TYPE_BOOL,    lm_gguf_get_val_bool> {};
+    template<> struct GKV_Base<uint8_t     >: GKV_Base_Type<uint8_t,      LM_GGUF_TYPE_UINT8,   lm_gguf_get_val_u8  > {};
+    template<> struct GKV_Base<uint16_t    >: GKV_Base_Type<uint16_t,     LM_GGUF_TYPE_UINT16,  lm_gguf_get_val_u16 > {};
+    template<> struct GKV_Base<uint32_t    >: GKV_Base_Type<uint32_t,     LM_GGUF_TYPE_UINT32,  lm_gguf_get_val_u32 > {};
+    template<> struct GKV_Base<uint64_t    >: GKV_Base_Type<uint64_t,     LM_GGUF_TYPE_UINT64,  lm_gguf_get_val_u64 > {};
+    template<> struct GKV_Base<int8_t      >: GKV_Base_Type<int8_t,       LM_GGUF_TYPE_INT8,    lm_gguf_get_val_i8  > {};
+    template<> struct GKV_Base<int16_t     >: GKV_Base_Type<int16_t,      LM_GGUF_TYPE_INT16,   lm_gguf_get_val_i16 > {};
+    template<> struct GKV_Base<int32_t     >: GKV_Base_Type<int32_t,      LM_GGUF_TYPE_INT32,   lm_gguf_get_val_i32 > {};
+    template<> struct GKV_Base<int64_t     >: GKV_Base_Type<int64_t,      LM_GGUF_TYPE_INT64,   lm_gguf_get_val_i64 > {};
+    template<> struct GKV_Base<float       >: GKV_Base_Type<float,        LM_GGUF_TYPE_FLOAT32, lm_gguf_get_val_f32 > {};
+    template<> struct GKV_Base<double      >: GKV_Base_Type<double,       LM_GGUF_TYPE_FLOAT64, lm_gguf_get_val_f64 > {};
+    template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, LM_GGUF_TYPE_STRING,  lm_gguf_get_val_str > {};
+
+    template<> struct GKV_Base<std::string> {
+        static constexpr lm_gguf_type gt = LM_GGUF_TYPE_STRING;
+
+        static std::string getter(const lm_gguf_context * ctx, const int kid) {
+            return lm_gguf_get_val_str(ctx, kid);
+        }
+    };
+
+    struct ArrayInfo {
+        const lm_gguf_type gt;
+        const size_t length;
+        const void * data;
+    };
+
+    template<> struct GKV_Base<ArrayInfo> {
+        public:
+        static constexpr lm_gguf_type gt = LM_GGUF_TYPE_ARRAY;
+        static ArrayInfo getter(const lm_gguf_context *ctx, const int k) {
+            const enum lm_gguf_type arr_type = lm_gguf_get_arr_type(ctx, k);
+            return ArrayInfo {
+                arr_type,
+                size_t(lm_gguf_get_arr_n(ctx, k)),
+                arr_type == LM_GGUF_TYPE_STRING ? nullptr : lm_gguf_get_arr_data(ctx, k),
+            };
+        }
+    };
+
+    template<typename T>
+    class GKV : public GKV_Base<T> {
+        GKV() = delete;
+
+        public:
+        static T get_kv(const lm_gguf_context * ctx, const int k) {
+            const enum lm_gguf_type kt = lm_gguf_get_kv_type(ctx, k);
+
+            if (kt != GKV::gt) {
+                throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
+                    lm_gguf_get_key(ctx, k), lm_gguf_type_name(kt), lm_gguf_type_name(GKV::gt)));
+            }
+            return GKV::getter(ctx, k);
+        }
+
+        static const char * override_type_to_str(const llama_model_kv_override_type ty) {
+            switch (ty) {
+                case LLAMA_KV_OVERRIDE_TYPE_BOOL:  return "bool";
+                case LLAMA_KV_OVERRIDE_TYPE_INT:   return "int";
+                case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
+                case LLAMA_KV_OVERRIDE_TYPE_STR:   return "str";
+            }
+            return "unknown";
+        }
+
+        static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) {
+            if (!ovrd) { return false; }
+            if (ovrd->tag == expected_type) {
+                LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
+                    __func__, override_type_to_str(ovrd->tag), ovrd->key);
+                switch (ovrd->tag) {
+                    case LLAMA_KV_OVERRIDE_TYPE_BOOL:  {
+                        LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
+                    } break;
+                    case LLAMA_KV_OVERRIDE_TYPE_INT:   {
+                        LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
+                    } break;
+                    case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
+                        LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
+                    } break;
+                    case LLAMA_KV_OVERRIDE_TYPE_STR:   {
+                        LLAMA_LOG_INFO("%s\n", ovrd->val_str);
+                    } break;
+                    default:
+                        // Shouldn't be possible to end up here, but just in case...
+                        throw std::runtime_error(
+                            format("Unsupported attempt to override %s type for metadata key %s\n",
+                                override_type_to_str(ovrd->tag), ovrd->key));
+                }
+                return true;
+            }
+            LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
+                __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag));
+            return false;
+        }
+
+        template<typename OT>
+        static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
+        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
+            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
+                target = ovrd->val_bool;
+                return true;
+            }
+            return false;
+        }
+
+        template<typename OT>
+        static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
+        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
+            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
+                target = ovrd->val_i64;
+                return true;
+            }
+            return false;
+        }
+
+        template<typename OT>
+        static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
+        try_override(T & target, const struct llama_model_kv_override * ovrd) {
+            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
+                target = ovrd->val_f64;
+                return true;
+            }
+            return false;
+        }
+
+        template<typename OT>
+        static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
+        try_override(T & target, const struct llama_model_kv_override * ovrd) {
+            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
+                target = ovrd->val_str;
+                return true;
+            }
+            return false;
+        }
+
+        static bool set(const lm_gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
+            if (try_override<T>(target, ovrd)) {
+                return true;
+            }
+            if (k < 0) { return false; }
+            target = get_kv(ctx, k);
+            return true;
+        }
+
+        static bool set(const lm_gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
+            return set(ctx, lm_gguf_find_key(ctx, key), target, ovrd);
+        }
+
+        static bool set(const lm_gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
+            return set(ctx, key.c_str(), target, ovrd);
+        }
+    };
+}
+
+template<typename T>
+typename std::enable_if<std::is_integral<T>::value, bool>::type
+llama_model_loader::get_arr_n(const std::string & key, T & result, bool required) {
+    const int kid = lm_gguf_find_key(meta.get(), key.c_str());
+
+    if (kid < 0) {
+        if (required) {
+            throw std::runtime_error(format("key not found in model: %s", key.c_str()));
+        }
+        return false;
+    }
+
+    struct GGUFMeta::ArrayInfo arr_info =
+        GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+
+
+    result = arr_info.length;
+    return true;
+}
+
+template<typename T>
+typename std::enable_if<std::is_integral<T>::value, bool>::type
+llama_model_loader::get_arr_n(enum llm_kv kid, T & result, bool required) {
+    return get_arr_n(llm_kv(kid), result, required);
+}
+
+template bool llama_model_loader::get_arr_n(enum llm_kv kid, uint32_t & result, bool required);
+
+template<typename T>
+bool llama_model_loader::get_arr(const std::string & key, std::vector<T> & result, bool required) {
+    const int kid = lm_gguf_find_key(meta.get(), key.c_str());
+
+    if (kid < 0 || lm_gguf_get_kv_type(meta.get(), kid) != LM_GGUF_TYPE_ARRAY) {
+        if (required) {
+            throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
+        }
+        return false;
+    }
+
+    struct GGUFMeta::ArrayInfo arr_info =
+        GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+
+    switch (arr_info.gt) {
+        case LM_GGUF_TYPE_FLOAT32: LM_GGML_ASSERT((std::is_same<T, float>::value)); break;
+        case LM_GGUF_TYPE_INT32:   LM_GGML_ASSERT(
+                                        (std::is_same<T,  int32_t>::value) ||
+                                        (std::is_same<T, uint32_t>::value)); break;
+        default:
+            throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
+    }
+
+    result.resize(arr_info.length);
+    result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
+
+    return true;
+}
+
+template<typename T, size_t N_MAX>
+bool llama_model_loader::get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required) {
+    const int kid = lm_gguf_find_key(meta.get(), key.c_str());
+
+    if (kid < 0 || lm_gguf_get_kv_type(meta.get(), kid) != LM_GGUF_TYPE_ARRAY) {
+        if (required) {
+            throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
+        }
+        return false;
+    }
+
+    struct GGUFMeta::ArrayInfo arr_info =
+        GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+
+    switch (arr_info.gt) {
+        case LM_GGUF_TYPE_FLOAT32: LM_GGML_ASSERT((std::is_same<T, float>::value)); break;
+        case LM_GGUF_TYPE_INT32:   LM_GGML_ASSERT(
+                                        (std::is_same<T,  int32_t>::value) ||
+                                        (std::is_same<T, uint32_t>::value)); break;
+        default:
+            throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
+    }
+
+    if (arr_info.length > N_MAX) {
+        throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX));
+    }
+
+    std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
+
+    return true;
+}
+
+template<typename T>
+bool llama_model_loader::get_arr(enum llm_kv kid, T & result, bool required) {
+    return get_arr(llm_kv(kid), result, required);
+}
+
+template<typename T>
+bool llama_model_loader::get_key(const std::string & key, T & result, bool required) {
+    auto it = kv_overrides.find(key);
+
+    const struct llama_model_kv_override * override =
+        it != kv_overrides.end() ? &it->second : nullptr;
+
+    const bool found = GGUFMeta::GKV<T>::set(meta.get(), key, result, override);
+
+    if (required && !found) {
+        throw std::runtime_error(format("key not found in model: %s", key.c_str()));
+    }
+
+    return found;
+}
+
+template<typename T>
+bool llama_model_loader::get_key(enum llm_kv kid, T & result, bool required) {
+    return get_key(llm_kv(kid), result, required);
+}
+
+template bool llama_model_loader::get_key<bool>       (enum llm_kv kid, bool & result,        bool required);
+template bool llama_model_loader::get_key<float>      (enum llm_kv kid, float & result,       bool required);
+template bool llama_model_loader::get_key<uint32_t>   (enum llm_kv kid, uint32_t & result,    bool required);
+template bool llama_model_loader::get_key<std::string>(enum llm_kv kid, std::string & result, bool required);
+
+template<>
+bool llama_model_loader::get_key(enum llm_kv kid, enum llama_pooling_type & result, bool required) {
+    uint32_t tmp;
+    const bool found = get_key(kid, tmp, required);
+    if (found) {
+        result = (enum llama_pooling_type) tmp;
+    } else {
+        result = LLAMA_POOLING_TYPE_UNSPECIFIED;
+    }
+    return found;
+}
+
+// get array of n <= N_MAX elements, or a single element repeated n times
+template<typename T, size_t N_MAX>
+bool llama_model_loader::get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required) {
+    const int kid = lm_gguf_find_key(meta.get(), key.c_str());
+
+    if (kid < 0) {
+        if (required) {
+            throw std::runtime_error(format("key not found in model: %s", key.c_str()));
+        }
+        return false;
+    }
+
+    if (n > N_MAX) {
+        throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str()));
+    }
+
+    if (lm_gguf_get_kv_type(meta.get(), kid) == LM_GGUF_TYPE_ARRAY) {
+        struct GGUFMeta::ArrayInfo arr_info =
+            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+
+        if (n != arr_info.length) {
+            throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", key.c_str(), n, (uint32_t) arr_info.length));
+        }
+
+        return get_arr(key, result, required);
+    }
+
+    T value;
+
+    bool ok = get_key(key, value, required);
+    if (!ok) {
+        return false;
+    }
+
+    for (uint32_t i = 0; i < n; i++) {
+        result[i] = value;
+    }
+
+    return true;
+}
+
+template<typename T>
+bool llama_model_loader::get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required) {
+    return get_key_or_arr(llm_kv(kid), result, n, required);
+}
+
+// TODO: this is not very clever - figure out something better
+template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required);
+template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required);
+
+llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
+    int trace = 0;
+    if (getenv("LLAMA_TRACE")) {
+        trace = atoi(getenv("LLAMA_TRACE"));
+    }
+
+    if (param_overrides_p != nullptr) {
+        for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
+            kv_overrides.insert({std::string(p->key), *p});
+        }
+    }
+
+    struct lm_ggml_context * ctx = NULL;
+    struct lm_gguf_init_params params = {
+        /*.no_alloc = */ true,
+        /*.ctx      = */ &ctx,
+    };
+
+    meta.reset(lm_gguf_init_from_file(fname.c_str(), params));
+    if (!meta) {
+        throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
+    }
+
+    get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
+    llm_kv = LLM_KV(llm_arch_from_string(arch_name));
+
+    files.emplace_back(new llama_file(fname.c_str(), "rb"));
+    contexts.emplace_back(ctx);
+
+    // Save tensors data offset of the main file.
+    // For subsidiary files, `meta` tensor data offset must not be used,
+    // so we build a unified tensors index for weights.
+    for (lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur; cur = lm_ggml_get_next_tensor(ctx, cur)) {
+        std::string tensor_name = std::string(cur->name);
+        // make sure there is no duplicated tensor names
+        if (weights_map.find(tensor_name) != weights_map.end()) {
+            throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", lm_ggml_get_name(cur)));
+        }
+        n_elements += lm_ggml_nelements(cur);
+        n_bytes    += lm_ggml_nbytes(cur);
+        weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), 0, meta.get(), cur));
+    }
+    uint16_t n_split = 0;
+    get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
+
+    // Load additional GGML contexts
+    if (n_split > 1) {
+        uint16_t idx = 0;
+        get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
+        if (idx != 0) {
+            throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
+        }
+
+        std::vector<char> split_prefix(llama_path_max(), 0);
+        if (!llama_split_prefix(split_prefix.data(), split_prefix.size(), fname.c_str(), idx, n_split)) {
+            throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
+        }
+
+        if (trace > 0) {
+            LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
+        }
+
+        std::vector<char> split_path(llama_path_max(), 0);
+        for (idx = 1; idx < n_split; idx++) {
+            llama_split_path(split_path.data(), split_path.size(), split_prefix.data(), idx, n_split);
+
+            struct lm_gguf_init_params split_params = {
+                /*.no_alloc = */ true,
+                /*.ctx      = */ &ctx,
+            };
+            lm_gguf_context_ptr ctx_gguf { lm_gguf_init_from_file(split_path.data(), split_params) };
+            if (!ctx_gguf) {
+                throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path.data()));
+            }
+
+            files.emplace_back(new llama_file(split_path.data(), "rb"));
+            contexts.emplace_back(ctx);
+
+            // Save tensors data offset info of the shard.
+            for (lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur; cur = lm_ggml_get_next_tensor(ctx, cur)) {
+                std::string tensor_name = std::string(cur->name);
+                // make sure there is no duplicated tensor names
+                if (weights_map.find(tensor_name) != weights_map.end()) {
+                    throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", lm_ggml_get_name(cur)));
+                }
+                n_elements += lm_ggml_nelements(cur);
+                n_bytes    += lm_ggml_nbytes(cur);
+                weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), idx, ctx_gguf.get(), cur));
+            }
+        }
+
+        get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
+
+        // sanity check
+        {
+            const int n_tensors_loaded = (int) weights_map.size();
+            if (n_tensors != n_tensors_loaded) {
+                throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
+            }
+        }
+
+        LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1);
+    }
+
+    n_kv      = lm_gguf_get_n_kv(meta.get());
+    n_tensors = weights_map.size();
+
+    fver = (enum llama_fver) lm_gguf_get_version(meta.get());
+
+    LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
+            __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
+
+    // determine file type based on the number of tensors for each quantization and print meta data
+    // TODO: make optional
+    {
+        std::map<enum lm_ggml_type, uint32_t> n_type;
+
+        uint32_t n_type_max = 0;
+        enum lm_ggml_type type_max = LM_GGML_TYPE_F32;
+
+        for (const auto & it : weights_map) {
+            const llama_tensor_weight & w = it.second;
+            const lm_ggml_tensor * tensor = w.tensor;
+
+            enum lm_ggml_type type = tensor->type;
+
+            n_type[type]++;
+
+            if (n_type_max < n_type[type]) {
+                n_type_max = n_type[type];
+                type_max   = type;
+            }
+
+            if (trace > 0) {
+                const uint16_t sid = w.idx;
+                LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ]\n", __func__, sid, lm_ggml_get_name(tensor), lm_ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
+            }
+        }
+
+        switch (type_max) {
+            case LM_GGML_TYPE_F32:     ftype = LLAMA_FTYPE_ALL_F32;        break;
+            case LM_GGML_TYPE_F16:     ftype = LLAMA_FTYPE_MOSTLY_F16;     break;
+            case LM_GGML_TYPE_BF16:    ftype = LLAMA_FTYPE_MOSTLY_BF16;    break;
+            case LM_GGML_TYPE_Q4_0:    ftype = LLAMA_FTYPE_MOSTLY_Q4_0;    break;
+            case LM_GGML_TYPE_Q4_1:    ftype = LLAMA_FTYPE_MOSTLY_Q4_1;    break;
+            case LM_GGML_TYPE_Q5_0:    ftype = LLAMA_FTYPE_MOSTLY_Q5_0;    break;
+            case LM_GGML_TYPE_Q5_1:    ftype = LLAMA_FTYPE_MOSTLY_Q5_1;    break;
+            case LM_GGML_TYPE_Q8_0:    ftype = LLAMA_FTYPE_MOSTLY_Q8_0;    break;
+            case LM_GGML_TYPE_Q2_K:    ftype = LLAMA_FTYPE_MOSTLY_Q2_K;    break;
+            case LM_GGML_TYPE_Q3_K:    ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M;  break;
+            case LM_GGML_TYPE_Q4_K:    ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;  break;
+            case LM_GGML_TYPE_Q5_K:    ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M;  break;
+            case LM_GGML_TYPE_Q6_K:    ftype = LLAMA_FTYPE_MOSTLY_Q6_K;    break;
+            case LM_GGML_TYPE_TQ1_0:   ftype = LLAMA_FTYPE_MOSTLY_TQ1_0;   break;
+            case LM_GGML_TYPE_TQ2_0:   ftype = LLAMA_FTYPE_MOSTLY_TQ2_0;   break;
+            case LM_GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
+            case LM_GGML_TYPE_IQ2_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS;  break;
+            case LM_GGML_TYPE_IQ2_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ2_S;   break;
+            case LM_GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
+            case LM_GGML_TYPE_IQ1_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_S;   break;
+            case LM_GGML_TYPE_IQ1_M:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_M;   break;
+            case LM_GGML_TYPE_IQ4_NL:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL;  break;
+            case LM_GGML_TYPE_IQ4_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS;  break;
+            case LM_GGML_TYPE_IQ3_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ3_S;   break;
+            default:
+                {
+                    LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, lm_ggml_type_name(type_max));
+                    ftype = LLAMA_FTYPE_ALL_F32;
+                } break;
+        }
+
+        // this is a way to mark that we have "guessed" the file type
+        ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
+
+        {
+            const int kid = lm_gguf_find_key(meta.get(), "general.file_type"); // TODO: use LLM_KV
+            if (kid >= 0) {
+                ftype = (llama_ftype) lm_gguf_get_val_u32(meta.get(), kid);
+            }
+        }
+
+        LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
+
+        for (int i = 0; i < n_kv; i++) {
+            const char * name           = lm_gguf_get_key(meta.get(), i);
+            const enum lm_gguf_type type = lm_gguf_get_kv_type(meta.get(), i);
+            const std::string type_name =
+                type == LM_GGUF_TYPE_ARRAY
+                ? format("%s[%s,%zu]", lm_gguf_type_name(type), lm_gguf_type_name(lm_gguf_get_arr_type(meta.get(), i)), lm_gguf_get_arr_n(meta.get(), i))
+                : lm_gguf_type_name(type);
+
+            std::string value          = lm_gguf_kv_to_str(meta.get(), i);
+            const size_t MAX_VALUE_LEN = 40;
+            if (value.size() > MAX_VALUE_LEN) {
+                value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
+            }
+            replace_all(value, "\n", "\\n");
+
+            LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
+        }
+
+        // print type counts
+        for (auto & kv : n_type) {
+            if (kv.second == 0) {
+                continue;
+            }
+
+            LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, lm_ggml_type_name(kv.first), kv.second);
+        }
+    }
+
+    if (!llama_mmap::SUPPORTED) {
+        LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
+        use_mmap = false;
+    }
+
+    this->use_mmap = use_mmap;
+    this->check_tensors = check_tensors;
+}
+
+std::string llama_model_loader::get_arch_name() const {
+    return arch_name;
+}
+
+enum llm_arch llama_model_loader::get_arch() const {
+    return llm_kv.arch;
+}
+
+const llama_model_loader::llama_tensor_weight * llama_model_loader::get_weight(const char * name) const {
+    auto pos = weights_map.find(name);
+    if (pos != weights_map.end()) {
+        return &pos->second;
+    }
+
+    return nullptr;
+}
+
+const llama_model_loader::llama_tensor_weight & llama_model_loader::require_weight(const char * name) const {
+    const llama_tensor_weight * weight = get_weight(name);
+    if (!weight) {
+        throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
+    }
+    return *weight;
+}
+
+struct lm_ggml_tensor * llama_model_loader::get_tensor_meta(const char * name) const {
+    const auto * weight = get_weight(name);
+    if (!weight) {
+        return nullptr;
+    }
+    return weight->tensor;
+}
+
+struct lm_ggml_tensor * llama_model_loader::require_tensor_meta(const std::string & name) const {
+    struct lm_ggml_tensor * tensor = get_tensor_meta(name.c_str());
+    if (!tensor) {
+        throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
+    }
+    return tensor;
+}
+
+const struct lm_ggml_tensor * llama_model_loader::check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const {
+    const struct lm_ggml_tensor * cur = get_tensor_meta(name.c_str());
+
+    if (cur == NULL) {
+        if (!required) {
+            return NULL;
+        }
+        throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
+    }
+
+    {
+        bool is_ok = true;
+        for (size_t i = 0; i < LM_GGML_MAX_DIMS; ++i) {
+            if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) {
+                is_ok = false;
+                break;
+            }
+        }
+        if (!is_ok) {
+            throw std::runtime_error(
+                    format("%s: tensor '%s' has wrong shape; expected %s, got %s",
+                        __func__, name.c_str(),
+                        llama_format_tensor_shape(ne).c_str(),
+                        llama_format_tensor_shape(cur).c_str()));
+        }
+    }
+
+    return cur;
+}
+
+struct lm_ggml_tensor * llama_model_loader::create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags) {
+    const struct lm_ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
+
+    if (cur == NULL) {
+        return NULL;
+    }
+
+    bool duplicated = flags & TENSOR_DUPLICATED;
+
+    struct lm_ggml_tensor * tensor = lm_ggml_dup_tensor(ctx, cur);
+    lm_ggml_set_name(tensor, lm_ggml_get_name(cur));
+
+    if (duplicated) {
+        size_data += lm_ggml_nbytes(cur);
+    } else {
+        n_created++;
+    }
+
+    return tensor;
+
+}
+
+struct lm_ggml_tensor * llama_model_loader::create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required) {
+    const struct lm_ggml_tensor * cur = check_tensor_dims(name, ne, required);
+
+    if (cur == NULL) {
+        return NULL;
+    }
+
+    if (cur->type != base->type) {
+        throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), lm_ggml_type_name(base->type), lm_ggml_type_name(cur->type)));
+    }
+
+    std::array<int64_t, LM_GGML_MAX_DIMS> dims;
+    for (size_t i = 0; i < LM_GGML_MAX_DIMS; ++i) {
+        dims[i] = i < ne.size() ? ne.begin()[i] : 1;
+    }
+
+    struct lm_ggml_tensor * tensor = lm_ggml_view_4d(ctx, base,
+                                    dims[0], dims[1], dims[2], dims[3],
+                                    cur->nb[1], cur->nb[2], cur->nb[3],
+                                    offset);
+
+    lm_ggml_set_name(tensor, name.c_str());
+
+    n_created++;
+
+    return tensor;
+}
+
+void llama_model_loader::done_getting_tensors() const {
+    if (n_created != n_tensors) {
+        throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
+    }
+}
+
+void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps) {
+    if (use_mmap) {
+        mappings.reserve(files.size());
+        mmaps_used.reserve(files.size());
+        for (const auto & file : files) {
+            auto * reg = lm_ggml_backend_dev_backend_reg(lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_CPU));
+            auto * is_numa_fn = (decltype(lm_ggml_is_numa) *) lm_ggml_backend_reg_get_proc_address(reg, "lm_ggml_backend_cpu_is_numa");
+            std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, is_numa_fn()));
+            mmaps_used.emplace_back(mapping->size(), 0);
+            if (mlock_mmaps) {
+                std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
+                mlock_mmap->init(mapping->addr());
+                mlock_mmaps->emplace_back(std::move(mlock_mmap));
+            }
+            mappings.emplace_back(std::move(mapping));
+        }
+    }
+
+    // compute the total size of all tensors for progress reporting
+    for (const auto & it : weights_map) {
+        size_data += lm_ggml_nbytes(it.second.tensor);
+    }
+}
+
+void llama_model_loader::get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const {
+    LM_GGML_ASSERT(!mappings.empty());
+    const auto & mapping = mappings.at(idx);
+
+    *first = mapping->size();
+    *last  = 0;
+    *addr = mapping->addr();
+    for (lm_ggml_tensor * tensor = lm_ggml_get_first_tensor(ctx); tensor; tensor = lm_ggml_get_next_tensor(ctx, tensor)) {
+        const auto * weight = get_weight(lm_ggml_get_name(tensor));
+        if (!weight || weight->idx != idx) {
+            continue;
+        }
+        *first = std::min(*first, weight->offs);
+        *last  = std::max(*last,  weight->offs + lm_ggml_nbytes(tensor));
+    }
+}
+
+void llama_model_loader::load_data_for(struct lm_ggml_tensor * cur) const {
+    const auto & w = require_weight(lm_ggml_get_name(cur));
+
+    if (use_mmap) {
+        const auto & mapping = mappings.at(w.idx);
+        if (cur->data == nullptr) {
+            cur->data = (uint8_t *)mapping->addr() + w.offs;
+        } else {
+            memcpy(cur->data, (uint8_t *)mapping->addr() + w.offs, lm_ggml_nbytes(cur));
+        }
+    } else {
+        LM_GGML_ASSERT(cur->data != nullptr);
+        LM_GGML_ASSERT(w.idx < files.size());
+        const auto & file = files.at(w.idx);
+        file->seek(w.offs, SEEK_SET);
+        file->read_raw(cur->data, lm_ggml_nbytes(cur));
+    }
+
+    if (check_tensors && !lm_ggml_validate_row_data(cur->type, cur->data, lm_ggml_nbytes(cur))) {
+        throw std::runtime_error(format("tensor '%s' has invalid data", lm_ggml_get_name(cur)));
+    }
+}
+
+bool llama_model_loader::load_all_data(
+        struct lm_ggml_context * ctx,
+        llama_buf_map & bufs,
+        llama_mlocks * lmlocks,
+        llama_progress_callback progress_callback,
+        void * progress_callback_user_data) {
+    LM_GGML_ASSERT(size_data != 0 && "call init_mappings() first");
+
+    std::vector<no_init<uint8_t>> read_buf;
+    std::vector<std::future<std::pair<lm_ggml_tensor *, bool>>> validation_result;
+
+    // 4 staging buffers for async uploads, each sized 1MB seems to be a good default for single NVMe drives.
+    // NVMe raid configurations might require more / larger buffers.
+    constexpr size_t n_buffers = 4;
+    constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB
+
+    std::vector<lm_ggml_backend_buffer_t> host_buffers;
+    std::vector<lm_ggml_backend_event_t> events;
+    std::vector<void *> host_ptrs;
+    size_t buffer_idx = 0; // buffer to use for async loads
+    lm_ggml_backend_t upload_backend = [&](const char * func) -> lm_ggml_backend_t {
+        if (use_mmap || check_tensors) {
+            return nullptr;
+        }
+        // When not using mmaped io use async uploads from pinned memory to GPU memory.
+        // First determine if the backend supports the necessary features for async uploads.
+        auto * buf = bufs.count(0) ? bufs.at(0) : nullptr;
+        if (!buf) {
+            LLAMA_LOG_DEBUG("%s: no buffer found for async uploads\n", func);
+            return nullptr;
+        }
+
+        auto * buft = lm_ggml_backend_buffer_get_type(buf);
+        auto * dev = lm_ggml_backend_buft_get_device(buft);
+        if (!dev) {
+            LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func,
+                lm_ggml_backend_buft_name(buft));
+            return nullptr;
+        }
+
+        if (buft != lm_ggml_backend_dev_buffer_type(dev)) {
+            LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func,
+                lm_ggml_backend_buft_name(buft), lm_ggml_backend_dev_name(dev));
+            return nullptr;
+        }
+
+        lm_ggml_backend_dev_props props;
+        lm_ggml_backend_dev_get_props(dev, &props);
+        if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) {
+            LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func,
+                lm_ggml_backend_dev_name(dev));
+            return nullptr;
+        }
+
+        auto * host_buft = lm_ggml_backend_dev_host_buffer_type(dev);
+        if (!host_buft) {
+            LLAMA_LOG_DEBUG("%s: no host buffer type found for device %s\n", func,
+                lm_ggml_backend_dev_name(dev));
+            return nullptr;
+        }
+
+        // If the backend is supported, create pinned memory buffers and events for synchronisation.
+        for (size_t idx = 0; idx < n_buffers; ++idx) {
+            auto * buf = lm_ggml_backend_buft_alloc_buffer(host_buft, buffer_size);
+            if (!buf) {
+                LLAMA_LOG_DEBUG("%s: failed to allocate host buffer for async uploads for device %s\n", func,
+                    lm_ggml_backend_dev_name(dev));
+                return nullptr;
+            }
+
+            host_buffers.emplace_back(buf);
+            host_ptrs.emplace_back(lm_ggml_backend_buffer_get_base(buf));
+
+            auto * event = lm_ggml_backend_event_new(dev);
+            if (!event) {
+                LLAMA_LOG_DEBUG("%s: failed to create event for async uploads for device %s\n", func,
+                    lm_ggml_backend_dev_name(dev));
+                return nullptr;
+            }
+
+            events.emplace_back(event);
+        }
+
+        lm_ggml_backend_t backend = lm_ggml_backend_dev_init(dev, nullptr);
+        if (!backend) {
+            LLAMA_LOG_DEBUG("%s: failed to initialize backend for device %s for async uploads\n", func,
+                lm_ggml_backend_dev_name(dev));
+            return nullptr;
+        }
+
+        return backend;
+    }(__func__);
+
+    if (upload_backend) {
+        LLAMA_LOG_DEBUG("%s: using async uploads for device %s, buffer type %s, backend %s\n", __func__,
+            lm_ggml_backend_dev_name(lm_ggml_backend_get_device(upload_backend)),
+            lm_ggml_backend_buft_name(lm_ggml_backend_buffer_get_type(bufs.at(0))),
+            lm_ggml_backend_name(upload_backend));
+    }
+
+    for (struct lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur != NULL; cur = lm_ggml_get_next_tensor(ctx, cur)) {
+        const auto * weight = get_weight(lm_ggml_get_name(cur));
+        if (weight == nullptr) {
+            // this can happen with split experts models
+            continue;
+        }
+
+        if (progress_callback) {
+            if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
+                return false;
+            }
+        }
+
+        size_t n_size = lm_ggml_nbytes(cur);
+
+        if (use_mmap) {
+            const auto & mapping = mappings.at(weight->idx);
+            lm_ggml_backend_buffer_t buf_mmap = nullptr;
+            if (bufs.count(weight->idx)) {
+                buf_mmap = bufs.at(weight->idx);
+            }
+            uint8_t * data = (uint8_t *) mapping->addr() + weight->offs;
+
+            if (check_tensors) {
+                validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
+                    return std::make_pair(cur, lm_ggml_validate_row_data(cur->type, data, n_size));
+                }));
+            }
+
+            LM_GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
+            if (buf_mmap && cur->data == nullptr) {
+                lm_ggml_backend_tensor_alloc(buf_mmap, cur, data);
+                if (lmlocks) {
+                    const auto & lmlock = lmlocks->at(weight->idx);
+                    lmlock->grow_to(weight->offs + n_size);
+                }
+
+                auto & mmap_used = mmaps_used[weight->idx];
+                mmap_used.first  = std::min(mmap_used.first,  weight->offs);
+                mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
+            } else {
+                lm_ggml_backend_tensor_set(cur, data, 0, n_size);
+            }
+        } else {
+            const auto & file = files.at(weight->idx);
+            if (lm_ggml_backend_buffer_is_host(cur->buffer)) {
+                file->seek(weight->offs, SEEK_SET);
+                file->read_raw(cur->data, n_size);
+                if (check_tensors) {
+                    validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
+                        return std::make_pair(cur, lm_ggml_validate_row_data(cur->type, cur->data, n_size));
+                    }));
+                }
+            } else {
+                // If upload_backend is valid load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU.
+                if (upload_backend) {
+                    file->seek(weight->offs, SEEK_SET);
+
+                    size_t bytes_read = 0;
+
+                    while (bytes_read < n_size) {
+                        size_t read_iteration = std::min<size_t>(buffer_size, n_size - bytes_read);
+
+                        lm_ggml_backend_event_synchronize(events[buffer_idx]);
+                        file->read_raw(host_ptrs[buffer_idx], read_iteration);
+                        lm_ggml_backend_tensor_set_async(upload_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration);
+                        lm_ggml_backend_event_record(events[buffer_idx], upload_backend);
+
+                        bytes_read += read_iteration;
+                        ++buffer_idx;
+                        buffer_idx %= n_buffers;
+                    }
+                } else {
+                    read_buf.resize(n_size);
+                    file->seek(weight->offs, SEEK_SET);
+                    file->read_raw(read_buf.data(), n_size);
+                    lm_ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
+                    if (check_tensors && !lm_ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
+                        throw std::runtime_error(format("tensor '%s' has invalid data", lm_ggml_get_name(cur)));
+                    }
+                }
+            }
+        }
+
+        size_done += n_size;
+    }
+
+    // free temporary resources used for async uploads
+    for (auto * event : events) {
+        lm_ggml_backend_event_synchronize(event);
+        lm_ggml_backend_event_free(event);
+    }
+    for (auto * buf : host_buffers) {
+        lm_ggml_backend_buffer_free(buf);
+    }
+    lm_ggml_backend_free(upload_backend);
+
+    // check validation results
+    bool validation_failed = false;
+    for (auto & future : validation_result) {
+        auto result = future.get();
+        if (!result.second) {
+            LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, lm_ggml_get_name(result.first));
+            validation_failed = true;
+        }
+    }
+    if (validation_failed) {
+        throw std::runtime_error("found tensors with invalid data");
+    }
+
+    // check if this is the last call and do final cleanup
+    if (size_done >= size_data) {
+        // unmap offloaded tensors and metadata
+        if (use_mmap) {
+            for (uint32_t idx = 0; idx < mappings.size(); idx++) {
+                const auto & mmap_used = mmaps_used.at(idx);
+                auto & mapping = mappings.at(idx);
+                mapping->unmap_fragment(0, mmap_used.first);
+                if (mmap_used.second != 0) {
+                    mapping->unmap_fragment(mmap_used.second, mapping->size());
+                }
+            }
+        }
+        if (progress_callback) {
+            // Even though the model is done loading, we still honor
+            // cancellation since we need to free allocations.
+            return progress_callback(1.0f, progress_callback_user_data);
+        }
+    }
+
+    return true;
+}