cui-llama.rn 1.4.0 → 1.4.2

This diff reflects the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (108)
  1. package/README.md +4 -23
  2. package/android/build.gradle +12 -3
  3. package/android/src/main/CMakeLists.txt +13 -7
  4. package/android/src/main/java/com/rnllama/LlamaContext.java +27 -20
  5. package/android/src/main/java/com/rnllama/RNLlama.java +5 -1
  6. package/android/src/main/jni.cpp +15 -12
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  11. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  12. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  13. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  14. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  15. package/cpp/README.md +1 -1
  16. package/cpp/common.cpp +158 -267
  17. package/cpp/common.h +46 -12
  18. package/cpp/ggml-alloc.c +1042 -1037
  19. package/cpp/ggml-backend-impl.h +255 -256
  20. package/cpp/ggml-backend-reg.cpp +582 -582
  21. package/cpp/ggml-backend.cpp +2002 -2002
  22. package/cpp/ggml-backend.h +354 -352
  23. package/cpp/ggml-common.h +1853 -1853
  24. package/cpp/ggml-cpp.h +39 -39
  25. package/cpp/ggml-cpu-aarch64.cpp +4247 -4247
  26. package/cpp/ggml-cpu-aarch64.h +8 -8
  27. package/cpp/ggml-cpu-impl.h +386 -386
  28. package/cpp/ggml-cpu-quants.c +10920 -10839
  29. package/cpp/ggml-cpu-traits.cpp +36 -36
  30. package/cpp/ggml-cpu-traits.h +38 -38
  31. package/cpp/ggml-cpu.c +329 -60
  32. package/cpp/ggml-cpu.cpp +10 -2
  33. package/cpp/ggml-cpu.h +135 -135
  34. package/cpp/ggml-impl.h +567 -567
  35. package/cpp/ggml-metal-impl.h +17 -17
  36. package/cpp/ggml-metal.m +4884 -4884
  37. package/cpp/ggml-quants.c +5238 -5238
  38. package/cpp/ggml-threading.h +14 -14
  39. package/cpp/ggml.c +6514 -6448
  40. package/cpp/ggml.h +2194 -2163
  41. package/cpp/gguf.cpp +1329 -1325
  42. package/cpp/gguf.h +202 -202
  43. package/cpp/json-schema-to-grammar.cpp +1045 -1045
  44. package/cpp/json-schema-to-grammar.h +8 -8
  45. package/cpp/json.hpp +24766 -24766
  46. package/cpp/llama-adapter.cpp +347 -346
  47. package/cpp/llama-adapter.h +74 -73
  48. package/cpp/llama-arch.cpp +1487 -1434
  49. package/cpp/llama-arch.h +400 -395
  50. package/cpp/llama-batch.cpp +368 -368
  51. package/cpp/llama-batch.h +88 -88
  52. package/cpp/llama-chat.cpp +578 -567
  53. package/cpp/llama-chat.h +52 -51
  54. package/cpp/llama-context.cpp +1775 -1771
  55. package/cpp/llama-context.h +128 -128
  56. package/cpp/llama-cparams.cpp +1 -1
  57. package/cpp/llama-cparams.h +37 -37
  58. package/cpp/llama-cpp.h +30 -30
  59. package/cpp/llama-grammar.cpp +1139 -1139
  60. package/cpp/llama-grammar.h +143 -143
  61. package/cpp/llama-hparams.cpp +71 -71
  62. package/cpp/llama-hparams.h +139 -140
  63. package/cpp/llama-impl.cpp +167 -167
  64. package/cpp/llama-impl.h +61 -61
  65. package/cpp/llama-kv-cache.cpp +718 -718
  66. package/cpp/llama-kv-cache.h +218 -218
  67. package/cpp/llama-mmap.cpp +2 -1
  68. package/cpp/llama-mmap.h +67 -67
  69. package/cpp/llama-model-loader.cpp +1124 -1011
  70. package/cpp/llama-model-loader.h +167 -158
  71. package/cpp/llama-model.cpp +3997 -2202
  72. package/cpp/llama-model.h +370 -391
  73. package/cpp/llama-sampling.cpp +2408 -2406
  74. package/cpp/llama-sampling.h +32 -48
  75. package/cpp/llama-vocab.cpp +3247 -1982
  76. package/cpp/llama-vocab.h +125 -182
  77. package/cpp/llama.cpp +416 -2886
  78. package/cpp/llama.h +1323 -1285
  79. package/cpp/log.cpp +401 -401
  80. package/cpp/log.h +121 -121
  81. package/cpp/rn-llama.cpp +822 -0
  82. package/cpp/rn-llama.h +123 -0
  83. package/cpp/rn-llama.hpp +18 -12
  84. package/cpp/sampling.cpp +505 -500
  85. package/cpp/sgemm.cpp +2597 -2597
  86. package/cpp/speculative.cpp +277 -274
  87. package/cpp/speculative.h +28 -28
  88. package/cpp/unicode.cpp +2 -3
  89. package/ios/CMakeLists.txt +99 -0
  90. package/ios/RNLlama.h +5 -1
  91. package/ios/RNLlama.mm +2 -2
  92. package/ios/RNLlamaContext.h +8 -1
  93. package/ios/RNLlamaContext.mm +15 -11
  94. package/ios/rnllama.xcframework/Info.plist +74 -0
  95. package/jest/mock.js +3 -2
  96. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  97. package/lib/commonjs/index.js +4 -2
  98. package/lib/commonjs/index.js.map +1 -1
  99. package/lib/module/NativeRNLlama.js.map +1 -1
  100. package/lib/module/index.js +4 -2
  101. package/lib/module/index.js.map +1 -1
  102. package/lib/typescript/NativeRNLlama.d.ts +5 -1
  103. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  104. package/lib/typescript/index.d.ts.map +1 -1
  105. package/llama-rn.podspec +8 -2
  106. package/package.json +5 -2
  107. package/src/NativeRNLlama.ts +5 -1
  108. package/src/index.ts +9 -2
package/cpp/llama-model-loader.cpp
@@ -1,1011 +1,1124 @@
1
- #include "llama-model-loader.h"
2
-
3
- #include "ggml.h"
4
-
5
- #include <array>
6
- #include <cinttypes>
7
- #include <cstring>
8
- #include <future>
9
-
10
- const char * llama_file_version_name(llama_fver version) {
11
- switch (version) {
12
- case LM_GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
13
- case LM_GGUF_FILE_VERSION_V2: return "GGUF V2";
14
- case LM_GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
15
- }
16
-
17
- return "unknown";
18
- }
19
-
20
- namespace GGUFMeta {
21
- template <typename T, lm_gguf_type gt_, T (*gfun)(const lm_gguf_context *, const int64_t)>
22
- struct GKV_Base_Type {
23
- static constexpr lm_gguf_type gt = gt_;
24
-
25
- static T getter(const lm_gguf_context * ctx, const int kid) {
26
- return gfun(ctx, kid);
27
- }
28
- };
29
-
30
- template<typename T> struct GKV_Base;
31
-
32
- template<> struct GKV_Base<bool >: GKV_Base_Type<bool, LM_GGUF_TYPE_BOOL, lm_gguf_get_val_bool> {};
33
- template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, LM_GGUF_TYPE_UINT8, lm_gguf_get_val_u8 > {};
34
- template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, LM_GGUF_TYPE_UINT16, lm_gguf_get_val_u16 > {};
35
- template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, LM_GGUF_TYPE_UINT32, lm_gguf_get_val_u32 > {};
36
- template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, LM_GGUF_TYPE_UINT64, lm_gguf_get_val_u64 > {};
37
- template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, LM_GGUF_TYPE_INT8, lm_gguf_get_val_i8 > {};
38
- template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, LM_GGUF_TYPE_INT16, lm_gguf_get_val_i16 > {};
39
- template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, LM_GGUF_TYPE_INT32, lm_gguf_get_val_i32 > {};
40
- template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, LM_GGUF_TYPE_INT64, lm_gguf_get_val_i64 > {};
41
- template<> struct GKV_Base<float >: GKV_Base_Type<float, LM_GGUF_TYPE_FLOAT32, lm_gguf_get_val_f32 > {};
42
- template<> struct GKV_Base<double >: GKV_Base_Type<double, LM_GGUF_TYPE_FLOAT64, lm_gguf_get_val_f64 > {};
43
- template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, LM_GGUF_TYPE_STRING, lm_gguf_get_val_str > {};
44
-
45
- template<> struct GKV_Base<std::string> {
46
- static constexpr lm_gguf_type gt = LM_GGUF_TYPE_STRING;
47
-
48
- static std::string getter(const lm_gguf_context * ctx, const int kid) {
49
- return lm_gguf_get_val_str(ctx, kid);
50
- }
51
- };
52
-
53
- struct ArrayInfo {
54
- const lm_gguf_type gt;
55
- const size_t length;
56
- const void * data;
57
- };
58
-
59
- template<> struct GKV_Base<ArrayInfo> {
60
- public:
61
- static constexpr lm_gguf_type gt = LM_GGUF_TYPE_ARRAY;
62
- static ArrayInfo getter(const lm_gguf_context *ctx, const int k) {
63
- const enum lm_gguf_type arr_type = lm_gguf_get_arr_type(ctx, k);
64
- return ArrayInfo {
65
- arr_type,
66
- size_t(lm_gguf_get_arr_n(ctx, k)),
67
- arr_type == LM_GGUF_TYPE_STRING ? nullptr : lm_gguf_get_arr_data(ctx, k),
68
- };
69
- }
70
- };
71
-
72
- template<typename T>
73
- class GKV : public GKV_Base<T> {
74
- GKV() = delete;
75
-
76
- public:
77
- static T get_kv(const lm_gguf_context * ctx, const int k) {
78
- const enum lm_gguf_type kt = lm_gguf_get_kv_type(ctx, k);
79
-
80
- if (kt != GKV::gt) {
81
- throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
82
- lm_gguf_get_key(ctx, k), lm_gguf_type_name(kt), lm_gguf_type_name(GKV::gt)));
83
- }
84
- return GKV::getter(ctx, k);
85
- }
86
-
87
- static const char * override_type_to_str(const llama_model_kv_override_type ty) {
88
- switch (ty) {
89
- case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool";
90
- case LLAMA_KV_OVERRIDE_TYPE_INT: return "int";
91
- case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
92
- case LLAMA_KV_OVERRIDE_TYPE_STR: return "str";
93
- }
94
- return "unknown";
95
- }
96
-
97
- static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) {
98
- if (!ovrd) { return false; }
99
- if (ovrd->tag == expected_type) {
100
- LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
101
- __func__, override_type_to_str(ovrd->tag), ovrd->key);
102
- switch (ovrd->tag) {
103
- case LLAMA_KV_OVERRIDE_TYPE_BOOL: {
104
- LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
105
- } break;
106
- case LLAMA_KV_OVERRIDE_TYPE_INT: {
107
- LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
108
- } break;
109
- case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
110
- LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
111
- } break;
112
- case LLAMA_KV_OVERRIDE_TYPE_STR: {
113
- LLAMA_LOG_INFO("%s\n", ovrd->val_str);
114
- } break;
115
- default:
116
- // Shouldn't be possible to end up here, but just in case...
117
- throw std::runtime_error(
118
- format("Unsupported attempt to override %s type for metadata key %s\n",
119
- override_type_to_str(ovrd->tag), ovrd->key));
120
- }
121
- return true;
122
- }
123
- LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
124
- __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag));
125
- return false;
126
- }
127
-
128
- template<typename OT>
129
- static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
130
- try_override(OT & target, const struct llama_model_kv_override * ovrd) {
131
- if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
132
- target = ovrd->val_bool;
133
- return true;
134
- }
135
- return false;
136
- }
137
-
138
- template<typename OT>
139
- static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
140
- try_override(OT & target, const struct llama_model_kv_override * ovrd) {
141
- if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
142
- target = ovrd->val_i64;
143
- return true;
144
- }
145
- return false;
146
- }
147
-
148
- template<typename OT>
149
- static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
150
- try_override(T & target, const struct llama_model_kv_override * ovrd) {
151
- if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
152
- target = ovrd->val_f64;
153
- return true;
154
- }
155
- return false;
156
- }
157
-
158
- template<typename OT>
159
- static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
160
- try_override(T & target, const struct llama_model_kv_override * ovrd) {
161
- if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
162
- target = ovrd->val_str;
163
- return true;
164
- }
165
- return false;
166
- }
167
-
168
- static bool set(const lm_gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
169
- if (try_override<T>(target, ovrd)) {
170
- return true;
171
- }
172
- if (k < 0) { return false; }
173
- target = get_kv(ctx, k);
174
- return true;
175
- }
176
-
177
- static bool set(const lm_gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
178
- return set(ctx, lm_gguf_find_key(ctx, key), target, ovrd);
179
- }
180
-
181
- static bool set(const lm_gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
182
- return set(ctx, key.c_str(), target, ovrd);
183
- }
184
- };
185
- }
186
-
187
- template<typename T>
188
- typename std::enable_if<std::is_integral<T>::value, bool>::type
189
- llama_model_loader::get_arr_n(const std::string & key, T & result, bool required) {
190
- const int kid = lm_gguf_find_key(meta.get(), key.c_str());
191
-
192
- if (kid < 0) {
193
- if (required) {
194
- throw std::runtime_error(format("key not found in model: %s", key.c_str()));
195
- }
196
- return false;
197
- }
198
-
199
- struct GGUFMeta::ArrayInfo arr_info =
200
- GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
201
-
202
-
203
- result = arr_info.length;
204
- return true;
205
- }
206
-
207
- template<typename T>
208
- typename std::enable_if<std::is_integral<T>::value, bool>::type
209
- llama_model_loader::get_arr_n(enum llm_kv kid, T & result, bool required) {
210
- return get_arr_n(llm_kv(kid), result, required);
211
- }
212
-
213
- template bool llama_model_loader::get_arr_n(enum llm_kv kid, uint32_t & result, bool required);
214
-
215
- template<typename T>
216
- bool llama_model_loader::get_arr(const std::string & key, std::vector<T> & result, bool required) {
217
- const int kid = lm_gguf_find_key(meta.get(), key.c_str());
218
-
219
- if (kid < 0 || lm_gguf_get_kv_type(meta.get(), kid) != LM_GGUF_TYPE_ARRAY) {
220
- if (required) {
221
- throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
222
- }
223
- return false;
224
- }
225
-
226
- struct GGUFMeta::ArrayInfo arr_info =
227
- GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
228
-
229
- switch (arr_info.gt) {
230
- case LM_GGUF_TYPE_FLOAT32: LM_GGML_ASSERT((std::is_same<T, float>::value)); break;
231
- case LM_GGUF_TYPE_INT32: LM_GGML_ASSERT(
232
- (std::is_same<T, int32_t>::value) ||
233
- (std::is_same<T, uint32_t>::value)); break;
234
- default:
235
- throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
236
- }
237
-
238
- result.resize(arr_info.length);
239
- result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
240
-
241
- return true;
242
- }
243
-
244
- template<typename T, size_t N_MAX>
245
- bool llama_model_loader::get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required) {
246
- const int kid = lm_gguf_find_key(meta.get(), key.c_str());
247
-
248
- if (kid < 0 || lm_gguf_get_kv_type(meta.get(), kid) != LM_GGUF_TYPE_ARRAY) {
249
- if (required) {
250
- throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
251
- }
252
- return false;
253
- }
254
-
255
- struct GGUFMeta::ArrayInfo arr_info =
256
- GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
257
-
258
- switch (arr_info.gt) {
259
- case LM_GGUF_TYPE_FLOAT32: LM_GGML_ASSERT((std::is_same<T, float>::value)); break;
260
- case LM_GGUF_TYPE_INT32: LM_GGML_ASSERT(
261
- (std::is_same<T, int32_t>::value) ||
262
- (std::is_same<T, uint32_t>::value)); break;
263
- default:
264
- throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
265
- }
266
-
267
- if (arr_info.length > N_MAX) {
268
- throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX));
269
- }
270
-
271
- std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
272
-
273
- return true;
274
- }
275
-
276
- template<typename T>
277
- bool llama_model_loader::get_arr(enum llm_kv kid, T & result, bool required) {
278
- return get_arr(llm_kv(kid), result, required);
279
- }
280
-
281
- template<typename T>
282
- bool llama_model_loader::get_key(const std::string & key, T & result, bool required) {
283
- auto it = kv_overrides.find(key);
284
-
285
- const struct llama_model_kv_override * override =
286
- it != kv_overrides.end() ? &it->second : nullptr;
287
-
288
- const bool found = GGUFMeta::GKV<T>::set(meta.get(), key, result, override);
289
-
290
- if (required && !found) {
291
- throw std::runtime_error(format("key not found in model: %s", key.c_str()));
292
- }
293
-
294
- return found;
295
- }
296
-
297
- template<typename T>
298
- bool llama_model_loader::get_key(enum llm_kv kid, T & result, bool required) {
299
- return get_key(llm_kv(kid), result, required);
300
- }
301
-
302
- template bool llama_model_loader::get_key<bool> (enum llm_kv kid, bool & result, bool required);
303
- template bool llama_model_loader::get_key<float> (enum llm_kv kid, float & result, bool required);
304
- template bool llama_model_loader::get_key<uint32_t> (enum llm_kv kid, uint32_t & result, bool required);
305
- template bool llama_model_loader::get_key<std::string>(enum llm_kv kid, std::string & result, bool required);
306
-
307
- template<>
308
- bool llama_model_loader::get_key(enum llm_kv kid, enum llama_pooling_type & result, bool required) {
309
- uint32_t tmp;
310
- const bool found = get_key(kid, tmp, required);
311
- if (found) {
312
- result = (enum llama_pooling_type) tmp;
313
- } else {
314
- result = LLAMA_POOLING_TYPE_UNSPECIFIED;
315
- }
316
- return found;
317
- }
318
-
319
- // get array of n <= N_MAX elements, or a single element repeated n times
320
- template<typename T, size_t N_MAX>
321
- bool llama_model_loader::get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required) {
322
- const int kid = lm_gguf_find_key(meta.get(), key.c_str());
323
-
324
- if (kid < 0) {
325
- if (required) {
326
- throw std::runtime_error(format("key not found in model: %s", key.c_str()));
327
- }
328
- return false;
329
- }
330
-
331
- if (n > N_MAX) {
332
- throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str()));
333
- }
334
-
335
- if (lm_gguf_get_kv_type(meta.get(), kid) == LM_GGUF_TYPE_ARRAY) {
336
- struct GGUFMeta::ArrayInfo arr_info =
337
- GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
338
-
339
- if (n != arr_info.length) {
340
- throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", key.c_str(), n, (uint32_t) arr_info.length));
341
- }
342
-
343
- return get_arr(key, result, required);
344
- }
345
-
346
- T value;
347
-
348
- bool ok = get_key(key, value, required);
349
- if (!ok) {
350
- return false;
351
- }
352
-
353
- for (uint32_t i = 0; i < n; i++) {
354
- result[i] = value;
355
- }
356
-
357
- return true;
358
- }
359
-
360
- template<typename T>
361
- bool llama_model_loader::get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required) {
362
- return get_key_or_arr(llm_kv(kid), result, n, required);
363
- }
364
-
365
- // TODO: this is not very clever - figure out something better
366
- template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required);
367
- template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required);
368
-
369
- llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
370
- int trace = 0;
371
- if (getenv("LLAMA_TRACE")) {
372
- trace = atoi(getenv("LLAMA_TRACE"));
373
- }
374
-
375
- if (param_overrides_p != nullptr) {
376
- for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
377
- kv_overrides.insert({std::string(p->key), *p});
378
- }
379
- }
380
-
381
- struct lm_ggml_context * ctx = NULL;
382
- struct lm_gguf_init_params params = {
383
- /*.no_alloc = */ true,
384
- /*.ctx = */ &ctx,
385
- };
386
-
387
- meta.reset(lm_gguf_init_from_file(fname.c_str(), params));
388
- if (!meta) {
389
- throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
390
- }
391
-
392
- get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
393
- llm_kv = LLM_KV(llm_arch_from_string(arch_name));
394
-
395
- files.emplace_back(new llama_file(fname.c_str(), "rb"));
396
- contexts.emplace_back(ctx);
397
-
398
- // Save tensors data offset of the main file.
399
- // For subsidiary files, `meta` tensor data offset must not be used,
400
- // so we build a unified tensors index for weights.
401
- for (lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur; cur = lm_ggml_get_next_tensor(ctx, cur)) {
402
- std::string tensor_name = std::string(cur->name);
403
- // make sure there is no duplicated tensor names
404
- if (weights_map.find(tensor_name) != weights_map.end()) {
405
- throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", lm_ggml_get_name(cur)));
406
- }
407
- n_elements += lm_ggml_nelements(cur);
408
- n_bytes += lm_ggml_nbytes(cur);
409
- weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), 0, meta.get(), cur));
410
- }
411
- uint16_t n_split = 0;
412
- get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
413
-
414
- // Load additional GGML contexts
415
- if (n_split > 1) {
416
- uint16_t idx = 0;
417
- get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
418
- if (idx != 0) {
419
- throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
420
- }
421
-
422
- std::vector<char> split_prefix(llama_path_max(), 0);
423
- if (!llama_split_prefix(split_prefix.data(), split_prefix.size(), fname.c_str(), idx, n_split)) {
424
- throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
425
- }
426
-
427
- if (trace > 0) {
428
- LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
429
- }
430
-
431
- std::vector<char> split_path(llama_path_max(), 0);
432
- for (idx = 1; idx < n_split; idx++) {
433
- llama_split_path(split_path.data(), split_path.size(), split_prefix.data(), idx, n_split);
434
-
435
- struct lm_gguf_init_params split_params = {
436
- /*.no_alloc = */ true,
437
- /*.ctx = */ &ctx,
438
- };
439
- lm_gguf_context_ptr ctx_gguf { lm_gguf_init_from_file(split_path.data(), split_params) };
440
- if (!ctx_gguf) {
441
- throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path.data()));
442
- }
443
-
444
- files.emplace_back(new llama_file(split_path.data(), "rb"));
445
- contexts.emplace_back(ctx);
446
-
447
- // Save tensors data offset info of the shard.
448
- for (lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur; cur = lm_ggml_get_next_tensor(ctx, cur)) {
449
- std::string tensor_name = std::string(cur->name);
450
- // make sure there is no duplicated tensor names
451
- if (weights_map.find(tensor_name) != weights_map.end()) {
452
- throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", lm_ggml_get_name(cur)));
453
- }
454
- n_elements += lm_ggml_nelements(cur);
455
- n_bytes += lm_ggml_nbytes(cur);
456
- weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), idx, ctx_gguf.get(), cur));
457
- }
458
- }
459
-
460
- get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
461
-
462
- // sanity check
463
- {
464
- const int n_tensors_loaded = (int) weights_map.size();
465
- if (n_tensors != n_tensors_loaded) {
466
- throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
467
- }
468
- }
469
-
470
- LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1);
471
- }
472
-
473
- n_kv = lm_gguf_get_n_kv(meta.get());
474
- n_tensors = weights_map.size();
475
-
476
- fver = (enum llama_fver) lm_gguf_get_version(meta.get());
477
-
478
- LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
479
- __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
480
-
481
- // determine file type based on the number of tensors for each quantization and print meta data
482
- // TODO: make optional
483
- {
484
- std::map<enum lm_ggml_type, uint32_t> n_type;
485
-
486
- uint32_t n_type_max = 0;
487
- enum lm_ggml_type type_max = LM_GGML_TYPE_F32;
488
-
489
- for (const auto & it : weights_map) {
490
- const llama_tensor_weight & w = it.second;
491
- const lm_ggml_tensor * tensor = w.tensor;
492
-
493
- enum lm_ggml_type type = tensor->type;
494
-
495
- n_type[type]++;
496
-
497
- if (n_type_max < n_type[type]) {
498
- n_type_max = n_type[type];
499
- type_max = type;
500
- }
501
-
502
- if (trace > 0) {
503
- const uint16_t sid = w.idx;
504
- LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ]\n", __func__, sid, lm_ggml_get_name(tensor), lm_ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
505
- }
506
- }
507
-
508
- switch (type_max) {
509
- case LM_GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
510
- case LM_GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
511
- case LM_GGML_TYPE_BF16: ftype = LLAMA_FTYPE_MOSTLY_BF16; break;
512
- case LM_GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
513
- case LM_GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
514
- case LM_GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
515
- case LM_GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
516
- case LM_GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
517
- case LM_GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
518
- case LM_GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
519
- case LM_GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
520
- case LM_GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
521
- case LM_GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
522
- case LM_GGML_TYPE_TQ1_0: ftype = LLAMA_FTYPE_MOSTLY_TQ1_0; break;
523
- case LM_GGML_TYPE_TQ2_0: ftype = LLAMA_FTYPE_MOSTLY_TQ2_0; break;
524
- case LM_GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
525
- case LM_GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break;
526
- case LM_GGML_TYPE_IQ2_S: ftype = LLAMA_FTYPE_MOSTLY_IQ2_S; break;
527
- case LM_GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
528
- case LM_GGML_TYPE_IQ1_S: ftype = LLAMA_FTYPE_MOSTLY_IQ1_S; break;
529
- case LM_GGML_TYPE_IQ1_M: ftype = LLAMA_FTYPE_MOSTLY_IQ1_M; break;
530
- case LM_GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break;
531
- case LM_GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break;
532
- case LM_GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break;
533
- default:
534
- {
535
- LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, lm_ggml_type_name(type_max));
536
- ftype = LLAMA_FTYPE_ALL_F32;
537
- } break;
538
- }
539
-
540
- // this is a way to mark that we have "guessed" the file type
541
- ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
542
-
543
- {
544
- const int kid = lm_gguf_find_key(meta.get(), "general.file_type"); // TODO: use LLM_KV
545
- if (kid >= 0) {
546
- ftype = (llama_ftype) lm_gguf_get_val_u32(meta.get(), kid);
547
- }
548
- }
549
-
550
- LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
551
-
552
- for (int i = 0; i < n_kv; i++) {
553
- const char * name = lm_gguf_get_key(meta.get(), i);
554
- const enum lm_gguf_type type = lm_gguf_get_kv_type(meta.get(), i);
555
- const std::string type_name =
556
- type == LM_GGUF_TYPE_ARRAY
557
- ? format("%s[%s,%zu]", lm_gguf_type_name(type), lm_gguf_type_name(lm_gguf_get_arr_type(meta.get(), i)), lm_gguf_get_arr_n(meta.get(), i))
558
- : lm_gguf_type_name(type);
559
-
560
- std::string value = lm_gguf_kv_to_str(meta.get(), i);
561
- const size_t MAX_VALUE_LEN = 40;
562
- if (value.size() > MAX_VALUE_LEN) {
563
- value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
564
- }
565
- replace_all(value, "\n", "\\n");
566
-
567
- LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
568
- }
569
-
570
- // print type counts
571
- for (auto & kv : n_type) {
572
- if (kv.second == 0) {
573
- continue;
574
- }
575
-
576
- LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, lm_ggml_type_name(kv.first), kv.second);
577
- }
578
- }
579
-
580
- if (!llama_mmap::SUPPORTED) {
581
- LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
582
- use_mmap = false;
583
- }
584
-
585
- this->use_mmap = use_mmap;
586
- this->check_tensors = check_tensors;
587
- }
588
-
589
- std::string llama_model_loader::get_arch_name() const {
590
- return arch_name;
591
- }
592
-
593
- enum llm_arch llama_model_loader::get_arch() const {
594
- return llm_kv.arch;
595
- }
596
-
597
- const llama_model_loader::llama_tensor_weight * llama_model_loader::get_weight(const char * name) const {
598
- auto pos = weights_map.find(name);
599
- if (pos != weights_map.end()) {
600
- return &pos->second;
601
- }
602
-
603
- return nullptr;
604
- }
605
-
606
- const llama_model_loader::llama_tensor_weight & llama_model_loader::require_weight(const char * name) const {
607
- const llama_tensor_weight * weight = get_weight(name);
608
- if (!weight) {
609
- throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
610
- }
611
- return *weight;
612
- }
613
-
614
- struct lm_ggml_tensor * llama_model_loader::get_tensor_meta(const char * name) const {
615
- const auto * weight = get_weight(name);
616
- if (!weight) {
617
- return nullptr;
618
- }
619
- return weight->tensor;
620
- }
621
-
622
- struct lm_ggml_tensor * llama_model_loader::require_tensor_meta(const std::string & name) const {
623
- struct lm_ggml_tensor * tensor = get_tensor_meta(name.c_str());
624
- if (!tensor) {
625
- throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
626
- }
627
- return tensor;
628
- }
629
-
630
- const struct lm_ggml_tensor * llama_model_loader::check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const {
631
- const struct lm_ggml_tensor * cur = get_tensor_meta(name.c_str());
632
-
633
- if (cur == NULL) {
634
- if (!required) {
635
- return NULL;
636
- }
637
- throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
638
- }
639
-
640
- {
641
- bool is_ok = true;
642
- for (size_t i = 0; i < LM_GGML_MAX_DIMS; ++i) {
643
- if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) {
644
- is_ok = false;
645
- break;
646
- }
647
- }
648
- if (!is_ok) {
649
- throw std::runtime_error(
650
- format("%s: tensor '%s' has wrong shape; expected %s, got %s",
651
- __func__, name.c_str(),
652
- llama_format_tensor_shape(ne).c_str(),
653
- llama_format_tensor_shape(cur).c_str()));
654
- }
655
- }
656
-
657
- return cur;
658
- }
659
-
660
- struct lm_ggml_tensor * llama_model_loader::create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags) {
661
- const struct lm_ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
662
-
663
- if (cur == NULL) {
664
- return NULL;
665
- }
666
-
667
- bool duplicated = flags & TENSOR_DUPLICATED;
668
-
669
- struct lm_ggml_tensor * tensor = lm_ggml_dup_tensor(ctx, cur);
670
- lm_ggml_set_name(tensor, lm_ggml_get_name(cur));
671
-
672
- if (duplicated) {
673
- size_data += lm_ggml_nbytes(cur);
674
- } else {
675
- n_created++;
676
- }
677
-
678
- return tensor;
679
-
680
- }
681
-
682
- struct lm_ggml_tensor * llama_model_loader::create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required) {
683
- const struct lm_ggml_tensor * cur = check_tensor_dims(name, ne, required);
684
-
685
- if (cur == NULL) {
686
- return NULL;
687
- }
688
-
689
- if (cur->type != base->type) {
690
- throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), lm_ggml_type_name(base->type), lm_ggml_type_name(cur->type)));
691
- }
692
-
693
- std::array<int64_t, LM_GGML_MAX_DIMS> dims;
694
- for (size_t i = 0; i < LM_GGML_MAX_DIMS; ++i) {
695
- dims[i] = i < ne.size() ? ne.begin()[i] : 1;
696
- }
697
-
698
- struct lm_ggml_tensor * tensor = lm_ggml_view_4d(ctx, base,
699
- dims[0], dims[1], dims[2], dims[3],
700
- cur->nb[1], cur->nb[2], cur->nb[3],
701
- offset);
702
-
703
- lm_ggml_set_name(tensor, name.c_str());
704
-
705
- n_created++;
706
-
707
- return tensor;
708
- }
709
-
710
- void llama_model_loader::done_getting_tensors() const {
711
- if (n_created != n_tensors) {
712
- throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
713
- }
714
- }
715
-
716
- void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps) {
717
- if (use_mmap) {
718
- mappings.reserve(files.size());
719
- mmaps_used.reserve(files.size());
720
- for (const auto & file : files) {
721
- auto * reg = lm_ggml_backend_dev_backend_reg(lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_CPU));
722
- auto * is_numa_fn = (decltype(lm_ggml_is_numa) *) lm_ggml_backend_reg_get_proc_address(reg, "lm_ggml_backend_cpu_is_numa");
723
- std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, is_numa_fn()));
724
- mmaps_used.emplace_back(mapping->size(), 0);
725
- if (mlock_mmaps) {
726
- std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
727
- mlock_mmap->init(mapping->addr());
728
- mlock_mmaps->emplace_back(std::move(mlock_mmap));
729
- }
730
- mappings.emplace_back(std::move(mapping));
731
- }
732
- }
733
-
734
- // compute the total size of all tensors for progress reporting
735
- for (const auto & it : weights_map) {
736
- size_data += lm_ggml_nbytes(it.second.tensor);
737
- }
738
- }
739
-
740
- void llama_model_loader::get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const {
741
- LM_GGML_ASSERT(!mappings.empty());
742
- const auto & mapping = mappings.at(idx);
743
-
744
- *first = mapping->size();
745
- *last = 0;
746
- *addr = mapping->addr();
747
- for (lm_ggml_tensor * tensor = lm_ggml_get_first_tensor(ctx); tensor; tensor = lm_ggml_get_next_tensor(ctx, tensor)) {
748
- const auto * weight = get_weight(lm_ggml_get_name(tensor));
749
- if (!weight || weight->idx != idx) {
750
- continue;
751
- }
752
- *first = std::min(*first, weight->offs);
753
- *last = std::max(*last, weight->offs + lm_ggml_nbytes(tensor));
754
- }
755
- }
756
-
757
- void llama_model_loader::load_data_for(struct lm_ggml_tensor * cur) const {
758
- const auto & w = require_weight(lm_ggml_get_name(cur));
759
-
760
- if (use_mmap) {
761
- const auto & mapping = mappings.at(w.idx);
762
- if (cur->data == nullptr) {
763
- cur->data = (uint8_t *)mapping->addr() + w.offs;
764
- } else {
765
- memcpy(cur->data, (uint8_t *)mapping->addr() + w.offs, lm_ggml_nbytes(cur));
766
- }
767
- } else {
768
- LM_GGML_ASSERT(cur->data != nullptr);
769
- LM_GGML_ASSERT(w.idx < files.size());
770
- const auto & file = files.at(w.idx);
771
- file->seek(w.offs, SEEK_SET);
772
- file->read_raw(cur->data, lm_ggml_nbytes(cur));
773
- }
774
-
775
- if (check_tensors && !lm_ggml_validate_row_data(cur->type, cur->data, lm_ggml_nbytes(cur))) {
776
- throw std::runtime_error(format("tensor '%s' has invalid data", lm_ggml_get_name(cur)));
777
- }
778
- }
779
-
780
- bool llama_model_loader::load_all_data(
781
- struct lm_ggml_context * ctx,
782
- llama_buf_map & bufs,
783
- llama_mlocks * lmlocks,
784
- llama_progress_callback progress_callback,
785
- void * progress_callback_user_data) {
786
- LM_GGML_ASSERT(size_data != 0 && "call init_mappings() first");
787
-
788
- std::vector<no_init<uint8_t>> read_buf;
789
- std::vector<std::future<std::pair<lm_ggml_tensor *, bool>>> validation_result;
790
-
791
- // 4 staging buffers for async uploads, each sized 1MB seems to be a good default for single NVMe drives.
792
- // NVMe raid configurations might require more / larger buffers.
793
- constexpr size_t n_buffers = 4;
794
- constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB
795
-
796
- std::vector<lm_ggml_backend_buffer_t> host_buffers;
797
- std::vector<lm_ggml_backend_event_t> events;
798
- std::vector<void *> host_ptrs;
799
- size_t buffer_idx = 0; // buffer to use for async loads
800
- lm_ggml_backend_t upload_backend = [&](const char * func) -> lm_ggml_backend_t {
801
- if (use_mmap || check_tensors) {
802
- return nullptr;
803
- }
804
- // When not using mmaped io use async uploads from pinned memory to GPU memory.
805
- // First determine if the backend supports the necessary features for async uploads.
806
- auto * buf = bufs.count(0) ? bufs.at(0) : nullptr;
807
- if (!buf) {
808
- LLAMA_LOG_DEBUG("%s: no buffer found for async uploads\n", func);
809
- return nullptr;
810
- }
811
-
812
- auto * buft = lm_ggml_backend_buffer_get_type(buf);
813
- auto * dev = lm_ggml_backend_buft_get_device(buft);
814
- if (!dev) {
815
- LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func,
816
- lm_ggml_backend_buft_name(buft));
817
- return nullptr;
818
- }
819
-
820
- if (buft != lm_ggml_backend_dev_buffer_type(dev)) {
821
- LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func,
822
- lm_ggml_backend_buft_name(buft), lm_ggml_backend_dev_name(dev));
823
- return nullptr;
824
- }
825
-
826
- lm_ggml_backend_dev_props props;
827
- lm_ggml_backend_dev_get_props(dev, &props);
828
- if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) {
829
- LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func,
830
- lm_ggml_backend_dev_name(dev));
831
- return nullptr;
832
- }
833
-
834
- auto * host_buft = lm_ggml_backend_dev_host_buffer_type(dev);
835
- if (!host_buft) {
836
- LLAMA_LOG_DEBUG("%s: no host buffer type found for device %s\n", func,
837
- lm_ggml_backend_dev_name(dev));
838
- return nullptr;
839
- }
840
-
841
- // If the backend is supported, create pinned memory buffers and events for synchronisation.
842
- for (size_t idx = 0; idx < n_buffers; ++idx) {
843
- auto * buf = lm_ggml_backend_buft_alloc_buffer(host_buft, buffer_size);
844
- if (!buf) {
845
- LLAMA_LOG_DEBUG("%s: failed to allocate host buffer for async uploads for device %s\n", func,
846
- lm_ggml_backend_dev_name(dev));
847
- return nullptr;
848
- }
849
-
850
- host_buffers.emplace_back(buf);
851
- host_ptrs.emplace_back(lm_ggml_backend_buffer_get_base(buf));
852
-
853
- auto * event = lm_ggml_backend_event_new(dev);
854
- if (!event) {
855
- LLAMA_LOG_DEBUG("%s: failed to create event for async uploads for device %s\n", func,
856
- lm_ggml_backend_dev_name(dev));
857
- return nullptr;
858
- }
859
-
860
- events.emplace_back(event);
861
- }
862
-
863
- lm_ggml_backend_t backend = lm_ggml_backend_dev_init(dev, nullptr);
864
- if (!backend) {
865
- LLAMA_LOG_DEBUG("%s: failed to initialize backend for device %s for async uploads\n", func,
866
- lm_ggml_backend_dev_name(dev));
867
- return nullptr;
868
- }
869
-
870
- return backend;
871
- }(__func__);
872
-
873
- if (upload_backend) {
874
- LLAMA_LOG_DEBUG("%s: using async uploads for device %s, buffer type %s, backend %s\n", __func__,
875
- lm_ggml_backend_dev_name(lm_ggml_backend_get_device(upload_backend)),
876
- lm_ggml_backend_buft_name(lm_ggml_backend_buffer_get_type(bufs.at(0))),
877
- lm_ggml_backend_name(upload_backend));
878
- }
879
-
880
- for (struct lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur != NULL; cur = lm_ggml_get_next_tensor(ctx, cur)) {
881
- const auto * weight = get_weight(lm_ggml_get_name(cur));
882
- if (weight == nullptr) {
883
- // this can happen with split experts models
884
- continue;
885
- }
886
-
887
- if (progress_callback) {
888
- if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
889
- return false;
890
- }
891
- }
892
-
893
- size_t n_size = lm_ggml_nbytes(cur);
894
-
895
- if (use_mmap) {
896
- const auto & mapping = mappings.at(weight->idx);
897
- lm_ggml_backend_buffer_t buf_mmap = nullptr;
898
- if (bufs.count(weight->idx)) {
899
- buf_mmap = bufs.at(weight->idx);
900
- }
901
- uint8_t * data = (uint8_t *) mapping->addr() + weight->offs;
902
-
903
- if (check_tensors) {
904
- validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
905
- return std::make_pair(cur, lm_ggml_validate_row_data(cur->type, data, n_size));
906
- }));
907
- }
908
-
909
- LM_GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
910
- if (buf_mmap && cur->data == nullptr) {
911
- lm_ggml_backend_tensor_alloc(buf_mmap, cur, data);
912
- if (lmlocks) {
913
- const auto & lmlock = lmlocks->at(weight->idx);
914
- lmlock->grow_to(weight->offs + n_size);
915
- }
916
-
917
- auto & mmap_used = mmaps_used[weight->idx];
918
- mmap_used.first = std::min(mmap_used.first, weight->offs);
919
- mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
920
- } else {
921
- lm_ggml_backend_tensor_set(cur, data, 0, n_size);
922
- }
923
- } else {
924
- const auto & file = files.at(weight->idx);
925
- if (lm_ggml_backend_buffer_is_host(cur->buffer)) {
926
- file->seek(weight->offs, SEEK_SET);
927
- file->read_raw(cur->data, n_size);
928
- if (check_tensors) {
929
- validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
930
- return std::make_pair(cur, lm_ggml_validate_row_data(cur->type, cur->data, n_size));
931
- }));
932
- }
933
- } else {
934
- // If upload_backend is valid load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU.
935
- if (upload_backend) {
936
- file->seek(weight->offs, SEEK_SET);
937
-
938
- size_t bytes_read = 0;
939
-
940
- while (bytes_read < n_size) {
941
- size_t read_iteration = std::min<size_t>(buffer_size, n_size - bytes_read);
942
-
943
- lm_ggml_backend_event_synchronize(events[buffer_idx]);
944
- file->read_raw(host_ptrs[buffer_idx], read_iteration);
945
- lm_ggml_backend_tensor_set_async(upload_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration);
946
- lm_ggml_backend_event_record(events[buffer_idx], upload_backend);
947
-
948
- bytes_read += read_iteration;
949
- ++buffer_idx;
950
- buffer_idx %= n_buffers;
951
- }
952
- } else {
953
- read_buf.resize(n_size);
954
- file->seek(weight->offs, SEEK_SET);
955
- file->read_raw(read_buf.data(), n_size);
956
- lm_ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
957
- if (check_tensors && !lm_ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
958
- throw std::runtime_error(format("tensor '%s' has invalid data", lm_ggml_get_name(cur)));
959
- }
960
- }
961
- }
962
- }
963
-
964
- size_done += n_size;
965
- }
966
-
967
- // free temporary resources used for async uploads
968
- for (auto * event : events) {
969
- lm_ggml_backend_event_synchronize(event);
970
- lm_ggml_backend_event_free(event);
971
- }
972
- for (auto * buf : host_buffers) {
973
- lm_ggml_backend_buffer_free(buf);
974
- }
975
- lm_ggml_backend_free(upload_backend);
976
-
977
- // check validation results
978
- bool validation_failed = false;
979
- for (auto & future : validation_result) {
980
- auto result = future.get();
981
- if (!result.second) {
982
- LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, lm_ggml_get_name(result.first));
983
- validation_failed = true;
984
- }
985
- }
986
- if (validation_failed) {
987
- throw std::runtime_error("found tensors with invalid data");
988
- }
989
-
990
- // check if this is the last call and do final cleanup
991
- if (size_done >= size_data) {
992
- // unmap offloaded tensors and metadata
993
- if (use_mmap) {
994
- for (uint32_t idx = 0; idx < mappings.size(); idx++) {
995
- const auto & mmap_used = mmaps_used.at(idx);
996
- auto & mapping = mappings.at(idx);
997
- mapping->unmap_fragment(0, mmap_used.first);
998
- if (mmap_used.second != 0) {
999
- mapping->unmap_fragment(mmap_used.second, mapping->size());
1000
- }
1001
- }
1002
- }
1003
- if (progress_callback) {
1004
- // Even though the model is done loading, we still honor
1005
- // cancellation since we need to free allocations.
1006
- return progress_callback(1.0f, progress_callback_user_data);
1007
- }
1008
- }
1009
-
1010
- return true;
1011
- }
1
+ #include "llama-model-loader.h"
2
+
3
+ #include "ggml.h"
4
+
5
+ #include <array>
6
+ #include <cinttypes>
7
+ #include <cstring>
8
+ #include <future>
9
+
10
+ static const size_t kiB = 1024;
11
+ static const size_t MiB = 1024*kiB;
12
+ static const size_t GiB = 1024*MiB;
13
+
14
+ const char * llama_file_version_name(llama_fver version) {
15
+ switch (version) {
16
+ case LM_GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
17
+ case LM_GGUF_FILE_VERSION_V2: return "GGUF V2";
18
+ case LM_GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
19
+ }
20
+
21
+ return "unknown";
22
+ }
23
+
24
+ static std::string llama_model_ftype_name(llama_ftype ftype) {
25
+ if (ftype & LLAMA_FTYPE_GUESSED) {
26
+ return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
27
+ }
28
+
29
+ switch (ftype) {
30
+ case LLAMA_FTYPE_ALL_F32: return "all F32";
31
+ case LLAMA_FTYPE_MOSTLY_F16: return "F16";
32
+ case LLAMA_FTYPE_MOSTLY_BF16: return "BF16";
33
+ case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
34
+ case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
35
+ case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
36
+ case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
37
+ case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
38
+ case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium";
39
+ case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small";
40
+ case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
41
+ case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
42
+ case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
43
+ case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
44
+ case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
45
+ case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
46
+ case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
47
+ case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
48
+ case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary";
49
+ case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary";
50
+ case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
51
+ case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw";
52
+ case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw";
53
+ case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw";
54
+ case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw";
55
+ case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw";
56
+ case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw";
57
+ case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw";
58
+ case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw";
59
+ case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw";
60
+ case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw";
61
+ case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw";
62
+
63
+ default: return "unknown, may not work";
64
+ }
65
+ }
66
+
67
+ // return a list of splits for a given path
68
+ // for example, given "<name>-00002-of-00004.gguf", returns list of all 4 splits
69
+ static std::vector<std::string> llama_get_list_splits(const std::string & path, const int idx, const int n_split) {
70
+ std::vector<std::string> paths;
71
+ std::string split_prefix;
72
+ std::vector<char> buf(llama_path_max(), 0);
73
+
74
+ {
75
+ int ret = llama_split_prefix(buf.data(), buf.size(), path.c_str(), idx, n_split);
76
+ if (!ret) {
77
+ throw std::runtime_error(format("invalid split file name: %s", path.c_str()));
78
+ }
79
+ split_prefix = std::string(buf.data(), ret);
80
+ }
81
+
82
+ if (split_prefix.empty()) {
83
+ throw std::runtime_error(format("invalid split file: %s", path.c_str()));
84
+ }
85
+
86
+ for (int idx = 0; idx < n_split; ++idx) {
87
+ int ret = llama_split_path(buf.data(), buf.size(), split_prefix.c_str(), idx, n_split);
88
+ paths.push_back(std::string(buf.data(), ret));
89
+ }
90
+
91
+ return paths;
92
+ }
93
+
94
+ namespace GGUFMeta {
95
+ template <typename T, lm_gguf_type gt_, T (*gfun)(const lm_gguf_context *, const int64_t)>
96
+ struct GKV_Base_Type {
97
+ static constexpr lm_gguf_type gt = gt_;
98
+
99
+ static T getter(const lm_gguf_context * ctx, const int kid) {
100
+ return gfun(ctx, kid);
101
+ }
102
+ };
103
+
104
+ template<typename T> struct GKV_Base;
105
+
106
+ template<> struct GKV_Base<bool >: GKV_Base_Type<bool, LM_GGUF_TYPE_BOOL, lm_gguf_get_val_bool> {};
107
+ template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, LM_GGUF_TYPE_UINT8, lm_gguf_get_val_u8 > {};
108
+ template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, LM_GGUF_TYPE_UINT16, lm_gguf_get_val_u16 > {};
109
+ template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, LM_GGUF_TYPE_UINT32, lm_gguf_get_val_u32 > {};
110
+ template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, LM_GGUF_TYPE_UINT64, lm_gguf_get_val_u64 > {};
111
+ template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, LM_GGUF_TYPE_INT8, lm_gguf_get_val_i8 > {};
112
+ template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, LM_GGUF_TYPE_INT16, lm_gguf_get_val_i16 > {};
113
+ template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, LM_GGUF_TYPE_INT32, lm_gguf_get_val_i32 > {};
114
+ template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, LM_GGUF_TYPE_INT64, lm_gguf_get_val_i64 > {};
115
+ template<> struct GKV_Base<float >: GKV_Base_Type<float, LM_GGUF_TYPE_FLOAT32, lm_gguf_get_val_f32 > {};
116
+ template<> struct GKV_Base<double >: GKV_Base_Type<double, LM_GGUF_TYPE_FLOAT64, lm_gguf_get_val_f64 > {};
117
+ template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, LM_GGUF_TYPE_STRING, lm_gguf_get_val_str > {};
118
+
119
+ template<> struct GKV_Base<std::string> {
120
+ static constexpr lm_gguf_type gt = LM_GGUF_TYPE_STRING;
121
+
122
+ static std::string getter(const lm_gguf_context * ctx, const int kid) {
123
+ return lm_gguf_get_val_str(ctx, kid);
124
+ }
125
+ };
126
+
127
+ struct ArrayInfo {
128
+ const lm_gguf_type gt;
129
+ const size_t length;
130
+ const void * data;
131
+ };
132
+
133
+ template<> struct GKV_Base<ArrayInfo> {
134
+ public:
135
+ static constexpr lm_gguf_type gt = LM_GGUF_TYPE_ARRAY;
136
+ static ArrayInfo getter(const lm_gguf_context *ctx, const int k) {
137
+ const enum lm_gguf_type arr_type = lm_gguf_get_arr_type(ctx, k);
138
+ return ArrayInfo {
139
+ arr_type,
140
+ size_t(lm_gguf_get_arr_n(ctx, k)),
141
+ arr_type == LM_GGUF_TYPE_STRING ? nullptr : lm_gguf_get_arr_data(ctx, k),
142
+ };
143
+ }
144
+ };
145
+
146
+ template<typename T>
147
+ class GKV : public GKV_Base<T> {
148
+ GKV() = delete;
149
+
150
+ public:
151
+ static T get_kv(const lm_gguf_context * ctx, const int k) {
152
+ const enum lm_gguf_type kt = lm_gguf_get_kv_type(ctx, k);
153
+
154
+ if (kt != GKV::gt) {
155
+ throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
156
+ lm_gguf_get_key(ctx, k), lm_gguf_type_name(kt), lm_gguf_type_name(GKV::gt)));
157
+ }
158
+ return GKV::getter(ctx, k);
159
+ }
160
+
161
+ static const char * override_type_to_str(const llama_model_kv_override_type ty) {
162
+ switch (ty) {
163
+ case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool";
164
+ case LLAMA_KV_OVERRIDE_TYPE_INT: return "int";
165
+ case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
166
+ case LLAMA_KV_OVERRIDE_TYPE_STR: return "str";
167
+ }
168
+ return "unknown";
169
+ }
170
+
171
+ static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) {
172
+ if (!ovrd) { return false; }
173
+ if (ovrd->tag == expected_type) {
174
+ LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
175
+ __func__, override_type_to_str(ovrd->tag), ovrd->key);
176
+ switch (ovrd->tag) {
177
+ case LLAMA_KV_OVERRIDE_TYPE_BOOL: {
178
+ LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
179
+ } break;
180
+ case LLAMA_KV_OVERRIDE_TYPE_INT: {
181
+ LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
182
+ } break;
183
+ case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
184
+ LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
185
+ } break;
186
+ case LLAMA_KV_OVERRIDE_TYPE_STR: {
187
+ LLAMA_LOG_INFO("%s\n", ovrd->val_str);
188
+ } break;
189
+ default:
190
+ // Shouldn't be possible to end up here, but just in case...
191
+ throw std::runtime_error(
192
+ format("Unsupported attempt to override %s type for metadata key %s\n",
193
+ override_type_to_str(ovrd->tag), ovrd->key));
194
+ }
195
+ return true;
196
+ }
197
+ LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
198
+ __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag));
199
+ return false;
200
+ }
201
+
202
+ template<typename OT>
203
+ static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
204
+ try_override(OT & target, const struct llama_model_kv_override * ovrd) {
205
+ if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
206
+ target = ovrd->val_bool;
207
+ return true;
208
+ }
209
+ return false;
210
+ }
211
+
212
+ template<typename OT>
213
+ static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
214
+ try_override(OT & target, const struct llama_model_kv_override * ovrd) {
215
+ if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
216
+ target = ovrd->val_i64;
217
+ return true;
218
+ }
219
+ return false;
220
+ }
221
+
222
+ template<typename OT>
223
+ static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
224
+ try_override(T & target, const struct llama_model_kv_override * ovrd) {
225
+ if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
226
+ target = ovrd->val_f64;
227
+ return true;
228
+ }
229
+ return false;
230
+ }
231
+
232
+ template<typename OT>
233
+ static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
234
+ try_override(T & target, const struct llama_model_kv_override * ovrd) {
235
+ if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
236
+ target = ovrd->val_str;
237
+ return true;
238
+ }
239
+ return false;
240
+ }
241
+
242
+ static bool set(const lm_gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
243
+ if (try_override<T>(target, ovrd)) {
244
+ return true;
245
+ }
246
+ if (k < 0) { return false; }
247
+ target = get_kv(ctx, k);
248
+ return true;
249
+ }
250
+
251
+ static bool set(const lm_gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
252
+ return set(ctx, lm_gguf_find_key(ctx, key), target, ovrd);
253
+ }
254
+
255
+ static bool set(const lm_gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
256
+ return set(ctx, key.c_str(), target, ovrd);
257
+ }
258
+ };
259
+ }
260
+
261
+ template<typename T>
262
+ typename std::enable_if<std::is_integral<T>::value, bool>::type
263
+ llama_model_loader::get_arr_n(const std::string & key, T & result, bool required) {
264
+ const int kid = lm_gguf_find_key(meta.get(), key.c_str());
265
+
266
+ if (kid < 0) {
267
+ if (required) {
268
+ throw std::runtime_error(format("key not found in model: %s", key.c_str()));
269
+ }
270
+ return false;
271
+ }
272
+
273
+ struct GGUFMeta::ArrayInfo arr_info =
274
+ GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
275
+
276
+
277
+ result = arr_info.length;
278
+ return true;
279
+ }
+
+ template<typename T>
+ typename std::enable_if<std::is_integral<T>::value, bool>::type
+ llama_model_loader::get_arr_n(enum llm_kv kid, T & result, bool required) {
+     return get_arr_n(llm_kv(kid), result, required);
+ }
+
+ template bool llama_model_loader::get_arr_n(enum llm_kv kid, uint32_t & result, bool required);
+
+ template<typename T>
+ bool llama_model_loader::get_arr(const std::string & key, std::vector<T> & result, bool required) {
+     const int kid = lm_gguf_find_key(meta.get(), key.c_str());
+
+     if (kid < 0 || lm_gguf_get_kv_type(meta.get(), kid) != LM_GGUF_TYPE_ARRAY) {
+         if (required) {
+             throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
+         }
+         return false;
+     }
+
+     struct GGUFMeta::ArrayInfo arr_info =
+         GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+
+     switch (arr_info.gt) {
+         case LM_GGUF_TYPE_FLOAT32: LM_GGML_ASSERT((std::is_same<T, float>::value)); break;
+         case LM_GGUF_TYPE_INT32:   LM_GGML_ASSERT(
+                                        (std::is_same<T, int32_t>::value) ||
+                                        (std::is_same<T, uint32_t>::value)); break;
+         default:
+             throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
+     }
+
+     result.resize(arr_info.length);
+     result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
+
+     return true;
+ }
+
+ template<typename T, size_t N_MAX>
+ bool llama_model_loader::get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required) {
+     const int kid = lm_gguf_find_key(meta.get(), key.c_str());
+
+     if (kid < 0 || lm_gguf_get_kv_type(meta.get(), kid) != LM_GGUF_TYPE_ARRAY) {
+         if (required) {
+             throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
+         }
+         return false;
+     }
+
+     struct GGUFMeta::ArrayInfo arr_info =
+         GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+
+     switch (arr_info.gt) {
+         case LM_GGUF_TYPE_FLOAT32: LM_GGML_ASSERT((std::is_same<T, float>::value)); break;
+         case LM_GGUF_TYPE_INT32:   LM_GGML_ASSERT(
+                                        (std::is_same<T, int32_t>::value) ||
+                                        (std::is_same<T, uint32_t>::value)); break;
+         default:
+             throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
+     }
+
+     if (arr_info.length > N_MAX) {
+         throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX));
+     }
+
+     std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
+
+     return true;
+ }
+
+ template<typename T>
+ bool llama_model_loader::get_arr(enum llm_kv kid, T & result, bool required) {
+     return get_arr(llm_kv(kid), result, required);
+ }
+
+ template<typename T>
+ bool llama_model_loader::get_key(const std::string & key, T & result, bool required) {
+     auto it = kv_overrides.find(key);
+
+     const struct llama_model_kv_override * override =
+         it != kv_overrides.end() ? &it->second : nullptr;
+
+     const bool found = GGUFMeta::GKV<T>::set(meta.get(), key, result, override);
+
+     if (required && !found) {
+         throw std::runtime_error(format("key not found in model: %s", key.c_str()));
+     }
+
+     return found;
+ }
+
+ template<typename T>
+ bool llama_model_loader::get_key(enum llm_kv kid, T & result, bool required) {
+     return get_key(llm_kv(kid), result, required);
+ }
+
+ template bool llama_model_loader::get_key<bool>       (enum llm_kv kid, bool & result, bool required);
+ template bool llama_model_loader::get_key<float>      (enum llm_kv kid, float & result, bool required);
+ template bool llama_model_loader::get_key<uint32_t>   (enum llm_kv kid, uint32_t & result, bool required);
+ template bool llama_model_loader::get_key<std::string>(enum llm_kv kid, std::string & result, bool required);
+
+ template<>
+ bool llama_model_loader::get_key(enum llm_kv kid, enum llama_pooling_type & result, bool required) {
+     uint32_t tmp;
+     const bool found = get_key(kid, tmp, required);
+     if (found) {
+         result = (enum llama_pooling_type) tmp;
+     } else {
+         result = LLAMA_POOLING_TYPE_UNSPECIFIED;
+     }
+     return found;
+ }
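// A minimal usage sketch for the getters above, in the style of the hyperparameter-loading
// code that drives them. It assumes the internal llama-model-loader.h, llama-arch.h and
// llama-hparams.h declarations are visible, that `required` defaults to true in the loader
// header (as in upstream llama.cpp), and that `ml` / `hparams` come from the surrounding code.
static void example_read_hparams(llama_model_loader & ml, llama_hparams & hparams) {
    // required scalar keys: get_key() throws if the key is missing
    ml.get_key(LLM_KV_CONTEXT_LENGTH,   hparams.n_ctx_train);
    ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);

    // optional key: pass required = false and keep the previous value on a miss
    ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, /*required =*/ false);

    // array length only: get_arr_n() stores the element count of an array KV
    uint32_t n_vocab = 0;
    ml.get_arr_n(LLM_KV_TOKENIZER_LIST, n_vocab, /*required =*/ false);
    (void) n_vocab;
}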
+
+ // get array of n <= N_MAX elements, or a single element repeated n times
+ template<typename T, size_t N_MAX>
+ bool llama_model_loader::get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required) {
+     const int kid = lm_gguf_find_key(meta.get(), key.c_str());
+
+     if (kid < 0) {
+         if (required) {
+             throw std::runtime_error(format("key not found in model: %s", key.c_str()));
+         }
+         return false;
+     }
+
+     if (n > N_MAX) {
+         throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str()));
+     }
+
+     if (lm_gguf_get_kv_type(meta.get(), kid) == LM_GGUF_TYPE_ARRAY) {
+         struct GGUFMeta::ArrayInfo arr_info =
+             GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+
+         if (n != arr_info.length) {
+             throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", key.c_str(), n, (uint32_t) arr_info.length));
+         }
+
+         return get_arr(key, result, required);
+     }
+
+     T value;
+
+     bool ok = get_key(key, value, required);
+     if (!ok) {
+         return false;
+     }
+
+     for (uint32_t i = 0; i < n; i++) {
+         result[i] = value;
+     }
+
+     return true;
+ }
+
+ template<typename T>
+ bool llama_model_loader::get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required) {
+     return get_key_or_arr(llm_kv(kid), result, n, required);
+ }
+
+ // TODO: this is not very clever - figure out something better
+ template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required);
+ template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required);
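// A minimal sketch of why the std::array<uint32_t, 512> instantiation above exists:
// per-layer hyperparameters such as the attention head count may be stored either as a
// single scalar (broadcast to every layer) or as an array with one entry per layer, and
// 512 corresponds to LLAMA_MAX_LAYERS in llama-hparams.h. `ml` and `hparams` are assumed
// to come from the surrounding loader code.
static void example_read_per_layer_heads(llama_model_loader & ml, llama_hparams & hparams) {
    // fills hparams.n_head_arr either with the broadcast scalar or with the
    // per-layer array stored under the attention.head_count metadata key
    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer);
}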
+
+ llama_model_loader::llama_model_loader(
+         const std::string & fname,
+         std::vector<std::string> & splits,
+         bool use_mmap,
+         bool check_tensors,
+         const struct llama_model_kv_override * param_overrides_p) {
+     int trace = 0;
+     if (getenv("LLAMA_TRACE")) {
+         trace = atoi(getenv("LLAMA_TRACE"));
+     }
+
+     if (param_overrides_p != nullptr) {
+         for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
+             kv_overrides.insert({std::string(p->key), *p});
+         }
+     }
+
+     // Load the main GGUF
+     struct lm_ggml_context * ctx = NULL;
+     struct lm_gguf_init_params params = {
+         /*.no_alloc = */ true,
+         /*.ctx = */ &ctx,
+     };
+
+     meta.reset(lm_gguf_init_from_file(fname.c_str(), params));
+     if (!meta) {
+         throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
+     }
+
+     get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
+     llm_kv = LLM_KV(llm_arch_from_string(arch_name));
+
+     files.emplace_back(new llama_file(fname.c_str(), "rb"));
+     contexts.emplace_back(ctx);
+
+     // Save the tensor data offsets of the main file.
+     // For subsidiary files, the tensor data offsets in `meta` must not be used,
+     // so we build a unified tensor index for the weights.
+     for (lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur; cur = lm_ggml_get_next_tensor(ctx, cur)) {
+         std::string tensor_name = std::string(cur->name);
+         // make sure there are no duplicated tensor names
+         if (weights_map.find(tensor_name) != weights_map.end()) {
+             throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", lm_ggml_get_name(cur)));
+         }
+         n_elements += lm_ggml_nelements(cur);
+         n_bytes    += lm_ggml_nbytes(cur);
+         weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), 0, meta.get(), cur));
+     }
+     uint16_t n_split = 0;
+     get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
+
+     // Load additional GGML contexts
+     if (n_split > 1) {
+         // make sure the main file is loaded first
+         uint16_t idx = 0;
+         const std::string kv_split_no = llm_kv(LLM_KV_SPLIT_NO);
+         get_key(kv_split_no, idx);
+         if (idx != 0) {
+             throw std::runtime_error(format("illegal split file idx: %d (file: %s), model must be loaded with the first split", idx, fname.c_str()));
+         }
+
+         // generate the list of splits if needed
+         if (splits.empty()) {
+             splits = llama_get_list_splits(fname, idx, n_split);
+         }
+
+         // in case the user gives a custom list of splits, check that it matches the expected number
+         if (n_split != (uint16_t)splits.size()) {
+             throw std::runtime_error(format("invalid split count, given: %zu splits, but expected %d", splits.size(), n_split));
+         }
+
+         if (trace > 0) {
+             LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
+         }
+
+         // load the other splits
+         for (idx = 1; idx < n_split; idx++) {
+             const char * fname_split = splits[idx].c_str();
+
+             struct lm_gguf_init_params split_params = {
+                 /*.no_alloc = */ true,
+                 /*.ctx = */ &ctx,
+             };
+             lm_gguf_context_ptr ctx_gguf { lm_gguf_init_from_file(fname_split, split_params) };
+             if (!ctx_gguf) {
+                 throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, fname_split));
+             }
+
+             // check idx
+             {
+                 const int kid = lm_gguf_find_key(ctx_gguf.get(), kv_split_no.c_str());
+                 if (kid < 0) {
+                     throw std::runtime_error(format("missing key %s in GGUF split %s", kv_split_no.c_str(), fname_split));
+                 }
+                 int idx_gguf = lm_gguf_get_val_u16(ctx_gguf.get(), kid);
+                 if (idx_gguf != idx) {
+                     throw std::runtime_error(format("invalid split file idx: %d (file: %s), expected %d", idx_gguf, fname_split, idx));
+                 }
+             }
+
+             files.emplace_back(new llama_file(fname_split, "rb"));
+             contexts.emplace_back(ctx);
+
+             // Save the tensor data offset info of this shard.
+             for (lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur; cur = lm_ggml_get_next_tensor(ctx, cur)) {
+                 std::string tensor_name = std::string(cur->name);
+                 // make sure there are no duplicated tensor names
+                 if (weights_map.find(tensor_name) != weights_map.end()) {
+                     throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", lm_ggml_get_name(cur)));
+                 }
+                 n_elements += lm_ggml_nelements(cur);
+                 n_bytes    += lm_ggml_nbytes(cur);
+                 weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), idx, ctx_gguf.get(), cur));
+             }
+         }
+
+         get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
+
+         // sanity check
+         {
+             const int n_tensors_loaded = (int) weights_map.size();
+             if (n_tensors != n_tensors_loaded) {
+                 throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
+             }
+         }
+
+         LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1);
+     }
+
+     n_kv      = lm_gguf_get_n_kv(meta.get());
+     n_tensors = weights_map.size();
+
+     fver = (enum llama_fver) lm_gguf_get_version(meta.get());
+
+     LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
+             __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
+
+     // determine file type based on the number of tensors for each quantization and print meta data
+     // TODO: make optional
+     {
+         std::map<enum lm_ggml_type, uint32_t> n_type;
+
+         uint32_t n_type_max = 0;
+         enum lm_ggml_type type_max = LM_GGML_TYPE_F32;
+
+         for (const auto & it : weights_map) {
+             const llama_tensor_weight & w = it.second;
+             const lm_ggml_tensor * tensor = w.tensor;
+
+             enum lm_ggml_type type = tensor->type;
+
+             n_type[type]++;
+
+             if (n_type_max < n_type[type]) {
+                 n_type_max = n_type[type];
+                 type_max   = type;
+             }
+
+             if (trace > 0) {
+                 const uint16_t sid = w.idx;
+                 LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ]\n", __func__, sid, lm_ggml_get_name(tensor), lm_ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
+             }
+         }
+
+         switch (type_max) {
+             case LM_GGML_TYPE_F32:     ftype = LLAMA_FTYPE_ALL_F32;        break;
+             case LM_GGML_TYPE_F16:     ftype = LLAMA_FTYPE_MOSTLY_F16;     break;
+             case LM_GGML_TYPE_BF16:    ftype = LLAMA_FTYPE_MOSTLY_BF16;    break;
+             case LM_GGML_TYPE_Q4_0:    ftype = LLAMA_FTYPE_MOSTLY_Q4_0;    break;
+             case LM_GGML_TYPE_Q4_1:    ftype = LLAMA_FTYPE_MOSTLY_Q4_1;    break;
+             case LM_GGML_TYPE_Q5_0:    ftype = LLAMA_FTYPE_MOSTLY_Q5_0;    break;
+             case LM_GGML_TYPE_Q5_1:    ftype = LLAMA_FTYPE_MOSTLY_Q5_1;    break;
+             case LM_GGML_TYPE_Q8_0:    ftype = LLAMA_FTYPE_MOSTLY_Q8_0;    break;
+             case LM_GGML_TYPE_Q2_K:    ftype = LLAMA_FTYPE_MOSTLY_Q2_K;    break;
+             case LM_GGML_TYPE_Q3_K:    ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M;  break;
+             case LM_GGML_TYPE_Q4_K:    ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;  break;
+             case LM_GGML_TYPE_Q5_K:    ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M;  break;
+             case LM_GGML_TYPE_Q6_K:    ftype = LLAMA_FTYPE_MOSTLY_Q6_K;    break;
+             case LM_GGML_TYPE_TQ1_0:   ftype = LLAMA_FTYPE_MOSTLY_TQ1_0;   break;
+             case LM_GGML_TYPE_TQ2_0:   ftype = LLAMA_FTYPE_MOSTLY_TQ2_0;   break;
+             case LM_GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
+             case LM_GGML_TYPE_IQ2_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS;  break;
+             case LM_GGML_TYPE_IQ2_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ2_S;   break;
+             case LM_GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
+             case LM_GGML_TYPE_IQ1_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_S;   break;
+             case LM_GGML_TYPE_IQ1_M:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_M;   break;
+             case LM_GGML_TYPE_IQ4_NL:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL;  break;
+             case LM_GGML_TYPE_IQ4_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS;  break;
+             case LM_GGML_TYPE_IQ3_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ3_S;   break;
+             default:
+                 {
+                     LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, lm_ggml_type_name(type_max));
+                     ftype = LLAMA_FTYPE_ALL_F32;
+                 } break;
+         }
+
+         // this is a way to mark that we have "guessed" the file type
+         ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
+
+         {
+             const int kid = lm_gguf_find_key(meta.get(), "general.file_type"); // TODO: use LLM_KV
+             if (kid >= 0) {
+                 ftype = (llama_ftype) lm_gguf_get_val_u32(meta.get(), kid);
+             }
+         }
+
+         LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
+
+         for (int i = 0; i < n_kv; i++) {
+             const char * name = lm_gguf_get_key(meta.get(), i);
+             const enum lm_gguf_type type = lm_gguf_get_kv_type(meta.get(), i);
+             const std::string type_name =
+                 type == LM_GGUF_TYPE_ARRAY
+                 ? format("%s[%s,%zu]", lm_gguf_type_name(type), lm_gguf_type_name(lm_gguf_get_arr_type(meta.get(), i)), lm_gguf_get_arr_n(meta.get(), i))
+                 : lm_gguf_type_name(type);
+
+             std::string value = lm_gguf_kv_to_str(meta.get(), i);
+             const size_t MAX_VALUE_LEN = 40;
+             if (value.size() > MAX_VALUE_LEN) {
+                 value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
+             }
+             replace_all(value, "\n", "\\n");
+
+             LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
+         }
+
+         // print type counts
+         for (auto & kv : n_type) {
+             if (kv.second == 0) {
+                 continue;
+             }
+
+             LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, lm_ggml_type_name(kv.first), kv.second);
+         }
+     }
+
+     if (!llama_mmap::SUPPORTED) {
+         LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
+         use_mmap = false;
+     }
+
+     this->use_mmap = use_mmap;
+     this->check_tensors = check_tensors;
+ }
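// A minimal sketch of the split naming convention the constructor above relies on, assuming
// the public llama_split_path() helper from llama.h (which formats shard paths as
// "<prefix>-%05d-of-%05d.gguf"); the prefix below is a placeholder.
#include <string>
#include <vector>
#include "llama.h"

static std::vector<std::string> example_split_paths(const std::string & prefix, int n_split) {
    std::vector<std::string> paths;
    for (int i = 0; i < n_split; ++i) {
        char buf[512];
        // i = 0, n_split = 3 -> "<prefix>-00001-of-00003.gguf", and so on
        llama_split_path(buf, sizeof(buf), prefix.c_str(), i, n_split);
        paths.emplace_back(buf);
    }
    return paths;
}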
+
+ std::string llama_model_loader::get_arch_name() const {
+     return arch_name;
+ }
+
+ enum llm_arch llama_model_loader::get_arch() const {
+     return llm_kv.arch;
+ }
+
+ const llama_model_loader::llama_tensor_weight * llama_model_loader::get_weight(const char * name) const {
+     auto pos = weights_map.find(name);
+     if (pos != weights_map.end()) {
+         return &pos->second;
+     }
+
+     return nullptr;
+ }
+
+ const llama_model_loader::llama_tensor_weight & llama_model_loader::require_weight(const char * name) const {
+     const llama_tensor_weight * weight = get_weight(name);
+     if (!weight) {
+         throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
+     }
+     return *weight;
+ }
+
+ struct lm_ggml_tensor * llama_model_loader::get_tensor_meta(const char * name) const {
+     const auto * weight = get_weight(name);
+     if (!weight) {
+         return nullptr;
+     }
+     return weight->tensor;
+ }
+
+ struct lm_ggml_tensor * llama_model_loader::require_tensor_meta(const std::string & name) const {
+     struct lm_ggml_tensor * tensor = get_tensor_meta(name.c_str());
+     if (!tensor) {
+         throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
+     }
+     return tensor;
+ }
+
+ const struct lm_ggml_tensor * llama_model_loader::check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const {
+     const struct lm_ggml_tensor * cur = get_tensor_meta(name.c_str());
+
+     if (cur == NULL) {
+         if (!required) {
+             return NULL;
+         }
+         throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
+     }
+
+     {
+         bool is_ok = true;
+         for (size_t i = 0; i < LM_GGML_MAX_DIMS; ++i) {
+             if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) {
+                 is_ok = false;
+                 break;
+             }
+         }
+         if (!is_ok) {
+             throw std::runtime_error(
+                 format("%s: tensor '%s' has wrong shape; expected %s, got %s",
+                     __func__, name.c_str(),
+                     llama_format_tensor_shape(ne).c_str(),
+                     llama_format_tensor_shape(cur).c_str()));
+         }
+     }
+
+     return cur;
+ }
+
+ struct lm_ggml_tensor * llama_model_loader::create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags) {
+     const struct lm_ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
+
+     if (cur == NULL) {
+         return NULL;
+     }
+
+     bool duplicated = flags & TENSOR_DUPLICATED;
+
+     struct lm_ggml_tensor * tensor = lm_ggml_dup_tensor(ctx, cur);
+     lm_ggml_set_name(tensor, lm_ggml_get_name(cur));
+
+     if (duplicated) {
+         size_data += lm_ggml_nbytes(cur);
+     } else {
+         n_created++;
+     }
+
+     return tensor;
+ }
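// A minimal sketch of how the flags argument of create_tensor() is typically combined when
// materializing model weights (TENSOR_NOT_REQUIRED and TENSOR_DUPLICATED are the loader's own
// flag bits; the tensor names and shapes below are placeholders, and `ml` / `ctx` are assumed
// to come from the surrounding model-loading code).
static void example_create_tensors(llama_model_loader & ml, struct lm_ggml_context * ctx) {
    // required tensor: throws if it is missing or has an unexpected shape
    lm_ggml_tensor * tok_embd = ml.create_tensor(ctx, "token_embd.weight", { 4096, 32000 }, 0);

    // optional tensor: returns nullptr instead of throwing when it is absent
    lm_ggml_tensor * output = ml.create_tensor(ctx, "output.weight", { 4096, 32000 },
                                               llama_model_loader::TENSOR_NOT_REQUIRED);

    // common fallback: reuse the embedding weights as the output head; the DUPLICATED flag
    // adds the bytes to size_data instead of bumping n_created (see the code above)
    if (output == nullptr) {
        output = ml.create_tensor(ctx, "token_embd.weight", { 4096, 32000 },
                                  llama_model_loader::TENSOR_DUPLICATED);
    }

    (void) tok_embd;
    (void) output;
}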
+
+ struct lm_ggml_tensor * llama_model_loader::create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required) {
+     const struct lm_ggml_tensor * cur = check_tensor_dims(name, ne, required);
+
+     if (cur == NULL) {
+         return NULL;
+     }
+
+     if (cur->type != base->type) {
+         throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), lm_ggml_type_name(base->type), lm_ggml_type_name(cur->type)));
+     }
+
+     std::array<int64_t, LM_GGML_MAX_DIMS> dims;
+     for (size_t i = 0; i < LM_GGML_MAX_DIMS; ++i) {
+         dims[i] = i < ne.size() ? ne.begin()[i] : 1;
+     }
+
+     struct lm_ggml_tensor * tensor = lm_ggml_view_4d(ctx, base,
+         dims[0], dims[1], dims[2], dims[3],
+         cur->nb[1], cur->nb[2], cur->nb[3],
+         offset);
+
+     lm_ggml_set_name(tensor, name.c_str());
+
+     n_created++;
+
+     return tensor;
+ }
+
+ void llama_model_loader::done_getting_tensors() const {
+     if (n_created != n_tensors) {
+         throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
+     }
+ }
+
+ void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps) {
+     if (use_mmap) {
+         mappings.reserve(files.size());
+         mmaps_used.reserve(files.size());
+         for (const auto & file : files) {
+             auto * reg = lm_ggml_backend_dev_backend_reg(lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_CPU));
+             auto * is_numa_fn = (decltype(lm_ggml_is_numa) *) lm_ggml_backend_reg_get_proc_address(reg, "lm_ggml_backend_cpu_is_numa");
+             std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, is_numa_fn()));
+             mmaps_used.emplace_back(mapping->size(), 0);
+             if (mlock_mmaps) {
+                 std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
+                 mlock_mmap->init(mapping->addr());
+                 mlock_mmaps->emplace_back(std::move(mlock_mmap));
+             }
+             mappings.emplace_back(std::move(mapping));
+         }
+     }
+
+     // compute the total size of all tensors for progress reporting
+     for (const auto & it : weights_map) {
+         size_data += lm_ggml_nbytes(it.second.tensor);
+     }
+ }
+
+ void llama_model_loader::get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const {
+     LM_GGML_ASSERT(!mappings.empty());
+     const auto & mapping = mappings.at(idx);
+
+     *first = mapping->size();
+     *last  = 0;
+     *addr = mapping->addr();
+     for (lm_ggml_tensor * tensor = lm_ggml_get_first_tensor(ctx); tensor; tensor = lm_ggml_get_next_tensor(ctx, tensor)) {
+         const auto * weight = get_weight(lm_ggml_get_name(tensor));
+         if (!weight || weight->idx != idx) {
+             continue;
+         }
+         *first = std::min(*first, weight->offs);
+         *last  = std::max(*last,  weight->offs + lm_ggml_nbytes(tensor));
+     }
+ }
+
+ void llama_model_loader::load_data_for(struct lm_ggml_tensor * cur) const {
+     const auto & w = require_weight(lm_ggml_get_name(cur));
+
+     if (use_mmap) {
+         const auto & mapping = mappings.at(w.idx);
+         if (cur->data == nullptr) {
+             cur->data = (uint8_t *)mapping->addr() + w.offs;
+         } else {
+             memcpy(cur->data, (uint8_t *)mapping->addr() + w.offs, lm_ggml_nbytes(cur));
+         }
+     } else {
+         LM_GGML_ASSERT(cur->data != nullptr);
+         LM_GGML_ASSERT(w.idx < files.size());
+         const auto & file = files.at(w.idx);
+         file->seek(w.offs, SEEK_SET);
+         file->read_raw(cur->data, lm_ggml_nbytes(cur));
+     }
+
+     if (check_tensors && !lm_ggml_validate_row_data(cur->type, cur->data, lm_ggml_nbytes(cur))) {
+         throw std::runtime_error(format("tensor '%s' has invalid data", lm_ggml_get_name(cur)));
+     }
+ }
+
+ bool llama_model_loader::load_all_data(
+         struct lm_ggml_context * ctx,
+         llama_buf_map & bufs,
+         llama_mlocks * lmlocks,
+         llama_progress_callback progress_callback,
+         void * progress_callback_user_data) {
+     LM_GGML_ASSERT(size_data != 0 && "call init_mappings() first");
+
+     std::vector<no_init<uint8_t>> read_buf;
+     std::vector<std::future<std::pair<lm_ggml_tensor *, bool>>> validation_result;
+
+     // 4 staging buffers for async uploads; 1 MB each seems to be a good default for single NVMe drives.
+     // NVMe raid configurations might require more / larger buffers.
+     constexpr size_t n_buffers = 4;
+     constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB
+
+     std::vector<lm_ggml_backend_buffer_t> host_buffers;
+     std::vector<lm_ggml_backend_event_t> events;
+     std::vector<void *> host_ptrs;
+     size_t buffer_idx = 0; // buffer to use for async loads
+     lm_ggml_backend_t upload_backend = [&](const char * func) -> lm_ggml_backend_t {
+         if (use_mmap || check_tensors) {
+             return nullptr;
+         }
+         // When not using mmapped I/O, use async uploads from pinned memory to GPU memory.
+         // First determine if the backend supports the necessary features for async uploads.
+         auto * buf = bufs.count(0) ? bufs.at(0) : nullptr;
+         if (!buf) {
+             LLAMA_LOG_DEBUG("%s: no buffer found for async uploads\n", func);
+             return nullptr;
+         }
+
+         auto * buft = lm_ggml_backend_buffer_get_type(buf);
+         auto * dev = lm_ggml_backend_buft_get_device(buft);
+         if (!dev) {
+             LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func,
+                 lm_ggml_backend_buft_name(buft));
+             return nullptr;
+         }
+
+         if (buft != lm_ggml_backend_dev_buffer_type(dev)) {
+             LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func,
+                 lm_ggml_backend_buft_name(buft), lm_ggml_backend_dev_name(dev));
+             return nullptr;
+         }
+
+         lm_ggml_backend_dev_props props;
+         lm_ggml_backend_dev_get_props(dev, &props);
+         if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) {
+             LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func,
+                 lm_ggml_backend_dev_name(dev));
+             return nullptr;
+         }
+
+         auto * host_buft = lm_ggml_backend_dev_host_buffer_type(dev);
+         if (!host_buft) {
+             LLAMA_LOG_DEBUG("%s: no host buffer type found for device %s\n", func,
+                 lm_ggml_backend_dev_name(dev));
+             return nullptr;
+         }
+
+         // If the backend is supported, create pinned memory buffers and events for synchronisation.
+         for (size_t idx = 0; idx < n_buffers; ++idx) {
+             auto * buf = lm_ggml_backend_buft_alloc_buffer(host_buft, buffer_size);
+             if (!buf) {
+                 LLAMA_LOG_DEBUG("%s: failed to allocate host buffer for async uploads for device %s\n", func,
+                     lm_ggml_backend_dev_name(dev));
+                 return nullptr;
+             }
+
+             host_buffers.emplace_back(buf);
+             host_ptrs.emplace_back(lm_ggml_backend_buffer_get_base(buf));
+
+             auto * event = lm_ggml_backend_event_new(dev);
+             if (!event) {
+                 LLAMA_LOG_DEBUG("%s: failed to create event for async uploads for device %s\n", func,
+                     lm_ggml_backend_dev_name(dev));
+                 return nullptr;
+             }
+
+             events.emplace_back(event);
+         }
+
+         lm_ggml_backend_t backend = lm_ggml_backend_dev_init(dev, nullptr);
+         if (!backend) {
+             LLAMA_LOG_DEBUG("%s: failed to initialize backend for device %s for async uploads\n", func,
+                 lm_ggml_backend_dev_name(dev));
+             return nullptr;
+         }
+
+         return backend;
+     }(__func__);
+
+     if (upload_backend) {
+         LLAMA_LOG_DEBUG("%s: using async uploads for device %s, buffer type %s, backend %s\n", __func__,
+             lm_ggml_backend_dev_name(lm_ggml_backend_get_device(upload_backend)),
+             lm_ggml_backend_buft_name(lm_ggml_backend_buffer_get_type(bufs.at(0))),
+             lm_ggml_backend_name(upload_backend));
+     }
+
+     for (struct lm_ggml_tensor * cur = lm_ggml_get_first_tensor(ctx); cur != NULL; cur = lm_ggml_get_next_tensor(ctx, cur)) {
+         const auto * weight = get_weight(lm_ggml_get_name(cur));
+         if (weight == nullptr) {
+             // this can happen with split-experts models
+             continue;
+         }
+
+         if (progress_callback) {
+             if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
+                 return false;
+             }
+         }
+
+         size_t n_size = lm_ggml_nbytes(cur);
+
+         if (use_mmap) {
+             const auto & mapping = mappings.at(weight->idx);
+             lm_ggml_backend_buffer_t buf_mmap = nullptr;
+             if (bufs.count(weight->idx)) {
+                 buf_mmap = bufs.at(weight->idx);
+             }
+             uint8_t * data = (uint8_t *) mapping->addr() + weight->offs;
+
+             if (check_tensors) {
+                 validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
+                     return std::make_pair(cur, lm_ggml_validate_row_data(cur->type, data, n_size));
+                 }));
+             }
+
+             LM_GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
+             if (buf_mmap && cur->data == nullptr) {
+                 lm_ggml_backend_tensor_alloc(buf_mmap, cur, data);
+                 if (lmlocks) {
+                     const auto & lmlock = lmlocks->at(weight->idx);
+                     lmlock->grow_to(weight->offs + n_size);
+                 }
+
+                 auto & mmap_used = mmaps_used[weight->idx];
+                 mmap_used.first  = std::min(mmap_used.first,  weight->offs);
+                 mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
+             } else {
+                 lm_ggml_backend_tensor_set(cur, data, 0, n_size);
+             }
+         } else {
+             const auto & file = files.at(weight->idx);
+             if (lm_ggml_backend_buffer_is_host(cur->buffer)) {
+                 file->seek(weight->offs, SEEK_SET);
+                 file->read_raw(cur->data, n_size);
+                 if (check_tensors) {
+                     validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
+                         return std::make_pair(cur, lm_ggml_validate_row_data(cur->type, cur->data, n_size));
+                     }));
+                 }
+             } else {
+                 // If upload_backend is valid, load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU.
+                 if (upload_backend) {
+                     file->seek(weight->offs, SEEK_SET);
+
+                     size_t bytes_read = 0;
+
+                     while (bytes_read < n_size) {
+                         size_t read_iteration = std::min<size_t>(buffer_size, n_size - bytes_read);
+
+                         lm_ggml_backend_event_synchronize(events[buffer_idx]);
+                         file->read_raw(host_ptrs[buffer_idx], read_iteration);
+                         lm_ggml_backend_tensor_set_async(upload_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration);
+                         lm_ggml_backend_event_record(events[buffer_idx], upload_backend);
+
+                         bytes_read += read_iteration;
+                         ++buffer_idx;
+                         buffer_idx %= n_buffers;
+                     }
+                 } else {
+                     read_buf.resize(n_size);
+                     file->seek(weight->offs, SEEK_SET);
+                     file->read_raw(read_buf.data(), n_size);
+                     lm_ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
+                     if (check_tensors && !lm_ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
+                         throw std::runtime_error(format("tensor '%s' has invalid data", lm_ggml_get_name(cur)));
+                     }
+                 }
+             }
+         }
+
+         size_done += n_size;
+     }
+
+     // free temporary resources used for async uploads
+     for (auto * event : events) {
+         lm_ggml_backend_event_synchronize(event);
+         lm_ggml_backend_event_free(event);
+     }
+     for (auto * buf : host_buffers) {
+         lm_ggml_backend_buffer_free(buf);
+     }
+     lm_ggml_backend_free(upload_backend);
+
+     // check validation results
+     bool validation_failed = false;
+     for (auto & future : validation_result) {
+         auto result = future.get();
+         if (!result.second) {
+             LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, lm_ggml_get_name(result.first));
+             validation_failed = true;
+         }
+     }
+     if (validation_failed) {
+         throw std::runtime_error("found tensors with invalid data");
+     }
+
+     // check if this is the last call and do final cleanup
+     if (size_done >= size_data) {
+         // unmap offloaded tensors and metadata
+         if (use_mmap) {
+             for (uint32_t idx = 0; idx < mappings.size(); idx++) {
+                 const auto & mmap_used = mmaps_used.at(idx);
+                 auto & mapping = mappings.at(idx);
+                 mapping->unmap_fragment(0, mmap_used.first);
+                 if (mmap_used.second != 0) {
+                     mapping->unmap_fragment(mmap_used.second, mapping->size());
+                 }
+             }
+         }
+         if (progress_callback) {
+             // Even though the model is done loading, we still honor
+             // cancellation since we need to free allocations.
+             return progress_callback(1.0f, progress_callback_user_data);
+         }
+     }
+
+     return true;
+ }
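// A minimal sketch of the progress/cancellation contract honored above, assuming the public
// llama.h llama_progress_callback typedef: returning false from the callback aborts loading,
// and load_all_data() honors a false return even on its final 1.0f call so the caller can
// still trigger cleanup of its allocations.
#include <cstdio>
#include "llama.h"

static bool example_progress_cb(float progress, void * user_data) {
    const bool * cancel_requested = static_cast<const bool *>(user_data);
    std::fprintf(stderr, "\rloading: %3.0f%%", progress * 100.0f);
    return !*cancel_requested; // false -> the loader stops and frees its buffers
}

// hooked up through llama_model_params (field names as in llama.h):
//   bool cancel = false;
//   llama_model_params mp = llama_model_default_params();
//   mp.progress_callback           = example_progress_cb;
//   mp.progress_callback_user_data = &cancel;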
+
+ std::string llama_model_loader::ftype_name() const {
+     return llama_model_ftype_name(ftype);
+ }
+
+ void llama_model_loader::print_info() const {
+     LLAMA_LOG_INFO("%s: file format = %s\n", __func__, llama_file_version_name(fver));
+     LLAMA_LOG_INFO("%s: file type = %s\n", __func__, llama_model_ftype_name(ftype).c_str());
+     if (n_bytes < GiB) {
+         LLAMA_LOG_INFO("%s: file size = %.2f MiB (%.2f BPW) \n", __func__, n_bytes/1024.0/1024.0, n_bytes*8.0/n_elements);
+     } else {
+         LLAMA_LOG_INFO("%s: file size = %.2f GiB (%.2f BPW) \n", __func__, n_bytes/1024.0/1024.0/1024.0, n_bytes*8.0/n_elements);
+     }
+ }
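// A short worked example of the BPW (bits per weight) figure printed above:
// for n_bytes = 4.37 GiB = 4.37 * 1024^3 bytes and n_elements = 8.03e9 weights,
// BPW = n_bytes * 8 / n_elements = 4.37 * 1073741824 * 8 / 8.03e9 ≈ 4.67,
// i.e. an effective ~4.7-bit quantization.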