@fugood/llama.node 1.3.0 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143)
  1. package/package.json +14 -14
  2. package/scripts/llama.cpp.patch +8 -8
  3. package/src/llama.cpp/common/CMakeLists.txt +2 -0
  4. package/src/llama.cpp/common/arg.cpp +44 -999
  5. package/src/llama.cpp/common/arg.h +2 -2
  6. package/src/llama.cpp/common/chat.cpp +17 -2
  7. package/src/llama.cpp/common/common.cpp +33 -0
  8. package/src/llama.cpp/common/common.h +15 -1
  9. package/src/llama.cpp/common/download.cpp +1054 -0
  10. package/src/llama.cpp/common/download.h +55 -0
  11. package/src/llama.cpp/ggml/CMakeLists.txt +1 -1
  12. package/src/llama.cpp/ggml/include/ggml.h +2 -0
  13. package/src/llama.cpp/ggml/src/CMakeLists.txt +6 -3
  14. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +29 -11
  15. package/src/llama.cpp/ggml/src/ggml-cpu/arch/arm/quants.c +428 -26
  16. package/src/llama.cpp/ggml/src/ggml-cpu/arch/loongarch/quants.c +4 -5
  17. package/src/llama.cpp/ggml/src/ggml-cpu/arch/riscv/quants.c +108 -49
  18. package/src/llama.cpp/ggml/src/ggml-cpu/arch/s390/cpu-feats.cpp +50 -0
  19. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +3 -1
  20. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +21 -21
  21. package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +172 -75
  22. package/src/llama.cpp/ggml/src/ggml-cpu/ops.h +0 -4
  23. package/src/llama.cpp/ggml/src/ggml-cpu/repack.cpp +82 -21
  24. package/src/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +25 -25
  25. package/src/llama.cpp/include/llama.h +7 -3
  26. package/src/llama.cpp/src/CMakeLists.txt +95 -0
  27. package/src/llama.cpp/src/llama-arch.cpp +108 -0
  28. package/src/llama.cpp/src/llama-arch.h +11 -0
  29. package/src/llama.cpp/src/llama-batch.cpp +63 -31
  30. package/src/llama.cpp/src/llama-batch.h +12 -1
  31. package/src/llama.cpp/src/llama-chat.cpp +32 -0
  32. package/src/llama.cpp/src/llama-chat.h +1 -0
  33. package/src/llama.cpp/src/llama-context.cpp +36 -13
  34. package/src/llama.cpp/src/llama-context.h +5 -5
  35. package/src/llama.cpp/src/llama-cparams.h +1 -0
  36. package/src/llama.cpp/src/llama-graph.cpp +3 -3
  37. package/src/llama.cpp/src/llama-hparams.cpp +11 -1
  38. package/src/llama.cpp/src/llama-hparams.h +6 -0
  39. package/src/llama.cpp/src/llama-kv-cache-iswa.cpp +3 -1
  40. package/src/llama.cpp/src/llama-kv-cache.cpp +33 -1
  41. package/src/llama.cpp/src/llama-kv-cells.h +44 -2
  42. package/src/llama.cpp/src/llama-memory-recurrent.cpp +4 -3
  43. package/src/llama.cpp/src/llama-model.cpp +320 -13171
  44. package/src/llama.cpp/src/llama-model.h +8 -0
  45. package/src/llama.cpp/src/llama-quant.cpp +1 -1
  46. package/src/llama.cpp/src/llama-vocab.cpp +5 -0
  47. package/src/llama.cpp/src/llama-vocab.h +1 -0
  48. package/src/llama.cpp/src/models/apertus.cpp +125 -0
  49. package/src/llama.cpp/src/models/arcee.cpp +135 -0
  50. package/src/llama.cpp/src/models/arctic.cpp +138 -0
  51. package/src/llama.cpp/src/models/arwkv7.cpp +86 -0
  52. package/src/llama.cpp/src/models/baichuan.cpp +122 -0
  53. package/src/llama.cpp/src/models/bailingmoe.cpp +144 -0
  54. package/src/llama.cpp/src/models/bailingmoe2.cpp +135 -0
  55. package/src/llama.cpp/src/models/bert.cpp +176 -0
  56. package/src/llama.cpp/src/models/bitnet.cpp +160 -0
  57. package/src/llama.cpp/src/models/bloom.cpp +101 -0
  58. package/src/llama.cpp/src/models/chameleon.cpp +178 -0
  59. package/src/llama.cpp/src/models/chatglm.cpp +132 -0
  60. package/src/llama.cpp/src/models/codeshell.cpp +111 -0
  61. package/src/llama.cpp/src/models/cogvlm.cpp +100 -0
  62. package/src/llama.cpp/src/models/cohere2-iswa.cpp +131 -0
  63. package/src/llama.cpp/src/models/command-r.cpp +122 -0
  64. package/src/llama.cpp/src/models/dbrx.cpp +123 -0
  65. package/src/llama.cpp/src/models/deci.cpp +135 -0
  66. package/src/llama.cpp/src/models/deepseek.cpp +144 -0
  67. package/src/llama.cpp/src/models/deepseek2.cpp +236 -0
  68. package/src/llama.cpp/src/models/dots1.cpp +134 -0
  69. package/src/llama.cpp/src/models/dream.cpp +105 -0
  70. package/src/llama.cpp/src/models/ernie4-5-moe.cpp +150 -0
  71. package/src/llama.cpp/src/models/ernie4-5.cpp +110 -0
  72. package/src/llama.cpp/src/models/exaone.cpp +114 -0
  73. package/src/llama.cpp/src/models/exaone4.cpp +123 -0
  74. package/src/llama.cpp/src/models/falcon-h1.cpp +113 -0
  75. package/src/llama.cpp/src/models/falcon.cpp +120 -0
  76. package/src/llama.cpp/src/models/gemma-embedding.cpp +120 -0
  77. package/src/llama.cpp/src/models/gemma.cpp +112 -0
  78. package/src/llama.cpp/src/models/gemma2-iswa.cpp +125 -0
  79. package/src/llama.cpp/src/models/gemma3-iswa.cpp +131 -0
  80. package/src/llama.cpp/src/models/gemma3n-iswa.cpp +377 -0
  81. package/src/llama.cpp/src/models/glm4-moe.cpp +153 -0
  82. package/src/llama.cpp/src/models/glm4.cpp +127 -0
  83. package/src/llama.cpp/src/models/gpt2.cpp +105 -0
  84. package/src/llama.cpp/src/models/gptneox.cpp +144 -0
  85. package/src/llama.cpp/src/models/granite-hybrid.cpp +196 -0
  86. package/src/llama.cpp/src/models/granite.cpp +211 -0
  87. package/src/llama.cpp/src/models/graph-context-mamba.cpp +283 -0
  88. package/src/llama.cpp/src/models/grok.cpp +159 -0
  89. package/src/llama.cpp/src/models/grovemoe.cpp +141 -0
  90. package/src/llama.cpp/src/models/hunyuan-dense.cpp +132 -0
  91. package/src/llama.cpp/src/models/hunyuan-moe.cpp +154 -0
  92. package/src/llama.cpp/src/models/internlm2.cpp +120 -0
  93. package/src/llama.cpp/src/models/jais.cpp +86 -0
  94. package/src/llama.cpp/src/models/jamba.cpp +106 -0
  95. package/src/llama.cpp/src/models/lfm2.cpp +173 -0
  96. package/src/llama.cpp/src/models/llada-moe.cpp +122 -0
  97. package/src/llama.cpp/src/models/llada.cpp +99 -0
  98. package/src/llama.cpp/src/models/llama-iswa.cpp +174 -0
  99. package/src/llama.cpp/src/models/llama.cpp +155 -0
  100. package/src/llama.cpp/src/models/mamba.cpp +55 -0
  101. package/src/llama.cpp/src/models/minicpm3.cpp +199 -0
  102. package/src/llama.cpp/src/models/minimax-m2.cpp +124 -0
  103. package/src/llama.cpp/src/models/models.h +481 -0
  104. package/src/llama.cpp/src/models/mpt.cpp +126 -0
  105. package/src/llama.cpp/src/models/nemotron-h.cpp +121 -0
  106. package/src/llama.cpp/src/models/nemotron.cpp +122 -0
  107. package/src/llama.cpp/src/models/neo-bert.cpp +104 -0
  108. package/src/llama.cpp/src/models/olmo.cpp +121 -0
  109. package/src/llama.cpp/src/models/olmo2.cpp +150 -0
  110. package/src/llama.cpp/src/models/olmoe.cpp +124 -0
  111. package/src/llama.cpp/src/models/openai-moe-iswa.cpp +124 -0
  112. package/src/llama.cpp/src/models/openelm.cpp +124 -0
  113. package/src/llama.cpp/src/models/orion.cpp +123 -0
  114. package/src/llama.cpp/src/models/pangu-embedded.cpp +121 -0
  115. package/src/llama.cpp/src/models/phi2.cpp +121 -0
  116. package/src/llama.cpp/src/models/phi3.cpp +152 -0
  117. package/src/llama.cpp/src/models/plamo.cpp +110 -0
  118. package/src/llama.cpp/src/models/plamo2.cpp +316 -0
  119. package/src/llama.cpp/src/models/plm.cpp +168 -0
  120. package/src/llama.cpp/src/models/qwen.cpp +108 -0
  121. package/src/llama.cpp/src/models/qwen2.cpp +117 -0
  122. package/src/llama.cpp/src/models/qwen2moe.cpp +151 -0
  123. package/src/llama.cpp/src/models/qwen2vl.cpp +117 -0
  124. package/src/llama.cpp/src/models/qwen3.cpp +117 -0
  125. package/src/llama.cpp/src/models/qwen3moe.cpp +124 -0
  126. package/src/llama.cpp/src/models/qwen3vl-moe.cpp +149 -0
  127. package/src/llama.cpp/src/models/qwen3vl.cpp +141 -0
  128. package/src/llama.cpp/src/models/refact.cpp +94 -0
  129. package/src/llama.cpp/src/models/rwkv6-base.cpp +162 -0
  130. package/src/llama.cpp/src/models/rwkv6.cpp +94 -0
  131. package/src/llama.cpp/src/models/rwkv6qwen2.cpp +86 -0
  132. package/src/llama.cpp/src/models/rwkv7-base.cpp +135 -0
  133. package/src/llama.cpp/src/models/rwkv7.cpp +90 -0
  134. package/src/llama.cpp/src/models/seed-oss.cpp +124 -0
  135. package/src/llama.cpp/src/models/smallthinker.cpp +120 -0
  136. package/src/llama.cpp/src/models/smollm3.cpp +128 -0
  137. package/src/llama.cpp/src/models/stablelm.cpp +146 -0
  138. package/src/llama.cpp/src/models/starcoder.cpp +100 -0
  139. package/src/llama.cpp/src/models/starcoder2.cpp +121 -0
  140. package/src/llama.cpp/src/models/t5-dec.cpp +166 -0
  141. package/src/llama.cpp/src/models/t5-enc.cpp +96 -0
  142. package/src/llama.cpp/src/models/wavtokenizer-dec.cpp +149 -0
  143. package/src/llama.cpp/src/models/xverse.cpp +108 -0
package/src/llama.cpp/common/download.cpp (new file)
@@ -0,0 +1,1054 @@
+ #include "arg.h"
+
+ #include "common.h"
+ #include "gguf.h" // for reading GGUF splits
+ #include "log.h"
+ #include "download.h"
+
+ #define JSON_ASSERT GGML_ASSERT
+ #include <nlohmann/json.hpp>
+
+ #include <algorithm>
+ #include <filesystem>
+ #include <fstream>
+ #include <future>
+ #include <regex>
+ #include <string>
+ #include <thread>
+ #include <vector>
+
+ #if defined(LLAMA_USE_CURL)
+ #include <curl/curl.h>
+ #include <curl/easy.h>
+ #else
+ #include "http.h"
+ #endif
+
+ #ifdef __linux__
+ #include <linux/limits.h>
+ #elif defined(_WIN32)
+ # if !defined(PATH_MAX)
+ # define PATH_MAX MAX_PATH
+ # endif
+ #elif defined(_AIX)
+ #include <sys/limits.h>
+ #else
+ #include <sys/syslimits.h>
+ #endif
+ #define LLAMA_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
+
+ // isatty
+ #if defined(_WIN32)
+ #include <io.h>
+ #else
+ #include <unistd.h>
+ #endif
+
+ using json = nlohmann::ordered_json;
+
+ //
+ // downloader
+ //
+
+ // validate repo name format: owner/repo
+ static bool validate_repo_name(const std::string & repo) {
+     static const std::regex repo_regex(R"(^[A-Za-z0-9_.\-]+\/[A-Za-z0-9_.\-]+$)");
+     return std::regex_match(repo, repo_regex);
+ }
+
+ static std::string get_manifest_path(const std::string & repo, const std::string & tag) {
+     // we use "=" to avoid clashing with other component, while still being allowed on windows
+     std::string fname = "manifest=" + repo + "=" + tag + ".json";
+     if (!validate_repo_name(repo)) {
+         throw std::runtime_error("error: repo name must be in the format 'owner/repo'");
+     }
+     string_replace_all(fname, "/", "=");
+     return fs_get_cache_file(fname);
+ }
+
+ static std::string read_file(const std::string & fname) {
+     std::ifstream file(fname);
+     if (!file) {
+         throw std::runtime_error(string_format("error: failed to open file '%s'\n", fname.c_str()));
+     }
+     std::string content((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
+     file.close();
+     return content;
+ }
+
+ static void write_file(const std::string & fname, const std::string & content) {
+     const std::string fname_tmp = fname + ".tmp";
+     std::ofstream file(fname_tmp);
+     if (!file) {
+         throw std::runtime_error(string_format("error: failed to open file '%s'\n", fname.c_str()));
+     }
+
+     try {
+         file << content;
+         file.close();
+
+         // Makes write atomic
+         if (rename(fname_tmp.c_str(), fname.c_str()) != 0) {
+             LOG_ERR("%s: unable to rename file: %s to %s\n", __func__, fname_tmp.c_str(), fname.c_str());
+             // If rename fails, try to delete the temporary file
+             if (remove(fname_tmp.c_str()) != 0) {
+                 LOG_ERR("%s: unable to delete temporary file: %s\n", __func__, fname_tmp.c_str());
+             }
+         }
+     } catch (...) {
+         // If anything fails, try to delete the temporary file
+         if (remove(fname_tmp.c_str()) != 0) {
+             LOG_ERR("%s: unable to delete temporary file: %s\n", __func__, fname_tmp.c_str());
+         }
+
+         throw std::runtime_error(string_format("error: failed to write file '%s'\n", fname.c_str()));
+     }
+ }
+
+ static void write_etag(const std::string & path, const std::string & etag) {
+     const std::string etag_path = path + ".etag";
+     write_file(etag_path, etag);
+     LOG_DBG("%s: file etag saved: %s\n", __func__, etag_path.c_str());
+ }
+
+ static std::string read_etag(const std::string & path) {
+     std::string none;
+     const std::string etag_path = path + ".etag";
+
+     if (std::filesystem::exists(etag_path)) {
+         std::ifstream etag_in(etag_path);
+         if (!etag_in) {
+             LOG_ERR("%s: could not open .etag file for reading: %s\n", __func__, etag_path.c_str());
+             return none;
+         }
+         std::string etag;
+         std::getline(etag_in, etag);
+         return etag;
+     }
+
+     // no etag file, but maybe there is an old .json
+     // remove this code later
+     const std::string metadata_path = path + ".json";
+
+     if (std::filesystem::exists(metadata_path)) {
+         std::ifstream metadata_in(metadata_path);
+         try {
+             nlohmann::json metadata_json;
+             metadata_in >> metadata_json;
+             LOG_DBG("%s: previous metadata file found %s: %s\n", __func__, metadata_path.c_str(),
+                     metadata_json.dump().c_str());
+             if (metadata_json.contains("etag") && metadata_json.at("etag").is_string()) {
+                 std::string etag = metadata_json.at("etag");
+                 write_etag(path, etag);
+                 if (!std::filesystem::remove(metadata_path)) {
+                     LOG_WRN("%s: failed to delete old .json metadata file: %s\n", __func__, metadata_path.c_str());
+                 }
+                 return etag;
+             }
+         } catch (const nlohmann::json::exception & e) {
+             LOG_ERR("%s: error reading metadata file %s: %s\n", __func__, metadata_path.c_str(), e.what());
+         }
+     }
+     return none;
+ }
+
+ #ifdef LLAMA_USE_CURL
+
+ //
+ // CURL utils
+ //
+
+ using curl_ptr = std::unique_ptr<CURL, decltype(&curl_easy_cleanup)>;
+
+ // cannot use unique_ptr for curl_slist, because we cannot update without destroying the old one
+ struct curl_slist_ptr {
+     struct curl_slist * ptr = nullptr;
+     ~curl_slist_ptr() {
+         if (ptr) {
+             curl_slist_free_all(ptr);
+         }
+     }
+ };
+
+ static CURLcode common_curl_perf(CURL * curl) {
+     CURLcode res = curl_easy_perform(curl);
+     if (res != CURLE_OK) {
+         LOG_ERR("%s: curl_easy_perform() failed\n", __func__);
+     }
+
+     return res;
+ }
+
+ // Send a HEAD request to retrieve the etag and last-modified headers
+ struct common_load_model_from_url_headers {
+     std::string etag;
+     std::string last_modified;
+     std::string accept_ranges;
+ };
+
+ struct FILE_deleter {
+     void operator()(FILE * f) const { fclose(f); }
+ };
+
+ static size_t common_header_callback(char * buffer, size_t, size_t n_items, void * userdata) {
+     common_load_model_from_url_headers * headers = (common_load_model_from_url_headers *) userdata;
+     static std::regex header_regex("([^:]+): (.*)\r\n");
+     static std::regex etag_regex("ETag", std::regex_constants::icase);
+     static std::regex last_modified_regex("Last-Modified", std::regex_constants::icase);
+     static std::regex accept_ranges_regex("Accept-Ranges", std::regex_constants::icase);
+     std::string header(buffer, n_items);
+     std::smatch match;
+     if (std::regex_match(header, match, header_regex)) {
+         const std::string & key = match[1];
+         const std::string & value = match[2];
+         if (std::regex_match(key, match, etag_regex)) {
+             headers->etag = value;
+         } else if (std::regex_match(key, match, last_modified_regex)) {
+             headers->last_modified = value;
+         } else if (std::regex_match(key, match, accept_ranges_regex)) {
+             headers->accept_ranges = value;
+         }
+     }
+
+     return n_items;
+ }
+
+ static size_t common_write_callback(void * data, size_t size, size_t nmemb, void * fd) {
+     return std::fwrite(data, size, nmemb, static_cast<FILE *>(fd));
+ }
+
+ // helper function to hide password in URL
+ static std::string llama_download_hide_password_in_url(const std::string & url) {
+     // Use regex to match and replace the user[:password]@ pattern in URLs
+     // Pattern: scheme://[user[:password]@]host[...]
+     static const std::regex url_regex(R"(^(?:[A-Za-z][A-Za-z0-9+.-]://)(?:[^/@]+@)?.$)");
+     std::smatch match;
+
+     if (std::regex_match(url, match, url_regex)) {
+         // match[1] = scheme (e.g., "https://")
+         // match[2] = user[:password]@ part
+         // match[3] = rest of URL (host and path)
+         return match[1].str() + "********@" + match[3].str();
+     }
+
+     return url; // No credentials found or malformed URL
+ }
+
+ static void common_curl_easy_setopt_head(CURL * curl, const std::string & url) {
+     // Set the URL, allow to follow http redirection
+     curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
+     curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+
+ # if defined(_WIN32)
+     // CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
+     // operating system. Currently implemented under MS-Windows.
+     curl_easy_setopt(curl, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
+ # endif
+
+     curl_easy_setopt(curl, CURLOPT_NOBODY, 1L); // will trigger the HEAD verb
+     curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 1L); // hide head request progress
+     curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, common_header_callback);
+ }
+
+ static void common_curl_easy_setopt_get(CURL * curl) {
+     curl_easy_setopt(curl, CURLOPT_NOBODY, 0L);
+     curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, common_write_callback);
+
+     // display download progress
+     curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
+ }
+
+ static bool common_pull_file(CURL * curl, const std::string & path_temporary) {
+     if (std::filesystem::exists(path_temporary)) {
+         const std::string partial_size = std::to_string(std::filesystem::file_size(path_temporary));
+         LOG_INF("%s: server supports range requests, resuming download from byte %s\n", __func__, partial_size.c_str());
+         const std::string range_str = partial_size + "-";
+         curl_easy_setopt(curl, CURLOPT_RANGE, range_str.c_str());
+     }
+
+     // Always open file in append mode could be resuming
+     std::unique_ptr<FILE, FILE_deleter> outfile(fopen(path_temporary.c_str(), "ab"));
+     if (!outfile) {
+         LOG_ERR("%s: error opening local file for writing: %s\n", __func__, path_temporary.c_str());
+         return false;
+     }
+
+     common_curl_easy_setopt_get(curl);
+     curl_easy_setopt(curl, CURLOPT_WRITEDATA, outfile.get());
+
+     return common_curl_perf(curl) == CURLE_OK;
+ }
+
+ static bool common_download_head(CURL * curl,
+                                  curl_slist_ptr & http_headers,
+                                  const std::string & url,
+                                  const std::string & bearer_token) {
+     if (!curl) {
+         LOG_ERR("%s: error initializing libcurl\n", __func__);
+         return false;
+     }
+
+     http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
+     // Check if hf-token or bearer-token was specified
+     if (!bearer_token.empty()) {
+         std::string auth_header = "Authorization: Bearer " + bearer_token;
+         http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
+     }
+
+     curl_easy_setopt(curl, CURLOPT_HTTPHEADER, http_headers.ptr);
+     common_curl_easy_setopt_head(curl, url);
+     return common_curl_perf(curl) == CURLE_OK;
+ }
+
+ // download one single file from remote URL to local path
+ static bool common_download_file_single_online(const std::string & url,
+                                                const std::string & path,
+                                                const std::string & bearer_token) {
+     static const int max_attempts = 3;
+     static const int retry_delay_seconds = 2;
+     for (int i = 0; i < max_attempts; ++i) {
+         std::string etag;
+
+         // Check if the file already exists locally
+         const auto file_exists = std::filesystem::exists(path);
+         if (file_exists) {
+             etag = read_etag(path);
+         } else {
+             LOG_INF("%s: no previous model file found %s\n", __func__, path.c_str());
+         }
+
+         bool head_request_ok = false;
+         bool should_download = !file_exists; // by default, we should download if the file does not exist
+
+         // Initialize libcurl
+         curl_ptr curl(curl_easy_init(), &curl_easy_cleanup);
+         common_load_model_from_url_headers headers;
+         curl_easy_setopt(curl.get(), CURLOPT_HEADERDATA, &headers);
+         curl_slist_ptr http_headers;
+         const bool was_perform_successful = common_download_head(curl.get(), http_headers, url, bearer_token);
+         if (!was_perform_successful) {
+             head_request_ok = false;
+         }
+
+         long http_code = 0;
+         curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
+         if (http_code == 200) {
+             head_request_ok = true;
+         } else {
+             LOG_WRN("%s: HEAD invalid http status code received: %ld\n", __func__, http_code);
+             head_request_ok = false;
+         }
+
+         // if head_request_ok is false, we don't have the etag or last-modified headers
+         // we leave should_download as-is, which is true if the file does not exist
+         bool should_download_from_scratch = false;
+         if (head_request_ok) {
+             // check if ETag or Last-Modified headers are different
+             // if it is, we need to download the file again
+             if (!etag.empty() && etag != headers.etag) {
+                 LOG_WRN("%s: ETag header is different (%s != %s): triggering a new download\n", __func__, etag.c_str(),
+                         headers.etag.c_str());
+                 should_download = true;
+                 should_download_from_scratch = true;
+             }
+         }
+
+         const bool accept_ranges_supported = !headers.accept_ranges.empty() && headers.accept_ranges != "none";
+         if (should_download) {
+             if (file_exists &&
+                 !accept_ranges_supported) { // Resumable downloads not supported, delete and start again.
+                 LOG_WRN("%s: deleting previous downloaded file: %s\n", __func__, path.c_str());
+                 if (remove(path.c_str()) != 0) {
+                     LOG_ERR("%s: unable to delete file: %s\n", __func__, path.c_str());
+                     return false;
+                 }
+             }
+
+             const std::string path_temporary = path + ".downloadInProgress";
+             if (should_download_from_scratch) {
+                 if (std::filesystem::exists(path_temporary)) {
+                     if (remove(path_temporary.c_str()) != 0) {
+                         LOG_ERR("%s: unable to delete file: %s\n", __func__, path_temporary.c_str());
+                         return false;
+                     }
+                 }
+
+                 if (std::filesystem::exists(path)) {
+                     if (remove(path.c_str()) != 0) {
+                         LOG_ERR("%s: unable to delete file: %s\n", __func__, path.c_str());
+                         return false;
+                     }
+                 }
+             }
+             if (head_request_ok) {
+                 write_etag(path, headers.etag);
+             }
+
+             // start the download
+             LOG_INF("%s: trying to download model from %s to %s (server_etag:%s, server_last_modified:%s)...\n",
+                     __func__, llama_download_hide_password_in_url(url).c_str(), path_temporary.c_str(),
+                     headers.etag.c_str(), headers.last_modified.c_str());
+             const bool was_pull_successful = common_pull_file(curl.get(), path_temporary);
+             if (!was_pull_successful) {
+                 if (i + 1 < max_attempts) {
+                     const int exponential_backoff_delay = std::pow(retry_delay_seconds, i) * 1000;
+                     LOG_WRN("%s: retrying after %d milliseconds...\n", __func__, exponential_backoff_delay);
+                     std::this_thread::sleep_for(std::chrono::milliseconds(exponential_backoff_delay));
+                 } else {
+                     LOG_ERR("%s: curl_easy_perform() failed after %d attempts\n", __func__, max_attempts);
+                 }
+
+                 continue;
+             }
+
+             long http_code = 0;
+             curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
+             if (http_code < 200 || http_code >= 400) {
+                 LOG_ERR("%s: invalid http status code received: %ld\n", __func__, http_code);
+                 return false;
+             }
+
+             if (rename(path_temporary.c_str(), path.c_str()) != 0) {
+                 LOG_ERR("%s: unable to rename file: %s to %s\n", __func__, path_temporary.c_str(), path.c_str());
+                 return false;
+             }
+         } else {
+             LOG_INF("%s: using cached file: %s\n", __func__, path.c_str());
+         }
+
+         break;
+     }
+
+     return true;
+ }
+
+ std::pair<long, std::vector<char>> common_remote_get_content(const std::string & url, const common_remote_params & params) {
+     curl_ptr curl(curl_easy_init(), &curl_easy_cleanup);
+     curl_slist_ptr http_headers;
+     std::vector<char> res_buffer;
+
+     curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
+     curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L);
+     curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);
+     curl_easy_setopt(curl.get(), CURLOPT_VERBOSE, 1L);
+     typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data);
+     auto write_callback = [](void * ptr, size_t size, size_t nmemb, void * data) -> size_t {
+         auto data_vec = static_cast<std::vector<char> *>(data);
+         data_vec->insert(data_vec->end(), (char *)ptr, (char *)ptr + size * nmemb);
+         return size * nmemb;
+     };
+     curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
+     curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &res_buffer);
+ #if defined(_WIN32)
+     curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
+ #endif
+     if (params.timeout > 0) {
+         curl_easy_setopt(curl.get(), CURLOPT_TIMEOUT, params.timeout);
+     }
+     if (params.max_size > 0) {
+         curl_easy_setopt(curl.get(), CURLOPT_MAXFILESIZE, params.max_size);
+     }
+     http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
+     for (const auto & header : params.headers) {
+         http_headers.ptr = curl_slist_append(http_headers.ptr, header.c_str());
+     }
+     curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);
+
+     CURLcode res = curl_easy_perform(curl.get());
+
+     if (res != CURLE_OK) {
+         std::string error_msg = curl_easy_strerror(res);
+         throw std::runtime_error("error: cannot make GET request: " + error_msg);
+     }
+
+     long res_code;
+     curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &res_code);
+
+     return { res_code, std::move(res_buffer) };
+ }
+
+ #else
+
+ static bool is_output_a_tty() {
+ #if defined(_WIN32)
+     return _isatty(_fileno(stdout));
+ #else
+     return isatty(1);
+ #endif
+ }
+
+ static void print_progress(size_t current, size_t total) {
+     if (!is_output_a_tty()) {
+         return;
+     }
+
+     if (!total) {
+         return;
+     }
+
+     size_t width = 50;
+     size_t pct = (100 * current) / total;
+     size_t pos = (width * current) / total;
+
+     std::cout << "["
+               << std::string(pos, '=')
+               << (pos < width ? ">" : "")
+               << std::string(width - pos, ' ')
+               << "] " << std::setw(3) << pct << "% ("
+               << current / (1024 * 1024) << " MB / "
+               << total / (1024 * 1024) << " MB)\r";
+     std::cout.flush();
+ }
+
+ static bool common_pull_file(httplib::Client & cli,
+                              const std::string & resolve_path,
+                              const std::string & path_tmp,
+                              bool supports_ranges,
+                              size_t existing_size,
+                              size_t & total_size) {
+     std::ofstream ofs(path_tmp, std::ios::binary | std::ios::app);
+     if (!ofs.is_open()) {
+         LOG_ERR("%s: error opening local file for writing: %s\n", __func__, path_tmp.c_str());
+         return false;
+     }
+
+     httplib::Headers headers;
+     if (supports_ranges && existing_size > 0) {
+         headers.emplace("Range", "bytes=" + std::to_string(existing_size) + "-");
+     }
+
+     std::atomic<size_t> downloaded{existing_size};
+
+     auto res = cli.Get(resolve_path, headers,
+         [&](const httplib::Response &response) {
+             if (existing_size > 0 && response.status != 206) {
+                 LOG_WRN("%s: server did not respond with 206 Partial Content for a resume request. Status: %d\n", __func__, response.status);
+                 return false;
+             }
+             if (existing_size == 0 && response.status != 200) {
+                 LOG_WRN("%s: download received non-successful status code: %d\n", __func__, response.status);
+                 return false;
+             }
+             if (total_size == 0 && response.has_header("Content-Length")) {
+                 try {
+                     size_t content_length = std::stoull(response.get_header_value("Content-Length"));
+                     total_size = existing_size + content_length;
+                 } catch (const std::exception &e) {
+                     LOG_WRN("%s: invalid Content-Length header: %s\n", __func__, e.what());
+                 }
+             }
+             return true;
+         },
+         [&](const char *data, size_t len) {
+             ofs.write(data, len);
+             if (!ofs) {
+                 LOG_ERR("%s: error writing to file: %s\n", __func__, path_tmp.c_str());
+                 return false;
+             }
+             downloaded += len;
+             print_progress(downloaded, total_size);
+             return true;
+         },
+         nullptr
+     );
+
+     std::cout << "\n";
+
+     if (!res) {
+         LOG_ERR("%s: error during download. Status: %d\n", __func__, res ? res->status : -1);
+         return false;
+     }
+
+     return true;
+ }
+
+ // download one single file from remote URL to local path
+ static bool common_download_file_single_online(const std::string & url,
+                                                const std::string & path,
+                                                const std::string & bearer_token) {
+     static const int max_attempts = 3;
+     static const int retry_delay_seconds = 2;
+
+     auto [cli, parts] = common_http_client(url);
+
+     httplib::Headers default_headers = {{"User-Agent", "llama-cpp"}};
+     if (!bearer_token.empty()) {
+         default_headers.insert({"Authorization", "Bearer " + bearer_token});
+     }
+     cli.set_default_headers(default_headers);
+
+     const bool file_exists = std::filesystem::exists(path);
+
+     std::string last_etag;
+     if (file_exists) {
+         last_etag = read_etag(path);
+     } else {
+         LOG_INF("%s: no previous model file found %s\n", __func__, path.c_str());
+     }
+
+     for (int i = 0; i < max_attempts; ++i) {
+         auto head = cli.Head(parts.path);
+         bool head_ok = head && head->status >= 200 && head->status < 300;
+         if (!head_ok) {
+             LOG_WRN("%s: HEAD invalid http status code received: %d\n", __func__, head ? head->status : -1);
+             if (file_exists) {
+                 LOG_INF("%s: Using cached file (HEAD failed): %s\n", __func__, path.c_str());
+                 return true;
+             }
+         }
+
+         std::string etag;
+         if (head_ok && head->has_header("ETag")) {
+             etag = head->get_header_value("ETag");
+         }
+
+         size_t total_size = 0;
+         if (head_ok && head->has_header("Content-Length")) {
+             try {
+                 total_size = std::stoull(head->get_header_value("Content-Length"));
+             } catch (const std::exception& e) {
+                 LOG_WRN("%s: Invalid Content-Length in HEAD response: %s\n", __func__, e.what());
+             }
+         }
+
+         bool supports_ranges = false;
+         if (head_ok && head->has_header("Accept-Ranges")) {
+             supports_ranges = head->get_header_value("Accept-Ranges") != "none";
+         }
+
+         bool should_download_from_scratch = false;
+         if (!last_etag.empty() && !etag.empty() && last_etag != etag) {
+             LOG_WRN("%s: ETag header is different (%s != %s): triggering a new download\n", __func__,
+                     last_etag.c_str(), etag.c_str());
+             should_download_from_scratch = true;
+         }
+
+         if (file_exists) {
+             if (!should_download_from_scratch) {
+                 LOG_INF("%s: using cached file: %s\n", __func__, path.c_str());
+                 return true;
+             }
+             LOG_WRN("%s: deleting previous downloaded file: %s\n", __func__, path.c_str());
+             if (remove(path.c_str()) != 0) {
+                 LOG_ERR("%s: unable to delete file: %s\n", __func__, path.c_str());
+                 return false;
+             }
+         }
+
+         const std::string path_temporary = path + ".downloadInProgress";
+         size_t existing_size = 0;
+
+         if (std::filesystem::exists(path_temporary)) {
+             if (supports_ranges && !should_download_from_scratch) {
+                 existing_size = std::filesystem::file_size(path_temporary);
+             } else if (remove(path_temporary.c_str()) != 0) {
+                 LOG_ERR("%s: unable to delete file: %s\n", __func__, path_temporary.c_str());
+                 return false;
+             }
+         }
+
+         // start the download
+         LOG_INF("%s: trying to download model from %s to %s (etag:%s)...\n",
+                 __func__, common_http_show_masked_url(parts).c_str(), path_temporary.c_str(), etag.c_str());
+         const bool was_pull_successful = common_pull_file(cli, parts.path, path_temporary, supports_ranges, existing_size, total_size);
+         if (!was_pull_successful) {
+             if (i + 1 < max_attempts) {
+                 const int exponential_backoff_delay = std::pow(retry_delay_seconds, i) * 1000;
+                 LOG_WRN("%s: retrying after %d milliseconds...\n", __func__, exponential_backoff_delay);
+                 std::this_thread::sleep_for(std::chrono::milliseconds(exponential_backoff_delay));
+             } else {
+                 LOG_ERR("%s: download failed after %d attempts\n", __func__, max_attempts);
+             }
+             continue;
+         }
+
+         if (std::rename(path_temporary.c_str(), path.c_str()) != 0) {
+             LOG_ERR("%s: unable to rename file: %s to %s\n", __func__, path_temporary.c_str(), path.c_str());
+             return false;
+         }
+         if (!etag.empty()) {
+             write_etag(path, etag);
+         }
+         break;
+     }
+
+     return true;
+ }
+
+ std::pair<long, std::vector<char>> common_remote_get_content(const std::string & url,
+                                                              const common_remote_params & params) {
+     auto [cli, parts] = common_http_client(url);
+
+     httplib::Headers headers = {{"User-Agent", "llama-cpp"}};
+     for (const auto & header : params.headers) {
+         size_t pos = header.find(':');
+         if (pos != std::string::npos) {
+             headers.emplace(header.substr(0, pos), header.substr(pos + 1));
+         } else {
+             headers.emplace(header, "");
+         }
+     }
+
+     if (params.timeout > 0) {
+         cli.set_read_timeout(params.timeout, 0);
+         cli.set_write_timeout(params.timeout, 0);
+     }
+
+     std::vector<char> buf;
+     auto res = cli.Get(parts.path, headers,
+         [&](const char *data, size_t len) {
+             buf.insert(buf.end(), data, data + len);
+             return params.max_size == 0 ||
+                    buf.size() <= static_cast<size_t>(params.max_size);
+         },
+         nullptr
+     );
+
+     if (!res) {
+         throw std::runtime_error("error: cannot make GET request");
+     }
+
+     return { res->status, std::move(buf) };
+ }
+
+ #endif // LLAMA_USE_CURL
+
+ static bool common_download_file_single(const std::string & url,
+                                         const std::string & path,
+                                         const std::string & bearer_token,
+                                         bool offline) {
+     if (!offline) {
+         return common_download_file_single_online(url, path, bearer_token);
+     }
+
+     if (!std::filesystem::exists(path)) {
+         LOG_ERR("%s: required file is not available in cache (offline mode): %s\n", __func__, path.c_str());
+         return false;
+     }
+
+     LOG_INF("%s: using cached file (offline mode): %s\n", __func__, path.c_str());
+     return true;
+ }
+
+ // download multiple files from remote URLs to local paths
+ // the input is a vector of pairs <url, path>
+ static bool common_download_file_multiple(const std::vector<std::pair<std::string, std::string>> & urls, const std::string & bearer_token, bool offline) {
+     // Prepare download in parallel
+     std::vector<std::future<bool>> futures_download;
+     for (auto const & item : urls) {
+         futures_download.push_back(std::async(std::launch::async, [bearer_token, offline](const std::pair<std::string, std::string> & it) -> bool {
+             return common_download_file_single(it.first, it.second, bearer_token, offline);
+         }, item));
+     }
+
+     // Wait for all downloads to complete
+     for (auto & f : futures_download) {
+         if (!f.get()) {
+             return false;
+         }
+     }
+
+     return true;
+ }
+
+ bool common_download_model(
+         const common_params_model & model,
+         const std::string & bearer_token,
+         bool offline) {
+     // Basic validation of the model.url
+     if (model.url.empty()) {
+         LOG_ERR("%s: invalid model url\n", __func__);
+         return false;
+     }
+
+     if (!common_download_file_single(model.url, model.path, bearer_token, offline)) {
+         return false;
+     }
+
+     // check for additional GGUFs split to download
+     int n_split = 0;
+     {
+         struct gguf_init_params gguf_params = {
+             /*.no_alloc = */ true,
+             /*.ctx = */ NULL,
+         };
+         auto * ctx_gguf = gguf_init_from_file(model.path.c_str(), gguf_params);
+         if (!ctx_gguf) {
+             LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, model.path.c_str());
+             return false;
+         }
+
+         auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_SPLIT_COUNT);
+         if (key_n_split >= 0) {
+             n_split = gguf_get_val_u16(ctx_gguf, key_n_split);
+         }
+
+         gguf_free(ctx_gguf);
+     }
+
+     if (n_split > 1) {
+         char split_prefix[PATH_MAX] = {0};
+         char split_url_prefix[LLAMA_MAX_URL_LENGTH] = {0};
+
+         // Verify the first split file format
+         // and extract split URL and PATH prefixes
+         {
+             if (!llama_split_prefix(split_prefix, sizeof(split_prefix), model.path.c_str(), 0, n_split)) {
+                 LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, model.path.c_str(), n_split);
+                 return false;
+             }
+
+             if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model.url.c_str(), 0, n_split)) {
+                 LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model.url.c_str(), n_split);
+                 return false;
+             }
+         }
+
+         std::vector<std::pair<std::string, std::string>> urls;
+         for (int idx = 1; idx < n_split; idx++) {
+             char split_path[PATH_MAX] = {0};
+             llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split);
+
+             char split_url[LLAMA_MAX_URL_LENGTH] = {0};
+             llama_split_path(split_url, sizeof(split_url), split_url_prefix, idx, n_split);
+
+             if (std::string(split_path) == model.path) {
+                 continue; // skip the already downloaded file
+             }
+
+             urls.push_back({split_url, split_path});
+         }
+
+         // Download in parallel
+         common_download_file_multiple(urls, bearer_token, offline);
+     }
+
+     return true;
+ }
+
+ common_hf_file_res common_get_hf_file(const std::string & hf_repo_with_tag, const std::string & bearer_token, bool offline) {
+     auto parts = string_split<std::string>(hf_repo_with_tag, ':');
+     std::string tag = parts.size() > 1 ? parts.back() : "latest";
+     std::string hf_repo = parts[0];
+     if (string_split<std::string>(hf_repo, '/').size() != 2) {
+         throw std::invalid_argument("error: invalid HF repo format, expected <user>/<model>[:quant]\n");
+     }
+
+     std::string url = get_model_endpoint() + "v2/" + hf_repo + "/manifests/" + tag;
+
+     // headers
+     std::vector<std::string> headers;
+     headers.push_back("Accept: application/json");
+     if (!bearer_token.empty()) {
+         headers.push_back("Authorization: Bearer " + bearer_token);
+     }
+     // Important: the User-Agent must be "llama-cpp" to get the "ggufFile" field in the response
+     // User-Agent header is already set in common_remote_get_content, no need to set it here
+
+     // make the request
+     common_remote_params params;
+     params.headers = headers;
+     long res_code = 0;
+     std::string res_str;
+     bool use_cache = false;
+     std::string cached_response_path = get_manifest_path(hf_repo, tag);
+     if (!offline) {
+         try {
+             auto res = common_remote_get_content(url, params);
+             res_code = res.first;
+             res_str = std::string(res.second.data(), res.second.size());
+         } catch (const std::exception & e) {
+             LOG_WRN("error: failed to get manifest at %s: %s\n", url.c_str(), e.what());
+         }
+     }
+     if (res_code == 0) {
+         if (std::filesystem::exists(cached_response_path)) {
+             LOG_WRN("trying to read manifest from cache: %s\n", cached_response_path.c_str());
+             res_str = read_file(cached_response_path);
+             res_code = 200;
+             use_cache = true;
+         } else {
+             throw std::runtime_error(
+                 offline ? "error: failed to get manifest (offline mode)"
+                         : "error: failed to get manifest (check your internet connection)");
+         }
+     }
+     std::string ggufFile;
+     std::string mmprojFile;
+
+     if (res_code == 200 || res_code == 304) {
+         try {
+             auto j = json::parse(res_str);
+
+             if (j.contains("ggufFile") && j["ggufFile"].contains("rfilename")) {
+                 ggufFile = j["ggufFile"]["rfilename"].get<std::string>();
+             }
+             if (j.contains("mmprojFile") && j["mmprojFile"].contains("rfilename")) {
+                 mmprojFile = j["mmprojFile"]["rfilename"].get<std::string>();
+             }
+         } catch (const std::exception & e) {
+             throw std::runtime_error(std::string("error parsing manifest JSON: ") + e.what());
+         }
+         if (!use_cache) {
+             // if not using cached response, update the cache file
+             write_file(cached_response_path, res_str);
+         }
+     } else if (res_code == 401) {
+         throw std::runtime_error("error: model is private or does not exist; if you are accessing a gated model, please provide a valid HF token");
+     } else {
+         throw std::runtime_error(string_format("error from HF API, response code: %ld, data: %s", res_code, res_str.c_str()));
+     }
+
+     // check response
+     if (ggufFile.empty()) {
+         throw std::runtime_error("error: model does not have ggufFile");
+     }
+
+     return { hf_repo, ggufFile, mmprojFile };
+ }
+
+ std::vector<common_cached_model_info> common_list_cached_models() {
+     std::vector<common_cached_model_info> models;
+     const std::string cache_dir = fs_get_cache_directory();
+     const std::vector<common_file_info> files = fs_list_files(cache_dir);
+     for (const auto & file : files) {
+         if (string_starts_with(file.name, "manifest=") && string_ends_with(file.name, ".json")) {
+             common_cached_model_info model_info;
+             model_info.manifest_path = file.path;
+             std::string fname = file.name;
+             string_replace_all(fname, ".json", ""); // remove extension
+             auto parts = string_split<std::string>(fname, '=');
+             if (parts.size() == 4) {
+                 // expect format: manifest=<user>=<model>=<tag>=<other>
+                 model_info.user = parts[1];
+                 model_info.model = parts[2];
+                 model_info.tag = parts[3];
+             } else {
+                 // invalid format
+                 continue;
+             }
+             model_info.size = 0; // TODO: get GGUF size, not manifest size
+             models.push_back(model_info);
+         }
+     }
+     return models;
+ }
+
+ //
+ // Docker registry functions
+ //
+
+ static std::string common_docker_get_token(const std::string & repo) {
+     std::string url = "https://auth.docker.io/token?service=registry.docker.io&scope=repository:" + repo + ":pull";
+
+     common_remote_params params;
+     auto res = common_remote_get_content(url, params);
+
+     if (res.first != 200) {
+         throw std::runtime_error("Failed to get Docker registry token, HTTP code: " + std::to_string(res.first));
+     }
+
+     std::string response_str(res.second.begin(), res.second.end());
+     nlohmann::ordered_json response = nlohmann::ordered_json::parse(response_str);
+
+     if (!response.contains("token")) {
+         throw std::runtime_error("Docker registry token response missing 'token' field");
+     }
+
+     return response["token"].get<std::string>();
+ }
+
+ std::string common_docker_resolve_model(const std::string & docker) {
+     // Parse ai/smollm2:135M-Q4_0
+     size_t colon_pos = docker.find(':');
+     std::string repo, tag;
+     if (colon_pos != std::string::npos) {
+         repo = docker.substr(0, colon_pos);
+         tag = docker.substr(colon_pos + 1);
+     } else {
+         repo = docker;
+         tag = "latest";
+     }
+
+     // ai/ is the default
+     size_t slash_pos = docker.find('/');
+     if (slash_pos == std::string::npos) {
+         repo.insert(0, "ai/");
+     }
+
+     LOG_INF("%s: Downloading Docker Model: %s:%s\n", __func__, repo.c_str(), tag.c_str());
+     try {
+         // --- helper: digest validation ---
+         auto validate_oci_digest = [](const std::string & digest) -> std::string {
+             // Expected: algo:hex ; start with sha256 (64 hex chars)
+             // You can extend this map if supporting other algorithms in future.
+             static const std::regex re("^sha256:([a-fA-F0-9]{64})$");
+             std::smatch m;
+             if (!std::regex_match(digest, m, re)) {
+                 throw std::runtime_error("Invalid OCI digest format received in manifest: " + digest);
+             }
+             // normalize hex to lowercase
+             std::string normalized = digest;
+             std::transform(normalized.begin()+7, normalized.end(), normalized.begin()+7, [](unsigned char c){
+                 return std::tolower(c);
+             });
+             return normalized;
+         };
+
+         std::string token = common_docker_get_token(repo); // Get authentication token
+
+         // Get manifest
+         // TODO: cache the manifest response so that it appears in the model list
+         const std::string url_prefix = "https://registry-1.docker.io/v2/" + repo;
+         std::string manifest_url = url_prefix + "/manifests/" + tag;
+         common_remote_params manifest_params;
+         manifest_params.headers.push_back("Authorization: Bearer " + token);
+         manifest_params.headers.push_back(
+             "Accept: application/vnd.docker.distribution.manifest.v2+json,application/vnd.oci.image.manifest.v1+json");
+         auto manifest_res = common_remote_get_content(manifest_url, manifest_params);
+         if (manifest_res.first != 200) {
+             throw std::runtime_error("Failed to get Docker manifest, HTTP code: " + std::to_string(manifest_res.first));
+         }
+
+         std::string manifest_str(manifest_res.second.begin(), manifest_res.second.end());
+         nlohmann::ordered_json manifest = nlohmann::ordered_json::parse(manifest_str);
+         std::string gguf_digest; // Find the GGUF layer
+         if (manifest.contains("layers")) {
+             for (const auto & layer : manifest["layers"]) {
+                 if (layer.contains("mediaType")) {
+                     std::string media_type = layer["mediaType"].get<std::string>();
+                     if (media_type == "application/vnd.docker.ai.gguf.v3" ||
+                         media_type.find("gguf") != std::string::npos) {
+                         gguf_digest = layer["digest"].get<std::string>();
+                         break;
+                     }
+                 }
+             }
+         }
+
+         if (gguf_digest.empty()) {
+             throw std::runtime_error("No GGUF layer found in Docker manifest");
+         }
+
+         // Validate & normalize digest
+         gguf_digest = validate_oci_digest(gguf_digest);
+         LOG_DBG("%s: Using validated digest: %s\n", __func__, gguf_digest.c_str());
+
+         // Prepare local filename
+         std::string model_filename = repo;
+         std::replace(model_filename.begin(), model_filename.end(), '/', '_');
+         model_filename += "_" + tag + ".gguf";
+         std::string local_path = fs_get_cache_file(model_filename);
+
+         const std::string blob_url = url_prefix + "/blobs/" + gguf_digest;
+         if (!common_download_file_single(blob_url, local_path, token, false)) {
+             throw std::runtime_error("Failed to download Docker Model");
+         }
+
+         LOG_INF("%s: Downloaded Docker Model to: %s\n", __func__, local_path.c_str());
+         return local_path;
+     } catch (const std::exception & e) {
+         LOG_ERR("%s: Docker Model download failed: %s\n", __func__, e.what());
+         throw;
+     }
+ }
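
For context (not part of the published package diff): a minimal sketch of how the download API that this release moves into common/download.cpp might be driven from application code. The model URL and cache path below are placeholder assumptions, and only the url/path members of common_params_model that the diff itself uses are relied on.

    #include "common.h"    // common_params_model
    #include "download.h"  // common_download_model()

    int main() {
        common_params_model model;
        model.url  = "https://example.com/models/foo-Q4_K_M.gguf"; // placeholder URL (assumption)
        model.path = "/tmp/foo-Q4_K_M.gguf";                       // placeholder cache path (assumption)

        // Per the code above: reuses the cached copy when the stored ETag still matches,
        // resumes a partial .downloadInProgress file when the server supports ranges,
        // then fetches any additional GGUF splits referenced by the first file.
        const bool ok = common_download_model(model, /*bearer_token=*/"", /*offline=*/false);
        return ok ? 0 : 1;
    }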