cui-llama.rn 1.4.0 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108) hide show
  1. package/README.md +4 -23
  2. package/android/build.gradle +12 -3
  3. package/android/src/main/CMakeLists.txt +13 -7
  4. package/android/src/main/java/com/rnllama/LlamaContext.java +27 -20
  5. package/android/src/main/java/com/rnllama/RNLlama.java +5 -1
  6. package/android/src/main/jni.cpp +15 -12
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  11. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  12. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  13. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  14. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  15. package/cpp/README.md +1 -1
  16. package/cpp/common.cpp +158 -267
  17. package/cpp/common.h +46 -12
  18. package/cpp/ggml-alloc.c +1042 -1037
  19. package/cpp/ggml-backend-impl.h +255 -256
  20. package/cpp/ggml-backend-reg.cpp +582 -582
  21. package/cpp/ggml-backend.cpp +2002 -2002
  22. package/cpp/ggml-backend.h +354 -352
  23. package/cpp/ggml-common.h +1853 -1853
  24. package/cpp/ggml-cpp.h +39 -39
  25. package/cpp/ggml-cpu-aarch64.cpp +4247 -4247
  26. package/cpp/ggml-cpu-aarch64.h +8 -8
  27. package/cpp/ggml-cpu-impl.h +386 -386
  28. package/cpp/ggml-cpu-quants.c +10920 -10839
  29. package/cpp/ggml-cpu-traits.cpp +36 -36
  30. package/cpp/ggml-cpu-traits.h +38 -38
  31. package/cpp/ggml-cpu.c +329 -60
  32. package/cpp/ggml-cpu.cpp +10 -2
  33. package/cpp/ggml-cpu.h +135 -135
  34. package/cpp/ggml-impl.h +567 -567
  35. package/cpp/ggml-metal-impl.h +17 -17
  36. package/cpp/ggml-metal.m +4884 -4884
  37. package/cpp/ggml-quants.c +5238 -5238
  38. package/cpp/ggml-threading.h +14 -14
  39. package/cpp/ggml.c +6514 -6448
  40. package/cpp/ggml.h +2194 -2163
  41. package/cpp/gguf.cpp +1329 -1325
  42. package/cpp/gguf.h +202 -202
  43. package/cpp/json-schema-to-grammar.cpp +1045 -1045
  44. package/cpp/json-schema-to-grammar.h +8 -8
  45. package/cpp/json.hpp +24766 -24766
  46. package/cpp/llama-adapter.cpp +347 -346
  47. package/cpp/llama-adapter.h +74 -73
  48. package/cpp/llama-arch.cpp +1487 -1434
  49. package/cpp/llama-arch.h +400 -395
  50. package/cpp/llama-batch.cpp +368 -368
  51. package/cpp/llama-batch.h +88 -88
  52. package/cpp/llama-chat.cpp +578 -567
  53. package/cpp/llama-chat.h +52 -51
  54. package/cpp/llama-context.cpp +1775 -1771
  55. package/cpp/llama-context.h +128 -128
  56. package/cpp/llama-cparams.cpp +1 -1
  57. package/cpp/llama-cparams.h +37 -37
  58. package/cpp/llama-cpp.h +30 -30
  59. package/cpp/llama-grammar.cpp +1139 -1139
  60. package/cpp/llama-grammar.h +143 -143
  61. package/cpp/llama-hparams.cpp +71 -71
  62. package/cpp/llama-hparams.h +139 -140
  63. package/cpp/llama-impl.cpp +167 -167
  64. package/cpp/llama-impl.h +61 -61
  65. package/cpp/llama-kv-cache.cpp +718 -718
  66. package/cpp/llama-kv-cache.h +218 -218
  67. package/cpp/llama-mmap.cpp +2 -1
  68. package/cpp/llama-mmap.h +67 -67
  69. package/cpp/llama-model-loader.cpp +1124 -1011
  70. package/cpp/llama-model-loader.h +167 -158
  71. package/cpp/llama-model.cpp +3997 -2202
  72. package/cpp/llama-model.h +370 -391
  73. package/cpp/llama-sampling.cpp +2408 -2406
  74. package/cpp/llama-sampling.h +32 -48
  75. package/cpp/llama-vocab.cpp +3247 -1982
  76. package/cpp/llama-vocab.h +125 -182
  77. package/cpp/llama.cpp +416 -2886
  78. package/cpp/llama.h +1323 -1285
  79. package/cpp/log.cpp +401 -401
  80. package/cpp/log.h +121 -121
  81. package/cpp/rn-llama.cpp +822 -0
  82. package/cpp/rn-llama.h +123 -0
  83. package/cpp/rn-llama.hpp +18 -12
  84. package/cpp/sampling.cpp +505 -500
  85. package/cpp/sgemm.cpp +2597 -2597
  86. package/cpp/speculative.cpp +277 -274
  87. package/cpp/speculative.h +28 -28
  88. package/cpp/unicode.cpp +2 -3
  89. package/ios/CMakeLists.txt +99 -0
  90. package/ios/RNLlama.h +5 -1
  91. package/ios/RNLlama.mm +2 -2
  92. package/ios/RNLlamaContext.h +8 -1
  93. package/ios/RNLlamaContext.mm +15 -11
  94. package/ios/rnllama.xcframework/Info.plist +74 -0
  95. package/jest/mock.js +3 -2
  96. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  97. package/lib/commonjs/index.js +4 -2
  98. package/lib/commonjs/index.js.map +1 -1
  99. package/lib/module/NativeRNLlama.js.map +1 -1
  100. package/lib/module/index.js +4 -2
  101. package/lib/module/index.js.map +1 -1
  102. package/lib/typescript/NativeRNLlama.d.ts +5 -1
  103. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  104. package/lib/typescript/index.d.ts.map +1 -1
  105. package/llama-rn.podspec +8 -2
  106. package/package.json +5 -2
  107. package/src/NativeRNLlama.ts +5 -1
  108. package/src/index.ts +9 -2
@@ -1,582 +1,582 @@
1
- #include "ggml-backend-impl.h"
2
- #include "ggml-backend.h"
3
- #include "ggml-impl.h"
4
- #include <algorithm>
5
- #include <codecvt>
6
- #include <cstring>
7
- #include <filesystem>
8
- #include <locale>
9
- #include <memory>
10
- #include <string>
11
- #include <type_traits>
12
- #include <vector>
13
-
14
- #ifdef _WIN32
15
- # define WIN32_LEAN_AND_MEAN
16
- # ifndef NOMINMAX
17
- # define NOMINMAX
18
- # endif
19
- # include <windows.h>
20
- #elif defined(__APPLE__)
21
- # include <mach-o/dyld.h>
22
- # include <dlfcn.h>
23
- #else
24
- # include <dlfcn.h>
25
- # include <unistd.h>
26
- #endif
27
-
28
- // Backend registry
29
- #ifdef LM_GGML_USE_CPU
30
- #include "ggml-cpu.h"
31
- #endif
32
-
33
- #ifdef LM_GGML_USE_CUDA
34
- #include "ggml-cuda.h"
35
- #endif
36
-
37
- #ifdef LM_GGML_USE_METAL
38
- #include "ggml-metal.h"
39
- #endif
40
-
41
- #ifdef LM_GGML_USE_SYCL
42
- #include "ggml-sycl.h"
43
- #endif
44
-
45
- #ifdef LM_GGML_USE_VULKAN
46
- #include "ggml-vulkan.h"
47
- #endif
48
-
49
- #ifdef LM_GGML_USE_OPENCL
50
- #include "ggml-opencl.h"
51
- #endif
52
-
53
- #ifdef LM_GGML_USE_BLAS
54
- #include "ggml-blas.h"
55
- #endif
56
-
57
- #ifdef LM_GGML_USE_RPC
58
- #include "ggml-rpc.h"
59
- #endif
60
-
61
- #ifdef LM_GGML_USE_CANN
62
- #include "ggml-cann.h"
63
- #endif
64
-
65
- #ifdef LM_GGML_USE_KOMPUTE
66
- #include "ggml-kompute.h"
67
- #endif
68
-
69
- // disable C++17 deprecation warning for std::codecvt_utf8
70
- #if defined(__clang__)
71
- # pragma clang diagnostic push
72
- # pragma clang diagnostic ignored "-Wdeprecated-declarations"
73
- #endif
74
-
75
- static std::wstring utf8_to_utf16(const std::string & str) {
76
- std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
77
- return converter.from_bytes(str);
78
- }
79
-
80
- static std::string utf16_to_utf8(const std::wstring & str) {
81
- std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
82
- return converter.to_bytes(str);
83
- }
84
-
85
- #if defined(__clang__)
86
- # pragma clang diagnostic pop
87
- #endif
88
-
89
- #ifdef _WIN32
90
-
91
- using dl_handle = std::remove_pointer_t<HMODULE>;
92
-
93
- struct dl_handle_deleter {
94
- void operator()(HMODULE handle) {
95
- FreeLibrary(handle);
96
- }
97
- };
98
-
99
- static dl_handle * dl_load_library(const std::wstring & path) {
100
- // suppress error dialogs for missing DLLs
101
- DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS);
102
- SetErrorMode(old_mode | SEM_FAILCRITICALERRORS);
103
-
104
- HMODULE handle = LoadLibraryW(path.c_str());
105
-
106
- SetErrorMode(old_mode);
107
-
108
- return handle;
109
- }
110
-
111
- static void * dl_get_sym(dl_handle * handle, const char * name) {
112
- DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS);
113
- SetErrorMode(old_mode | SEM_FAILCRITICALERRORS);
114
-
115
- void * p = (void *) GetProcAddress(handle, name);
116
-
117
- SetErrorMode(old_mode);
118
-
119
- return p;
120
- }
121
-
122
- #else
123
-
124
- using dl_handle = void;
125
-
126
- struct dl_handle_deleter {
127
- void operator()(void * handle) {
128
- dlclose(handle);
129
- }
130
- };
131
-
132
- static void * dl_load_library(const std::wstring & path) {
133
- dl_handle * handle = dlopen(utf16_to_utf8(path).c_str(), RTLD_NOW | RTLD_LOCAL);
134
-
135
- return handle;
136
- }
137
-
138
- static void * dl_get_sym(dl_handle * handle, const char * name) {
139
- return dlsym(handle, name);
140
- }
141
-
142
- #endif
143
-
144
- using dl_handle_ptr = std::unique_ptr<dl_handle, dl_handle_deleter>;
145
-
146
- struct lm_ggml_backend_reg_entry {
147
- lm_ggml_backend_reg_t reg;
148
- dl_handle_ptr handle;
149
- };
150
-
151
- struct lm_ggml_backend_registry {
152
- std::vector<lm_ggml_backend_reg_entry> backends;
153
- std::vector<lm_ggml_backend_dev_t> devices;
154
-
155
- lm_ggml_backend_registry() {
156
- #ifdef LM_GGML_USE_CUDA
157
- register_backend(lm_ggml_backend_cuda_reg());
158
- #endif
159
- #ifdef LM_GGML_USE_METAL
160
- register_backend(lm_ggml_backend_metal_reg());
161
- #endif
162
- #ifdef LM_GGML_USE_SYCL
163
- register_backend(lm_ggml_backend_sycl_reg());
164
- #endif
165
- #ifdef LM_GGML_USE_VULKAN
166
- register_backend(lm_ggml_backend_vk_reg());
167
- #endif
168
- #ifdef LM_GGML_USE_OPENCL
169
- register_backend(lm_ggml_backend_opencl_reg());
170
- #endif
171
- #ifdef LM_GGML_USE_CANN
172
- register_backend(lm_ggml_backend_cann_reg());
173
- #endif
174
- #ifdef LM_GGML_USE_BLAS
175
- register_backend(lm_ggml_backend_blas_reg());
176
- #endif
177
- #ifdef LM_GGML_USE_RPC
178
- register_backend(lm_ggml_backend_rpc_reg());
179
- #endif
180
- #ifdef LM_GGML_USE_KOMPUTE
181
- register_backend(lm_ggml_backend_kompute_reg());
182
- #endif
183
- #ifdef LM_GGML_USE_CPU
184
- register_backend(lm_ggml_backend_cpu_reg());
185
- #endif
186
- }
187
-
188
- ~lm_ggml_backend_registry() {
189
- // FIXME: backends cannot be safely unloaded without a function to destroy all the backend resources,
190
- // since backend threads may still be running and accessing resources from the dynamic library
191
- for (auto & entry : backends) {
192
- if (entry.handle) {
193
- entry.handle.release(); // NOLINT
194
- }
195
- }
196
- }
197
-
198
- void register_backend(lm_ggml_backend_reg_t reg, dl_handle_ptr handle = nullptr) {
199
- if (!reg) {
200
- return;
201
- }
202
-
203
- #ifndef NDEBUG
204
- LM_GGML_LOG_DEBUG("%s: registered backend %s (%zu devices)\n",
205
- __func__, lm_ggml_backend_reg_name(reg), lm_ggml_backend_reg_dev_count(reg));
206
- #endif
207
- backends.push_back({ reg, std::move(handle) });
208
- for (size_t i = 0; i < lm_ggml_backend_reg_dev_count(reg); i++) {
209
- register_device(lm_ggml_backend_reg_dev_get(reg, i));
210
- }
211
- }
212
-
213
- void register_device(lm_ggml_backend_dev_t device) {
214
- #ifndef NDEBUG
215
- LM_GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, lm_ggml_backend_dev_name(device), lm_ggml_backend_dev_description(device));
216
- #endif
217
- devices.push_back(device);
218
- }
219
-
220
- lm_ggml_backend_reg_t load_backend(const std::wstring & path, bool silent) {
221
- dl_handle_ptr handle { dl_load_library(path) };
222
- if (!handle) {
223
- if (!silent) {
224
- LM_GGML_LOG_ERROR("%s: failed to load %s\n", __func__, utf16_to_utf8(path).c_str());
225
- }
226
- return nullptr;
227
- }
228
-
229
- auto score_fn = (lm_ggml_backend_score_t) dl_get_sym(handle.get(), "lm_ggml_backend_score");
230
- if (score_fn && score_fn() == 0) {
231
- if (!silent) {
232
- LM_GGML_LOG_INFO("%s: backend %s is not supported on this system\n", __func__, utf16_to_utf8(path).c_str());
233
- }
234
- return nullptr;
235
- }
236
-
237
- auto backend_init_fn = (lm_ggml_backend_init_t) dl_get_sym(handle.get(), "lm_ggml_backend_init");
238
- if (!backend_init_fn) {
239
- if (!silent) {
240
- LM_GGML_LOG_ERROR("%s: failed to find lm_ggml_backend_init in %s\n", __func__, utf16_to_utf8(path).c_str());
241
- }
242
- return nullptr;
243
- }
244
-
245
- lm_ggml_backend_reg_t reg = backend_init_fn();
246
- if (!reg || reg->api_version != LM_GGML_BACKEND_API_VERSION) {
247
- if (!silent) {
248
- if (!reg) {
249
- LM_GGML_LOG_ERROR("%s: failed to initialize backend from %s: lm_ggml_backend_init returned NULL\n", __func__, utf16_to_utf8(path).c_str());
250
- } else {
251
- LM_GGML_LOG_ERROR("%s: failed to initialize backend from %s: incompatible API version (backend: %d, current: %d)\n",
252
- __func__, utf16_to_utf8(path).c_str(), reg->api_version, LM_GGML_BACKEND_API_VERSION);
253
- }
254
- }
255
- return nullptr;
256
- }
257
-
258
- LM_GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, lm_ggml_backend_reg_name(reg), utf16_to_utf8(path).c_str());
259
-
260
- register_backend(reg, std::move(handle));
261
-
262
- return reg;
263
- }
264
-
265
- void unload_backend(lm_ggml_backend_reg_t reg, bool silent) {
266
- auto it = std::find_if(backends.begin(), backends.end(),
267
- [reg](const lm_ggml_backend_reg_entry & entry) { return entry.reg == reg; });
268
-
269
- if (it == backends.end()) {
270
- if (!silent) {
271
- LM_GGML_LOG_ERROR("%s: backend not found\n", __func__);
272
- }
273
- return;
274
- }
275
-
276
- if (!silent) {
277
- LM_GGML_LOG_DEBUG("%s: unloading %s backend\n", __func__, lm_ggml_backend_reg_name(reg));
278
- }
279
-
280
- // remove devices
281
- devices.erase(
282
- std::remove_if(devices.begin(), devices.end(),
283
- [reg](lm_ggml_backend_dev_t dev) { return lm_ggml_backend_dev_backend_reg(dev) == reg; }),
284
- devices.end());
285
-
286
- // remove backend
287
- backends.erase(it);
288
- }
289
- };
290
-
291
- static lm_ggml_backend_registry & get_reg() {
292
- static lm_ggml_backend_registry reg;
293
- return reg;
294
- }
295
-
296
- // Internal API
297
- void lm_ggml_backend_register(lm_ggml_backend_reg_t reg) {
298
- get_reg().register_backend(reg);
299
- }
300
-
301
- void lm_ggml_backend_device_register(lm_ggml_backend_dev_t device) {
302
- get_reg().register_device(device);
303
- }
304
-
305
- // Backend (reg) enumeration
306
- static bool striequals(const char * a, const char * b) {
307
- for (; *a && *b; a++, b++) {
308
- if (std::tolower(*a) != std::tolower(*b)) {
309
- return false;
310
- }
311
- }
312
- return *a == *b;
313
- }
314
-
315
- size_t lm_ggml_backend_reg_count() {
316
- return get_reg().backends.size();
317
- }
318
-
319
- lm_ggml_backend_reg_t lm_ggml_backend_reg_get(size_t index) {
320
- LM_GGML_ASSERT(index < lm_ggml_backend_reg_count());
321
- return get_reg().backends[index].reg;
322
- }
323
-
324
- lm_ggml_backend_reg_t lm_ggml_backend_reg_by_name(const char * name) {
325
- for (size_t i = 0; i < lm_ggml_backend_reg_count(); i++) {
326
- lm_ggml_backend_reg_t reg = lm_ggml_backend_reg_get(i);
327
- if (striequals(lm_ggml_backend_reg_name(reg), name)) {
328
- return reg;
329
- }
330
- }
331
- return nullptr;
332
- }
333
-
334
- // Device enumeration
335
- size_t lm_ggml_backend_dev_count() {
336
- return get_reg().devices.size();
337
- }
338
-
339
- lm_ggml_backend_dev_t lm_ggml_backend_dev_get(size_t index) {
340
- LM_GGML_ASSERT(index < lm_ggml_backend_dev_count());
341
- return get_reg().devices[index];
342
- }
343
-
344
- lm_ggml_backend_dev_t lm_ggml_backend_dev_by_name(const char * name) {
345
- for (size_t i = 0; i < lm_ggml_backend_dev_count(); i++) {
346
- lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_get(i);
347
- if (striequals(lm_ggml_backend_dev_name(dev), name)) {
348
- return dev;
349
- }
350
- }
351
- return nullptr;
352
- }
353
-
354
- lm_ggml_backend_dev_t lm_ggml_backend_dev_by_type(enum lm_ggml_backend_dev_type type) {
355
- for (size_t i = 0; i < lm_ggml_backend_dev_count(); i++) {
356
- lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_get(i);
357
- if (lm_ggml_backend_dev_type(dev) == type) {
358
- return dev;
359
- }
360
- }
361
- return nullptr;
362
- }
363
-
364
- // Convenience functions
365
- lm_ggml_backend_t lm_ggml_backend_init_by_name(const char * name, const char * params) {
366
- lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_by_name(name);
367
- if (!dev) {
368
- return nullptr;
369
- }
370
- return lm_ggml_backend_dev_init(dev, params);
371
- }
372
-
373
- lm_ggml_backend_t lm_ggml_backend_init_by_type(enum lm_ggml_backend_dev_type type, const char * params) {
374
- lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_by_type(type);
375
- if (!dev) {
376
- return nullptr;
377
- }
378
- return lm_ggml_backend_dev_init(dev, params);
379
- }
380
-
381
- lm_ggml_backend_t lm_ggml_backend_init_best(void) {
382
- lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_GPU);
383
- if (!dev) {
384
- dev = lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_CPU);
385
- }
386
- if (!dev) {
387
- return nullptr;
388
- }
389
- return lm_ggml_backend_dev_init(dev, nullptr);
390
- }
391
-
392
- // Dynamic loading
393
- lm_ggml_backend_reg_t lm_ggml_backend_load(const char * path) {
394
- return get_reg().load_backend(utf8_to_utf16(path), false);
395
- }
396
-
397
- void lm_ggml_backend_unload(lm_ggml_backend_reg_t reg) {
398
- get_reg().unload_backend(reg, true);
399
- }
400
-
401
- static std::wstring get_executable_path() {
402
- #if defined(__APPLE__)
403
- // get executable path
404
- std::vector<char> path;
405
- uint32_t size;
406
- while (true) {
407
- size = path.size();
408
- if (_NSGetExecutablePath(path.data(), &size) == 0) {
409
- break;
410
- }
411
- path.resize(size);
412
- }
413
- std::string base_path(path.data(), size);
414
- // remove executable name
415
- auto last_slash = base_path.find_last_of('/');
416
- if (last_slash != std::string::npos) {
417
- base_path = base_path.substr(0, last_slash);
418
- }
419
- return utf8_to_utf16(base_path + "/");
420
- #elif defined(__linux__) || defined(__FreeBSD__)
421
- std::string base_path = ".";
422
- std::vector<char> path(1024);
423
- while (true) {
424
- // get executable path
425
- # if defined(__linux__)
426
- ssize_t len = readlink("/proc/self/exe", path.data(), path.size());
427
- # elif defined(__FreeBSD__)
428
- ssize_t len = readlink("/proc/curproc/file", path.data(), path.size());
429
- # endif
430
- if (len == -1) {
431
- break;
432
- }
433
- if (len < (ssize_t) path.size()) {
434
- base_path = std::string(path.data(), len);
435
- // remove executable name
436
- auto last_slash = base_path.find_last_of('/');
437
- if (last_slash != std::string::npos) {
438
- base_path = base_path.substr(0, last_slash);
439
- }
440
- break;
441
- }
442
- path.resize(path.size() * 2);
443
- }
444
-
445
- return utf8_to_utf16(base_path + "/");
446
- #elif defined(_WIN32)
447
- std::vector<wchar_t> path(MAX_PATH);
448
- DWORD len = GetModuleFileNameW(NULL, path.data(), path.size());
449
- if (len == 0) {
450
- return {};
451
- }
452
- std::wstring base_path(path.data(), len);
453
- // remove executable name
454
- auto last_slash = base_path.find_last_of('\\');
455
- if (last_slash != std::string::npos) {
456
- base_path = base_path.substr(0, last_slash);
457
- }
458
- return base_path + L"\\";
459
- #else
460
- return {};
461
- #endif
462
- }
463
-
464
- static std::wstring backend_filename_prefix() {
465
- #ifdef _WIN32
466
- return L"ggml-";
467
- #else
468
- return L"libggml-";
469
- #endif
470
- }
471
-
472
- static std::wstring backend_filename_suffix() {
473
- #ifdef _WIN32
474
- return L".dll";
475
- #else
476
- return L".so";
477
- #endif
478
- }
479
-
480
- static std::wstring path_separator() {
481
- #ifdef _WIN32
482
- return L"\\";
483
- #else
484
- return L"/";
485
- #endif
486
- }
487
-
488
- static lm_ggml_backend_reg_t lm_ggml_backend_load_best(const char * name, bool silent, const char * user_search_path) {
489
- // enumerate all the files that match [lib]ggml-name-*.[so|dll] in the search paths
490
- // TODO: search system paths
491
- std::wstring file_prefix = backend_filename_prefix() + utf8_to_utf16(name) + L"-";
492
- std::vector<std::wstring> search_paths;
493
- if (user_search_path == nullptr) {
494
- search_paths.push_back(L"." + path_separator());
495
- search_paths.push_back(get_executable_path());
496
- } else {
497
- search_paths.push_back(utf8_to_utf16(user_search_path) + path_separator());
498
- }
499
-
500
- int best_score = 0;
501
- std::wstring best_path;
502
-
503
- namespace fs = std::filesystem;
504
- for (const auto & search_path : search_paths) {
505
- if (!fs::exists(search_path)) {
506
- continue;
507
- }
508
- fs::directory_iterator dir_it(search_path, fs::directory_options::skip_permission_denied);
509
- for (const auto & entry : dir_it) {
510
- if (entry.is_regular_file()) {
511
- std::wstring filename = entry.path().filename().wstring();
512
- std::wstring ext = entry.path().extension().wstring();
513
- if (filename.find(file_prefix) == 0 && ext == backend_filename_suffix()) {
514
- dl_handle_ptr handle { dl_load_library(entry.path().wstring()) };
515
- if (!handle && !silent) {
516
- LM_GGML_LOG_ERROR("%s: failed to load %s\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str());
517
- }
518
- if (handle) {
519
- auto score_fn = (lm_ggml_backend_score_t) dl_get_sym(handle.get(), "lm_ggml_backend_score");
520
- if (score_fn) {
521
- int s = score_fn();
522
- #ifndef NDEBUG
523
- LM_GGML_LOG_DEBUG("%s: %s score: %d\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str(), s);
524
- #endif
525
- if (s > best_score) {
526
- best_score = s;
527
- best_path = entry.path().wstring();
528
- }
529
- } else {
530
- if (!silent) {
531
- LM_GGML_LOG_INFO("%s: failed to find lm_ggml_backend_score in %s\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str());
532
- }
533
- }
534
- }
535
- }
536
- }
537
- }
538
- }
539
-
540
- if (best_score == 0) {
541
- // try to load the base backend
542
- for (const auto & search_path : search_paths) {
543
- std::wstring path = search_path + backend_filename_prefix() + utf8_to_utf16(name) + backend_filename_suffix();
544
- if (fs::exists(path)) {
545
- return get_reg().load_backend(path, silent);
546
- }
547
- }
548
- return nullptr;
549
- }
550
-
551
- return get_reg().load_backend(best_path, silent);
552
- }
553
-
554
- void lm_ggml_backend_load_all() {
555
- lm_ggml_backend_load_all_from_path(nullptr);
556
- }
557
-
558
- void lm_ggml_backend_load_all_from_path(const char * dir_path) {
559
- #ifdef NDEBUG
560
- bool silent = true;
561
- #else
562
- bool silent = false;
563
- #endif
564
-
565
- lm_ggml_backend_load_best("blas", silent, dir_path);
566
- lm_ggml_backend_load_best("cann", silent, dir_path);
567
- lm_ggml_backend_load_best("cuda", silent, dir_path);
568
- lm_ggml_backend_load_best("hip", silent, dir_path);
569
- lm_ggml_backend_load_best("kompute", silent, dir_path);
570
- lm_ggml_backend_load_best("metal", silent, dir_path);
571
- lm_ggml_backend_load_best("rpc", silent, dir_path);
572
- lm_ggml_backend_load_best("sycl", silent, dir_path);
573
- lm_ggml_backend_load_best("vulkan", silent, dir_path);
574
- lm_ggml_backend_load_best("opencl", silent, dir_path);
575
- lm_ggml_backend_load_best("musa", silent, dir_path);
576
- lm_ggml_backend_load_best("cpu", silent, dir_path);
577
- // check the environment variable LM_GGML_BACKEND_PATH to load an out-of-tree backend
578
- const char * backend_path = std::getenv("LM_GGML_BACKEND_PATH");
579
- if (backend_path) {
580
- lm_ggml_backend_load(backend_path);
581
- }
582
- }
1
+ #include "ggml-backend-impl.h"
2
+ #include "ggml-backend.h"
3
+ #include "ggml-impl.h"
4
+ #include <algorithm>
5
+ #include <codecvt>
6
+ #include <cstring>
7
+ #include <filesystem>
8
+ #include <locale>
9
+ #include <memory>
10
+ #include <string>
11
+ #include <type_traits>
12
+ #include <vector>
13
+
14
+ #ifdef _WIN32
15
+ # define WIN32_LEAN_AND_MEAN
16
+ # ifndef NOMINMAX
17
+ # define NOMINMAX
18
+ # endif
19
+ # include <windows.h>
20
+ #elif defined(__APPLE__)
21
+ # include <mach-o/dyld.h>
22
+ # include <dlfcn.h>
23
+ #else
24
+ # include <dlfcn.h>
25
+ # include <unistd.h>
26
+ #endif
27
+
28
+ // Backend registry
29
+ #ifdef LM_GGML_USE_CPU
30
+ #include "ggml-cpu.h"
31
+ #endif
32
+
33
+ #ifdef LM_GGML_USE_CUDA
34
+ #include "ggml-cuda.h"
35
+ #endif
36
+
37
+ #ifdef LM_GGML_USE_METAL
38
+ #include "ggml-metal.h"
39
+ #endif
40
+
41
+ #ifdef LM_GGML_USE_SYCL
42
+ #include "ggml-sycl.h"
43
+ #endif
44
+
45
+ #ifdef LM_GGML_USE_VULKAN
46
+ #include "ggml-vulkan.h"
47
+ #endif
48
+
49
+ #ifdef LM_GGML_USE_OPENCL
50
+ #include "ggml-opencl.h"
51
+ #endif
52
+
53
+ #ifdef LM_GGML_USE_BLAS
54
+ #include "ggml-blas.h"
55
+ #endif
56
+
57
+ #ifdef LM_GGML_USE_RPC
58
+ #include "ggml-rpc.h"
59
+ #endif
60
+
61
+ #ifdef LM_GGML_USE_CANN
62
+ #include "ggml-cann.h"
63
+ #endif
64
+
65
+ #ifdef LM_GGML_USE_KOMPUTE
66
+ #include "ggml-kompute.h"
67
+ #endif
68
+
69
+ // disable C++17 deprecation warning for std::codecvt_utf8
70
+ #if defined(__clang__)
71
+ # pragma clang diagnostic push
72
+ # pragma clang diagnostic ignored "-Wdeprecated-declarations"
73
+ #endif
74
+
75
+ static std::wstring utf8_to_utf16(const std::string & str) {
76
+ std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
77
+ return converter.from_bytes(str);
78
+ }
79
+
80
+ static std::string utf16_to_utf8(const std::wstring & str) {
81
+ std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
82
+ return converter.to_bytes(str);
83
+ }
84
+
85
+ #if defined(__clang__)
86
+ # pragma clang diagnostic pop
87
+ #endif
88
+
89
+ #ifdef _WIN32
90
+
91
+ using dl_handle = std::remove_pointer_t<HMODULE>;
92
+
93
+ struct dl_handle_deleter {
94
+ void operator()(HMODULE handle) {
95
+ FreeLibrary(handle);
96
+ }
97
+ };
98
+
99
+ static dl_handle * dl_load_library(const std::wstring & path) {
100
+ // suppress error dialogs for missing DLLs
101
+ DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS);
102
+ SetErrorMode(old_mode | SEM_FAILCRITICALERRORS);
103
+
104
+ HMODULE handle = LoadLibraryW(path.c_str());
105
+
106
+ SetErrorMode(old_mode);
107
+
108
+ return handle;
109
+ }
110
+
111
+ static void * dl_get_sym(dl_handle * handle, const char * name) {
112
+ DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS);
113
+ SetErrorMode(old_mode | SEM_FAILCRITICALERRORS);
114
+
115
+ void * p = (void *) GetProcAddress(handle, name);
116
+
117
+ SetErrorMode(old_mode);
118
+
119
+ return p;
120
+ }
121
+
122
+ #else
123
+
124
+ using dl_handle = void;
125
+
126
+ struct dl_handle_deleter {
127
+ void operator()(void * handle) {
128
+ dlclose(handle);
129
+ }
130
+ };
131
+
132
+ static void * dl_load_library(const std::wstring & path) {
133
+ dl_handle * handle = dlopen(utf16_to_utf8(path).c_str(), RTLD_NOW | RTLD_LOCAL);
134
+
135
+ return handle;
136
+ }
137
+
138
+ static void * dl_get_sym(dl_handle * handle, const char * name) {
139
+ return dlsym(handle, name);
140
+ }
141
+
142
+ #endif
143
+
144
+ using dl_handle_ptr = std::unique_ptr<dl_handle, dl_handle_deleter>;
145
+
146
+ struct lm_ggml_backend_reg_entry {
147
+ lm_ggml_backend_reg_t reg;
148
+ dl_handle_ptr handle;
149
+ };
150
+
151
+ struct lm_ggml_backend_registry {
152
+ std::vector<lm_ggml_backend_reg_entry> backends;
153
+ std::vector<lm_ggml_backend_dev_t> devices;
154
+
155
+ lm_ggml_backend_registry() {
156
+ #ifdef LM_GGML_USE_CUDA
157
+ register_backend(lm_ggml_backend_cuda_reg());
158
+ #endif
159
+ #ifdef LM_GGML_USE_METAL
160
+ register_backend(lm_ggml_backend_metal_reg());
161
+ #endif
162
+ #ifdef LM_GGML_USE_SYCL
163
+ register_backend(lm_ggml_backend_sycl_reg());
164
+ #endif
165
+ #ifdef LM_GGML_USE_VULKAN
166
+ register_backend(lm_ggml_backend_vk_reg());
167
+ #endif
168
+ #ifdef LM_GGML_USE_OPENCL
169
+ register_backend(lm_ggml_backend_opencl_reg());
170
+ #endif
171
+ #ifdef LM_GGML_USE_CANN
172
+ register_backend(lm_ggml_backend_cann_reg());
173
+ #endif
174
+ #ifdef LM_GGML_USE_BLAS
175
+ register_backend(lm_ggml_backend_blas_reg());
176
+ #endif
177
+ #ifdef LM_GGML_USE_RPC
178
+ register_backend(lm_ggml_backend_rpc_reg());
179
+ #endif
180
+ #ifdef LM_GGML_USE_KOMPUTE
181
+ register_backend(lm_ggml_backend_kompute_reg());
182
+ #endif
183
+ #ifdef LM_GGML_USE_CPU
184
+ register_backend(lm_ggml_backend_cpu_reg());
185
+ #endif
186
+ }
187
+
188
+ ~lm_ggml_backend_registry() {
189
+ // FIXME: backends cannot be safely unloaded without a function to destroy all the backend resources,
190
+ // since backend threads may still be running and accessing resources from the dynamic library
191
+ for (auto & entry : backends) {
192
+ if (entry.handle) {
193
+ entry.handle.release(); // NOLINT
194
+ }
195
+ }
196
+ }
197
+
198
+ void register_backend(lm_ggml_backend_reg_t reg, dl_handle_ptr handle = nullptr) {
199
+ if (!reg) {
200
+ return;
201
+ }
202
+
203
+ #ifndef NDEBUG
204
+ LM_GGML_LOG_DEBUG("%s: registered backend %s (%zu devices)\n",
205
+ __func__, lm_ggml_backend_reg_name(reg), lm_ggml_backend_reg_dev_count(reg));
206
+ #endif
207
+ backends.push_back({ reg, std::move(handle) });
208
+ for (size_t i = 0; i < lm_ggml_backend_reg_dev_count(reg); i++) {
209
+ register_device(lm_ggml_backend_reg_dev_get(reg, i));
210
+ }
211
+ }
212
+
213
+ void register_device(lm_ggml_backend_dev_t device) {
214
+ #ifndef NDEBUG
215
+ LM_GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, lm_ggml_backend_dev_name(device), lm_ggml_backend_dev_description(device));
216
+ #endif
217
+ devices.push_back(device);
218
+ }
219
+
220
+ lm_ggml_backend_reg_t load_backend(const std::wstring & path, bool silent) {
221
+ dl_handle_ptr handle { dl_load_library(path) };
222
+ if (!handle) {
223
+ if (!silent) {
224
+ LM_GGML_LOG_ERROR("%s: failed to load %s\n", __func__, utf16_to_utf8(path).c_str());
225
+ }
226
+ return nullptr;
227
+ }
228
+
229
+ auto score_fn = (lm_ggml_backend_score_t) dl_get_sym(handle.get(), "lm_ggml_backend_score");
230
+ if (score_fn && score_fn() == 0) {
231
+ if (!silent) {
232
+ LM_GGML_LOG_INFO("%s: backend %s is not supported on this system\n", __func__, utf16_to_utf8(path).c_str());
233
+ }
234
+ return nullptr;
235
+ }
236
+
237
+ auto backend_init_fn = (lm_ggml_backend_init_t) dl_get_sym(handle.get(), "lm_ggml_backend_init");
238
+ if (!backend_init_fn) {
239
+ if (!silent) {
240
+ LM_GGML_LOG_ERROR("%s: failed to find lm_ggml_backend_init in %s\n", __func__, utf16_to_utf8(path).c_str());
241
+ }
242
+ return nullptr;
243
+ }
244
+
245
+ lm_ggml_backend_reg_t reg = backend_init_fn();
246
+ if (!reg || reg->api_version != LM_GGML_BACKEND_API_VERSION) {
247
+ if (!silent) {
248
+ if (!reg) {
249
+ LM_GGML_LOG_ERROR("%s: failed to initialize backend from %s: lm_ggml_backend_init returned NULL\n", __func__, utf16_to_utf8(path).c_str());
250
+ } else {
251
+ LM_GGML_LOG_ERROR("%s: failed to initialize backend from %s: incompatible API version (backend: %d, current: %d)\n",
252
+ __func__, utf16_to_utf8(path).c_str(), reg->api_version, LM_GGML_BACKEND_API_VERSION);
253
+ }
254
+ }
255
+ return nullptr;
256
+ }
257
+
258
+ LM_GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, lm_ggml_backend_reg_name(reg), utf16_to_utf8(path).c_str());
259
+
260
+ register_backend(reg, std::move(handle));
261
+
262
+ return reg;
263
+ }
264
+
265
+ void unload_backend(lm_ggml_backend_reg_t reg, bool silent) {
266
+ auto it = std::find_if(backends.begin(), backends.end(),
267
+ [reg](const lm_ggml_backend_reg_entry & entry) { return entry.reg == reg; });
268
+
269
+ if (it == backends.end()) {
270
+ if (!silent) {
271
+ LM_GGML_LOG_ERROR("%s: backend not found\n", __func__);
272
+ }
273
+ return;
274
+ }
275
+
276
+ if (!silent) {
277
+ LM_GGML_LOG_DEBUG("%s: unloading %s backend\n", __func__, lm_ggml_backend_reg_name(reg));
278
+ }
279
+
280
+ // remove devices
281
+ devices.erase(
282
+ std::remove_if(devices.begin(), devices.end(),
283
+ [reg](lm_ggml_backend_dev_t dev) { return lm_ggml_backend_dev_backend_reg(dev) == reg; }),
284
+ devices.end());
285
+
286
+ // remove backend
287
+ backends.erase(it);
288
+ }
289
+ };
290
+
291
+ static lm_ggml_backend_registry & get_reg() {
292
+ static lm_ggml_backend_registry reg;
293
+ return reg;
294
+ }
295
+
296
// Internal API
// Add a backend to the global registry.
void lm_ggml_backend_register(lm_ggml_backend_reg_t reg) {
    get_reg().register_backend(reg);
}
300
+
301
// Add a single device to the global registry.
void lm_ggml_backend_device_register(lm_ggml_backend_dev_t device) {
    get_reg().register_device(device);
}
304
+
305
// Backend (reg) enumeration

// Case-insensitive string equality for backend/device name lookup.
// The arguments are cast through unsigned char before calling
// std::tolower: passing a plain char with a negative value is
// undefined behavior per the C standard.
static bool striequals(const char * a, const char * b) {
    for (; *a && *b; a++, b++) {
        if (std::tolower(static_cast<unsigned char>(*a)) != std::tolower(static_cast<unsigned char>(*b))) {
            return false;
        }
    }
    // equal only if both strings terminated at the same position
    return *a == *b;
}
314
+
315
// Number of backends currently registered.
size_t lm_ggml_backend_reg_count() {
    return get_reg().backends.size();
}
318
+
319
// Backend registration at `index`; asserts that the index is in range.
lm_ggml_backend_reg_t lm_ggml_backend_reg_get(size_t index) {
    LM_GGML_ASSERT(index < lm_ggml_backend_reg_count());
    return get_reg().backends[index].reg;
}
323
+
324
+ lm_ggml_backend_reg_t lm_ggml_backend_reg_by_name(const char * name) {
325
+ for (size_t i = 0; i < lm_ggml_backend_reg_count(); i++) {
326
+ lm_ggml_backend_reg_t reg = lm_ggml_backend_reg_get(i);
327
+ if (striequals(lm_ggml_backend_reg_name(reg), name)) {
328
+ return reg;
329
+ }
330
+ }
331
+ return nullptr;
332
+ }
333
+
334
// Device enumeration
// Number of devices currently registered (across all backends).
size_t lm_ggml_backend_dev_count() {
    return get_reg().devices.size();
}
338
+
339
// Device at `index`; asserts that the index is in range.
lm_ggml_backend_dev_t lm_ggml_backend_dev_get(size_t index) {
    LM_GGML_ASSERT(index < lm_ggml_backend_dev_count());
    return get_reg().devices[index];
}
343
+
344
+ lm_ggml_backend_dev_t lm_ggml_backend_dev_by_name(const char * name) {
345
+ for (size_t i = 0; i < lm_ggml_backend_dev_count(); i++) {
346
+ lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_get(i);
347
+ if (striequals(lm_ggml_backend_dev_name(dev), name)) {
348
+ return dev;
349
+ }
350
+ }
351
+ return nullptr;
352
+ }
353
+
354
+ lm_ggml_backend_dev_t lm_ggml_backend_dev_by_type(enum lm_ggml_backend_dev_type type) {
355
+ for (size_t i = 0; i < lm_ggml_backend_dev_count(); i++) {
356
+ lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_get(i);
357
+ if (lm_ggml_backend_dev_type(dev) == type) {
358
+ return dev;
359
+ }
360
+ }
361
+ return nullptr;
362
+ }
363
+
364
+ // Convenience functions
365
+ lm_ggml_backend_t lm_ggml_backend_init_by_name(const char * name, const char * params) {
366
+ lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_by_name(name);
367
+ if (!dev) {
368
+ return nullptr;
369
+ }
370
+ return lm_ggml_backend_dev_init(dev, params);
371
+ }
372
+
373
+ lm_ggml_backend_t lm_ggml_backend_init_by_type(enum lm_ggml_backend_dev_type type, const char * params) {
374
+ lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_by_type(type);
375
+ if (!dev) {
376
+ return nullptr;
377
+ }
378
+ return lm_ggml_backend_dev_init(dev, params);
379
+ }
380
+
381
+ lm_ggml_backend_t lm_ggml_backend_init_best(void) {
382
+ lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_GPU);
383
+ if (!dev) {
384
+ dev = lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_CPU);
385
+ }
386
+ if (!dev) {
387
+ return nullptr;
388
+ }
389
+ return lm_ggml_backend_dev_init(dev, nullptr);
390
+ }
391
+
392
// Dynamic loading
// Load a backend shared library from an explicit path (non-silent:
// failures are logged).
lm_ggml_backend_reg_t lm_ggml_backend_load(const char * path) {
    return get_reg().load_backend(utf8_to_utf16(path), false);
}
396
+
397
// Unload a previously loaded backend; unknown registrations are
// ignored silently.
void lm_ggml_backend_unload(lm_ggml_backend_reg_t reg) {
    get_reg().unload_backend(reg, true);
}
400
+
401
+ static std::wstring get_executable_path() {
402
+ #if defined(__APPLE__)
403
+ // get executable path
404
+ std::vector<char> path;
405
+ uint32_t size;
406
+ while (true) {
407
+ size = path.size();
408
+ if (_NSGetExecutablePath(path.data(), &size) == 0) {
409
+ break;
410
+ }
411
+ path.resize(size);
412
+ }
413
+ std::string base_path(path.data(), size);
414
+ // remove executable name
415
+ auto last_slash = base_path.find_last_of('/');
416
+ if (last_slash != std::string::npos) {
417
+ base_path = base_path.substr(0, last_slash);
418
+ }
419
+ return utf8_to_utf16(base_path + "/");
420
+ #elif defined(__linux__) || defined(__FreeBSD__)
421
+ std::string base_path = ".";
422
+ std::vector<char> path(1024);
423
+ while (true) {
424
+ // get executable path
425
+ # if defined(__linux__)
426
+ ssize_t len = readlink("/proc/self/exe", path.data(), path.size());
427
+ # elif defined(__FreeBSD__)
428
+ ssize_t len = readlink("/proc/curproc/file", path.data(), path.size());
429
+ # endif
430
+ if (len == -1) {
431
+ break;
432
+ }
433
+ if (len < (ssize_t) path.size()) {
434
+ base_path = std::string(path.data(), len);
435
+ // remove executable name
436
+ auto last_slash = base_path.find_last_of('/');
437
+ if (last_slash != std::string::npos) {
438
+ base_path = base_path.substr(0, last_slash);
439
+ }
440
+ break;
441
+ }
442
+ path.resize(path.size() * 2);
443
+ }
444
+
445
+ return utf8_to_utf16(base_path + "/");
446
+ #elif defined(_WIN32)
447
+ std::vector<wchar_t> path(MAX_PATH);
448
+ DWORD len = GetModuleFileNameW(NULL, path.data(), path.size());
449
+ if (len == 0) {
450
+ return {};
451
+ }
452
+ std::wstring base_path(path.data(), len);
453
+ // remove executable name
454
+ auto last_slash = base_path.find_last_of('\\');
455
+ if (last_slash != std::string::npos) {
456
+ base_path = base_path.substr(0, last_slash);
457
+ }
458
+ return base_path + L"\\";
459
+ #else
460
+ return {};
461
+ #endif
462
+ }
463
+
464
// Platform-specific filename prefix for backend shared libraries:
// "ggml-" on Windows, "libggml-" elsewhere.
static std::wstring backend_filename_prefix() {
#ifdef _WIN32
    return L"ggml-";
#else
    return L"libggml-";
#endif
}
471
+
472
// Platform-specific filename extension for backend shared libraries.
// NOTE(review): macOS dynamic libraries conventionally use ".dylib";
// this assumes backends are built with a ".so" suffix on Apple platforms
// too — confirm against the build configuration.
static std::wstring backend_filename_suffix() {
#ifdef _WIN32
    return L".dll";
#else
    return L".so";
#endif
}
479
+
480
// Platform-specific path separator as a wide string.
static std::wstring path_separator() {
#ifdef _WIN32
    return L"\\";
#else
    return L"/";
#endif
}
487
+
488
// Load the best available variant of backend `name`.
//
// Scans the search paths (current directory + executable directory by
// default, or `user_search_path` when given) for files matching
// [lib]ggml-<name>-<variant>.[so|dll], asks each candidate library for its
// lm_ggml_backend_score, and loads + registers the highest-scoring one.
// If no scored variant is found, falls back to the unsuffixed base library
// [lib]ggml-<name>.[so|dll]. Returns nullptr when nothing could be loaded;
// `silent` suppresses diagnostics.
static lm_ggml_backend_reg_t lm_ggml_backend_load_best(const char * name, bool silent, const char * user_search_path) {
    // enumerate all the files that match [lib]ggml-name-*.[so|dll] in the search paths
    // TODO: search system paths
    std::wstring file_prefix = backend_filename_prefix() + utf8_to_utf16(name) + L"-";
    std::vector<std::wstring> search_paths;
    if (user_search_path == nullptr) {
        search_paths.push_back(L"." + path_separator());
        search_paths.push_back(get_executable_path());
    } else {
        // an explicit user-provided path replaces the defaults entirely
        search_paths.push_back(utf8_to_utf16(user_search_path) + path_separator());
    }

    int best_score = 0;
    std::wstring best_path;

    namespace fs = std::filesystem;
    for (const auto & search_path : search_paths) {
        if (!fs::exists(search_path)) {
            continue;
        }
        fs::directory_iterator dir_it(search_path, fs::directory_options::skip_permission_denied);
        for (const auto & entry : dir_it) {
            if (entry.is_regular_file()) {
                std::wstring filename = entry.path().filename().wstring();
                std::wstring ext = entry.path().extension().wstring();
                if (filename.find(file_prefix) == 0 && ext == backend_filename_suffix()) {
                    // candidate variant: load it and query its score
                    dl_handle_ptr handle { dl_load_library(entry.path().wstring()) };
                    if (!handle && !silent) {
                        LM_GGML_LOG_ERROR("%s: failed to load %s\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str());
                    }
                    if (handle) {
                        auto score_fn = (lm_ggml_backend_score_t) dl_get_sym(handle.get(), "lm_ggml_backend_score");
                        if (score_fn) {
                            int s = score_fn();
#ifndef NDEBUG
                            LM_GGML_LOG_DEBUG("%s: %s score: %d\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str(), s);
#endif
                            if (s > best_score) {
                                best_score = s;
                                best_path = entry.path().wstring();
                            }
                        } else {
                            // library without a score entry point is not a scored variant
                            if (!silent) {
                                LM_GGML_LOG_INFO("%s: failed to find lm_ggml_backend_score in %s\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str());
                            }
                        }
                    }
                }
            }
        }
    }

    if (best_score == 0) {
        // try to load the base backend
        for (const auto & search_path : search_paths) {
            std::wstring path = search_path + backend_filename_prefix() + utf8_to_utf16(name) + backend_filename_suffix();
            if (fs::exists(path)) {
                return get_reg().load_backend(path, silent);
            }
        }
        return nullptr;
    }

    return get_reg().load_backend(best_path, silent);
}
553
+
554
// Load all known backends from the default search paths.
void lm_ggml_backend_load_all() {
    lm_ggml_backend_load_all_from_path(nullptr);
}
557
+
558
+ void lm_ggml_backend_load_all_from_path(const char * dir_path) {
559
+ #ifdef NDEBUG
560
+ bool silent = true;
561
+ #else
562
+ bool silent = false;
563
+ #endif
564
+
565
+ lm_ggml_backend_load_best("blas", silent, dir_path);
566
+ lm_ggml_backend_load_best("cann", silent, dir_path);
567
+ lm_ggml_backend_load_best("cuda", silent, dir_path);
568
+ lm_ggml_backend_load_best("hip", silent, dir_path);
569
+ lm_ggml_backend_load_best("kompute", silent, dir_path);
570
+ lm_ggml_backend_load_best("metal", silent, dir_path);
571
+ lm_ggml_backend_load_best("rpc", silent, dir_path);
572
+ lm_ggml_backend_load_best("sycl", silent, dir_path);
573
+ lm_ggml_backend_load_best("vulkan", silent, dir_path);
574
+ lm_ggml_backend_load_best("opencl", silent, dir_path);
575
+ lm_ggml_backend_load_best("musa", silent, dir_path);
576
+ lm_ggml_backend_load_best("cpu", silent, dir_path);
577
+ // check the environment variable LM_GGML_BACKEND_PATH to load an out-of-tree backend
578
+ const char * backend_path = std::getenv("LM_GGML_BACKEND_PATH");
579
+ if (backend_path) {
580
+ lm_ggml_backend_load(backend_path);
581
+ }
582
+ }