cui-llama.rn 1.4.4 → 1.5.0

Files changed (216)
  1. package/android/src/main/CMakeLists.txt +9 -2
  2. package/android/src/main/jni.cpp +54 -34
  3. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  11. package/cpp/binary-ops.cpp +158 -0
  12. package/cpp/binary-ops.h +16 -0
  13. package/cpp/chat.cpp +1769 -1085
  14. package/cpp/chat.h +143 -0
  15. package/cpp/common.cpp +1562 -1996
  16. package/cpp/common.h +677 -744
  17. package/cpp/cpu-common.h +72 -0
  18. package/cpp/ggml-alloc.c +1039 -1030
  19. package/cpp/ggml-alloc.h +1 -1
  20. package/cpp/ggml-backend-impl.h +255 -255
  21. package/cpp/ggml-backend-reg.cpp +586 -582
  22. package/cpp/ggml-backend.cpp +2004 -2002
  23. package/cpp/ggml-backend.h +354 -354
  24. package/cpp/ggml-common.h +1857 -1851
  25. package/cpp/ggml-cpp.h +39 -39
  26. package/cpp/ggml-cpu-aarch64.cpp +5725 -4247
  27. package/cpp/ggml-cpu-aarch64.h +8 -8
  28. package/cpp/ggml-cpu-impl.h +512 -380
  29. package/cpp/ggml-cpu-quants.c +13026 -11517
  30. package/cpp/ggml-cpu-traits.cpp +36 -36
  31. package/cpp/ggml-cpu-traits.h +38 -38
  32. package/cpp/ggml-cpu.c +3438 -14485
  33. package/cpp/ggml-cpu.cpp +655 -633
  34. package/cpp/ggml-cpu.h +138 -135
  35. package/cpp/ggml-impl.h +594 -567
  36. package/cpp/ggml-metal-impl.h +312 -3
  37. package/cpp/ggml-metal.h +66 -66
  38. package/cpp/ggml-metal.m +5360 -5002
  39. package/cpp/ggml-opt.cpp +854 -854
  40. package/cpp/ggml-opt.h +216 -216
  41. package/cpp/ggml-quants.c +5238 -5238
  42. package/cpp/ggml-threading.h +14 -14
  43. package/cpp/ggml.c +6618 -6524
  44. package/cpp/ggml.h +2222 -2194
  45. package/cpp/gguf.cpp +1330 -1329
  46. package/cpp/gguf.h +202 -202
  47. package/cpp/json-schema-to-grammar.cpp +1024 -1025
  48. package/cpp/json-schema-to-grammar.h +21 -22
  49. package/cpp/json.hpp +24766 -24766
  50. package/cpp/llama-adapter.cpp +382 -347
  51. package/cpp/llama-adapter.h +76 -74
  52. package/cpp/llama-arch.cpp +1714 -1492
  53. package/cpp/llama-arch.h +428 -402
  54. package/cpp/llama-batch.cpp +368 -368
  55. package/cpp/llama-batch.h +88 -88
  56. package/cpp/llama-chat.cpp +640 -587
  57. package/cpp/llama-chat.h +56 -53
  58. package/cpp/llama-context.cpp +2831 -1775
  59. package/cpp/llama-context.h +265 -128
  60. package/cpp/llama-cparams.cpp +1 -1
  61. package/cpp/llama-cparams.h +38 -37
  62. package/cpp/llama-cpp.h +30 -30
  63. package/cpp/llama-grammar.cpp +1219 -1219
  64. package/cpp/llama-grammar.h +173 -164
  65. package/cpp/llama-graph.cpp +1695 -0
  66. package/cpp/llama-graph.h +592 -0
  67. package/cpp/llama-hparams.cpp +79 -71
  68. package/cpp/llama-hparams.h +156 -139
  69. package/cpp/llama-impl.cpp +167 -167
  70. package/cpp/llama-impl.h +61 -61
  71. package/cpp/llama-io.cpp +15 -0
  72. package/cpp/llama-io.h +35 -0
  73. package/cpp/llama-kv-cache.cpp +1380 -718
  74. package/cpp/llama-kv-cache.h +213 -218
  75. package/cpp/llama-memory.cpp +1 -0
  76. package/cpp/llama-memory.h +21 -0
  77. package/cpp/llama-mmap.cpp +600 -590
  78. package/cpp/llama-mmap.h +68 -68
  79. package/cpp/llama-model-loader.cpp +1129 -1124
  80. package/cpp/llama-model-loader.h +169 -167
  81. package/cpp/llama-model.cpp +13080 -4023
  82. package/cpp/llama-model.h +409 -370
  83. package/cpp/llama-sampling.cpp +2563 -2525
  84. package/cpp/llama-sampling.h +32 -32
  85. package/cpp/llama-vocab.cpp +3295 -3252
  86. package/cpp/llama-vocab.h +125 -125
  87. package/cpp/llama.cpp +351 -10137
  88. package/cpp/llama.h +1434 -1340
  89. package/cpp/log.cpp +427 -423
  90. package/cpp/log.h +132 -132
  91. package/cpp/{chat-template.hpp → minja/chat-template.hpp} +537 -529
  92. package/cpp/{minja.hpp → minja/minja.hpp} +2941 -2883
  93. package/cpp/ops.cpp +8723 -0
  94. package/cpp/ops.h +128 -0
  95. package/cpp/rn-llama.cpp +45 -71
  96. package/cpp/rn-llama.h +3 -3
  97. package/cpp/sampling.cpp +573 -532
  98. package/cpp/sgemm.cpp +3043 -2598
  99. package/cpp/sgemm.h +14 -14
  100. package/cpp/simd-mappings.h +888 -0
  101. package/cpp/speculative.cpp +278 -277
  102. package/cpp/speculative.h +28 -28
  103. package/cpp/unary-ops.cpp +186 -0
  104. package/cpp/unary-ops.h +28 -0
  105. package/cpp/vec.cpp +258 -0
  106. package/cpp/vec.h +802 -0
  107. package/ios/CMakeLists.txt +5 -2
  108. package/ios/RNLlama.mm +2 -2
  109. package/ios/RNLlamaContext.mm +40 -24
  110. package/package.json +1 -1
  111. package/src/NativeRNLlama.ts +6 -4
  112. package/src/index.ts +3 -1
  113. package/android/src/main/build-arm64/CMakeCache.txt +0 -429
  114. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCCompiler.cmake +0 -81
  115. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCXXCompiler.cmake +0 -101
  116. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_C.bin +0 -0
  117. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_CXX.bin +0 -0
  118. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeSystem.cmake +0 -15
  119. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.c +0 -904
  120. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.o +0 -0
  121. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.cpp +0 -919
  122. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.o +0 -0
  123. package/android/src/main/build-arm64/CMakeFiles/CMakeConfigureLog.yaml +0 -431
  124. package/android/src/main/build-arm64/CMakeFiles/CMakeDirectoryInformation.cmake +0 -16
  125. package/android/src/main/build-arm64/CMakeFiles/Makefile.cmake +0 -165
  126. package/android/src/main/build-arm64/CMakeFiles/Makefile2 +0 -297
  127. package/android/src/main/build-arm64/CMakeFiles/Progress/1 +0 -1
  128. package/android/src/main/build-arm64/CMakeFiles/Progress/2 +0 -1
  129. package/android/src/main/build-arm64/CMakeFiles/Progress/3 +0 -1
  130. package/android/src/main/build-arm64/CMakeFiles/Progress/4 +0 -1
  131. package/android/src/main/build-arm64/CMakeFiles/Progress/5 +0 -1
  132. package/android/src/main/build-arm64/CMakeFiles/Progress/6 +0 -1
  133. package/android/src/main/build-arm64/CMakeFiles/Progress/count.txt +0 -1
  134. package/android/src/main/build-arm64/CMakeFiles/TargetDirectories.txt +0 -8
  135. package/android/src/main/build-arm64/CMakeFiles/cmake.check_cache +0 -1
  136. package/android/src/main/build-arm64/CMakeFiles/progress.marks +0 -1
  137. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o +0 -0
  138. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o.d +0 -58
  139. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o +0 -0
  140. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o.d +0 -756
  141. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o +0 -0
  142. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o.d +0 -709
  143. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o +0 -0
  144. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o.d +0 -714
  145. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o +0 -0
  146. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o.d +0 -62
  147. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o +0 -0
  148. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o.d +0 -708
  149. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o +0 -0
  150. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o.d +0 -113
  151. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o +0 -0
  152. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o.d +0 -713
  153. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o +0 -0
  154. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o.d +0 -763
  155. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o +0 -0
  156. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o.d +0 -61
  157. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o +0 -0
  158. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o.d +0 -707
  159. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o +0 -0
  160. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o.d +0 -104
  161. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o +0 -0
  162. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o.d +0 -714
  163. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o +0 -0
  164. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o.d +0 -723
  165. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/DependInfo.cmake +0 -62
  166. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/build.make +0 -722
  167. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/cmake_clean.cmake +0 -89
  168. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.make +0 -2
  169. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.ts +0 -2
  170. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/depend.make +0 -2
  171. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/flags.make +0 -17
  172. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/progress.make +0 -41
  173. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/DependInfo.cmake +0 -62
  174. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/build.make +0 -722
  175. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/cmake_clean.cmake +0 -89
  176. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.make +0 -2
  177. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.ts +0 -2
  178. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/depend.make +0 -2
  179. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/flags.make +0 -17
  180. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/progress.make +0 -41
  181. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/DependInfo.cmake +0 -62
  182. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/build.make +0 -722
  183. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/cmake_clean.cmake +0 -89
  184. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.make +0 -2
  185. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.ts +0 -2
  186. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/depend.make +0 -2
  187. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/flags.make +0 -17
  188. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/progress.make +0 -41
  189. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/DependInfo.cmake +0 -62
  190. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/build.make +0 -722
  191. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/cmake_clean.cmake +0 -89
  192. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.make +0 -2
  193. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.ts +0 -2
  194. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/depend.make +0 -2
  195. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/flags.make +0 -17
  196. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/progress.make +0 -41
  197. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/DependInfo.cmake +0 -62
  198. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/build.make +0 -722
  199. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/cmake_clean.cmake +0 -89
  200. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.make +0 -2
  201. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.ts +0 -2
  202. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/depend.make +0 -2
  203. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/flags.make +0 -17
  204. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/progress.make +0 -41
  205. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/DependInfo.cmake +0 -62
  206. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/build.make +0 -722
  207. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/cmake_clean.cmake +0 -89
  208. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.make +0 -2
  209. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.ts +0 -2
  210. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/depend.make +0 -2
  211. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/flags.make +0 -17
  212. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/progress.make +0 -41
  213. package/android/src/main/build-arm64/Makefile +0 -1862
  214. package/android/src/main/build-arm64/cmake_install.cmake +0 -66
  215. package/cpp/chat.hpp +0 -55
  216. package/cpp/rn-llama.hpp +0 -913
package/cpp/llama-model-loader.h
@@ -1,167 +1,169 @@
-#pragma once
-
-#include "llama.h"
-
-#include "llama-impl.h"
-#include "llama-arch.h"
-#include "llama-mmap.h"
-
-#include "ggml-cpp.h"
-
-#include <cstddef>
-#include <map>
-#include <stdexcept>
-#include <unordered_map>
-
-using llama_buf_map = std::unordered_map<uint32_t, lm_ggml_backend_buffer_t>;
-
-enum llama_fver {
-    LM_GGUF_FILE_VERSION_V1 = 1,
-    LM_GGUF_FILE_VERSION_V2 = 2,
-    LM_GGUF_FILE_VERSION_V3 = 3,
-};
-
-const char * llama_file_version_name(llama_fver version);
-
-struct llama_model_loader {
-    // Holds information on a model weight
-    struct llama_tensor_weight {
-        uint16_t idx; // source file index
-        size_t offs; // tensor data offset in the original file
-
-        lm_ggml_tensor * tensor;
-
-        llama_tensor_weight(const llama_file * file, uint16_t idx, const struct lm_gguf_context * lm_gguf_ctx, lm_ggml_tensor * tensor) : idx(idx), tensor(tensor) {
-            const int tensor_idx = lm_gguf_find_tensor(lm_gguf_ctx, lm_ggml_get_name(tensor));
-            if (tensor_idx < 0) {
-                throw std::runtime_error(format("tensor '%s' not found in the model", lm_ggml_get_name(tensor)));
-            }
-
-            offs = lm_gguf_get_data_offset(lm_gguf_ctx) + lm_gguf_get_tensor_offset(lm_gguf_ctx, tensor_idx);
-            if (offs + lm_ggml_nbytes(tensor) < offs || offs + lm_ggml_nbytes(tensor) > file->size()) {
-                throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", lm_ggml_get_name(tensor)));
-            }
-        }
-    };
-
-    // custom comparator to sort weights more nicely by layer
-    struct weight_name_comparer {
-        bool operator()(const std::string & a, const std::string & b) const {
-            int a_layer = -1;
-            int b_layer = -1;
-            sscanf(a.c_str(), "blk.%d.", &a_layer);
-            sscanf(b.c_str(), "blk.%d.", &b_layer);
-            if (a_layer != b_layer) {
-                return a_layer < b_layer;
-            }
-            return a < b;
-        }
-    };
-
-    static const int TENSOR_NOT_REQUIRED = 1;
-    static const int TENSOR_DUPLICATED = 2;
-
-    int n_kv = 0;
-    int n_tensors = 0;
-    int n_created = 0;
-
-    uint64_t n_elements = 0;
-    size_t n_bytes = 0;
-
-    bool use_mmap = false;
-    bool check_tensors;
-
-    llama_files files;
-    llama_ftype ftype;
-    llama_fver fver;
-
-    llama_mmaps mappings;
-
-    std::map<std::string, struct llama_tensor_weight, weight_name_comparer> weights_map;
-    std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
-
-    lm_gguf_context_ptr meta;
-    std::vector<lm_ggml_context_ptr> contexts;
-
-    std::string arch_name;
-    LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
-
-    size_t size_done = 0;
-    size_t size_data = 0;
-    std::vector<std::pair<size_t, size_t>> mmaps_used;
-
-    llama_model_loader(
-        const std::string & fname,
-        std::vector<std::string> & splits, // optional, only need if the split does not follow naming scheme
-        bool use_mmap,
-        bool check_tensors,
-        const struct llama_model_kv_override * param_overrides_p);
-
-    template<typename T>
-    typename std::enable_if<std::is_integral<T>::value, bool>::type
-    get_arr_n(const std::string & key, T & result, bool required = true);
-
-    template<typename T>
-    typename std::enable_if<std::is_integral<T>::value, bool>::type
-    get_arr_n(enum llm_kv kid, T & result, bool required = true);
-
-    template<typename T>
-    bool get_arr(const std::string & key, std::vector<T> & result, bool required = true);
-
-    template<typename T, size_t N_MAX>
-    bool get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required = true);
-
-    template<typename T>
-    bool get_arr(enum llm_kv kid, T & result, bool required = true);
-
-    template<typename T>
-    bool get_key(const std::string & key, T & result, bool required = true);
-
-    template<typename T>
-    bool get_key(enum llm_kv kid, T & result, bool required = true);
-
-    template<typename T, size_t N_MAX>
-    bool get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required = true);
-
-    template<typename T>
-    bool get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required = true);
-
-    std::string get_arch_name() const;
-
-    enum llm_arch get_arch() const;
-
-    const llama_tensor_weight * get_weight(const char * name) const;
-
-    const llama_tensor_weight & require_weight(const char * name) const;
-
-    struct lm_ggml_tensor * get_tensor_meta(const char * name) const;
-
-    struct lm_ggml_tensor * require_tensor_meta(const std::string & name) const;
-
-    const struct lm_ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const;
-
-    struct lm_ggml_tensor * create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0);
-
-    struct lm_ggml_tensor * create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true);
-
-    void done_getting_tensors() const;
-
-    void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr);
-
-    void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const;
-
-    // for backwards compatibility, does not support ggml-backend
-    void load_data_for(struct lm_ggml_tensor * cur) const;
-
-    // Returns false if cancelled by progress_callback
-    bool load_all_data(
-        struct lm_ggml_context * ctx,
-        llama_buf_map & bufs,
-        llama_mlocks * lmlocks,
-        llama_progress_callback progress_callback,
-        void * progress_callback_user_data);
-
-    std::string ftype_name() const;
-
-    void print_info() const;
-};
+#pragma once
+
+#include "llama.h"
+
+#include "llama-impl.h"
+#include "llama-arch.h"
+#include "llama-mmap.h"
+
+#include "ggml-cpp.h"
+
+#include <cstddef>
+#include <map>
+#include <stdexcept>
+#include <unordered_map>
+
+using llama_buf_map = std::unordered_map<uint32_t, lm_ggml_backend_buffer_t>;
+
+enum llama_fver {
+    LM_GGUF_FILE_VERSION_V1 = 1,
+    LM_GGUF_FILE_VERSION_V2 = 2,
+    LM_GGUF_FILE_VERSION_V3 = 3,
+};
+
+const char * llama_file_version_name(llama_fver version);
+
+struct llama_model_loader {
+    // Holds information on a model weight
+    struct llama_tensor_weight {
+        uint16_t idx; // source file index
+        size_t offs; // tensor data offset in the original file
+
+        lm_ggml_tensor * tensor;
+
+        llama_tensor_weight(const llama_file * file, uint16_t idx, const struct lm_gguf_context * lm_gguf_ctx, lm_ggml_tensor * tensor) : idx(idx), tensor(tensor) {
+            const int tensor_idx = lm_gguf_find_tensor(lm_gguf_ctx, lm_ggml_get_name(tensor));
+            if (tensor_idx < 0) {
+                throw std::runtime_error(format("tensor '%s' not found in the model", lm_ggml_get_name(tensor)));
+            }
+
+            offs = lm_gguf_get_data_offset(lm_gguf_ctx) + lm_gguf_get_tensor_offset(lm_gguf_ctx, tensor_idx);
+            if (offs + lm_ggml_nbytes(tensor) < offs || offs + lm_ggml_nbytes(tensor) > file->size()) {
+                throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", lm_ggml_get_name(tensor)));
+            }
+        }
+    };
+
+    // custom comparator to sort weights more nicely by layer
+    struct weight_name_comparer {
+        bool operator()(const std::string & a, const std::string & b) const {
+            int a_layer = -1;
+            int b_layer = -1;
+            sscanf(a.c_str(), "blk.%d.", &a_layer);
+            sscanf(b.c_str(), "blk.%d.", &b_layer);
+            if (a_layer != b_layer) {
+                return a_layer < b_layer;
+            }
+            return a < b;
+        }
+    };
+
+    static const int TENSOR_NOT_REQUIRED = 1;
+    static const int TENSOR_DUPLICATED = 2;
+
+    int n_kv = 0;
+    int n_tensors = 0;
+    int n_created = 0;
+
+    uint64_t n_elements = 0;
+    size_t n_bytes = 0;
+
+    bool use_mmap = false;
+    bool check_tensors;
+
+    llama_files files;
+    llama_ftype ftype;
+    llama_fver fver;
+
+    llama_mmaps mappings;
+
+    std::map<std::string, llama_tensor_weight, weight_name_comparer> weights_map;
+    std::unordered_map<std::string, llama_model_kv_override> kv_overrides;
+    const llama_model_tensor_buft_override * tensor_buft_overrides;
+
+    lm_gguf_context_ptr meta;
+    std::vector<lm_ggml_context_ptr> contexts;
+
+    std::string arch_name;
+    LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
+
+    size_t size_done = 0;
+    size_t size_data = 0;
+    std::vector<std::pair<size_t, size_t>> mmaps_used;
+
+    llama_model_loader(
+        const std::string & fname,
+        std::vector<std::string> & splits, // optional, only need if the split does not follow naming scheme
+        bool use_mmap,
+        bool check_tensors,
+        const llama_model_kv_override * param_overrides_p,
+        const llama_model_tensor_buft_override * param_tensor_buft_overrides_p);
+
+    template<typename T>
+    typename std::enable_if<std::is_integral<T>::value, bool>::type
+    get_arr_n(const std::string & key, T & result, bool required = true);
+
+    template<typename T>
+    typename std::enable_if<std::is_integral<T>::value, bool>::type
+    get_arr_n(enum llm_kv kid, T & result, bool required = true);
+
+    template<typename T>
+    bool get_arr(const std::string & key, std::vector<T> & result, bool required = true);
+
+    template<typename T, size_t N_MAX>
+    bool get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required = true);
+
+    template<typename T>
+    bool get_arr(enum llm_kv kid, T & result, bool required = true);
+
+    template<typename T>
+    bool get_key(const std::string & key, T & result, bool required = true);
+
+    template<typename T>
+    bool get_key(enum llm_kv kid, T & result, bool required = true);
+
+    template<typename T, size_t N_MAX>
+    bool get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required = true);
+
+    template<typename T>
+    bool get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required = true);
+
+    std::string get_arch_name() const;
+
+    enum llm_arch get_arch() const;
+
+    const llama_tensor_weight * get_weight(const char * name) const;
+
+    const llama_tensor_weight & require_weight(const char * name) const;
+
+    struct lm_ggml_tensor * get_tensor_meta(const char * name) const;
+
+    struct lm_ggml_tensor * require_tensor_meta(const std::string & name) const;
+
+    const struct lm_ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const;
+
+    struct lm_ggml_tensor * create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0);
+
+    struct lm_ggml_tensor * create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true);
+
+    void done_getting_tensors() const;
+
+    void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr);
+
+    void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const;
+
+    // for backwards compatibility, does not support ggml-backend
+    void load_data_for(struct lm_ggml_tensor * cur) const;
+
+    // Returns false if cancelled by progress_callback
+    bool load_all_data(
+        struct lm_ggml_context * ctx,
+        llama_buf_map & bufs,
+        llama_mlocks * lmlocks,
+        llama_progress_callback progress_callback,
+        void * progress_callback_user_data);
+
+    std::string ftype_name() const;
+
+    void print_info() const;
+};
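
The substantive change in this header: llama_model_loader now carries a tensor_buft_overrides pointer, and its constructor takes a matching param_tensor_buft_overrides_p argument so callers can route specific tensors to a chosen backend buffer type. A minimal sketch of the updated call site, assuming C++17; the wrapper function and its local names are illustrative, not part of this diff, and nullptr for both override parameters preserves the 1.4.4 behavior:

// Sketch only: exercising the 1.5.0 loader constructor signature.
// open_model and all local names are hypothetical; the types come
// from the header shown above.
#include "llama-model-loader.h"

#include <string>
#include <vector>

void open_model(const std::string & fname) {
    // Empty means any split files follow the default naming scheme.
    std::vector<std::string> splits;

    llama_model_loader ml(
        fname,
        splits,
        /* use_mmap                      */ true,
        /* check_tensors                 */ false,
        /* param_overrides_p             */ nullptr,  // no metadata overrides
        /* param_tensor_buft_overrides_p */ nullptr); // new in 1.5.0; nullptr = old behavior

    ml.print_info();
}

A real caller would pass an array of llama_model_tensor_buft_override entries instead of nullptr to pin matching tensors to a specific buffer type.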