cui-llama.rn 1.4.0 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. package/README.md +4 -23
  2. package/android/build.gradle +12 -3
  3. package/android/src/main/CMakeLists.txt +13 -7
  4. package/android/src/main/java/com/rnllama/LlamaContext.java +27 -20
  5. package/android/src/main/java/com/rnllama/RNLlama.java +5 -1
  6. package/android/src/main/jni.cpp +15 -12
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  11. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  12. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  13. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  14. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  15. package/cpp/README.md +1 -1
  16. package/cpp/common.cpp +158 -267
  17. package/cpp/common.h +46 -12
  18. package/cpp/ggml-alloc.c +1042 -1037
  19. package/cpp/ggml-backend-impl.h +255 -256
  20. package/cpp/ggml-backend-reg.cpp +582 -582
  21. package/cpp/ggml-backend.cpp +2002 -2002
  22. package/cpp/ggml-backend.h +354 -352
  23. package/cpp/ggml-common.h +1853 -1853
  24. package/cpp/ggml-cpp.h +39 -39
  25. package/cpp/ggml-cpu-aarch64.cpp +4247 -4247
  26. package/cpp/ggml-cpu-aarch64.h +8 -8
  27. package/cpp/ggml-cpu-impl.h +386 -386
  28. package/cpp/ggml-cpu-quants.c +10920 -10839
  29. package/cpp/ggml-cpu-traits.cpp +36 -36
  30. package/cpp/ggml-cpu-traits.h +38 -38
  31. package/cpp/ggml-cpu.c +329 -60
  32. package/cpp/ggml-cpu.cpp +10 -2
  33. package/cpp/ggml-cpu.h +135 -135
  34. package/cpp/ggml-impl.h +567 -567
  35. package/cpp/ggml-metal-impl.h +17 -17
  36. package/cpp/ggml-metal.m +4884 -4884
  37. package/cpp/ggml-quants.c +5238 -5238
  38. package/cpp/ggml-threading.h +14 -14
  39. package/cpp/ggml.c +6514 -6448
  40. package/cpp/ggml.h +2194 -2163
  41. package/cpp/gguf.cpp +1329 -1325
  42. package/cpp/gguf.h +202 -202
  43. package/cpp/json-schema-to-grammar.cpp +1045 -1045
  44. package/cpp/json-schema-to-grammar.h +8 -8
  45. package/cpp/json.hpp +24766 -24766
  46. package/cpp/llama-adapter.cpp +347 -346
  47. package/cpp/llama-adapter.h +74 -73
  48. package/cpp/llama-arch.cpp +1487 -1434
  49. package/cpp/llama-arch.h +400 -395
  50. package/cpp/llama-batch.cpp +368 -368
  51. package/cpp/llama-batch.h +88 -88
  52. package/cpp/llama-chat.cpp +578 -567
  53. package/cpp/llama-chat.h +52 -51
  54. package/cpp/llama-context.cpp +1775 -1771
  55. package/cpp/llama-context.h +128 -128
  56. package/cpp/llama-cparams.cpp +1 -1
  57. package/cpp/llama-cparams.h +37 -37
  58. package/cpp/llama-cpp.h +30 -30
  59. package/cpp/llama-grammar.cpp +1139 -1139
  60. package/cpp/llama-grammar.h +143 -143
  61. package/cpp/llama-hparams.cpp +71 -71
  62. package/cpp/llama-hparams.h +139 -140
  63. package/cpp/llama-impl.cpp +167 -167
  64. package/cpp/llama-impl.h +61 -61
  65. package/cpp/llama-kv-cache.cpp +718 -718
  66. package/cpp/llama-kv-cache.h +218 -218
  67. package/cpp/llama-mmap.cpp +2 -1
  68. package/cpp/llama-mmap.h +67 -67
  69. package/cpp/llama-model-loader.cpp +1124 -1011
  70. package/cpp/llama-model-loader.h +167 -158
  71. package/cpp/llama-model.cpp +3997 -2202
  72. package/cpp/llama-model.h +370 -391
  73. package/cpp/llama-sampling.cpp +2408 -2406
  74. package/cpp/llama-sampling.h +32 -48
  75. package/cpp/llama-vocab.cpp +3247 -1982
  76. package/cpp/llama-vocab.h +125 -182
  77. package/cpp/llama.cpp +416 -2886
  78. package/cpp/llama.h +1323 -1285
  79. package/cpp/log.cpp +401 -401
  80. package/cpp/log.h +121 -121
  81. package/cpp/rn-llama.cpp +822 -0
  82. package/cpp/rn-llama.h +123 -0
  83. package/cpp/rn-llama.hpp +18 -12
  84. package/cpp/sampling.cpp +505 -500
  85. package/cpp/sgemm.cpp +2597 -2597
  86. package/cpp/speculative.cpp +277 -274
  87. package/cpp/speculative.h +28 -28
  88. package/cpp/unicode.cpp +2 -3
  89. package/ios/CMakeLists.txt +99 -0
  90. package/ios/RNLlama.h +5 -1
  91. package/ios/RNLlama.mm +2 -2
  92. package/ios/RNLlamaContext.h +8 -1
  93. package/ios/RNLlamaContext.mm +15 -11
  94. package/ios/rnllama.xcframework/Info.plist +74 -0
  95. package/jest/mock.js +3 -2
  96. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  97. package/lib/commonjs/index.js +4 -2
  98. package/lib/commonjs/index.js.map +1 -1
  99. package/lib/module/NativeRNLlama.js.map +1 -1
  100. package/lib/module/index.js +4 -2
  101. package/lib/module/index.js.map +1 -1
  102. package/lib/typescript/NativeRNLlama.d.ts +5 -1
  103. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  104. package/lib/typescript/index.d.ts.map +1 -1
  105. package/llama-rn.podspec +8 -2
  106. package/package.json +5 -2
  107. package/src/NativeRNLlama.ts +5 -1
  108. package/src/index.ts +9 -2
package/cpp/llama-model-loader.h
@@ -1,158 +1,167 @@
-#pragma once
-
-#include "llama.h"
-
-#include "llama-impl.h"
-#include "llama-arch.h"
-#include "llama-mmap.h"
-
-#include "ggml-cpp.h"
-
-#include <cstddef>
-#include <map>
-#include <stdexcept>
-#include <unordered_map>
-
-using llama_buf_map = std::unordered_map<uint32_t, lm_ggml_backend_buffer_t>;
-
-enum llama_fver {
-    LM_GGUF_FILE_VERSION_V1 = 1,
-    LM_GGUF_FILE_VERSION_V2 = 2,
-    LM_GGUF_FILE_VERSION_V3 = 3,
-};
-
-const char * llama_file_version_name(llama_fver version);
-
-struct llama_model_loader {
-    // Holds information on a model weight
-    struct llama_tensor_weight {
-        uint16_t idx; // source file index
-        size_t offs; // tensor data offset in the original file
-
-        lm_ggml_tensor * tensor;
-
-        llama_tensor_weight(const llama_file * file, uint16_t idx, const struct lm_gguf_context * lm_gguf_ctx, lm_ggml_tensor * tensor) : idx(idx), tensor(tensor) {
-            const int tensor_idx = lm_gguf_find_tensor(lm_gguf_ctx, lm_ggml_get_name(tensor));
-            if (tensor_idx < 0) {
-                throw std::runtime_error(format("tensor '%s' not found in the model", lm_ggml_get_name(tensor)));
-            }
-
-            offs = lm_gguf_get_data_offset(lm_gguf_ctx) + lm_gguf_get_tensor_offset(lm_gguf_ctx, tensor_idx);
-            if (offs + lm_ggml_nbytes(tensor) < offs || offs + lm_ggml_nbytes(tensor) > file->size()) {
-                throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", lm_ggml_get_name(tensor)));
-            }
-        }
-    };
-
-    // custom comparator to sort weights more nicely by layer
-    struct weight_name_comparer {
-        bool operator()(const std::string & a, const std::string & b) const {
-            int a_layer = -1;
-            int b_layer = -1;
-            sscanf(a.c_str(), "blk.%d.", &a_layer);
-            sscanf(b.c_str(), "blk.%d.", &b_layer);
-            if (a_layer != b_layer) {
-                return a_layer < b_layer;
-            }
-            return a < b;
-        }
-    };
-
-    static const int TENSOR_NOT_REQUIRED = 1;
-    static const int TENSOR_DUPLICATED = 2;
-
-    int n_kv = 0;
-    int n_tensors = 0;
-    int n_created = 0;
-
-    uint64_t n_elements = 0;
-    size_t n_bytes = 0;
-
-    bool use_mmap = false;
-    bool check_tensors;
-
-    llama_files files;
-    llama_ftype ftype;
-    llama_fver fver;
-
-    llama_mmaps mappings;
-
-    std::map<std::string, struct llama_tensor_weight, weight_name_comparer> weights_map;
-    std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
-
-    lm_gguf_context_ptr meta;
-    std::vector<lm_ggml_context_ptr> contexts;
-
-    std::string arch_name;
-    LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
-
-    size_t size_done = 0;
-    size_t size_data = 0;
-    std::vector<std::pair<size_t, size_t>> mmaps_used;
-
-    llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p);
-
-    template<typename T>
-    typename std::enable_if<std::is_integral<T>::value, bool>::type
-    get_arr_n(const std::string & key, T & result, bool required = true);
-
-    template<typename T>
-    typename std::enable_if<std::is_integral<T>::value, bool>::type
-    get_arr_n(enum llm_kv kid, T & result, bool required = true);
-
-    template<typename T>
-    bool get_arr(const std::string & key, std::vector<T> & result, bool required = true);
-
-    template<typename T, size_t N_MAX>
-    bool get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required = true);
-
-    template<typename T>
-    bool get_arr(enum llm_kv kid, T & result, bool required = true);
-
-    template<typename T>
-    bool get_key(const std::string & key, T & result, bool required = true);
-
-    template<typename T>
-    bool get_key(enum llm_kv kid, T & result, bool required = true);
-
-    template<typename T, size_t N_MAX>
-    bool get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required = true);
-
-    template<typename T>
-    bool get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required = true);
-
-    std::string get_arch_name() const;
-
-    enum llm_arch get_arch() const;
-
-    const llama_tensor_weight * get_weight(const char * name) const;
-
-    const llama_tensor_weight & require_weight(const char * name) const;
-
-    struct lm_ggml_tensor * get_tensor_meta(const char * name) const;
-
-    struct lm_ggml_tensor * require_tensor_meta(const std::string & name) const;
-
-    const struct lm_ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const;
-
-    struct lm_ggml_tensor * create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0);
-
-    struct lm_ggml_tensor * create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true);
-
-    void done_getting_tensors() const;
-
-    void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr);
-
-    void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const;
-
-    // for backwards compatibility, does not support ggml-backend
-    void load_data_for(struct lm_ggml_tensor * cur) const;
-
-    // Returns false if cancelled by progress_callback
-    bool load_all_data(
-        struct lm_ggml_context * ctx,
-        llama_buf_map & bufs,
-        llama_mlocks * lmlocks,
-        llama_progress_callback progress_callback,
-        void * progress_callback_user_data);
-};
+#pragma once
+
+#include "llama.h"
+
+#include "llama-impl.h"
+#include "llama-arch.h"
+#include "llama-mmap.h"
+
+#include "ggml-cpp.h"
+
+#include <cstddef>
+#include <map>
+#include <stdexcept>
+#include <unordered_map>
+
+using llama_buf_map = std::unordered_map<uint32_t, lm_ggml_backend_buffer_t>;
+
+enum llama_fver {
+    LM_GGUF_FILE_VERSION_V1 = 1,
+    LM_GGUF_FILE_VERSION_V2 = 2,
+    LM_GGUF_FILE_VERSION_V3 = 3,
+};
+
+const char * llama_file_version_name(llama_fver version);
+
+struct llama_model_loader {
+    // Holds information on a model weight
+    struct llama_tensor_weight {
+        uint16_t idx; // source file index
+        size_t offs; // tensor data offset in the original file
+
+        lm_ggml_tensor * tensor;
+
+        llama_tensor_weight(const llama_file * file, uint16_t idx, const struct lm_gguf_context * lm_gguf_ctx, lm_ggml_tensor * tensor) : idx(idx), tensor(tensor) {
+            const int tensor_idx = lm_gguf_find_tensor(lm_gguf_ctx, lm_ggml_get_name(tensor));
+            if (tensor_idx < 0) {
+                throw std::runtime_error(format("tensor '%s' not found in the model", lm_ggml_get_name(tensor)));
+            }
+
+            offs = lm_gguf_get_data_offset(lm_gguf_ctx) + lm_gguf_get_tensor_offset(lm_gguf_ctx, tensor_idx);
+            if (offs + lm_ggml_nbytes(tensor) < offs || offs + lm_ggml_nbytes(tensor) > file->size()) {
+                throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", lm_ggml_get_name(tensor)));
+            }
+        }
+    };
+
+    // custom comparator to sort weights more nicely by layer
+    struct weight_name_comparer {
+        bool operator()(const std::string & a, const std::string & b) const {
+            int a_layer = -1;
+            int b_layer = -1;
+            sscanf(a.c_str(), "blk.%d.", &a_layer);
+            sscanf(b.c_str(), "blk.%d.", &b_layer);
+            if (a_layer != b_layer) {
+                return a_layer < b_layer;
+            }
+            return a < b;
+        }
+    };
+
+    static const int TENSOR_NOT_REQUIRED = 1;
+    static const int TENSOR_DUPLICATED = 2;
+
+    int n_kv = 0;
+    int n_tensors = 0;
+    int n_created = 0;
+
+    uint64_t n_elements = 0;
+    size_t n_bytes = 0;
+
+    bool use_mmap = false;
+    bool check_tensors;
+
+    llama_files files;
+    llama_ftype ftype;
+    llama_fver fver;
+
+    llama_mmaps mappings;
+
+    std::map<std::string, struct llama_tensor_weight, weight_name_comparer> weights_map;
+    std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
+
+    lm_gguf_context_ptr meta;
+    std::vector<lm_ggml_context_ptr> contexts;
+
+    std::string arch_name;
+    LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
+
+    size_t size_done = 0;
+    size_t size_data = 0;
+    std::vector<std::pair<size_t, size_t>> mmaps_used;
+
+    llama_model_loader(
+        const std::string & fname,
+        std::vector<std::string> & splits, // optional, only need if the split does not follow naming scheme
+        bool use_mmap,
+        bool check_tensors,
+        const struct llama_model_kv_override * param_overrides_p);
+
+    template<typename T>
+    typename std::enable_if<std::is_integral<T>::value, bool>::type
+    get_arr_n(const std::string & key, T & result, bool required = true);
+
+    template<typename T>
+    typename std::enable_if<std::is_integral<T>::value, bool>::type
+    get_arr_n(enum llm_kv kid, T & result, bool required = true);
+
+    template<typename T>
+    bool get_arr(const std::string & key, std::vector<T> & result, bool required = true);
+
+    template<typename T, size_t N_MAX>
+    bool get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required = true);
+
+    template<typename T>
+    bool get_arr(enum llm_kv kid, T & result, bool required = true);
+
+    template<typename T>
+    bool get_key(const std::string & key, T & result, bool required = true);
+
+    template<typename T>
+    bool get_key(enum llm_kv kid, T & result, bool required = true);
+
+    template<typename T, size_t N_MAX>
+    bool get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required = true);
+
+    template<typename T>
+    bool get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required = true);
+
+    std::string get_arch_name() const;
+
+    enum llm_arch get_arch() const;
+
+    const llama_tensor_weight * get_weight(const char * name) const;
+
+    const llama_tensor_weight & require_weight(const char * name) const;
+
+    struct lm_ggml_tensor * get_tensor_meta(const char * name) const;
+
+    struct lm_ggml_tensor * require_tensor_meta(const std::string & name) const;
+
+    const struct lm_ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const;
+
+    struct lm_ggml_tensor * create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0);
+
+    struct lm_ggml_tensor * create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true);
+
+    void done_getting_tensors() const;
+
+    void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr);
+
+    void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const;
+
+    // for backwards compatibility, does not support ggml-backend
+    void load_data_for(struct lm_ggml_tensor * cur) const;
+
+    // Returns false if cancelled by progress_callback
+    bool load_all_data(
+        struct lm_ggml_context * ctx,
+        llama_buf_map & bufs,
+        llama_mlocks * lmlocks,
+        llama_progress_callback progress_callback,
+        void * progress_callback_user_data);
+
+    std::string ftype_name() const;
+
+    void print_info() const;
+};
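
The API change in the hunk above is the llama_model_loader constructor, which now takes a list of split (sharded) GGUF paths, alongside two new helpers, ftype_name() and print_info(). Below is a minimal usage sketch against the 1.4.2 header, not code from the package itself: the model path is hypothetical, and the empty splits vector relies on the header's own note that the argument is only needed when shards do not follow the standard naming scheme.

#include <string>
#include <vector>

#include "llama-model-loader.h" // the header shown in the hunk above

static void inspect_model() {
    // Leave empty when shards follow the default naming scheme
    // (e.g. "model-00001-of-00002.gguf"); otherwise list each shard path.
    std::vector<std::string> splits;

    // The loader constructor throws std::runtime_error on a missing or
    // corrupt file, so call sites typically wrap this in a try/catch.
    llama_model_loader ml(
        "models/example-00001-of-00002.gguf", // fname (hypothetical path)
        splits,                               // new parameter in this release
        /* use_mmap          */ true,
        /* check_tensors     */ false,
        /* param_overrides_p */ nullptr);

    ml.print_info();                  // new helper: logs loader/model info
    std::string ft = ml.ftype_name(); // new helper: human-readable file type
}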