cui-llama.rn 1.3.5 → 1.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/android/src/main/CMakeLists.txt +14 -8
  2. package/android/src/main/jni.cpp +38 -37
  3. package/cpp/common.cpp +43 -26
  4. package/cpp/common.h +18 -11
  5. package/cpp/ggml-backend-reg.cpp +5 -0
  6. package/cpp/ggml-backend.cpp +5 -2
  7. package/cpp/ggml-cpp.h +1 -0
  8. package/cpp/ggml-cpu-aarch64.cpp +6 -1
  9. package/cpp/ggml-cpu-quants.c +5 -1
  10. package/cpp/ggml-impl.h +11 -16
  11. package/cpp/ggml-metal.m +2 -2
  12. package/cpp/ggml.c +0 -1276
  13. package/cpp/ggml.h +0 -140
  14. package/cpp/gguf.cpp +1325 -0
  15. package/cpp/gguf.h +202 -0
  16. package/cpp/llama-adapter.cpp +346 -0
  17. package/cpp/llama-adapter.h +73 -0
  18. package/cpp/llama-arch.cpp +1434 -0
  19. package/cpp/llama-arch.h +395 -0
  20. package/cpp/llama-batch.cpp +368 -0
  21. package/cpp/llama-batch.h +88 -0
  22. package/cpp/llama-chat.cpp +567 -0
  23. package/cpp/llama-chat.h +51 -0
  24. package/cpp/llama-context.cpp +1771 -0
  25. package/cpp/llama-context.h +128 -0
  26. package/cpp/llama-cparams.cpp +1 -0
  27. package/cpp/llama-cparams.h +37 -0
  28. package/cpp/llama-cpp.h +30 -0
  29. package/cpp/llama-grammar.cpp +1 -0
  30. package/cpp/llama-grammar.h +3 -1
  31. package/cpp/llama-hparams.cpp +71 -0
  32. package/cpp/llama-hparams.h +140 -0
  33. package/cpp/llama-impl.cpp +167 -0
  34. package/cpp/llama-impl.h +16 -136
  35. package/cpp/llama-kv-cache.cpp +718 -0
  36. package/cpp/llama-kv-cache.h +218 -0
  37. package/cpp/llama-mmap.cpp +589 -0
  38. package/cpp/llama-mmap.h +67 -0
  39. package/cpp/llama-model-loader.cpp +1011 -0
  40. package/cpp/llama-model-loader.h +158 -0
  41. package/cpp/llama-model.cpp +2202 -0
  42. package/cpp/llama-model.h +391 -0
  43. package/cpp/llama-sampling.cpp +117 -4
  44. package/cpp/llama-vocab.cpp +21 -28
  45. package/cpp/llama-vocab.h +13 -1
  46. package/cpp/llama.cpp +8437 -19421
  47. package/cpp/llama.cpp.rej +23 -0
  48. package/cpp/llama.h +31 -6
  49. package/cpp/rn-llama.hpp +39 -37
  50. package/cpp/sgemm.cpp +776 -70
  51. package/cpp/unicode.cpp +6 -0
  52. package/package.json +1 -1
package/cpp/ggml.h CHANGED
@@ -242,12 +242,6 @@
242
242
  #define LM_GGML_ROPE_TYPE_MROPE 8
243
243
  #define LM_GGML_ROPE_TYPE_VISION 24
244
244
 
245
- #define LM_GGUF_MAGIC "GGUF"
246
-
247
- #define LM_GGUF_VERSION 3
248
-
249
- #define LM_GGUF_DEFAULT_ALIGNMENT 32
250
-
251
245
  #define LM_GGML_UNUSED(x) (void)(x)
252
246
 
253
247
  #define LM_GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
@@ -404,12 +398,6 @@ extern "C" {
404
398
  LM_GGML_PREC_F32,
405
399
  };
406
400
 
407
- enum lm_ggml_backend_type {
408
- LM_GGML_BACKEND_TYPE_CPU = 0,
409
- LM_GGML_BACKEND_TYPE_GPU = 10,
410
- LM_GGML_BACKEND_TYPE_GPU_SPLIT = 20,
411
- };
412
-
413
401
  // model file types
414
402
  enum lm_ggml_ftype {
415
403
  LM_GGML_FTYPE_UNKNOWN = -1,
@@ -588,8 +576,6 @@ extern "C" {
588
576
  struct lm_ggml_tensor {
589
577
  enum lm_ggml_type type;
590
578
 
591
- LM_GGML_DEPRECATED(enum lm_ggml_backend_type backend, "use the buffer type to find the storage location of the tensor");
592
-
593
579
  struct lm_ggml_backend_buffer * buffer;
594
580
 
595
581
  int64_t ne[LM_GGML_MAX_DIMS]; // number of elements
@@ -2112,132 +2098,6 @@ extern "C" {
2112
2098
  int64_t n_per_row,
2113
2099
  const float * imatrix);
2114
2100
 
2115
- //
2116
- // gguf
2117
- //
2118
-
2119
- enum lm_gguf_type {
2120
- LM_GGUF_TYPE_UINT8 = 0,
2121
- LM_GGUF_TYPE_INT8 = 1,
2122
- LM_GGUF_TYPE_UINT16 = 2,
2123
- LM_GGUF_TYPE_INT16 = 3,
2124
- LM_GGUF_TYPE_UINT32 = 4,
2125
- LM_GGUF_TYPE_INT32 = 5,
2126
- LM_GGUF_TYPE_FLOAT32 = 6,
2127
- LM_GGUF_TYPE_BOOL = 7,
2128
- LM_GGUF_TYPE_STRING = 8,
2129
- LM_GGUF_TYPE_ARRAY = 9,
2130
- LM_GGUF_TYPE_UINT64 = 10,
2131
- LM_GGUF_TYPE_INT64 = 11,
2132
- LM_GGUF_TYPE_FLOAT64 = 12,
2133
- LM_GGUF_TYPE_COUNT, // marks the end of the enum
2134
- };
2135
-
2136
- struct lm_gguf_context;
2137
-
2138
- struct lm_gguf_init_params {
2139
- bool no_alloc;
2140
-
2141
- // if not NULL, create a lm_ggml_context and allocate the tensor data in it
2142
- struct lm_ggml_context ** ctx;
2143
- };
2144
-
2145
- LM_GGML_API struct lm_gguf_context * lm_gguf_init_empty(void);
2146
- LM_GGML_API struct lm_gguf_context * lm_gguf_init_from_file(const char * fname, struct lm_gguf_init_params params);
2147
- //LM_GGML_API struct lm_gguf_context * lm_gguf_init_from_buffer(..);
2148
-
2149
- LM_GGML_API void lm_gguf_free(struct lm_gguf_context * ctx);
2150
-
2151
- LM_GGML_API const char * lm_gguf_type_name(enum lm_gguf_type type);
2152
-
2153
- LM_GGML_API int lm_gguf_get_version (const struct lm_gguf_context * ctx);
2154
- LM_GGML_API size_t lm_gguf_get_alignment (const struct lm_gguf_context * ctx);
2155
- LM_GGML_API size_t lm_gguf_get_data_offset(const struct lm_gguf_context * ctx);
2156
- LM_GGML_API void * lm_gguf_get_data (const struct lm_gguf_context * ctx);
2157
-
2158
- LM_GGML_API int lm_gguf_get_n_kv(const struct lm_gguf_context * ctx);
2159
- LM_GGML_API int lm_gguf_find_key(const struct lm_gguf_context * ctx, const char * key);
2160
- LM_GGML_API const char * lm_gguf_get_key (const struct lm_gguf_context * ctx, int key_id);
2161
-
2162
- LM_GGML_API enum lm_gguf_type lm_gguf_get_kv_type (const struct lm_gguf_context * ctx, int key_id);
2163
- LM_GGML_API enum lm_gguf_type lm_gguf_get_arr_type(const struct lm_gguf_context * ctx, int key_id);
2164
-
2165
- // will abort if the wrong type is used for the key
2166
- LM_GGML_API uint8_t lm_gguf_get_val_u8 (const struct lm_gguf_context * ctx, int key_id);
2167
- LM_GGML_API int8_t lm_gguf_get_val_i8 (const struct lm_gguf_context * ctx, int key_id);
2168
- LM_GGML_API uint16_t lm_gguf_get_val_u16 (const struct lm_gguf_context * ctx, int key_id);
2169
- LM_GGML_API int16_t lm_gguf_get_val_i16 (const struct lm_gguf_context * ctx, int key_id);
2170
- LM_GGML_API uint32_t lm_gguf_get_val_u32 (const struct lm_gguf_context * ctx, int key_id);
2171
- LM_GGML_API int32_t lm_gguf_get_val_i32 (const struct lm_gguf_context * ctx, int key_id);
2172
- LM_GGML_API float lm_gguf_get_val_f32 (const struct lm_gguf_context * ctx, int key_id);
2173
- LM_GGML_API uint64_t lm_gguf_get_val_u64 (const struct lm_gguf_context * ctx, int key_id);
2174
- LM_GGML_API int64_t lm_gguf_get_val_i64 (const struct lm_gguf_context * ctx, int key_id);
2175
- LM_GGML_API double lm_gguf_get_val_f64 (const struct lm_gguf_context * ctx, int key_id);
2176
- LM_GGML_API bool lm_gguf_get_val_bool(const struct lm_gguf_context * ctx, int key_id);
2177
- LM_GGML_API const char * lm_gguf_get_val_str (const struct lm_gguf_context * ctx, int key_id);
2178
- LM_GGML_API const void * lm_gguf_get_val_data(const struct lm_gguf_context * ctx, int key_id);
2179
- LM_GGML_API int lm_gguf_get_arr_n (const struct lm_gguf_context * ctx, int key_id);
2180
- LM_GGML_API const void * lm_gguf_get_arr_data(const struct lm_gguf_context * ctx, int key_id);
2181
- LM_GGML_API const char * lm_gguf_get_arr_str (const struct lm_gguf_context * ctx, int key_id, int i);
2182
-
2183
- LM_GGML_API int lm_gguf_get_n_tensors (const struct lm_gguf_context * ctx);
2184
- LM_GGML_API int lm_gguf_find_tensor (const struct lm_gguf_context * ctx, const char * name);
2185
- LM_GGML_API size_t lm_gguf_get_tensor_offset(const struct lm_gguf_context * ctx, int i);
2186
- LM_GGML_API char * lm_gguf_get_tensor_name (const struct lm_gguf_context * ctx, int i);
2187
- LM_GGML_API enum lm_ggml_type lm_gguf_get_tensor_type (const struct lm_gguf_context * ctx, int i);
2188
-
2189
- // removes key if it exists
2190
- LM_GGML_API void lm_gguf_remove_key(struct lm_gguf_context * ctx, const char * key);
2191
-
2192
- // overrides existing values or adds a new one
2193
- LM_GGML_API void lm_gguf_set_val_u8 (struct lm_gguf_context * ctx, const char * key, uint8_t val);
2194
- LM_GGML_API void lm_gguf_set_val_i8 (struct lm_gguf_context * ctx, const char * key, int8_t val);
2195
- LM_GGML_API void lm_gguf_set_val_u16 (struct lm_gguf_context * ctx, const char * key, uint16_t val);
2196
- LM_GGML_API void lm_gguf_set_val_i16 (struct lm_gguf_context * ctx, const char * key, int16_t val);
2197
- LM_GGML_API void lm_gguf_set_val_u32 (struct lm_gguf_context * ctx, const char * key, uint32_t val);
2198
- LM_GGML_API void lm_gguf_set_val_i32 (struct lm_gguf_context * ctx, const char * key, int32_t val);
2199
- LM_GGML_API void lm_gguf_set_val_f32 (struct lm_gguf_context * ctx, const char * key, float val);
2200
- LM_GGML_API void lm_gguf_set_val_u64 (struct lm_gguf_context * ctx, const char * key, uint64_t val);
2201
- LM_GGML_API void lm_gguf_set_val_i64 (struct lm_gguf_context * ctx, const char * key, int64_t val);
2202
- LM_GGML_API void lm_gguf_set_val_f64 (struct lm_gguf_context * ctx, const char * key, double val);
2203
- LM_GGML_API void lm_gguf_set_val_bool(struct lm_gguf_context * ctx, const char * key, bool val);
2204
- LM_GGML_API void lm_gguf_set_val_str (struct lm_gguf_context * ctx, const char * key, const char * val);
2205
- LM_GGML_API void lm_gguf_set_arr_data(struct lm_gguf_context * ctx, const char * key, enum lm_gguf_type type, const void * data, int n);
2206
- LM_GGML_API void lm_gguf_set_arr_str (struct lm_gguf_context * ctx, const char * key, const char ** data, int n);
2207
-
2208
- // set or add KV pairs from another context
2209
- LM_GGML_API void lm_gguf_set_kv(struct lm_gguf_context * ctx, struct lm_gguf_context * src);
2210
-
2211
- // manage tensor info
2212
- LM_GGML_API void lm_gguf_add_tensor(struct lm_gguf_context * ctx, const struct lm_ggml_tensor * tensor);
2213
- LM_GGML_API void lm_gguf_set_tensor_type(struct lm_gguf_context * ctx, const char * name, enum lm_ggml_type type);
2214
- LM_GGML_API void lm_gguf_set_tensor_data(struct lm_gguf_context * ctx, const char * name, const void * data, size_t size);
2215
-
2216
- // writing gguf files can be done in 2 ways:
2217
- //
2218
- // - write the entire lm_gguf_context to a binary file in a single pass:
2219
- //
2220
- // lm_gguf_write_to_file(ctx, fname);
2221
- //
2222
- // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
2223
- //
2224
- // FILE * f = fopen(fname, "wb");
2225
- // fseek(f, lm_gguf_get_meta_size(ctx), SEEK_SET);
2226
- // fwrite(f, ...);
2227
- // void * data = lm_gguf_meta_get_meta_data(ctx);
2228
- // fseek(f, 0, SEEK_SET);
2229
- // fwrite(f, data, lm_gguf_get_meta_size(ctx));
2230
- // free(data);
2231
- // fclose(f);
2232
- //
2233
-
2234
- // write the entire context to a binary file
2235
- LM_GGML_API void lm_gguf_write_to_file(const struct lm_gguf_context * ctx, const char * fname, bool only_meta);
2236
-
2237
- // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
2238
- LM_GGML_API size_t lm_gguf_get_meta_size(const struct lm_gguf_context * ctx);
2239
- LM_GGML_API void lm_gguf_get_meta_data(const struct lm_gguf_context * ctx, void * data);
2240
-
2241
2101
  #ifdef __cplusplus
2242
2102
  // restrict not standard in C++
2243
2103
  # if defined(__GNUC__)