cui-llama.rn 1.2.6 → 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/README.md +3 -2
  2. package/android/src/main/CMakeLists.txt +26 -6
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +115 -27
  4. package/android/src/main/java/com/rnllama/RNLlama.java +40 -7
  5. package/android/src/main/jni.cpp +228 -40
  6. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +9 -4
  7. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +9 -4
  8. package/cpp/amx/amx.cpp +196 -0
  9. package/cpp/amx/amx.h +20 -0
  10. package/cpp/amx/common.h +101 -0
  11. package/cpp/amx/mmq.cpp +2524 -0
  12. package/cpp/amx/mmq.h +16 -0
  13. package/cpp/common.cpp +118 -251
  14. package/cpp/common.h +53 -30
  15. package/cpp/ggml-aarch64.c +46 -3395
  16. package/cpp/ggml-aarch64.h +0 -20
  17. package/cpp/ggml-alloc.c +6 -8
  18. package/cpp/ggml-backend-impl.h +33 -11
  19. package/cpp/ggml-backend-reg.cpp +423 -0
  20. package/cpp/ggml-backend.cpp +14 -676
  21. package/cpp/ggml-backend.h +46 -9
  22. package/cpp/ggml-common.h +6 -0
  23. package/cpp/ggml-cpu-aarch64.c +3823 -0
  24. package/cpp/ggml-cpu-aarch64.h +32 -0
  25. package/cpp/ggml-cpu-impl.h +14 -242
  26. package/cpp/ggml-cpu-quants.c +10835 -0
  27. package/cpp/ggml-cpu-quants.h +63 -0
  28. package/cpp/ggml-cpu.c +13971 -13720
  29. package/cpp/ggml-cpu.cpp +715 -0
  30. package/cpp/ggml-cpu.h +65 -63
  31. package/cpp/ggml-impl.h +285 -25
  32. package/cpp/ggml-metal.h +8 -8
  33. package/cpp/ggml-metal.m +1221 -728
  34. package/cpp/ggml-quants.c +189 -10681
  35. package/cpp/ggml-quants.h +78 -125
  36. package/cpp/ggml-threading.cpp +12 -0
  37. package/cpp/ggml-threading.h +12 -0
  38. package/cpp/ggml.c +688 -1460
  39. package/cpp/ggml.h +58 -244
  40. package/cpp/json-schema-to-grammar.cpp +1045 -1045
  41. package/cpp/json.hpp +24766 -24766
  42. package/cpp/llama-sampling.cpp +5 -2
  43. package/cpp/llama.cpp +409 -123
  44. package/cpp/llama.h +8 -4
  45. package/cpp/rn-llama.hpp +89 -25
  46. package/cpp/sampling.cpp +42 -3
  47. package/cpp/sampling.h +22 -1
  48. package/cpp/sgemm.cpp +608 -0
  49. package/cpp/speculative.cpp +270 -0
  50. package/cpp/speculative.h +28 -0
  51. package/cpp/unicode.cpp +11 -0
  52. package/ios/RNLlama.mm +43 -20
  53. package/ios/RNLlamaContext.h +9 -3
  54. package/ios/RNLlamaContext.mm +146 -33
  55. package/jest/mock.js +0 -1
  56. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  57. package/lib/commonjs/grammar.js +4 -2
  58. package/lib/commonjs/grammar.js.map +1 -1
  59. package/lib/commonjs/index.js +52 -15
  60. package/lib/commonjs/index.js.map +1 -1
  61. package/lib/module/NativeRNLlama.js.map +1 -1
  62. package/lib/module/grammar.js +2 -1
  63. package/lib/module/grammar.js.map +1 -1
  64. package/lib/module/index.js +51 -15
  65. package/lib/module/index.js.map +1 -1
  66. package/lib/typescript/NativeRNLlama.d.ts +122 -8
  67. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  68. package/lib/typescript/grammar.d.ts +5 -6
  69. package/lib/typescript/grammar.d.ts.map +1 -1
  70. package/lib/typescript/index.d.ts +15 -6
  71. package/lib/typescript/index.d.ts.map +1 -1
  72. package/package.json +2 -1
  73. package/src/NativeRNLlama.ts +135 -13
  74. package/src/grammar.ts +10 -8
  75. package/src/index.ts +104 -28
package/cpp/llama.h CHANGED
@@ -273,6 +273,9 @@ extern "C" {
     };
 
     struct llama_model_params {
+        // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used)
+        lm_ggml_backend_dev_t * devices;
+
         int32_t n_gpu_layers; // number of layers to store in VRAM
         enum llama_split_mode split_mode; // how to split the model across multiple GPUs
 
@@ -668,6 +671,9 @@ extern "C" {
     // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
     LLAMA_API void llama_kv_cache_update(struct llama_context * ctx);
 
+    // Check if the context supports KV cache shifting
+    LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx);
+
     //
     // State / sessions
     //
@@ -798,7 +804,7 @@ extern "C" {
     // Processes a batch of tokens with the ecoder part of the encoder-decoder model.
     // Stores the encoder output internally for later use by the decoder cross-attention layers.
     //   0 - success
-    // < 0 - error
+    // < 0 - error. the KV cache state is restored to the state before this call
     LLAMA_API int32_t llama_encode(
             struct llama_context * ctx,
             struct llama_batch batch);
@@ -806,7 +812,7 @@ extern "C" {
     // Positive return values does not mean a fatal error, but rather a warning.
     //   0 - success
     //   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
-    // < 0 - error
+    // < 0 - error. the KV cache state is restored to the state before this call
     LLAMA_API int32_t llama_decode(
             struct llama_context * ctx,
             struct llama_batch batch);
@@ -1245,8 +1251,6 @@ extern "C" {
     LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
     LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);
 
-    LLAMA_API void llama_perf_dump_yaml(FILE * stream, const struct llama_context * ctx);
-
 #ifdef __cplusplus
 }
 #endif
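
Note: the new llama_kv_cache_can_shift() API above lets callers probe whether the loaded model/backend supports K-shifts before attempting a context shift. A minimal sketch of how a consumer of this header might use it together with the existing KV-cache sequence calls; the shift arithmetic below is illustrative only and not taken from this package:

// Illustrative sketch only: gate a context shift on the new capability check.
static bool try_context_shift(struct llama_context * ctx, llama_pos n_keep, llama_pos n_discard) {
    if (!llama_kv_cache_can_shift(ctx)) {
        return false; // e.g. model/backend combinations without K-shift support
    }
    // drop [n_keep, n_keep + n_discard) and slide the remaining cells back
    llama_kv_cache_seq_rm (ctx, 0, n_keep, n_keep + n_discard);
    llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, -1, -n_discard);
    llama_kv_cache_update(ctx);
    return true;
}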
package/cpp/rn-llama.hpp CHANGED
@@ -4,11 +4,67 @@
 #include <sstream>
 #include <iostream>
 #include "common.h"
+#include "ggml.h"
 #include "llama.h"
+#include "llama-impl.h"
 #include "sampling.h"
 
 namespace rnllama {
 
+static std::string lm_gguf_data_to_str(enum lm_gguf_type type, const void * data, int i) {
+    switch (type) {
+        case LM_GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
+        case LM_GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
+        case LM_GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
+        case LM_GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
+        case LM_GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
+        case LM_GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
+        case LM_GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
+        case LM_GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
+        case LM_GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
+        case LM_GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
+        case LM_GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
+        default:                   return "unknown type: " + std::to_string(type);
+    }
+}
+
+static std::string lm_gguf_kv_to_str(const struct lm_gguf_context * ctx_gguf, int i) {
+    const enum lm_gguf_type type = lm_gguf_get_kv_type(ctx_gguf, i);
+
+    switch (type) {
+        case LM_GGUF_TYPE_STRING:
+            return lm_gguf_get_val_str(ctx_gguf, i);
+        case LM_GGUF_TYPE_ARRAY:
+            {
+                const enum lm_gguf_type arr_type = lm_gguf_get_arr_type(ctx_gguf, i);
+                int arr_n = lm_gguf_get_arr_n(ctx_gguf, i);
+                const void * data = lm_gguf_get_arr_data(ctx_gguf, i);
+                std::stringstream ss;
+                ss << "[";
+                for (int j = 0; j < arr_n; j++) {
+                    if (arr_type == LM_GGUF_TYPE_STRING) {
+                        std::string val = lm_gguf_get_arr_str(ctx_gguf, i, j);
+                        // escape quotes
+                        replace_all(val, "\\", "\\\\");
+                        replace_all(val, "\"", "\\\"");
+                        ss << '"' << val << '"';
+                    } else if (arr_type == LM_GGUF_TYPE_ARRAY) {
+                        ss << "???";
+                    } else {
+                        ss << lm_gguf_data_to_str(arr_type, data, j);
+                    }
+                    if (j < arr_n - 1) {
+                        ss << ", ";
+                    }
+                }
+                ss << "]";
+                return ss.str();
+            }
+        default:
+            return lm_gguf_data_to_str(type, lm_gguf_get_val_data(ctx_gguf, i), 0);
+    }
+}
+
 static void llama_batch_clear(llama_batch *batch) {
     batch->n_tokens = 0;
 }
@@ -141,7 +197,6 @@ static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
     return ret;
 }
 
-
 struct llama_rn_context
 {
     bool is_predicting = false;
@@ -160,9 +215,12 @@ struct llama_rn_context
     common_params params;
 
     llama_model *model = nullptr;
+    float loading_progress = 0;
+    bool is_load_interrupted = false;
+
     llama_context *ctx = nullptr;
     common_sampler *ctx_sampling = nullptr;
-
+    
     int n_ctx;
 
     bool truncated = false;
@@ -194,7 +252,7 @@ struct llama_rn_context
     {
         is_interrupted = false;
         params.antiprompt.clear();
-        params.sparams.grammar.clear();
+        params.sampling.grammar.clear();
         num_prompt_tokens = 0;
         num_tokens_predicted = 0;
         generated_text = "";
@@ -208,14 +266,14 @@ struct llama_rn_context
         incomplete = false;
         n_remain = 0;
         n_past = 0;
-        params.sparams.n_prev = n_ctx;
+        params.sampling.n_prev = n_ctx;
     }
 
     bool initSampling() {
         if (ctx_sampling != nullptr) {
             common_sampler_free(ctx_sampling);
         }
-        ctx_sampling = common_sampler_init(model, params.sparams);
+        ctx_sampling = common_sampler_init(model, params.sampling);
         return ctx_sampling != nullptr;
     }
 
@@ -230,18 +288,22 @@ struct llama_rn_context
             LOG_ERROR("unable to load model: %s", params_.model.c_str());
             return false;
         }
+        LOG_VERBOSE("getting n_ctx");
         n_ctx = llama_n_ctx(ctx);
         return true;
     }
 
     bool validateModelChatTemplate() const {
-        llama_chat_message chat[] = {{"user", "test"}};
-
         std::vector<char> model_template(2048, 0); // longest known template is about 1200 bytes
         std::string template_key = "tokenizer.chat_template";
         int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
-
-        return res >= 0;
+        if (res >= 0) {
+            llama_chat_message chat[] = {{"user", "test"}};
+            std::string tmpl = std::string(model_template.data(), model_template.size());
+            int32_t chat_res = llama_chat_apply_template(model, tmpl.c_str(), chat, 1, true, nullptr, 0);
+            return chat_res > 0;
+        }
+        return res > 0;
     }
 
     void truncatePrompt(std::vector<llama_token> &prompt_tokens) {
@@ -376,7 +438,7 @@ struct llama_rn_context
                 n_eval = params.n_batch;
             }
             if (llama_decode(ctx, llama_batch_get_one(&embd[n_past], n_eval)))
-            {
+            {
                 LOG_ERROR("failed to eval, n_eval: %d, n_past: %d, n_threads: %d, embd: %s",
                     n_eval,
                     n_past,
@@ -387,7 +449,7 @@ struct llama_rn_context
                 return result;
             }
             n_past += n_eval;
-
+            
             if(is_interrupted) {
                 LOG_INFO("Decoding Interrupted");
                 embd.resize(n_past);
@@ -409,11 +471,11 @@ struct llama_rn_context
         candidates.reserve(llama_n_vocab(model));
 
         result.tok = common_sampler_sample(ctx_sampling, ctx, -1);
-
+        
         llama_token_data_array cur_p = *common_sampler_get_candidates(ctx_sampling);
 
-        const int32_t n_probs = params.sparams.n_probs;
-
+        const int32_t n_probs = params.sampling.n_probs;
+        
         // deprecated
         /*if (params.sparams.temp <= 0 && n_probs > 0)
         {
@@ -421,7 +483,7 @@ struct llama_rn_context
             llama_sampler_init_softmax();
 
         }*/
-
+        
 
         for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i)
         {
@@ -491,7 +553,7 @@ struct llama_rn_context
         const std::string token_text = token_with_probs.tok == -1 ? "" : common_token_to_piece(ctx, token_with_probs.tok);
         generated_text += token_text;
 
-        if (params.sparams.n_probs > 0)
+        if (params.sampling.n_probs > 0)
         {
             generated_token_probs.push_back(token_with_probs);
         }
@@ -542,26 +604,28 @@ struct llama_rn_context
        return token_with_probs;
     }
 
-    std::vector<float> getEmbedding()
+    std::vector<float> getEmbedding(common_params &embd_params)
     {
         static const int n_embd = llama_n_embd(llama_get_model(ctx));
-        if (!params.embedding)
+        if (!embd_params.embedding)
         {
-            LOG_WARNING("embedding disabled, embedding: %s", params.embedding);
+            LOG_WARNING("embedding disabled, embedding: %s", embd_params.embedding);
             return std::vector<float>(n_embd, 0.0f);
         }
         float *data;
-
-        if(params.pooling_type == 0){
+        
+        const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
+        printf("pooling_type: %d\n", pooling_type);
+        if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
             data = llama_get_embeddings(ctx);
-        }
-        else {
+        } else {
            data = llama_get_embeddings_seq(ctx, 0);
        }
-
-        if(!data) {
+
+        if (!data) {
            return std::vector<float>(n_embd, 0.0f);
        }
+
        std::vector<float> embedding(data, data + n_embd), out(data, data + n_embd);
        common_embd_normalize(embedding.data(), out.data(), n_embd, params.embd_normalize);
        return out;
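
Note: the lm_gguf_data_to_str()/lm_gguf_kv_to_str() helpers added at the top of this header stringify GGUF metadata values. A hedged sketch of dumping a model's key/value metadata with them; the lm_gguf_* loader calls and the init-params layout are assumed to follow the GGUF API exposed by this package's ggml headers and are not part of this diff:

// Illustrative sketch only: print every metadata key/value of a GGUF file.
#include <cstdio>
#include "rn-llama.hpp"

static void dump_gguf_metadata(const char * path) {
    struct lm_gguf_init_params ip = { /* no_alloc */ true, /* ctx */ nullptr };
    struct lm_gguf_context * meta = lm_gguf_init_from_file(path, ip);
    if (meta == nullptr) {
        return;
    }
    const int n_kv = lm_gguf_get_n_kv(meta);
    for (int i = 0; i < n_kv; i++) {
        printf("%s = %s\n", lm_gguf_get_key(meta, i), rnllama::lm_gguf_kv_to_str(meta, i).c_str());
    }
    lm_gguf_free(meta);
}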
package/cpp/sampling.cpp CHANGED
@@ -99,7 +99,7 @@ struct ring_buffer {
 };
 
 struct common_sampler {
-    common_sampler_params params;
+    common_params_sampling params;
 
     struct llama_sampler * grmr;
     struct llama_sampler * chain;
@@ -125,7 +125,7 @@ struct common_sampler {
     }
 };
 
-std::string common_sampler_params::print() const {
+std::string common_params_sampling::print() const {
     char result[1024];
 
     snprintf(result, sizeof(result),
@@ -141,7 +141,7 @@ std::string common_sampler_params::print() const {
     return std::string(result);
 }
 
-struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_sampler_params & params) {
+struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_params_sampling & params) {
     llama_sampler_chain_params lparams = llama_sampler_chain_default_params();
 
     lparams.no_perf = params.no_perf;
@@ -320,6 +320,45 @@ llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_co
     return cur_p.data[cur_p.selected].id;
 }
 
+std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const llama_tokens & draft, bool grammar_first) {
+    LM_GGML_ASSERT(idxs.size() == draft.size() + 1 && "idxs.size() must be draft.size() + 1");
+
+    std::vector<llama_token> result;
+    result.reserve(idxs.size());
+
+    size_t i = 0;
+    for (; i < draft.size(); i++) {
+        const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);
+
+        common_sampler_accept(gsmpl, id, true);
+
+        result.push_back(id);
+
+        if (draft[i] != id) {
+            break;
+        }
+    }
+
+    if (i == draft.size()) {
+        const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);
+
+        common_sampler_accept(gsmpl, id, true);
+
+        result.push_back(id);
+    }
+
+    return result;
+}
+
+std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first) {
+    std::vector<int> idxs(draft.size() + 1);
+    for (size_t i = 0; i < idxs.size(); ++i) {
+        idxs[i] = i;
+    }
+
+    return common_sampler_sample_and_accept_n(gsmpl, ctx, idxs, draft, grammar_first);
+}
+
 uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) {
     return llama_sampler_get_seed(gsmpl->chain);
 }
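
Note: common_sampler_params was renamed upstream to common_params_sampling, which is what the params.sparams to params.sampling changes in rn-llama.hpp above track. A minimal sketch of constructing a sampler against the renamed struct; the field names (temp, top_k, top_p) are assumed from common.h and are not part of this diff:

// Illustrative sketch only: build a sampler with the renamed params struct.
#include "common.h"
#include "sampling.h"

static struct common_sampler * make_sampler(const struct llama_model * model) {
    struct common_params_sampling sparams; // starts from the defaults in common.h
    sparams.temp  = 0.7f; // assumed field names: temp, top_k, top_p
    sparams.top_k = 40;
    sparams.top_p = 0.9f;
    return common_sampler_init(model, sparams);
}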
package/cpp/sampling.h CHANGED
@@ -36,7 +36,7 @@ struct common_sampler;
 
 // llama_sampler API overloads
 
-struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_sampler_params & params);
+struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_params_sampling & params);
 
 void common_sampler_free(struct common_sampler * gsmpl);
 
@@ -60,6 +60,27 @@ void common_perf_print(const struct llama_context * ctx, const struct common_sam
 //
 llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first = false);
 
+// generalized version of common_sampler_sample
+//
+// will cross-reference the sampled tokens with a batch of draft tokens and accept those that match
+// if the sampler disagrees at some point, we stop and return the accepted tokens up to now
+//
+//      common_sampler_sample_n(gsmpl, ctx, { idx }, {});
+//
+// is equivalent to
+//
+//      common_sampler_sample(gsmpl, ctx, idx);
+//      common_sampler_accept(gsmpl, token, true);
+//
+// requires: idxs.size() == draft.size() + 1
+//
+// returns at least 1 token, up to idxs.size()
+//
+std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const llama_tokens & draft, bool grammar_first = false);
+
+// assume idxs == [ 0, 1, 2, ..., draft.size() ]
+std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first = false);
+
 uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl);
 
 // helpers
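
Note: the common_sampler_sample_and_accept_n() overloads declared above are the building block that the new speculative.cpp / speculative.h (added in this release, see the file list) rely on upstream. A hedged usage sketch, assuming the target context was decoded with one logit slot per draft position plus one:

// Illustrative sketch only: verify a batch of draft tokens against the target sampler.
#include "sampling.h"

static llama_tokens verify_draft(struct common_sampler * gsmpl,
                                 struct llama_context  * ctx,
                                 const llama_tokens    & draft) {
    // idxs defaults to [0, 1, ..., draft.size()]; sampling stops at the first
    // position where the sampler picks a token different from the draft token.
    std::vector<llama_token> accepted = common_sampler_sample_and_accept_n(gsmpl, ctx, draft);
    // accepted.size() is between 1 and draft.size() + 1; the last token is the
    // target sampler's own pick and is always kept.
    return accepted;
}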