cui-llama.rn 1.4.0 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. package/README.md +4 -23
  2. package/android/build.gradle +12 -3
  3. package/android/src/main/CMakeLists.txt +13 -7
  4. package/android/src/main/java/com/rnllama/LlamaContext.java +27 -20
  5. package/android/src/main/java/com/rnllama/RNLlama.java +5 -1
  6. package/android/src/main/jni.cpp +15 -12
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  11. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  12. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  13. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  14. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  15. package/cpp/README.md +1 -1
  16. package/cpp/common.cpp +158 -267
  17. package/cpp/common.h +46 -12
  18. package/cpp/ggml-alloc.c +1042 -1037
  19. package/cpp/ggml-backend-impl.h +255 -256
  20. package/cpp/ggml-backend-reg.cpp +582 -582
  21. package/cpp/ggml-backend.cpp +2002 -2002
  22. package/cpp/ggml-backend.h +354 -352
  23. package/cpp/ggml-common.h +1853 -1853
  24. package/cpp/ggml-cpp.h +39 -39
  25. package/cpp/ggml-cpu-aarch64.cpp +4247 -4247
  26. package/cpp/ggml-cpu-aarch64.h +8 -8
  27. package/cpp/ggml-cpu-impl.h +386 -386
  28. package/cpp/ggml-cpu-quants.c +10920 -10839
  29. package/cpp/ggml-cpu-traits.cpp +36 -36
  30. package/cpp/ggml-cpu-traits.h +38 -38
  31. package/cpp/ggml-cpu.c +329 -60
  32. package/cpp/ggml-cpu.cpp +10 -2
  33. package/cpp/ggml-cpu.h +135 -135
  34. package/cpp/ggml-impl.h +567 -567
  35. package/cpp/ggml-metal-impl.h +17 -17
  36. package/cpp/ggml-metal.m +4884 -4884
  37. package/cpp/ggml-quants.c +5238 -5238
  38. package/cpp/ggml-threading.h +14 -14
  39. package/cpp/ggml.c +6514 -6448
  40. package/cpp/ggml.h +2194 -2163
  41. package/cpp/gguf.cpp +1329 -1325
  42. package/cpp/gguf.h +202 -202
  43. package/cpp/json-schema-to-grammar.cpp +1045 -1045
  44. package/cpp/json-schema-to-grammar.h +8 -8
  45. package/cpp/json.hpp +24766 -24766
  46. package/cpp/llama-adapter.cpp +347 -346
  47. package/cpp/llama-adapter.h +74 -73
  48. package/cpp/llama-arch.cpp +1487 -1434
  49. package/cpp/llama-arch.h +400 -395
  50. package/cpp/llama-batch.cpp +368 -368
  51. package/cpp/llama-batch.h +88 -88
  52. package/cpp/llama-chat.cpp +578 -567
  53. package/cpp/llama-chat.h +52 -51
  54. package/cpp/llama-context.cpp +1775 -1771
  55. package/cpp/llama-context.h +128 -128
  56. package/cpp/llama-cparams.cpp +1 -1
  57. package/cpp/llama-cparams.h +37 -37
  58. package/cpp/llama-cpp.h +30 -30
  59. package/cpp/llama-grammar.cpp +1139 -1139
  60. package/cpp/llama-grammar.h +143 -143
  61. package/cpp/llama-hparams.cpp +71 -71
  62. package/cpp/llama-hparams.h +139 -140
  63. package/cpp/llama-impl.cpp +167 -167
  64. package/cpp/llama-impl.h +61 -61
  65. package/cpp/llama-kv-cache.cpp +718 -718
  66. package/cpp/llama-kv-cache.h +218 -218
  67. package/cpp/llama-mmap.cpp +2 -1
  68. package/cpp/llama-mmap.h +67 -67
  69. package/cpp/llama-model-loader.cpp +1124 -1011
  70. package/cpp/llama-model-loader.h +167 -158
  71. package/cpp/llama-model.cpp +3997 -2202
  72. package/cpp/llama-model.h +370 -391
  73. package/cpp/llama-sampling.cpp +2408 -2406
  74. package/cpp/llama-sampling.h +32 -48
  75. package/cpp/llama-vocab.cpp +3247 -1982
  76. package/cpp/llama-vocab.h +125 -182
  77. package/cpp/llama.cpp +416 -2886
  78. package/cpp/llama.h +1323 -1285
  79. package/cpp/log.cpp +401 -401
  80. package/cpp/log.h +121 -121
  81. package/cpp/rn-llama.cpp +822 -0
  82. package/cpp/rn-llama.h +123 -0
  83. package/cpp/rn-llama.hpp +18 -12
  84. package/cpp/sampling.cpp +505 -500
  85. package/cpp/sgemm.cpp +2597 -2597
  86. package/cpp/speculative.cpp +277 -274
  87. package/cpp/speculative.h +28 -28
  88. package/cpp/unicode.cpp +2 -3
  89. package/ios/CMakeLists.txt +99 -0
  90. package/ios/RNLlama.h +5 -1
  91. package/ios/RNLlama.mm +2 -2
  92. package/ios/RNLlamaContext.h +8 -1
  93. package/ios/RNLlamaContext.mm +15 -11
  94. package/ios/rnllama.xcframework/Info.plist +74 -0
  95. package/jest/mock.js +3 -2
  96. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  97. package/lib/commonjs/index.js +4 -2
  98. package/lib/commonjs/index.js.map +1 -1
  99. package/lib/module/NativeRNLlama.js.map +1 -1
  100. package/lib/module/index.js +4 -2
  101. package/lib/module/index.js.map +1 -1
  102. package/lib/typescript/NativeRNLlama.d.ts +5 -1
  103. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  104. package/lib/typescript/index.d.ts.map +1 -1
  105. package/llama-rn.podspec +8 -2
  106. package/package.json +5 -2
  107. package/src/NativeRNLlama.ts +5 -1
  108. package/src/index.ts +9 -2
--- a/package/cpp/llama-adapter.h
+++ b/package/cpp/llama-adapter.h
@@ -1,73 +1,74 @@
-#pragma once
-
-#include "llama-impl.h"
-#include "llama-hparams.h"
-
-#include "ggml-cpp.h"
-
-#include <unordered_map>
-#include <vector>
-
-//
-// llama_adapter_cvec
-//
-
-// TODO: rename to llama_adapter_cvec
-struct llama_control_vector {
-    std::vector<lm_ggml_context_ptr> ctxs;
-    std::vector<lm_ggml_backend_buffer_ptr> bufs;
-
-    std::vector<struct lm_ggml_tensor *> tensors; // per layer
-
-    int32_t layer_start = -1;
-    int32_t layer_end   = -1;
-
-    struct lm_ggml_tensor * tensor_for(int il) const;
-
-    struct lm_ggml_tensor * apply_to(struct lm_ggml_context * ctx, struct lm_ggml_tensor * cur, int il) const;
-};
-
-int32_t llama_control_vector_apply(
-        struct llama_control_vector & cvec,
-        const llama_model & model,
-        const float * data,
-        size_t len,
-        int32_t n_embd,
-        int32_t il_start,
-        int32_t il_end);
-
-//
-// llama_adapter_lora
-//
-
-// TODO: rename to llama_adapter_lora_weight
-struct llama_lora_weight {
-    struct lm_ggml_tensor * a = nullptr;
-    struct lm_ggml_tensor * b = nullptr;
-
-    // get actual scale based on rank and alpha
-    float get_scale(float alpha, float adapter_scale) {
-        const float rank = (float) b->ne[0];
-        const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
-        return scale;
-    }
-
-    llama_lora_weight() = default;
-    llama_lora_weight(struct lm_ggml_tensor * a, struct lm_ggml_tensor * b) : a(a), b(b) {}
-};
-
-// TODO: rename to llama_adapter_lora
-struct llama_lora_adapter {
-    // map tensor name to lora_a_b
-    std::unordered_map<std::string, struct llama_lora_weight> ab_map;
-
-    std::vector<lm_ggml_context_ptr> ctxs;
-    std::vector<lm_ggml_backend_buffer_ptr> bufs;
-
-    float alpha;
-
-    llama_lora_adapter() = default;
-    ~llama_lora_adapter() = default;
-
-    llama_lora_weight * get_weight(struct lm_ggml_tensor * w);
-};
+#pragma once
+
+#include "llama.h"
+
+#include "ggml-cpp.h"
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+// TODO: pimpl
+
+//
+// llama_adapter_cvec
+//
+
+struct llama_adapter_cvec {
+    struct lm_ggml_tensor * tensor_for(int il) const;
+
+    struct lm_ggml_tensor * apply_to(struct lm_ggml_context * ctx, struct lm_ggml_tensor * cur, int il) const;
+
+    int32_t apply(
+            const llama_model & model,
+            const float * data,
+            size_t len,
+            int32_t n_embd,
+            int32_t il_start,
+            int32_t il_end);
+
+private:
+    bool init(const llama_model & model);
+
+    int32_t layer_start = -1;
+    int32_t layer_end   = -1;
+
+    std::vector<lm_ggml_context_ptr> ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    std::vector<struct lm_ggml_tensor *> tensors; // per layer
+};
+
+//
+// llama_adapter_lora
+//
+
+struct llama_adapter_lora_weight {
+    struct lm_ggml_tensor * a = nullptr;
+    struct lm_ggml_tensor * b = nullptr;
+
+    // get actual scale based on rank and alpha
+    float get_scale(float alpha, float adapter_scale) const {
+        const float rank = (float) b->ne[0];
+        const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
+        return scale;
+    }
+
+    llama_adapter_lora_weight() = default;
+    llama_adapter_lora_weight(struct lm_ggml_tensor * a, struct lm_ggml_tensor * b) : a(a), b(b) {}
+};
+
+struct llama_adapter_lora {
+    // map tensor name to lora_a_b
+    std::unordered_map<std::string, struct llama_adapter_lora_weight> ab_map;
+
+    std::vector<lm_ggml_context_ptr> ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    float alpha;
+
+    llama_adapter_lora() = default;
+    ~llama_adapter_lora() = default;
+
+    llama_adapter_lora_weight * get_weight(struct lm_ggml_tensor * w);
+};
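
For context, the renamed llama_adapter_lora_weight::get_scale in the hunk above derives the effective LoRA scale from the adapter's alpha and rank (rank being b->ne[0]). Below is a minimal standalone sketch of that rule only; the alpha, rank, and adapter_scale values are illustrative assumptions and not taken from this diff.

#include <cstdio>

// Sketch of the scale rule: when alpha is non-zero, the user-supplied
// adapter_scale is multiplied by alpha / rank; when alpha is zero,
// adapter_scale is returned unchanged.
static float lora_scale(float alpha, float adapter_scale, float rank) {
    return alpha ? adapter_scale * alpha / rank : adapter_scale;
}

int main() {
    // Illustrative values: a rank-16 adapter trained with alpha = 32, applied at full strength.
    std::printf("scale = %.2f\n", lora_scale(32.0f, 1.0f, 16.0f)); // prints 2.00
    // With alpha == 0 the raw adapter_scale is used as-is.
    std::printf("scale = %.2f\n", lora_scale(0.0f, 0.5f, 16.0f));  // prints 0.50
    return 0;
}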