cui-llama.rn 1.3.6 → 1.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/README.md +22 -1
  2. package/android/src/main/CMakeLists.txt +25 -26
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +31 -9
  4. package/android/src/main/java/com/rnllama/RNLlama.java +98 -0
  5. package/android/src/main/jni-utils.h +94 -0
  6. package/android/src/main/jni.cpp +133 -63
  7. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +15 -0
  8. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +15 -0
  9. package/cpp/common.cpp +2085 -1982
  10. package/cpp/common.h +696 -664
  11. package/cpp/ggml-alloc.c +1042 -1037
  12. package/cpp/ggml-backend-impl.h +255 -256
  13. package/cpp/ggml-backend-reg.cpp +582 -582
  14. package/cpp/ggml-backend.cpp +2002 -2002
  15. package/cpp/ggml-backend.h +354 -352
  16. package/cpp/ggml-common.h +1853 -1853
  17. package/cpp/ggml-cpp.h +39 -39
  18. package/cpp/ggml-cpu-aarch64.cpp +4247 -4247
  19. package/cpp/ggml-cpu-aarch64.h +8 -8
  20. package/cpp/ggml-cpu-impl.h +386 -386
  21. package/cpp/ggml-cpu-quants.c +10920 -10839
  22. package/cpp/ggml-cpu-traits.cpp +36 -36
  23. package/cpp/ggml-cpu-traits.h +38 -38
  24. package/cpp/ggml-cpu.c +14391 -14122
  25. package/cpp/ggml-cpu.cpp +635 -627
  26. package/cpp/ggml-cpu.h +135 -135
  27. package/cpp/ggml-impl.h +567 -567
  28. package/cpp/ggml-metal-impl.h +288 -0
  29. package/cpp/ggml-metal.m +4884 -4884
  30. package/cpp/ggml-opt.cpp +854 -0
  31. package/cpp/ggml-opt.h +216 -0
  32. package/cpp/ggml-quants.c +5238 -5238
  33. package/cpp/ggml-threading.h +14 -14
  34. package/cpp/ggml.c +6514 -6448
  35. package/cpp/ggml.h +2194 -2163
  36. package/cpp/gguf.cpp +1329 -1325
  37. package/cpp/gguf.h +202 -202
  38. package/cpp/json-schema-to-grammar.cpp +1045 -1045
  39. package/cpp/json-schema-to-grammar.h +8 -8
  40. package/cpp/json.hpp +24766 -24766
  41. package/cpp/llama-adapter.cpp +347 -346
  42. package/cpp/llama-adapter.h +74 -73
  43. package/cpp/llama-arch.cpp +1487 -1434
  44. package/cpp/llama-arch.h +400 -395
  45. package/cpp/llama-batch.cpp +368 -368
  46. package/cpp/llama-batch.h +88 -88
  47. package/cpp/llama-chat.cpp +578 -567
  48. package/cpp/llama-chat.h +52 -51
  49. package/cpp/llama-context.cpp +1775 -1771
  50. package/cpp/llama-context.h +128 -128
  51. package/cpp/llama-cparams.cpp +1 -1
  52. package/cpp/llama-cparams.h +37 -37
  53. package/cpp/llama-cpp.h +30 -30
  54. package/cpp/llama-grammar.cpp +1139 -1139
  55. package/cpp/llama-grammar.h +143 -143
  56. package/cpp/llama-hparams.cpp +71 -71
  57. package/cpp/llama-hparams.h +139 -140
  58. package/cpp/llama-impl.cpp +167 -167
  59. package/cpp/llama-impl.h +61 -61
  60. package/cpp/llama-kv-cache.cpp +718 -718
  61. package/cpp/llama-kv-cache.h +218 -218
  62. package/cpp/llama-mmap.cpp +590 -589
  63. package/cpp/llama-mmap.h +67 -67
  64. package/cpp/llama-model-loader.cpp +1124 -1011
  65. package/cpp/llama-model-loader.h +167 -158
  66. package/cpp/llama-model.cpp +3997 -2202
  67. package/cpp/llama-model.h +370 -391
  68. package/cpp/llama-sampling.cpp +2408 -2406
  69. package/cpp/llama-sampling.h +32 -48
  70. package/cpp/llama-vocab.cpp +3247 -1982
  71. package/cpp/llama-vocab.h +125 -182
  72. package/cpp/llama.cpp +10077 -12544
  73. package/cpp/llama.h +1323 -1285
  74. package/cpp/log.cpp +401 -401
  75. package/cpp/log.h +121 -121
  76. package/cpp/rn-llama.hpp +123 -116
  77. package/cpp/sampling.cpp +505 -500
  78. package/cpp/sgemm.cpp +2597 -2597
  79. package/cpp/sgemm.h +14 -14
  80. package/cpp/speculative.cpp +277 -274
  81. package/cpp/speculative.h +28 -28
  82. package/cpp/unicode.cpp +2 -3
  83. package/ios/RNLlama.mm +47 -0
  84. package/ios/RNLlamaContext.h +3 -1
  85. package/ios/RNLlamaContext.mm +71 -14
  86. package/jest/mock.js +15 -3
  87. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  88. package/lib/commonjs/index.js +33 -37
  89. package/lib/commonjs/index.js.map +1 -1
  90. package/lib/module/NativeRNLlama.js.map +1 -1
  91. package/lib/module/index.js +31 -35
  92. package/lib/module/index.js.map +1 -1
  93. package/lib/typescript/NativeRNLlama.d.ts +26 -6
  94. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  95. package/lib/typescript/index.d.ts +21 -36
  96. package/lib/typescript/index.d.ts.map +1 -1
  97. package/llama-rn.podspec +4 -18
  98. package/package.json +2 -3
  99. package/src/NativeRNLlama.ts +32 -13
  100. package/src/index.ts +52 -47
  101. package/cpp/llama.cpp.rej +0 -23
package/cpp/llama-mmap.h CHANGED
@@ -1,67 +1,67 @@
-#pragma once
-
-#include <memory>
-#include <vector>
-
-struct llama_file;
-struct llama_mmap;
-struct llama_mlock;
-
-using llama_files = std::vector<std::unique_ptr<llama_file>>;
-using llama_mmaps = std::vector<std::unique_ptr<llama_mmap>>;
-using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;
-
-struct llama_file {
-    llama_file(const char * fname, const char * mode);
-    ~llama_file();
-
-    size_t tell() const;
-    size_t size() const;
-
-    int file_id() const; // fileno overload
-
-    void seek(size_t offset, int whence) const;
-
-    void read_raw(void * ptr, size_t len) const;
-    uint32_t read_u32() const;
-
-    void write_raw(const void * ptr, size_t len) const;
-    void write_u32(uint32_t val) const;
-
-private:
-    struct impl;
-    std::unique_ptr<impl> pimpl;
-};
-
-struct llama_mmap {
-    llama_mmap(const llama_mmap &) = delete;
-    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false);
-    ~llama_mmap();
-
-    size_t size() const;
-    void * addr() const;
-
-    void unmap_fragment(size_t first, size_t last);
-
-    static const bool SUPPORTED;
-
-private:
-    struct impl;
-    std::unique_ptr<impl> pimpl;
-};
-
-struct llama_mlock {
-    llama_mlock();
-    ~llama_mlock();
-
-    void init(void * ptr);
-    void grow_to(size_t target_size);
-
-    static const bool SUPPORTED;
-
-private:
-    struct impl;
-    std::unique_ptr<impl> pimpl;
-};
-
-size_t llama_path_max();
+#pragma once
+
+#include <memory>
+#include <vector>
+
+struct llama_file;
+struct llama_mmap;
+struct llama_mlock;
+
+using llama_files = std::vector<std::unique_ptr<llama_file>>;
+using llama_mmaps = std::vector<std::unique_ptr<llama_mmap>>;
+using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;
+
+struct llama_file {
+    llama_file(const char * fname, const char * mode);
+    ~llama_file();
+
+    size_t tell() const;
+    size_t size() const;
+
+    int file_id() const; // fileno overload
+
+    void seek(size_t offset, int whence) const;
+
+    void read_raw(void * ptr, size_t len) const;
+    uint32_t read_u32() const;
+
+    void write_raw(const void * ptr, size_t len) const;
+    void write_u32(uint32_t val) const;
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+struct llama_mmap {
+    llama_mmap(const llama_mmap &) = delete;
+    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false);
+    ~llama_mmap();
+
+    size_t size() const;
+    void * addr() const;
+
+    void unmap_fragment(size_t first, size_t last);
+
+    static const bool SUPPORTED;
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+struct llama_mlock {
+    llama_mlock();
+    ~llama_mlock();
+
+    void init(void * ptr);
+    void grow_to(size_t target_size);
+
+    static const bool SUPPORTED;
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+size_t llama_path_max();
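
Each struct in this header exposes only an opaque forward declaration (struct impl;) plus a std::unique_ptr<impl> pimpl; member: the pimpl idiom, which keeps platform-specific file and mmap state out of the public header. As a rough illustration only (not the package's actual llama-mmap.cpp), a translation unit backing llama_file with plain stdio could look like the sketch below; the member names and error message are invented for the example.

// Hypothetical sketch of a pimpl backing for llama_file, using portable
// stdio only. The real llama-mmap.cpp also covers Windows paths, mmap,
// and mlock; this shows only the structure implied by the header above.
#include "llama-mmap.h"

#include <cstdio>
#include <stdexcept>

struct llama_file::impl {
    FILE * fp = nullptr; // underlying stream, never visible to header consumers
    size_t sz = 0;       // cached file size, computed once at open
};

llama_file::llama_file(const char * fname, const char * mode) : pimpl(new impl()) {
    pimpl->fp = std::fopen(fname, mode);
    if (!pimpl->fp) {
        throw std::runtime_error("llama_file: failed to open file");
    }
    std::fseek(pimpl->fp, 0, SEEK_END);
    pimpl->sz = (size_t) std::ftell(pimpl->fp);
    std::fseek(pimpl->fp, 0, SEEK_SET);
}

// Defined here, where impl is a complete type, so unique_ptr<impl> can
// generate its deleter; this is why the header only declares ~llama_file().
llama_file::~llama_file() {
    if (pimpl->fp) {
        std::fclose(pimpl->fp);
    }
}

size_t llama_file::tell() const { return (size_t) std::ftell(pimpl->fp); }
size_t llama_file::size() const { return pimpl->sz; }

void llama_file::seek(size_t offset, int whence) const {
    std::fseek(pimpl->fp, (long) offset, whence);
}
// (remaining members omitted)

The payoff of this layout is that <cstdio> and the platform mmap/mlock headers never leak into llama-mmap.h, so consumers of llama_files, llama_mmaps, and llama_mlocks recompile only when the interface itself changes.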