cui-llama.rn 1.2.6 → 1.3.0

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (70)
  1. package/README.md +3 -2
  2. package/android/src/main/CMakeLists.txt +20 -5
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +115 -27
  4. package/android/src/main/java/com/rnllama/RNLlama.java +40 -7
  5. package/android/src/main/jni.cpp +222 -34
  6. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +9 -4
  7. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +9 -4
  8. package/cpp/common.cpp +1682 -2114
  9. package/cpp/common.h +600 -613
  10. package/cpp/ggml-aarch64.c +129 -3478
  11. package/cpp/ggml-aarch64.h +19 -39
  12. package/cpp/ggml-alloc.c +1040 -1040
  13. package/cpp/ggml-alloc.h +76 -76
  14. package/cpp/ggml-backend-impl.h +216 -216
  15. package/cpp/ggml-backend-reg.cpp +195 -0
  16. package/cpp/ggml-backend.cpp +1997 -2661
  17. package/cpp/ggml-backend.h +328 -314
  18. package/cpp/ggml-common.h +1853 -1853
  19. package/cpp/ggml-cpp.h +38 -38
  20. package/cpp/ggml-cpu-aarch64.c +3560 -0
  21. package/cpp/ggml-cpu-aarch64.h +30 -0
  22. package/cpp/ggml-cpu-impl.h +371 -614
  23. package/cpp/ggml-cpu-quants.c +10822 -0
  24. package/cpp/ggml-cpu-quants.h +63 -0
  25. package/cpp/ggml-cpu.c +13975 -13720
  26. package/cpp/ggml-cpu.cpp +663 -0
  27. package/cpp/ggml-cpu.h +177 -150
  28. package/cpp/ggml-impl.h +550 -296
  29. package/cpp/ggml-metal.h +66 -66
  30. package/cpp/ggml-metal.m +4294 -3933
  31. package/cpp/ggml-quants.c +5247 -15739
  32. package/cpp/ggml-quants.h +100 -147
  33. package/cpp/ggml-threading.cpp +12 -0
  34. package/cpp/ggml-threading.h +12 -0
  35. package/cpp/ggml.c +8180 -8390
  36. package/cpp/ggml.h +2411 -2441
  37. package/cpp/llama-grammar.cpp +1138 -1138
  38. package/cpp/llama-grammar.h +144 -144
  39. package/cpp/llama-impl.h +181 -181
  40. package/cpp/llama-sampling.cpp +2348 -2345
  41. package/cpp/llama-sampling.h +48 -48
  42. package/cpp/llama-vocab.cpp +1984 -1984
  43. package/cpp/llama-vocab.h +170 -170
  44. package/cpp/llama.cpp +22132 -22046
  45. package/cpp/llama.h +1253 -1255
  46. package/cpp/log.cpp +401 -401
  47. package/cpp/log.h +121 -121
  48. package/cpp/rn-llama.hpp +83 -19
  49. package/cpp/sampling.cpp +466 -466
  50. package/cpp/sgemm.cpp +1884 -1276
  51. package/ios/RNLlama.mm +43 -20
  52. package/ios/RNLlamaContext.h +9 -3
  53. package/ios/RNLlamaContext.mm +133 -33
  54. package/jest/mock.js +0 -1
  55. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  56. package/lib/commonjs/index.js +52 -15
  57. package/lib/commonjs/index.js.map +1 -1
  58. package/lib/module/NativeRNLlama.js.map +1 -1
  59. package/lib/module/index.js +51 -15
  60. package/lib/module/index.js.map +1 -1
  61. package/lib/typescript/NativeRNLlama.d.ts +29 -5
  62. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  63. package/lib/typescript/index.d.ts +12 -5
  64. package/lib/typescript/index.d.ts.map +1 -1
  65. package/package.json +1 -1
  66. package/src/NativeRNLlama.ts +41 -6
  67. package/src/index.ts +82 -27
  68. package/cpp/json-schema-to-grammar.cpp +0 -1045
  69. package/cpp/json-schema-to-grammar.h +0 -8
  70. package/cpp/json.hpp +0 -24766
package/cpp/ggml-metal.h CHANGED
@@ -1,66 +1,66 @@
-// Note: this description is outdated
-//
-// An interface allowing to compute lm_ggml_cgraph with Metal
-//
-// This is a fully functional interface that extends ggml with GPU support for Apple devices.
-// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, etc.)
-//
-// How it works?
-//
-// As long as your program can create and evaluate a lm_ggml_cgraph on the CPU, you can use this
-// interface to evaluate the same graph on the GPU. Instead of using lm_ggml_graph_compute(), you
-// use lm_ggml_metal_graph_compute() (or lm_ggml_vulkan_graph_compute(), etc.)
-//
-// You only need to make sure that all memory buffers that you used during the graph creation
-// are mapped to the device memory with the lm_ggml_metal_add_buffer() function. This mapping is
-// used during the graph evaluation to determine the arguments of the compute kernels.
-//
-// Synchronization between device and host memory (for example for input and output tensors)
-// is done with the lm_ggml_metal_set_tensor() and lm_ggml_metal_get_tensor() functions.
-//
-
-#pragma once
-
-#include "ggml.h"
-#include "ggml-backend.h"
-
-#include <stddef.h>
-#include <stdbool.h>
-
-struct lm_ggml_tensor;
-struct lm_ggml_cgraph;
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-//
-// backend API
-// user-code should use only these functions
-//
-
-LM_GGML_API lm_ggml_backend_t lm_ggml_backend_metal_init(void);
-
-LM_GGML_API bool lm_ggml_backend_is_metal(lm_ggml_backend_t backend);
-
-LM_GGML_DEPRECATED(
-        LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
-        "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");
-
-LM_GGML_API void lm_ggml_backend_metal_set_abort_callback(lm_ggml_backend_t backend, lm_ggml_abort_callback abort_callback, void * user_data);
-
-LM_GGML_API lm_ggml_backend_buffer_type_t lm_ggml_backend_metal_buffer_type(void);
-
-// helper to check if the device supports a specific family
-// ideally, the user code should be doing these checks
-// ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
-LM_GGML_API bool lm_ggml_backend_metal_supports_family(lm_ggml_backend_t backend, int family);
-
-// capture all command buffers committed the next time `lm_ggml_backend_graph_compute` is called
-LM_GGML_API void lm_ggml_backend_metal_capture_next_compute(lm_ggml_backend_t backend);
-
-LM_GGML_API lm_ggml_backend_reg_t lm_ggml_backend_metal_reg(void);
-
-#ifdef __cplusplus
-}
-#endif
+// Note: this description is outdated
+//
+// An interface allowing to compute lm_ggml_cgraph with Metal
+//
+// This is a fully functional interface that extends ggml with GPU support for Apple devices.
+// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, etc.)
+//
+// How it works?
+//
+// As long as your program can create and evaluate a lm_ggml_cgraph on the CPU, you can use this
+// interface to evaluate the same graph on the GPU. Instead of using lm_ggml_graph_compute(), you
+// use lm_ggml_metal_graph_compute() (or lm_ggml_vulkan_graph_compute(), etc.)
+//
+// You only need to make sure that all memory buffers that you used during the graph creation
+// are mapped to the device memory with the lm_ggml_metal_add_buffer() function. This mapping is
+// used during the graph evaluation to determine the arguments of the compute kernels.
+//
+// Synchronization between device and host memory (for example for input and output tensors)
+// is done with the lm_ggml_metal_set_tensor() and lm_ggml_metal_get_tensor() functions.
+//
+
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#include <stddef.h>
+#include <stdbool.h>
+
+struct lm_ggml_tensor;
+struct lm_ggml_cgraph;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// backend API
+// user-code should use only these functions
+//
+
+LM_GGML_BACKEND_API lm_ggml_backend_t lm_ggml_backend_metal_init(void);
+
+LM_GGML_BACKEND_API bool lm_ggml_backend_is_metal(lm_ggml_backend_t backend);
+
+LM_GGML_DEPRECATED(
+        LM_GGML_BACKEND_API lm_ggml_backend_buffer_t lm_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
+        "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");
+
+LM_GGML_BACKEND_API void lm_ggml_backend_metal_set_abort_callback(lm_ggml_backend_t backend, lm_ggml_abort_callback abort_callback, void * user_data);
+
+LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_metal_buffer_type(void);
+
+// helper to check if the device supports a specific family
+// ideally, the user code should be doing these checks
+// ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+LM_GGML_BACKEND_API bool lm_ggml_backend_metal_supports_family(lm_ggml_backend_t backend, int family);
+
+// capture all command buffers committed the next time `lm_ggml_backend_graph_compute` is called
+LM_GGML_BACKEND_API void lm_ggml_backend_metal_capture_next_compute(lm_ggml_backend_t backend);
+
+LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_metal_reg(void);
+
+#ifdef __cplusplus
+}
+#endif
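The only substantive change in this header is the export macro: every LM_GGML_API declaration becomes LM_GGML_BACKEND_API, in line with the backend-registry code this release adds (see ggml-backend-reg.cpp in the file list). For orientation, here is a minimal usage sketch of the API declared above — an illustration, not code from the package; it assumes the lm_-prefixed ggml build this package ships and leaves graph construction out:

    #include "ggml.h"
    #include "ggml-backend.h"
    #include "ggml-metal.h"
    #include <stdio.h>

    // Run an already-built compute graph on the Metal backend.
    // In real code the graph's tensors must first be allocated in a
    // backend buffer (e.g. via lm_ggml_backend_alloc_ctx_tensors).
    static void run_on_metal(struct lm_ggml_cgraph * graph) {
        lm_ggml_backend_t backend = lm_ggml_backend_metal_init();
        if (backend == NULL) {
            fprintf(stderr, "Metal backend unavailable\n");
            return;
        }
        // Optional feature gate; see the Metal feature-set tables linked above.
        if (lm_ggml_backend_is_metal(backend) &&
            lm_ggml_backend_metal_supports_family(backend, 7)) {
            // Evaluate the same lm_ggml_cgraph that would otherwise run on the CPU.
            lm_ggml_backend_graph_compute(backend, graph);
        }
        lm_ggml_backend_free(backend);
    }

Note that lm_ggml_backend_metal_buffer_from_ptr() is already marked deprecated in favor of the device interface from the linked upstream PR, so new code should allocate device memory through lm_ggml_backend_metal_buffer_type() rather than wrapping host pointers.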