cui-llama.rn 1.3.0 → 1.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77):
  1. package/android/src/main/CMakeLists.txt +9 -6
  2. package/android/src/main/java/com/rnllama/LlamaContext.java +4 -4
  3. package/android/src/main/jni.cpp +15 -15
  4. package/cpp/common.cpp +1962 -1682
  5. package/cpp/common.h +645 -600
  6. package/cpp/ggml-alloc.c +1038 -1040
  7. package/cpp/ggml-alloc.h +76 -76
  8. package/cpp/ggml-backend-impl.h +256 -216
  9. package/cpp/ggml-backend-reg.cpp +552 -195
  10. package/cpp/ggml-backend.cpp +1999 -1997
  11. package/cpp/ggml-backend.h +352 -328
  12. package/cpp/ggml-common.h +1853 -1853
  13. package/cpp/ggml-cpp.h +38 -38
  14. package/cpp/{ggml-cpu-aarch64.c → ggml-cpu-aarch64.cpp} +4262 -3560
  15. package/cpp/ggml-cpu-aarch64.h +8 -30
  16. package/cpp/ggml-cpu-impl.h +386 -371
  17. package/cpp/ggml-cpu-quants.c +10835 -10822
  18. package/cpp/ggml-cpu-quants.h +63 -63
  19. package/cpp/ggml-cpu-traits.cpp +36 -0
  20. package/cpp/ggml-cpu-traits.h +38 -0
  21. package/cpp/ggml-cpu.c +14122 -13975
  22. package/cpp/ggml-cpu.cpp +618 -663
  23. package/cpp/ggml-cpu.h +135 -177
  24. package/cpp/ggml-impl.h +556 -550
  25. package/cpp/ggml-metal.h +66 -66
  26. package/cpp/ggml-metal.m +4884 -4294
  27. package/cpp/ggml-quants.c +5238 -5247
  28. package/cpp/ggml-quants.h +100 -100
  29. package/cpp/ggml-threading.cpp +12 -12
  30. package/cpp/ggml-threading.h +14 -12
  31. package/cpp/ggml.c +7707 -8180
  32. package/cpp/ggml.h +2286 -2411
  33. package/cpp/json-schema-to-grammar.cpp +1045 -0
  34. package/cpp/json-schema-to-grammar.h +8 -0
  35. package/cpp/json.hpp +24766 -0
  36. package/cpp/llama-grammar.cpp +1138 -1138
  37. package/cpp/llama-grammar.h +144 -144
  38. package/cpp/llama-impl.h +181 -181
  39. package/cpp/llama-sampling.cpp +2293 -2348
  40. package/cpp/llama-sampling.h +48 -48
  41. package/cpp/llama-vocab.cpp +1985 -1984
  42. package/cpp/llama-vocab.h +170 -170
  43. package/cpp/llama.cpp +22836 -22132
  44. package/cpp/llama.h +1263 -1253
  45. package/cpp/log.cpp +401 -401
  46. package/cpp/log.h +121 -121
  47. package/cpp/rn-llama.hpp +6 -6
  48. package/cpp/sampling.cpp +500 -466
  49. package/cpp/sampling.h +22 -1
  50. package/cpp/sgemm.cpp +1884 -1884
  51. package/cpp/speculative.cpp +274 -0
  52. package/cpp/speculative.h +28 -0
  53. package/cpp/unicode.cpp +62 -51
  54. package/cpp/unicode.h +9 -10
  55. package/ios/RNLlamaContext.mm +13 -0
  56. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  57. package/lib/commonjs/grammar.js +4 -2
  58. package/lib/commonjs/grammar.js.map +1 -1
  59. package/lib/commonjs/index.js +38 -1
  60. package/lib/commonjs/index.js.map +1 -1
  61. package/lib/module/NativeRNLlama.js.map +1 -1
  62. package/lib/module/grammar.js +2 -1
  63. package/lib/module/grammar.js.map +1 -1
  64. package/lib/module/index.js +36 -0
  65. package/lib/module/index.js.map +1 -1
  66. package/lib/typescript/NativeRNLlama.d.ts +95 -6
  67. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  68. package/lib/typescript/grammar.d.ts +5 -6
  69. package/lib/typescript/grammar.d.ts.map +1 -1
  70. package/lib/typescript/index.d.ts +40 -4
  71. package/lib/typescript/index.d.ts.map +1 -1
  72. package/package.json +2 -1
  73. package/src/NativeRNLlama.ts +99 -12
  74. package/src/grammar.ts +10 -8
  75. package/src/index.ts +68 -3
  76. package/cpp/ggml-aarch64.c +0 -129
  77. package/cpp/ggml-aarch64.h +0 -19
package/cpp/ggml-metal.h CHANGED
@@ -1,66 +1,66 @@
1
- // Note: this description is outdated
2
- //
3
- // An interface allowing to compute lm_ggml_cgraph with Metal
4
- //
5
- // This is a fully functional interface that extends ggml with GPU support for Apple devices.
6
- // A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, etc.)
7
- //
8
- // How it works?
9
- //
10
- // As long as your program can create and evaluate a lm_ggml_cgraph on the CPU, you can use this
11
- // interface to evaluate the same graph on the GPU. Instead of using lm_ggml_graph_compute(), you
12
- // use lm_ggml_metal_graph_compute() (or lm_ggml_vulkan_graph_compute(), etc.)
13
- //
14
- // You only need to make sure that all memory buffers that you used during the graph creation
15
- // are mapped to the device memory with the lm_ggml_metal_add_buffer() function. This mapping is
16
- // used during the graph evaluation to determine the arguments of the compute kernels.
17
- //
18
- // Synchronization between device and host memory (for example for input and output tensors)
19
- // is done with the lm_ggml_metal_set_tensor() and lm_ggml_metal_get_tensor() functions.
20
- //
21
-
22
- #pragma once
23
-
24
- #include "ggml.h"
25
- #include "ggml-backend.h"
26
-
27
- #include <stddef.h>
28
- #include <stdbool.h>
29
-
30
- struct lm_ggml_tensor;
31
- struct lm_ggml_cgraph;
32
-
33
- #ifdef __cplusplus
34
- extern "C" {
35
- #endif
36
-
37
- //
38
- // backend API
39
- // user-code should use only these functions
40
- //
41
-
42
- LM_GGML_BACKEND_API lm_ggml_backend_t lm_ggml_backend_metal_init(void);
43
-
44
- LM_GGML_BACKEND_API bool lm_ggml_backend_is_metal(lm_ggml_backend_t backend);
45
-
46
- LM_GGML_DEPRECATED(
47
- LM_GGML_BACKEND_API lm_ggml_backend_buffer_t lm_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
48
- "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");
49
-
50
- LM_GGML_BACKEND_API void lm_ggml_backend_metal_set_abort_callback(lm_ggml_backend_t backend, lm_ggml_abort_callback abort_callback, void * user_data);
51
-
52
- LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_metal_buffer_type(void);
53
-
54
- // helper to check if the device supports a specific family
55
- // ideally, the user code should be doing these checks
56
- // ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
57
- LM_GGML_BACKEND_API bool lm_ggml_backend_metal_supports_family(lm_ggml_backend_t backend, int family);
58
-
59
- // capture all command buffers committed the next time `lm_ggml_backend_graph_compute` is called
60
- LM_GGML_BACKEND_API void lm_ggml_backend_metal_capture_next_compute(lm_ggml_backend_t backend);
61
-
62
- LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_metal_reg(void);
63
-
64
- #ifdef __cplusplus
65
- }
66
- #endif
1
+ // Note: this description is outdated
2
+ //
3
+ // An interface allowing to compute lm_ggml_cgraph with Metal
4
+ //
5
+ // This is a fully functional interface that extends ggml with GPU support for Apple devices.
6
+ // A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, etc.)
7
+ //
8
+ // How it works?
9
+ //
10
+ // As long as your program can create and evaluate a lm_ggml_cgraph on the CPU, you can use this
11
+ // interface to evaluate the same graph on the GPU. Instead of using lm_ggml_graph_compute(), you
12
+ // use lm_ggml_metal_graph_compute() (or lm_ggml_vulkan_graph_compute(), etc.)
13
+ //
14
+ // You only need to make sure that all memory buffers that you used during the graph creation
15
+ // are mapped to the device memory with the lm_ggml_metal_add_buffer() function. This mapping is
16
+ // used during the graph evaluation to determine the arguments of the compute kernels.
17
+ //
18
+ // Synchronization between device and host memory (for example for input and output tensors)
19
+ // is done with the lm_ggml_metal_set_tensor() and lm_ggml_metal_get_tensor() functions.
20
+ //
21
+
22
+ #pragma once
23
+
24
+ #include "ggml.h"
25
+ #include "ggml-backend.h"
26
+
27
+ #include <stddef.h>
28
+ #include <stdbool.h>
29
+
30
+ struct lm_ggml_tensor;
31
+ struct lm_ggml_cgraph;
32
+
33
+ #ifdef __cplusplus
34
+ extern "C" {
35
+ #endif
36
+
37
+ //
38
+ // backend API
39
+ // user-code should use only these functions
40
+ //
41
+
42
+ LM_GGML_BACKEND_API lm_ggml_backend_t lm_ggml_backend_metal_init(void);
43
+
44
+ LM_GGML_BACKEND_API bool lm_ggml_backend_is_metal(lm_ggml_backend_t backend);
45
+
46
+ LM_GGML_DEPRECATED(
47
+ LM_GGML_BACKEND_API lm_ggml_backend_buffer_t lm_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
48
+ "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");
49
+
50
+ LM_GGML_BACKEND_API void lm_ggml_backend_metal_set_abort_callback(lm_ggml_backend_t backend, lm_ggml_abort_callback abort_callback, void * user_data);
51
+
52
+ LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_metal_buffer_type(void);
53
+
54
+ // helper to check if the device supports a specific family
55
+ // ideally, the user code should be doing these checks
56
+ // ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
57
+ LM_GGML_BACKEND_API bool lm_ggml_backend_metal_supports_family(lm_ggml_backend_t backend, int family);
58
+
59
+ // capture all command buffers committed the next time `lm_ggml_backend_graph_compute` is called
60
+ LM_GGML_BACKEND_API void lm_ggml_backend_metal_capture_next_compute(lm_ggml_backend_t backend);
61
+
62
+ LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_metal_reg(void);
63
+
64
+ #ifdef __cplusplus
65
+ }
66
+ #endif