cui-llama.rn 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/LICENSE +20 -0
  2. package/README.md +330 -0
  3. package/android/build.gradle +107 -0
  4. package/android/gradle.properties +5 -0
  5. package/android/src/main/AndroidManifest.xml +4 -0
  6. package/android/src/main/CMakeLists.txt +69 -0
  7. package/android/src/main/java/com/rnllama/LlamaContext.java +353 -0
  8. package/android/src/main/java/com/rnllama/RNLlama.java +446 -0
  9. package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -0
  10. package/android/src/main/jni.cpp +635 -0
  11. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +94 -0
  12. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +95 -0
  13. package/cpp/README.md +4 -0
  14. package/cpp/common.cpp +3237 -0
  15. package/cpp/common.h +467 -0
  16. package/cpp/ggml-aarch64.c +2193 -0
  17. package/cpp/ggml-aarch64.h +39 -0
  18. package/cpp/ggml-alloc.c +1041 -0
  19. package/cpp/ggml-alloc.h +76 -0
  20. package/cpp/ggml-backend-impl.h +153 -0
  21. package/cpp/ggml-backend.c +2225 -0
  22. package/cpp/ggml-backend.h +236 -0
  23. package/cpp/ggml-common.h +1829 -0
  24. package/cpp/ggml-impl.h +655 -0
  25. package/cpp/ggml-metal.h +65 -0
  26. package/cpp/ggml-metal.m +3273 -0
  27. package/cpp/ggml-quants.c +15022 -0
  28. package/cpp/ggml-quants.h +132 -0
  29. package/cpp/ggml.c +22034 -0
  30. package/cpp/ggml.h +2444 -0
  31. package/cpp/grammar-parser.cpp +536 -0
  32. package/cpp/grammar-parser.h +29 -0
  33. package/cpp/json-schema-to-grammar.cpp +1045 -0
  34. package/cpp/json-schema-to-grammar.h +8 -0
  35. package/cpp/json.hpp +24766 -0
  36. package/cpp/llama.cpp +21789 -0
  37. package/cpp/llama.h +1201 -0
  38. package/cpp/log.h +737 -0
  39. package/cpp/rn-llama.hpp +630 -0
  40. package/cpp/sampling.cpp +460 -0
  41. package/cpp/sampling.h +160 -0
  42. package/cpp/sgemm.cpp +1027 -0
  43. package/cpp/sgemm.h +14 -0
  44. package/cpp/unicode-data.cpp +7032 -0
  45. package/cpp/unicode-data.h +20 -0
  46. package/cpp/unicode.cpp +812 -0
  47. package/cpp/unicode.h +64 -0
  48. package/ios/RNLlama.h +11 -0
  49. package/ios/RNLlama.mm +302 -0
  50. package/ios/RNLlama.xcodeproj/project.pbxproj +278 -0
  51. package/ios/RNLlamaContext.h +39 -0
  52. package/ios/RNLlamaContext.mm +426 -0
  53. package/jest/mock.js +169 -0
  54. package/lib/commonjs/NativeRNLlama.js +10 -0
  55. package/lib/commonjs/NativeRNLlama.js.map +1 -0
  56. package/lib/commonjs/grammar.js +574 -0
  57. package/lib/commonjs/grammar.js.map +1 -0
  58. package/lib/commonjs/index.js +151 -0
  59. package/lib/commonjs/index.js.map +1 -0
  60. package/lib/module/NativeRNLlama.js +3 -0
  61. package/lib/module/NativeRNLlama.js.map +1 -0
  62. package/lib/module/grammar.js +566 -0
  63. package/lib/module/grammar.js.map +1 -0
  64. package/lib/module/index.js +129 -0
  65. package/lib/module/index.js.map +1 -0
  66. package/lib/typescript/NativeRNLlama.d.ts +107 -0
  67. package/lib/typescript/NativeRNLlama.d.ts.map +1 -0
  68. package/lib/typescript/grammar.d.ts +38 -0
  69. package/lib/typescript/grammar.d.ts.map +1 -0
  70. package/lib/typescript/index.d.ts +46 -0
  71. package/lib/typescript/index.d.ts.map +1 -0
  72. package/llama-rn.podspec +56 -0
  73. package/package.json +230 -0
  74. package/src/NativeRNLlama.ts +132 -0
  75. package/src/grammar.ts +849 -0
  76. package/src/index.ts +182 -0
package/cpp/ggml-alloc.h
@@ -0,0 +1,76 @@
+ #pragma once
+
+ #include "ggml.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ typedef struct lm_ggml_backend_buffer_type * lm_ggml_backend_buffer_type_t;
+ typedef struct lm_ggml_backend_buffer * lm_ggml_backend_buffer_t;
+ typedef struct lm_ggml_backend * lm_ggml_backend_t;
+
+ // Tensor allocator
+ struct lm_ggml_tallocr {
+     lm_ggml_backend_buffer_t buffer;
+     void * base;
+     size_t alignment;
+     size_t offset;
+ };
+
+ LM_GGML_API struct lm_ggml_tallocr lm_ggml_tallocr_new(lm_ggml_backend_buffer_t buffer);
+ LM_GGML_API void lm_ggml_tallocr_alloc(struct lm_ggml_tallocr * talloc, struct lm_ggml_tensor * tensor);
+
+ // Graph allocator
+ /*
+   Example usage:
+     lm_ggml_gallocr_t galloc = lm_ggml_gallocr_new(lm_ggml_backend_cpu_buffer_type());
+
+     // optional: create a worst-case graph and reserve the buffers to avoid reallocations
+     lm_ggml_gallocr_reserve(galloc, build_graph(max_batch));
+
+     // allocate the graph
+     struct lm_ggml_cgraph * graph = build_graph(batch);
+     lm_ggml_gallocr_alloc_graph(galloc, graph);
+
+     printf("compute buffer size: %zu bytes\n", lm_ggml_gallocr_get_buffer_size(galloc, 0));
+
+     // evaluate the graph
+     lm_ggml_backend_graph_compute(backend, graph);
+ */
+
+ // special tensor flags for use with the graph allocator:
+ //   lm_ggml_set_input(): all input tensors are allocated at the beginning of the graph in non-overlapping addresses
+ //   lm_ggml_set_output(): output tensors are never freed and never overwritten
+
+ typedef struct lm_ggml_gallocr * lm_ggml_gallocr_t;
+
+ LM_GGML_API lm_ggml_gallocr_t lm_ggml_gallocr_new(lm_ggml_backend_buffer_type_t buft);
+ LM_GGML_API lm_ggml_gallocr_t lm_ggml_gallocr_new_n(lm_ggml_backend_buffer_type_t * bufts, int n_bufs);
+ LM_GGML_API void lm_ggml_gallocr_free(lm_ggml_gallocr_t galloc);
+
+ // pre-allocate buffers from a measure graph - does not allocate or modify the graph
+ // call with a worst-case graph to avoid buffer reallocations
+ // not strictly required for single buffer usage: lm_ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed
+ // returns false if the buffer allocation failed
+ LM_GGML_API bool lm_ggml_gallocr_reserve(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph);
+ LM_GGML_API bool lm_ggml_gallocr_reserve_n(
+     lm_ggml_gallocr_t galloc,
+     struct lm_ggml_cgraph * graph,
+     const int * node_buffer_ids,
+     const int * leaf_buffer_ids);
+
+ // automatic reallocation if the topology changes when using a single buffer
+ // returns false if using multiple buffers and a re-allocation is needed (call lm_ggml_gallocr_reserve_n first to set the node buffers)
+ LM_GGML_API bool lm_ggml_gallocr_alloc_graph(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph);
+
+ LM_GGML_API size_t lm_ggml_gallocr_get_buffer_size(lm_ggml_gallocr_t galloc, int buffer_id);
+
+ // Utils
+ // Create a buffer and allocate all the tensors in a lm_ggml_context
+ LM_GGML_API struct lm_ggml_backend_buffer * lm_ggml_backend_alloc_ctx_tensors_from_buft(struct lm_ggml_context * ctx, lm_ggml_backend_buffer_type_t buft);
+ LM_GGML_API struct lm_ggml_backend_buffer * lm_ggml_backend_alloc_ctx_tensors(struct lm_ggml_context * ctx, lm_ggml_backend_t backend);
+
+ #ifdef __cplusplus
+ }
+ #endif
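
The header above is the allocation API that the rest of the C++ core builds on. Expanding its example comment into a self-contained routine gives the sketch below; build_graph(n) is a hypothetical helper that constructs the compute graph for a batch of n tokens, and backend is assumed to be already initialized, neither being part of this diff:

    #include <stdio.h>

    #include "ggml.h"
    #include "ggml-alloc.h"
    #include "ggml-backend.h"

    // hypothetical helper, not part of this diff: builds the compute
    // graph for a batch of n tokens
    extern struct lm_ggml_cgraph * build_graph(int n);

    static void eval_batch(lm_ggml_backend_t backend, int max_batch, int batch) {
        // one graph allocator backed by the CPU buffer type
        lm_ggml_gallocr_t galloc = lm_ggml_gallocr_new(lm_ggml_backend_cpu_buffer_type());

        // optional: reserve against a worst-case graph so later allocations
        // never have to grow the buffer
        lm_ggml_gallocr_reserve(galloc, build_graph(max_batch));

        // allocate tensors for the real graph and run it
        struct lm_ggml_cgraph * graph = build_graph(batch);
        lm_ggml_gallocr_alloc_graph(galloc, graph);
        printf("compute buffer size: %zu bytes\n", lm_ggml_gallocr_get_buffer_size(galloc, 0));
        lm_ggml_backend_graph_compute(backend, graph);

        lm_ggml_gallocr_free(galloc);
    }

Reserving against the worst-case graph up front means later lm_ggml_gallocr_alloc_graph calls never reallocate, per the notes on lm_ggml_gallocr_reserve above.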
package/cpp/ggml-backend-impl.h
@@ -0,0 +1,153 @@
+ #pragma once
+
+ // ggml-backend internal header
+
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ //
+ // Backend buffer
+ //
+
+ // buffer type
+ typedef void * lm_ggml_backend_buffer_type_context_t;
+
+ struct lm_ggml_backend_buffer_type_i {
+     const char * (*LM_GGML_CALL get_name) (lm_ggml_backend_buffer_type_t buft);
+     // allocate a buffer of this type
+     lm_ggml_backend_buffer_t (*LM_GGML_CALL alloc_buffer) (lm_ggml_backend_buffer_type_t buft, size_t size);
+     // tensor alignment
+     size_t (*LM_GGML_CALL get_alignment) (lm_ggml_backend_buffer_type_t buft);
+     // max buffer size that can be allocated
+     size_t (*LM_GGML_CALL get_max_size) (lm_ggml_backend_buffer_type_t buft);
+     // data size needed to allocate the tensor, including padding
+     size_t (*LM_GGML_CALL get_alloc_size) (lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
+     // check if tensor data is in host memory
+     bool (*LM_GGML_CALL is_host) (lm_ggml_backend_buffer_type_t buft);
+ };
+
+ struct lm_ggml_backend_buffer_type {
+     struct lm_ggml_backend_buffer_type_i iface;
+     lm_ggml_backend_buffer_type_context_t context;
+ };
+
+ // buffer
+ typedef void * lm_ggml_backend_buffer_context_t;
+
+ struct lm_ggml_backend_buffer_i {
+     const char * (*LM_GGML_CALL get_name) (lm_ggml_backend_buffer_t buffer);
+     void (*LM_GGML_CALL free_buffer)(lm_ggml_backend_buffer_t buffer);
+     void * (*LM_GGML_CALL get_base) (lm_ggml_backend_buffer_t buffer);
+     void (*LM_GGML_CALL init_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
+     void (*LM_GGML_CALL set_tensor) (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+     void (*LM_GGML_CALL get_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+     bool (*LM_GGML_CALL cpy_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
+     void (*LM_GGML_CALL clear) (lm_ggml_backend_buffer_t buffer, uint8_t value);
+     void (*LM_GGML_CALL reset) (lm_ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
+ };
+
+ struct lm_ggml_backend_buffer {
+     struct lm_ggml_backend_buffer_i iface;
+     lm_ggml_backend_buffer_type_t buft;
+     lm_ggml_backend_buffer_context_t context;
+     size_t size;
+     enum lm_ggml_backend_buffer_usage usage;
+ };
+
+ LM_GGML_CALL lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
+     lm_ggml_backend_buffer_type_t buft,
+     struct lm_ggml_backend_buffer_i iface,
+     lm_ggml_backend_buffer_context_t context,
+     size_t size);
+
+ // do not use directly, use lm_ggml_backend_tensor_copy instead
+ bool lm_ggml_backend_buffer_copy_tensor(const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
+
+ // buffer that contains a collection of buffers
+ LM_GGML_CALL lm_ggml_backend_buffer_t lm_ggml_backend_multi_buffer_alloc_buffer(lm_ggml_backend_buffer_t * buffers, size_t n_buffers);
+ LM_GGML_CALL bool lm_ggml_backend_buffer_is_multi_buffer(lm_ggml_backend_buffer_t buffer);
+ LM_GGML_CALL void lm_ggml_backend_multi_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
+
+ //
+ // Backend
+ //
+
+ typedef void * lm_ggml_backend_context_t;
+
+ struct lm_ggml_backend_i {
+     const char * (*LM_GGML_CALL get_name)(lm_ggml_backend_t backend);
+
+     void (*LM_GGML_CALL free)(lm_ggml_backend_t backend);
+
+     // buffer allocation
+     lm_ggml_backend_buffer_type_t (*LM_GGML_CALL get_default_buffer_type)(lm_ggml_backend_t backend);
+
+     // (optional) asynchronous tensor data access
+     void (*LM_GGML_CALL set_tensor_async)(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+     void (*LM_GGML_CALL get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+     bool (*LM_GGML_CALL cpy_tensor_async)(lm_ggml_backend_t backend_src, lm_ggml_backend_t backend_dst, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
+
+     // (optional) complete all pending operations
+     void (*LM_GGML_CALL synchronize)(lm_ggml_backend_t backend);
+
+     // compute graph with a plan (not used currently)
+     // create a new plan for a graph
+     lm_ggml_backend_graph_plan_t (*LM_GGML_CALL graph_plan_create) (lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
+     void (*LM_GGML_CALL graph_plan_free) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
+     // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
+     void (*LM_GGML_CALL graph_plan_update) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan, const struct lm_ggml_cgraph * cgraph);
+     // compute the graph with the plan
+     enum lm_ggml_status (*LM_GGML_CALL graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
+
+     // compute graph without a plan (async)
+     enum lm_ggml_status (*LM_GGML_CALL graph_compute) (lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);
+
+     // check if the backend can compute an operation
+     bool (*LM_GGML_CALL supports_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
+
+     // check if the backend can use tensors allocated in a buffer type
+     bool (*LM_GGML_CALL supports_buft)(lm_ggml_backend_t backend, lm_ggml_backend_buffer_type_t buft);
+
+     // check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
+     // these should be expensive operations with large batch sizes that may benefit from running on this backend
+     // even if the weight has to be copied from the CPU temporarily
+     bool (*LM_GGML_CALL offload_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
+
+     // (optional) event synchronization
+     // create a new event that can record events on this backend instance
+     lm_ggml_backend_event_t (*LM_GGML_CALL event_new) (lm_ggml_backend_t backend);
+     void (*LM_GGML_CALL event_free) (lm_ggml_backend_event_t event);
+     // record an event on the backend instance that created it
+     void (*LM_GGML_CALL event_record) (lm_ggml_backend_event_t event);
+     // wait for an event on a different backend instance
+     void (*LM_GGML_CALL event_wait) (lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
+     // block until an event is recorded
+     void (*LM_GGML_CALL event_synchronize) (lm_ggml_backend_event_t event);
+ };
+
+ struct lm_ggml_backend {
+     lm_ggml_guid_t guid;
+
+     struct lm_ggml_backend_i iface;
+     lm_ggml_backend_context_t context;
+ };
+
+ struct lm_ggml_backend_event {
+     lm_ggml_backend_t backend;
+     void * context;
+ };
+
+ //
+ // Backend registry
+ //
+
+ typedef lm_ggml_backend_t (*LM_GGML_CALL lm_ggml_backend_init_fn)(const char * params, void * user_data);
+
+ LM_GGML_CALL void lm_ggml_backend_register(const char * name, lm_ggml_backend_init_fn init_fn, lm_ggml_backend_buffer_type_t default_buffer_type, void * user_data);
+
+ #ifdef __cplusplus
+ }
+ #endif
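
This header only declares the vtables; a backend implements one by filling in the function-pointer tables. The sketch below is illustrative and not from this package: every my_* name is a hypothetical stand-in, and the NULL entries rely on the optional-member fallbacks in ggml-backend.c (get_max_size defaulting to SIZE_MAX, get_alloc_size to the tensor's byte size):

    #include <stdbool.h>
    #include <stddef.h>

    #include "ggml-backend-impl.h"

    // hypothetical stand-ins, not part of this package
    static const char * LM_GGML_CALL my_buft_get_name(lm_ggml_backend_buffer_type_t buft) {
        (void) buft;
        return "My";
    }

    static size_t LM_GGML_CALL my_buft_get_alignment(lm_ggml_backend_buffer_type_t buft) {
        (void) buft;
        return 64; // e.g. cache-line alignment for a host buffer
    }

    static bool LM_GGML_CALL my_buft_is_host(lm_ggml_backend_buffer_type_t buft) {
        (void) buft;
        return true;
    }

    static struct lm_ggml_backend_buffer_type my_buft = {
        /* .iface   = */ {
            /* .get_name       = */ my_buft_get_name,
            /* .alloc_buffer   = */ NULL, // required in a real backend; stubbed here
            /* .get_alignment  = */ my_buft_get_alignment,
            /* .get_max_size   = */ NULL, // optional: NULL falls back to SIZE_MAX
            /* .get_alloc_size = */ NULL, // optional: NULL falls back to lm_ggml_nbytes
            /* .is_host        = */ my_buft_is_host,
        },
        /* .context = */ NULL,
    };

    // matches the lm_ggml_backend_init_fn typedef from the registry section
    static lm_ggml_backend_t LM_GGML_CALL my_backend_init(const char * params, void * user_data) {
        (void) params;
        (void) user_data;
        return NULL; // a real backend would construct its struct lm_ggml_backend here
    }

    void my_backend_register(void) {
        lm_ggml_backend_register("My", my_backend_init, &my_buft, NULL);
    }

The same pattern applies to struct lm_ggml_backend_buffer_i and struct lm_ggml_backend_i above: each backend supplies a table of function pointers plus an opaque context pointer, leaving the members marked (optional) NULL when unsupported.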