cui-llama.rn 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +35 -39
- package/android/src/main/CMakeLists.txt +11 -2
- package/android/src/main/java/com/rnllama/LlamaContext.java +24 -8
- package/android/src/main/java/com/rnllama/RNLlama.java +33 -1
- package/android/src/main/jni.cpp +62 -8
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +5 -0
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +5 -0
- package/cpp/common.cpp +3237 -3231
- package/cpp/common.h +469 -468
- package/cpp/ggml-aarch64.c +2193 -2193
- package/cpp/ggml-aarch64.h +39 -39
- package/cpp/ggml-alloc.c +1036 -1042
- package/cpp/ggml-backend-impl.h +153 -153
- package/cpp/ggml-backend.c +2240 -2234
- package/cpp/ggml-backend.h +238 -238
- package/cpp/ggml-common.h +1833 -1829
- package/cpp/ggml-impl.h +755 -655
- package/cpp/ggml-metal.h +65 -65
- package/cpp/ggml-metal.m +3269 -3269
- package/cpp/ggml-quants.c +14872 -14860
- package/cpp/ggml-quants.h +132 -132
- package/cpp/ggml.c +22055 -22044
- package/cpp/ggml.h +2453 -2447
- package/cpp/llama-grammar.cpp +539 -0
- package/cpp/llama-grammar.h +39 -0
- package/cpp/llama-impl.h +26 -0
- package/cpp/llama-sampling.cpp +635 -0
- package/cpp/llama-sampling.h +56 -0
- package/cpp/llama-vocab.cpp +1721 -0
- package/cpp/llama-vocab.h +130 -0
- package/cpp/llama.cpp +19171 -21892
- package/cpp/llama.h +1240 -1217
- package/cpp/log.h +737 -737
- package/cpp/rn-llama.hpp +207 -29
- package/cpp/sampling.cpp +460 -460
- package/cpp/sgemm.cpp +1027 -1027
- package/cpp/sgemm.h +14 -14
- package/cpp/unicode.cpp +6 -0
- package/cpp/unicode.h +3 -0
- package/ios/RNLlama.mm +15 -6
- package/ios/RNLlamaContext.h +2 -8
- package/ios/RNLlamaContext.mm +41 -34
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/chat.js +37 -0
- package/lib/commonjs/chat.js.map +1 -0
- package/lib/commonjs/index.js +14 -1
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/chat.js +31 -0
- package/lib/module/chat.js.map +1 -0
- package/lib/module/index.js +14 -1
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +5 -1
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/chat.d.ts +10 -0
- package/lib/typescript/chat.d.ts.map +1 -0
- package/lib/typescript/index.d.ts +9 -2
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +10 -1
- package/src/chat.ts +44 -0
- package/src/index.ts +31 -4
package/cpp/ggml-backend-impl.h
CHANGED
@@ -1,153 +1,153 @@
-#pragma once
-
-// ggml-backend internal header
-
-#include "ggml-backend.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-//
-// Backend buffer
-//
-
-// buffer type
-typedef void * lm_ggml_backend_buffer_type_context_t;
-
-struct lm_ggml_backend_buffer_type_i {
-    const char * (*LM_GGML_CALL get_name) (lm_ggml_backend_buffer_type_t buft);
-    // allocate a buffer of this type
-    lm_ggml_backend_buffer_t (*LM_GGML_CALL alloc_buffer) (lm_ggml_backend_buffer_type_t buft, size_t size);
-    // tensor alignment
-    size_t (*LM_GGML_CALL get_alignment) (lm_ggml_backend_buffer_type_t buft);
-    // max buffer size that can be allocated
-    size_t (*LM_GGML_CALL get_max_size) (lm_ggml_backend_buffer_type_t buft);
-    // data size needed to allocate the tensor, including padding
-    size_t (*LM_GGML_CALL get_alloc_size) (lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
-    // check if tensor data is in host memory
-    bool (*LM_GGML_CALL is_host) (lm_ggml_backend_buffer_type_t buft);
-};
-
-struct lm_ggml_backend_buffer_type {
-    struct lm_ggml_backend_buffer_type_i iface;
-    lm_ggml_backend_buffer_type_context_t context;
-};
-
-// buffer
-typedef void * lm_ggml_backend_buffer_context_t;
-
-struct lm_ggml_backend_buffer_i {
-    const char * (*LM_GGML_CALL get_name) (lm_ggml_backend_buffer_t buffer);
-    void (*LM_GGML_CALL free_buffer)(lm_ggml_backend_buffer_t buffer);
-    void * (*LM_GGML_CALL get_base) (lm_ggml_backend_buffer_t buffer);
-    void (*LM_GGML_CALL init_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
-    void (*LM_GGML_CALL set_tensor) (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-    void (*LM_GGML_CALL get_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
-    bool (*LM_GGML_CALL cpy_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
-    void (*LM_GGML_CALL clear) (lm_ggml_backend_buffer_t buffer, uint8_t value);
-    void (*LM_GGML_CALL reset) (lm_ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
-};
-
-struct lm_ggml_backend_buffer {
-    struct lm_ggml_backend_buffer_i iface;
-    lm_ggml_backend_buffer_type_t buft;
-    lm_ggml_backend_buffer_context_t context;
-    size_t size;
-    enum lm_ggml_backend_buffer_usage usage;
-};
-
-LM_GGML_CALL lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
-        lm_ggml_backend_buffer_type_t buft,
-        struct lm_ggml_backend_buffer_i iface,
-        lm_ggml_backend_buffer_context_t context,
-        size_t size);
-
-// do not use directly, use lm_ggml_backend_tensor_copy instead
-bool lm_ggml_backend_buffer_copy_tensor(const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
-
-// buffer that contains a collection of buffers
-LM_GGML_CALL lm_ggml_backend_buffer_t lm_ggml_backend_multi_buffer_alloc_buffer(lm_ggml_backend_buffer_t * buffers, size_t n_buffers);
-LM_GGML_CALL bool lm_ggml_backend_buffer_is_multi_buffer(lm_ggml_backend_buffer_t buffer);
-LM_GGML_CALL void lm_ggml_backend_multi_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
-
-//
-// Backend
-//
-
-typedef void * lm_ggml_backend_context_t;
-
-struct lm_ggml_backend_i {
-    const char * (*LM_GGML_CALL get_name)(lm_ggml_backend_t backend);
-
-    void (*LM_GGML_CALL free)(lm_ggml_backend_t backend);
-
-    // buffer allocation
-    lm_ggml_backend_buffer_type_t (*LM_GGML_CALL get_default_buffer_type)(lm_ggml_backend_t backend);
-
-    // (optional) asynchronous tensor data access
-    void (*LM_GGML_CALL set_tensor_async)(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-    void (*LM_GGML_CALL get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
-    bool (*LM_GGML_CALL cpy_tensor_async)(lm_ggml_backend_t backend_src, lm_ggml_backend_t backend_dst, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
-
-    // (optional) complete all pending operations
-    void (*LM_GGML_CALL synchronize)(lm_ggml_backend_t backend);
-
-    // compute graph with a plan (not used currently)
-    // create a new plan for a graph
-    lm_ggml_backend_graph_plan_t (*LM_GGML_CALL graph_plan_create) (lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
-    void (*LM_GGML_CALL graph_plan_free) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
-    // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
-    void (*LM_GGML_CALL graph_plan_update) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan, const struct lm_ggml_cgraph * cgraph);
-    // compute the graph with the plan
-    enum lm_ggml_status (*LM_GGML_CALL graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
-
-    // compute graph without a plan (async)
-    enum lm_ggml_status (*LM_GGML_CALL graph_compute) (lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);
-
-    // check if the backend can compute an operation
-    bool (*LM_GGML_CALL supports_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
-
-    // check if the backend can use tensors allocated in a buffer type
-    bool (*LM_GGML_CALL supports_buft)(lm_ggml_backend_t backend, lm_ggml_backend_buffer_type_t buft);
-
-    // check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
-    // these should be expensive operations with large batch sizes that may benefit from running on this backend
-    // even if the weight has to be copied from the CPU temporarily
-    bool (*LM_GGML_CALL offload_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
-
-    // (optional) event synchronization
-    // create a new event that can record events on this backend instance
-    lm_ggml_backend_event_t (*LM_GGML_CALL event_new) (lm_ggml_backend_t backend);
-    void (*LM_GGML_CALL event_free) (lm_ggml_backend_event_t event);
-    // record an event on the backend instance that created it
-    void (*LM_GGML_CALL event_record) (lm_ggml_backend_event_t event);
-    // wait for an event on on a different backend instance
-    void (*LM_GGML_CALL event_wait) (lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
-    // block until an event is recorded
-    void (*LM_GGML_CALL event_synchronize) (lm_ggml_backend_event_t event);
-};
-
-struct lm_ggml_backend {
-    lm_ggml_guid_t guid;
-
-    struct lm_ggml_backend_i iface;
-    lm_ggml_backend_context_t context;
-};
-
-struct lm_ggml_backend_event {
-    lm_ggml_backend_t backend;
-    void * context;
-};
-
-//
-// Backend registry
-//
-
-typedef lm_ggml_backend_t (*LM_GGML_CALL lm_ggml_backend_init_fn)(const char * params, void * user_data);
-
-LM_GGML_CALL void lm_ggml_backend_register(const char * name, lm_ggml_backend_init_fn init_fn, lm_ggml_backend_buffer_type_t default_buffer_type, void * user_data);
-
-#ifdef __cplusplus
-}
-#endif
+#pragma once
+
+// ggml-backend internal header
+
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// Backend buffer
+//
+
+// buffer type
+typedef void * lm_ggml_backend_buffer_type_context_t;
+
+struct lm_ggml_backend_buffer_type_i {
+    const char * (*LM_GGML_CALL get_name) (lm_ggml_backend_buffer_type_t buft);
+    // allocate a buffer of this type
+    lm_ggml_backend_buffer_t (*LM_GGML_CALL alloc_buffer) (lm_ggml_backend_buffer_type_t buft, size_t size);
+    // tensor alignment
+    size_t (*LM_GGML_CALL get_alignment) (lm_ggml_backend_buffer_type_t buft);
+    // max buffer size that can be allocated
+    size_t (*LM_GGML_CALL get_max_size) (lm_ggml_backend_buffer_type_t buft);
+    // data size needed to allocate the tensor, including padding
+    size_t (*LM_GGML_CALL get_alloc_size) (lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
+    // check if tensor data is in host memory
+    bool (*LM_GGML_CALL is_host) (lm_ggml_backend_buffer_type_t buft);
+};
+
+struct lm_ggml_backend_buffer_type {
+    struct lm_ggml_backend_buffer_type_i iface;
+    lm_ggml_backend_buffer_type_context_t context;
+};
+
+// buffer
+typedef void * lm_ggml_backend_buffer_context_t;
+
+struct lm_ggml_backend_buffer_i {
+    const char * (*LM_GGML_CALL get_name) (lm_ggml_backend_buffer_t buffer);
+    void (*LM_GGML_CALL free_buffer)(lm_ggml_backend_buffer_t buffer);
+    void * (*LM_GGML_CALL get_base) (lm_ggml_backend_buffer_t buffer);
+    void (*LM_GGML_CALL init_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
+    void (*LM_GGML_CALL set_tensor) (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    void (*LM_GGML_CALL get_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+    bool (*LM_GGML_CALL cpy_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
+    void (*LM_GGML_CALL clear) (lm_ggml_backend_buffer_t buffer, uint8_t value);
+    void (*LM_GGML_CALL reset) (lm_ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
+};
+
+struct lm_ggml_backend_buffer {
+    struct lm_ggml_backend_buffer_i iface;
+    lm_ggml_backend_buffer_type_t buft;
+    lm_ggml_backend_buffer_context_t context;
+    size_t size;
+    enum lm_ggml_backend_buffer_usage usage;
+};
+
+LM_GGML_CALL lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
+        lm_ggml_backend_buffer_type_t buft,
+        struct lm_ggml_backend_buffer_i iface,
+        lm_ggml_backend_buffer_context_t context,
+        size_t size);
+
+// do not use directly, use lm_ggml_backend_tensor_copy instead
+bool lm_ggml_backend_buffer_copy_tensor(const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
+
+// buffer that contains a collection of buffers
+LM_GGML_CALL lm_ggml_backend_buffer_t lm_ggml_backend_multi_buffer_alloc_buffer(lm_ggml_backend_buffer_t * buffers, size_t n_buffers);
+LM_GGML_CALL bool lm_ggml_backend_buffer_is_multi_buffer(lm_ggml_backend_buffer_t buffer);
+LM_GGML_CALL void lm_ggml_backend_multi_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
+
+//
+// Backend
+//
+
+typedef void * lm_ggml_backend_context_t;
+
+struct lm_ggml_backend_i {
+    const char * (*LM_GGML_CALL get_name)(lm_ggml_backend_t backend);
+
+    void (*LM_GGML_CALL free)(lm_ggml_backend_t backend);
+
+    // buffer allocation
+    lm_ggml_backend_buffer_type_t (*LM_GGML_CALL get_default_buffer_type)(lm_ggml_backend_t backend);
+
+    // (optional) asynchronous tensor data access
+    void (*LM_GGML_CALL set_tensor_async)(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    void (*LM_GGML_CALL get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+    bool (*LM_GGML_CALL cpy_tensor_async)(lm_ggml_backend_t backend_src, lm_ggml_backend_t backend_dst, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
+
+    // (optional) complete all pending operations
+    void (*LM_GGML_CALL synchronize)(lm_ggml_backend_t backend);
+
+    // compute graph with a plan (not used currently)
+    // create a new plan for a graph
+    lm_ggml_backend_graph_plan_t (*LM_GGML_CALL graph_plan_create) (lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
+    void (*LM_GGML_CALL graph_plan_free) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
+    // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
+    void (*LM_GGML_CALL graph_plan_update) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan, const struct lm_ggml_cgraph * cgraph);
+    // compute the graph with the plan
+    enum lm_ggml_status (*LM_GGML_CALL graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
+
+    // compute graph without a plan (async)
+    enum lm_ggml_status (*LM_GGML_CALL graph_compute) (lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);
+
+    // check if the backend can compute an operation
+    bool (*LM_GGML_CALL supports_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
+
+    // check if the backend can use tensors allocated in a buffer type
+    bool (*LM_GGML_CALL supports_buft)(lm_ggml_backend_t backend, lm_ggml_backend_buffer_type_t buft);
+
+    // check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
+    // these should be expensive operations with large batch sizes that may benefit from running on this backend
+    // even if the weight has to be copied from the CPU temporarily
+    bool (*LM_GGML_CALL offload_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
+
+    // (optional) event synchronization
+    // create a new event that can record events on this backend instance
+    lm_ggml_backend_event_t (*LM_GGML_CALL event_new) (lm_ggml_backend_t backend);
+    void (*LM_GGML_CALL event_free) (lm_ggml_backend_event_t event);
+    // record an event on the backend instance that created it
+    void (*LM_GGML_CALL event_record) (lm_ggml_backend_event_t event);
+    // wait for an event on on a different backend instance
+    void (*LM_GGML_CALL event_wait) (lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
+    // block until an event is recorded
+    void (*LM_GGML_CALL event_synchronize) (lm_ggml_backend_event_t event);
+};
+
+struct lm_ggml_backend {
+    lm_ggml_guid_t guid;
+
+    struct lm_ggml_backend_i iface;
+    lm_ggml_backend_context_t context;
+};
+
+struct lm_ggml_backend_event {
+    lm_ggml_backend_t backend;
+    void * context;
+};
+
+//
+// Backend registry
+//
+
+typedef lm_ggml_backend_t (*LM_GGML_CALL lm_ggml_backend_init_fn)(const char * params, void * user_data);
+
+LM_GGML_CALL void lm_ggml_backend_register(const char * name, lm_ggml_backend_init_fn init_fn, lm_ggml_backend_buffer_type_t default_buffer_type, void * user_data);
+
+#ifdef __cplusplus
+}
+#endif
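For orientation, the interfaces in this header are plain structs of function pointers: a backend exposes a buffer type by filling in lm_ggml_backend_buffer_type_i and wrapping it in lm_ggml_backend_buffer_type. The C sketch below is illustrative only and is not part of this package; the example_* names are invented here, and the optional hooks are left NULL rather than implemented.

// Illustrative sketch: a hypothetical host-memory buffer type wired into the
// lm_ggml_backend_buffer_type_i interface declared in ggml-backend-impl.h.
// All example_* identifiers are invented for this example.
#include <stdbool.h>
#include <stddef.h>

#include "ggml-backend-impl.h"

LM_GGML_CALL static const char * example_buft_get_name(lm_ggml_backend_buffer_type_t buft) {
    (void) buft;
    return "ExampleHost";
}

LM_GGML_CALL static size_t example_buft_get_alignment(lm_ggml_backend_buffer_type_t buft) {
    (void) buft;
    return 32; // alignment this hypothetical backend requires for tensor data
}

LM_GGML_CALL static bool example_buft_is_host(lm_ggml_backend_buffer_type_t buft) {
    (void) buft;
    return true; // tensor data would live in ordinary host memory
}

static struct lm_ggml_backend_buffer_type example_host_buffer_type = {
    /* .iface = */ {
        /* .get_name       = */ example_buft_get_name,
        /* .alloc_buffer   = */ NULL, // a real backend would allocate memory here and wrap it with lm_ggml_backend_buffer_init()
        /* .get_alignment  = */ example_buft_get_alignment,
        /* .get_max_size   = */ NULL, // optional hooks left unimplemented in this sketch
        /* .get_alloc_size = */ NULL,
        /* .is_host        = */ example_buft_is_host,
    },
    /* .context = */ NULL,
};

A real buffer type would additionally supply alloc_buffer, returning a buffer built with lm_ggml_backend_buffer_init() from the header above; the sketch stops short of that to stay focused on the interface shape.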