cui-llama.rn 1.2.6 → 1.3.0
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- package/README.md +3 -2
- package/android/src/main/CMakeLists.txt +20 -5
- package/android/src/main/java/com/rnllama/LlamaContext.java +115 -27
- package/android/src/main/java/com/rnllama/RNLlama.java +40 -7
- package/android/src/main/jni.cpp +222 -34
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +9 -4
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +9 -4
- package/cpp/common.cpp +1682 -2114
- package/cpp/common.h +600 -613
- package/cpp/ggml-aarch64.c +129 -3478
- package/cpp/ggml-aarch64.h +19 -39
- package/cpp/ggml-alloc.c +1040 -1040
- package/cpp/ggml-alloc.h +76 -76
- package/cpp/ggml-backend-impl.h +216 -216
- package/cpp/ggml-backend-reg.cpp +195 -0
- package/cpp/ggml-backend.cpp +1997 -2661
- package/cpp/ggml-backend.h +328 -314
- package/cpp/ggml-common.h +1853 -1853
- package/cpp/ggml-cpp.h +38 -38
- package/cpp/ggml-cpu-aarch64.c +3560 -0
- package/cpp/ggml-cpu-aarch64.h +30 -0
- package/cpp/ggml-cpu-impl.h +371 -614
- package/cpp/ggml-cpu-quants.c +10822 -0
- package/cpp/ggml-cpu-quants.h +63 -0
- package/cpp/ggml-cpu.c +13975 -13720
- package/cpp/ggml-cpu.cpp +663 -0
- package/cpp/ggml-cpu.h +177 -150
- package/cpp/ggml-impl.h +550 -296
- package/cpp/ggml-metal.h +66 -66
- package/cpp/ggml-metal.m +4294 -3933
- package/cpp/ggml-quants.c +5247 -15739
- package/cpp/ggml-quants.h +100 -147
- package/cpp/ggml-threading.cpp +12 -0
- package/cpp/ggml-threading.h +12 -0
- package/cpp/ggml.c +8180 -8390
- package/cpp/ggml.h +2411 -2441
- package/cpp/llama-grammar.cpp +1138 -1138
- package/cpp/llama-grammar.h +144 -144
- package/cpp/llama-impl.h +181 -181
- package/cpp/llama-sampling.cpp +2348 -2345
- package/cpp/llama-sampling.h +48 -48
- package/cpp/llama-vocab.cpp +1984 -1984
- package/cpp/llama-vocab.h +170 -170
- package/cpp/llama.cpp +22132 -22046
- package/cpp/llama.h +1253 -1255
- package/cpp/log.cpp +401 -401
- package/cpp/log.h +121 -121
- package/cpp/rn-llama.hpp +83 -19
- package/cpp/sampling.cpp +466 -466
- package/cpp/sgemm.cpp +1884 -1276
- package/ios/RNLlama.mm +43 -20
- package/ios/RNLlamaContext.h +9 -3
- package/ios/RNLlamaContext.mm +133 -33
- package/jest/mock.js +0 -1
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/index.js +52 -15
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/index.js +51 -15
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +29 -5
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +12 -5
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +41 -6
- package/src/index.ts +82 -27
- package/cpp/json-schema-to-grammar.cpp +0 -1045
- package/cpp/json-schema-to-grammar.h +0 -8
- package/cpp/json.hpp +0 -24766
package/cpp/ggml-alloc.h
CHANGED
@@ -1,76 +1,76 @@
The viewer shows all 76 lines of this header as removed and re-added. The removed and re-added text is identical as rendered here (most likely a whitespace or line-ending change), so the file is listed once:

#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct lm_ggml_backend_buffer_type * lm_ggml_backend_buffer_type_t;
typedef struct lm_ggml_backend_buffer * lm_ggml_backend_buffer_t;
typedef struct lm_ggml_backend * lm_ggml_backend_t;

// Tensor allocator
struct lm_ggml_tallocr {
    lm_ggml_backend_buffer_t buffer;
    void * base;
    size_t alignment;
    size_t offset;
};

LM_GGML_API struct lm_ggml_tallocr lm_ggml_tallocr_new(lm_ggml_backend_buffer_t buffer);
LM_GGML_API void lm_ggml_tallocr_alloc(struct lm_ggml_tallocr * talloc, struct lm_ggml_tensor * tensor);

// Graph allocator
/*
  Example usage:
    lm_ggml_gallocr_t galloc = lm_ggml_gallocr_new(lm_ggml_backend_cpu_buffer_type());

    // optional: create a worst-case graph and reserve the buffers to avoid reallocations
    lm_ggml_gallocr_reserve(galloc, build_graph(max_batch));

    // allocate the graph
    struct lm_ggml_cgraph * graph = build_graph(batch);
    lm_ggml_gallocr_alloc_graph(galloc, graph);

    printf("compute buffer size: %zu bytes\n", lm_ggml_gallocr_get_buffer_size(galloc, 0));

    // evaluate the graph
    lm_ggml_backend_graph_compute(backend, graph);
*/

// special tensor flags for use with the graph allocator:
//   lm_ggml_set_input(): all input tensors are allocated at the beginning of the graph in non-overlapping addresses
//   lm_ggml_set_output(): output tensors are never freed and never overwritten

typedef struct lm_ggml_gallocr * lm_ggml_gallocr_t;

LM_GGML_API lm_ggml_gallocr_t lm_ggml_gallocr_new(lm_ggml_backend_buffer_type_t buft);
LM_GGML_API lm_ggml_gallocr_t lm_ggml_gallocr_new_n(lm_ggml_backend_buffer_type_t * bufts, int n_bufs);
LM_GGML_API void lm_ggml_gallocr_free(lm_ggml_gallocr_t galloc);

// pre-allocate buffers from a measure graph - does not allocate or modify the graph
// call with a worst-case graph to avoid buffer reallocations
// not strictly required for single buffer usage: lm_ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed
// returns false if the buffer allocation failed
LM_GGML_API bool lm_ggml_gallocr_reserve(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph);
LM_GGML_API bool lm_ggml_gallocr_reserve_n(
    lm_ggml_gallocr_t galloc,
    struct lm_ggml_cgraph * graph,
    const int * node_buffer_ids,
    const int * leaf_buffer_ids);

// automatic reallocation if the topology changes when using a single buffer
// returns false if using multiple buffers and a re-allocation is needed (call lm_ggml_gallocr_reserve_n first to set the node buffers)
LM_GGML_API bool lm_ggml_gallocr_alloc_graph(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph);

LM_GGML_API size_t lm_ggml_gallocr_get_buffer_size(lm_ggml_gallocr_t galloc, int buffer_id);

// Utils
// Create a buffer and allocate all the tensors in a lm_ggml_context
LM_GGML_API struct lm_ggml_backend_buffer * lm_ggml_backend_alloc_ctx_tensors_from_buft(struct lm_ggml_context * ctx, lm_ggml_backend_buffer_type_t buft);
LM_GGML_API struct lm_ggml_backend_buffer * lm_ggml_backend_alloc_ctx_tensors(struct lm_ggml_context * ctx, lm_ggml_backend_t backend);

#ifdef __cplusplus
}
#endif
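The graph-allocator API above documents its intended call sequence in the "Example usage" comment. As an illustration only (not code from this package), here is a minimal sketch of that same sequence wrapped in a function; `build_graph()` is a hypothetical caller-supplied helper that builds an `lm_ggml_cgraph` for a given batch size, and the CPU buffer type named in the header's comment is assumed.

```c
// Sketch of the usage pattern from the "Example usage" comment above.
// build_graph() is a hypothetical helper; error handling and graph
// construction are omitted.
#include <stdio.h>
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

extern struct lm_ggml_cgraph * build_graph(int n_batch); // hypothetical helper

static void compute_with_gallocr(lm_ggml_backend_t backend, int max_batch, int batch) {
    // one allocator bound to the CPU buffer type
    lm_ggml_gallocr_t galloc = lm_ggml_gallocr_new(lm_ggml_backend_cpu_buffer_type());

    // optional: reserve against a worst-case graph to avoid later reallocations
    lm_ggml_gallocr_reserve(galloc, build_graph(max_batch));

    // allocate the tensors of the actual graph and run it
    struct lm_ggml_cgraph * graph = build_graph(batch);
    lm_ggml_gallocr_alloc_graph(galloc, graph);

    printf("compute buffer size: %zu bytes\n", lm_ggml_gallocr_get_buffer_size(galloc, 0));

    lm_ggml_backend_graph_compute(backend, graph);

    lm_ggml_gallocr_free(galloc);
}
```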
package/cpp/ggml-backend-impl.h
CHANGED
@@ -1,216 +1,216 @@
The viewer shows all 216 lines of this header as removed and re-added. The removed and re-added text is identical as rendered here (most likely a whitespace or line-ending change), so the file is listed once:

#pragma once

// ggml-backend internal header

#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

    //
    // Backend buffer type
    //

    struct lm_ggml_backend_buffer_type_i {
        const char * (*get_name)(lm_ggml_backend_buffer_type_t buft);
        // allocate a buffer of this type
        lm_ggml_backend_buffer_t (*alloc_buffer)(lm_ggml_backend_buffer_type_t buft, size_t size);
        // tensor alignment
        size_t (*get_alignment)(lm_ggml_backend_buffer_type_t buft);
        // (optional) max buffer size that can be allocated (defaults to SIZE_MAX)
        size_t (*get_max_size)(lm_ggml_backend_buffer_type_t buft);
        // (optional) data size needed to allocate the tensor, including padding (defaults to lm_ggml_nbytes)
        size_t (*get_alloc_size)(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
        // (optional) check if tensor data is in host memory and uses standard ggml tensor layout (defaults to false)
        bool (*is_host)(lm_ggml_backend_buffer_type_t buft);
    };

    struct lm_ggml_backend_buffer_type {
        struct lm_ggml_backend_buffer_type_i iface;
        lm_ggml_backend_dev_t device;
        void * context;
    };

    //
    // Backend buffer
    //

    struct lm_ggml_backend_buffer_i {
        // (optional) free the buffer
        void (*free_buffer)(lm_ggml_backend_buffer_t buffer);
        // base address of the buffer
        void * (*get_base)(lm_ggml_backend_buffer_t buffer);
        // (optional) initialize a tensor in the buffer (eg. add tensor extras)
        void (*init_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
        // tensor data access
        void (*memset_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
        void (*set_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
        void (*get_tensor)(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
        // (optional) tensor copy: dst is in the buffer, src may be in any buffer, including buffers from a different backend (return false if not supported)
        bool (*cpy_tensor)(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
        // clear the entire buffer
        void (*clear)(lm_ggml_backend_buffer_t buffer, uint8_t value);
        // (optional) reset any internal state due to tensor initialization, such as tensor extras
        void (*reset)(lm_ggml_backend_buffer_t buffer);
    };

    struct lm_ggml_backend_buffer {
        struct lm_ggml_backend_buffer_i iface;
        lm_ggml_backend_buffer_type_t buft;
        void * context;
        size_t size;
        enum lm_ggml_backend_buffer_usage usage;
    };

    lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
        lm_ggml_backend_buffer_type_t buft,
        struct lm_ggml_backend_buffer_i iface,
        void * context,
        size_t size);

    // do not use directly, use lm_ggml_backend_tensor_copy instead
    bool lm_ggml_backend_buffer_copy_tensor(const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);

    // multi-buffer
    // buffer that contains a collection of buffers
    lm_ggml_backend_buffer_t lm_ggml_backend_multi_buffer_alloc_buffer(lm_ggml_backend_buffer_t * buffers, size_t n_buffers);
    bool lm_ggml_backend_buffer_is_multi_buffer(lm_ggml_backend_buffer_t buffer);
    void lm_ggml_backend_multi_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);

    //
    // Backend (stream)
    //

    struct lm_ggml_backend_i {
        const char * (*get_name)(lm_ggml_backend_t backend);

        void (*free)(lm_ggml_backend_t backend);

        // (optional) asynchronous tensor data access
        void (*set_tensor_async)(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
        void (*get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
        bool (*cpy_tensor_async)(lm_ggml_backend_t backend_src, lm_ggml_backend_t backend_dst, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);

        // (optional) complete all pending operations (required if the backend supports async operations)
        void (*synchronize)(lm_ggml_backend_t backend);

        // (optional) graph plans (not used currently)
        // compute graph with a plan
        lm_ggml_backend_graph_plan_t (*graph_plan_create)(lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
        void (*graph_plan_free)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
        // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
        void (*graph_plan_update)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan, const struct lm_ggml_cgraph * cgraph);
        // compute the graph with the plan
        enum lm_ggml_status (*graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);

        // compute graph (always async if supported by the backend)
        enum lm_ggml_status (*graph_compute)(lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);

        // (optional) event synchronization
        // record an event on this stream
        void (*event_record)(lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
        // wait for an event on a different stream
        void (*event_wait)(lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
    };

    struct lm_ggml_backend {
        lm_ggml_guid_t guid;
        struct lm_ggml_backend_i iface;
        lm_ggml_backend_dev_t device;
        void * context;
    };

    struct lm_ggml_backend_event {
        struct lm_ggml_backend_device * device;
        void * context;
    };

    //
    // Backend device
    //

    // Note: if additional properties are needed, we should add a struct with all of them
    //       the current functions to obtain the properties can remain, since they are more convenient for often used properties
    struct lm_ggml_backend_device_i {
        // device name: short identifier for this device, such as "CPU" or "CUDA0"
        const char * (*get_name)(lm_ggml_backend_dev_t dev);

        // device description: short informative description of the device, could be the model name
        const char * (*get_description)(lm_ggml_backend_dev_t dev);

        // device memory in bytes
        void (*get_memory)(lm_ggml_backend_dev_t dev, size_t * free, size_t * total);

        // device type
        enum lm_ggml_backend_dev_type (*get_type)(lm_ggml_backend_dev_t dev);

        // device properties
        void (*get_props)(lm_ggml_backend_dev_t dev, struct lm_ggml_backend_dev_props * props);

        // backend (stream) initialization
        lm_ggml_backend_t (*init_backend)(lm_ggml_backend_dev_t dev, const char * params);

        // preferred buffer type
        lm_ggml_backend_buffer_type_t (*get_buffer_type)(lm_ggml_backend_dev_t dev);

        // (optional) host buffer type (in system memory, typically this is a pinned memory buffer for faster transfers between host and device)
        lm_ggml_backend_buffer_type_t (*get_host_buffer_type)(lm_ggml_backend_dev_t dev);

        // (optional) buffer from pointer: create a buffer from a host pointer (useful for memory mapped models and importing data from other libraries)
        lm_ggml_backend_buffer_t (*buffer_from_host_ptr)(lm_ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size);

        // check if the backend can compute an operation
        bool (*supports_op)(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op);

        // check if the backend can use tensors allocated in a buffer type
        bool (*supports_buft)(lm_ggml_backend_dev_t dev, lm_ggml_backend_buffer_type_t buft);

        // (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer
        // these should be expensive operations that may benefit from running on this backend instead of the CPU backend
        bool (*offload_op)(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op);

        // (optional) event synchronization
        lm_ggml_backend_event_t (*event_new)(lm_ggml_backend_dev_t dev);
        void (*event_free)(lm_ggml_backend_dev_t dev, lm_ggml_backend_event_t event);
        void (*event_synchronize)(lm_ggml_backend_dev_t dev, lm_ggml_backend_event_t event);
    };

    struct lm_ggml_backend_device {
        struct lm_ggml_backend_device_i iface;
        lm_ggml_backend_reg_t reg;
        void * context;
    };

    //
    // Backend (reg)
    //

    struct lm_ggml_backend_reg_i {
        const char * (*get_name)(lm_ggml_backend_reg_t reg);

        // enumerate available devices
        size_t (*get_device_count)(lm_ggml_backend_reg_t reg);
        lm_ggml_backend_dev_t (*get_device)(lm_ggml_backend_reg_t reg, size_t index);

        // (optional) get a pointer to a function in the backend
        // backends can add custom functions that are not part of the standard ggml-backend interface
        void * (*get_proc_address)(lm_ggml_backend_reg_t reg, const char * name);
    };

    struct lm_ggml_backend_reg {
        // int api_version; // TODO: for dynamic loading
        struct lm_ggml_backend_reg_i iface;
        void * context;
    };

    // Internal backend registry API
    void lm_ggml_backend_register(lm_ggml_backend_reg_t reg);
    void lm_ggml_backend_device_register(lm_ggml_backend_dev_t device);
    // TODO: backends can be loaded as a dynamic library, in which case it needs to export this function
    // typedef lm_ggml_backend_register_t * (*lm_ggml_backend_init)(void);

#ifdef __cplusplus
}
#endif
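The structs above are the internal vtables a concrete backend fills in before registering itself. As an illustration only (not code from this package), here is a hedged skeleton of a registry exposing a single device, showing how `lm_ggml_backend_reg_i`, `lm_ggml_backend_reg`, and `lm_ggml_backend_register()` fit together; `my_device` is a hypothetical device instance assumed to be defined elsewhere.

```c
// Illustrative skeleton only: a registry that exposes one pre-built device.
// "my_device" is a hypothetical lm_ggml_backend_device defined elsewhere;
// everything else follows the vtable layout declared in this header.
#include "ggml-backend-impl.h"

extern struct lm_ggml_backend_device my_device; // hypothetical device instance

static const char * my_reg_get_name(lm_ggml_backend_reg_t reg) {
    (void) reg;
    return "MyBackend";
}

static size_t my_reg_get_device_count(lm_ggml_backend_reg_t reg) {
    (void) reg;
    return 1;
}

static lm_ggml_backend_dev_t my_reg_get_device(lm_ggml_backend_reg_t reg, size_t index) {
    (void) reg; (void) index;
    return &my_device;
}

static struct lm_ggml_backend_reg my_reg = {
    /* .iface = */ {
        /* .get_name         = */ my_reg_get_name,
        /* .get_device_count = */ my_reg_get_device_count,
        /* .get_device       = */ my_reg_get_device,
        /* .get_proc_address = */ NULL, // optional, nothing exported here
    },
    /* .context = */ NULL,
};

// Registering makes the device discoverable through the public ggml-backend API.
void my_backend_init(void) {
    lm_ggml_backend_register(&my_reg);
}
```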