llama_cpp 0.14.0 → 0.14.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/ext/llama_cpp/llama_cpp.cpp +71 -0
- data/lib/llama_cpp/version.rb +2 -2
- data/sig/llama_cpp.rbs +9 -0
- data/vendor/tmp/llama.cpp/Makefile +28 -12
- data/vendor/tmp/llama.cpp/ggml-alloc.c +45 -64
- data/vendor/tmp/llama.cpp/ggml-alloc.h +13 -5
- data/vendor/tmp/llama.cpp/ggml-backend-impl.h +14 -3
- data/vendor/tmp/llama.cpp/ggml-backend.c +358 -135
- data/vendor/tmp/llama.cpp/ggml-backend.h +41 -17
- data/vendor/tmp/llama.cpp/ggml-common.h +1830 -0
- data/vendor/tmp/llama.cpp/ggml-cuda.cu +187 -1033
- data/vendor/tmp/llama.cpp/ggml-impl.h +6 -2
- data/vendor/tmp/llama.cpp/ggml-kompute.cpp +5 -0
- data/vendor/tmp/llama.cpp/ggml-metal.m +42 -20
- data/vendor/tmp/llama.cpp/ggml-metal.metal +44 -910
- data/vendor/tmp/llama.cpp/ggml-quants.c +457 -1074
- data/vendor/tmp/llama.cpp/ggml-quants.h +27 -259
- data/vendor/tmp/llama.cpp/ggml-sycl.cpp +388 -565
- data/vendor/tmp/llama.cpp/ggml-vulkan.cpp +6 -39
- data/vendor/tmp/llama.cpp/ggml.c +509 -343
- data/vendor/tmp/llama.cpp/ggml.h +61 -47
- data/vendor/tmp/llama.cpp/llama.cpp +1446 -687
- data/vendor/tmp/llama.cpp/llama.h +25 -11
- data/vendor/tmp/llama.cpp/unicode.cpp +1672 -0
- data/vendor/tmp/llama.cpp/unicode.h +16 -774
- metadata +4 -2
data/vendor/tmp/llama.cpp/ggml-backend.h

@@ -9,6 +9,7 @@ extern "C" {
 
     typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t;
     typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
+    typedef struct ggml_backend_event * ggml_backend_event_t;
     typedef struct ggml_backend * ggml_backend_t;
     typedef void * ggml_backend_graph_plan_t;
 
@@ -72,11 +73,24 @@ extern "C" {
     GGML_API enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
     GGML_API enum ggml_status ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
 
+    GGML_API bool ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph);
     GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op);
 
     // tensor copy between different backends
     GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);
-
+
+    // asynchronous copy
+    // the copy is performed after all the currently queued operations in backend_src
+    // backend_dst will wait for the copy to complete before performing other operations
+    // automatic fallback to sync copy if async is not supported
+    GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst);
+
+    // events
+    GGML_API ggml_backend_event_t ggml_backend_event_new (ggml_backend_t backend);
+    GGML_API void ggml_backend_event_free (ggml_backend_event_t event);
+    GGML_API void ggml_backend_event_record (ggml_backend_event_t event);
+    GGML_API void ggml_backend_event_synchronize(ggml_backend_event_t event);
+    GGML_API void ggml_backend_event_wait (ggml_backend_t backend, ggml_backend_event_t event); // wait async on event
 
     //
     // CPU backend
@@ -123,27 +137,31 @@ extern "C" {
   /*
     Example usage:
 
-
-        //
+        // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be asigned
+        // preferrably to run on the same backend as the buffer
+        ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
 
-
-        measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed
+        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);
 
-        //
-        build_graph(
-        // manually assign nodes to a backend (optional, should not be needed in most cases)
-        struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
-        ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
-        }
+        // initialize buffers from a max size graph (optional)
+        reserve_graph = build_graph(sched, max_batch_size);
 
-        //
-
+        // manually assign nodes to a backend (optional, should not be needed in most cases)
+        struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
+        ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);
 
-
+        ggml_backend_sched_reserve(sched, reserve_graph);
 
         // compute
         graph = build_graph(sched);
         ggml_backend_sched_graph_compute(sched, graph);
+
+        // if there are graph inputs:
+        ggml_backend_sched_reset(sched);
+        ggml_backend_sched_alloc_graph(sched, graph);
+        ggml_backend_tensor_set(input_tensor, ...);
+        ggml_backend_sched_graph_compute(sched, graph);
+        }
   */
 
     struct ggml_backend_sched;
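Rewritten as a compilable fragment, the updated example looks roughly like the following. The two backends, buf_weights, the two graphs, input_tensor, input_data, and input_size are placeholders assumed to exist elsewhere; only the ggml_backend_* calls, GGML_BACKEND_BUFFER_USAGE_WEIGHTS, and GGML_DEFAULT_GRAPH_SIZE come from the headers.

#include "ggml.h"
#include "ggml-backend.h"

// Sketch of the updated scheduler flow under the assumptions stated above.
static void run_sched_example(ggml_backend_t backend_gpu, ggml_backend_t backend_cpu,
                              ggml_backend_buffer_t buf_weights,
                              struct ggml_cgraph * reserve_graph,
                              struct ggml_cgraph * graph,
                              struct ggml_tensor * input_tensor,
                              const float * input_data, size_t input_size) {
    // prefer running ops that read these weights on the backend that owns the buffer
    ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);

    ggml_backend_t backends[2] = { backend_gpu, backend_cpu };
    ggml_backend_sched_t sched =
        ggml_backend_sched_new(backends, NULL, 2, GGML_DEFAULT_GRAPH_SIZE, /*parallel=*/false);

    // reserve backend buffers from a worst-case graph (optional)
    ggml_backend_sched_reserve(sched, reserve_graph);

    // per iteration: reset assignments, allocate the graph, set inputs, compute
    ggml_backend_sched_reset(sched);
    ggml_backend_sched_alloc_graph(sched, graph);
    ggml_backend_tensor_set(input_tensor, input_data, 0, input_size);
    ggml_backend_sched_graph_compute(sched, graph);

    ggml_backend_sched_free(sched);
}

The trailing false corresponds to the new parallel parameter of ggml_backend_sched_new (see the next hunk); the diff itself does not document its semantics, so it is left disabled here.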
@@ -158,20 +176,26 @@ extern "C" {
     typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
 
     // Initialize a backend scheduler
-    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size);
+    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
+
     // Initialize backend buffers from a measure graph
     GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+
     // Get the number of splits of the last graph
     GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);
+    GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched);
 
     GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend);
 
-    GGML_API void
-    GGML_API ggml_backend_t
+    GGML_API void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
+    GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);
 
     // Allocate and compute graph on the backend scheduler
+    GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
     GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched);
 
     // Reset all assignments and allocators - must be called before changing the node backends
     GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);
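Finally, the asynchronous scheduler entry points suggest a pattern for overlapping host-side work with backend computation. Another sketch: do_host_work is a hypothetical stand-in for any CPU work that can run while the backends are busy; the scheduler calls and GGML_STATUS_SUCCESS are the only parts taken from the headers.

#include "ggml.h"
#include "ggml-backend.h"

// Hypothetical helper assumed to exist; stands in for any CPU-side work
// that can overlap with the queued backend computation.
void do_host_work(void);

// Sketch: submit a graph without blocking, overlap host work, then wait.
static enum ggml_status compute_overlapped(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    // queue the graph; returns without waiting for the backends to finish
    enum ggml_status status = ggml_backend_sched_graph_compute_async(sched, graph);
    if (status != GGML_STATUS_SUCCESS) {
        return status;
    }

    do_host_work();

    // block until all work queued on the scheduler's backends has completed
    ggml_backend_sched_synchronize(sched);
    return status;
}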