llama-cpp-capacitor 0.0.5 → 0.0.7

This diff reflects the changes between publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
Files changed (149)
  1. package/cpp/LICENSE +21 -0
  2. package/cpp/README.md +4 -0
  3. package/cpp/anyascii.c +22223 -0
  4. package/cpp/anyascii.h +42 -0
  5. package/cpp/chat-parser.cpp +393 -0
  6. package/cpp/chat-parser.h +120 -0
  7. package/cpp/chat.cpp +2315 -0
  8. package/cpp/chat.h +221 -0
  9. package/cpp/common.cpp +1619 -0
  10. package/cpp/common.h +744 -0
  11. package/cpp/ggml-alloc.c +1028 -0
  12. package/cpp/ggml-alloc.h +76 -0
  13. package/cpp/ggml-backend-impl.h +255 -0
  14. package/cpp/ggml-backend-reg.cpp +600 -0
  15. package/cpp/ggml-backend.cpp +2118 -0
  16. package/cpp/ggml-backend.h +354 -0
  17. package/cpp/ggml-common.h +1878 -0
  18. package/cpp/ggml-cpp.h +39 -0
  19. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  20. package/cpp/ggml-cpu/amx/amx.h +8 -0
  21. package/cpp/ggml-cpu/amx/common.h +91 -0
  22. package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
  23. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  24. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  25. package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
  26. package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
  27. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  28. package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
  29. package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
  30. package/cpp/ggml-cpu/arch-fallback.h +215 -0
  31. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  32. package/cpp/ggml-cpu/binary-ops.h +16 -0
  33. package/cpp/ggml-cpu/common.h +73 -0
  34. package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
  35. package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
  36. package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
  37. package/cpp/ggml-cpu/ops.cpp +10587 -0
  38. package/cpp/ggml-cpu/ops.h +114 -0
  39. package/cpp/ggml-cpu/quants.c +1193 -0
  40. package/cpp/ggml-cpu/quants.h +97 -0
  41. package/cpp/ggml-cpu/repack.cpp +1982 -0
  42. package/cpp/ggml-cpu/repack.h +120 -0
  43. package/cpp/ggml-cpu/simd-mappings.h +1184 -0
  44. package/cpp/ggml-cpu/traits.cpp +36 -0
  45. package/cpp/ggml-cpu/traits.h +38 -0
  46. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  47. package/cpp/ggml-cpu/unary-ops.h +28 -0
  48. package/cpp/ggml-cpu/vec.cpp +348 -0
  49. package/cpp/ggml-cpu/vec.h +1121 -0
  50. package/cpp/ggml-cpu.h +145 -0
  51. package/cpp/ggml-impl.h +622 -0
  52. package/cpp/ggml-metal-impl.h +688 -0
  53. package/cpp/ggml-metal.h +66 -0
  54. package/cpp/ggml-metal.m +6833 -0
  55. package/cpp/ggml-opt.cpp +1093 -0
  56. package/cpp/ggml-opt.h +256 -0
  57. package/cpp/ggml-quants.c +5324 -0
  58. package/cpp/ggml-quants.h +106 -0
  59. package/cpp/ggml-threading.cpp +12 -0
  60. package/cpp/ggml-threading.h +14 -0
  61. package/cpp/ggml.c +7108 -0
  62. package/cpp/ggml.h +2492 -0
  63. package/cpp/gguf.cpp +1358 -0
  64. package/cpp/gguf.h +202 -0
  65. package/cpp/json-partial.cpp +256 -0
  66. package/cpp/json-partial.h +38 -0
  67. package/cpp/json-schema-to-grammar.cpp +985 -0
  68. package/cpp/json-schema-to-grammar.h +21 -0
  69. package/cpp/llama-adapter.cpp +388 -0
  70. package/cpp/llama-adapter.h +76 -0
  71. package/cpp/llama-arch.cpp +2355 -0
  72. package/cpp/llama-arch.h +499 -0
  73. package/cpp/llama-batch.cpp +875 -0
  74. package/cpp/llama-batch.h +160 -0
  75. package/cpp/llama-chat.cpp +783 -0
  76. package/cpp/llama-chat.h +65 -0
  77. package/cpp/llama-context.cpp +2748 -0
  78. package/cpp/llama-context.h +306 -0
  79. package/cpp/llama-cparams.cpp +5 -0
  80. package/cpp/llama-cparams.h +41 -0
  81. package/cpp/llama-cpp.h +30 -0
  82. package/cpp/llama-grammar.cpp +1229 -0
  83. package/cpp/llama-grammar.h +173 -0
  84. package/cpp/llama-graph.cpp +1891 -0
  85. package/cpp/llama-graph.h +810 -0
  86. package/cpp/llama-hparams.cpp +180 -0
  87. package/cpp/llama-hparams.h +233 -0
  88. package/cpp/llama-impl.cpp +167 -0
  89. package/cpp/llama-impl.h +61 -0
  90. package/cpp/llama-io.cpp +15 -0
  91. package/cpp/llama-io.h +35 -0
  92. package/cpp/llama-kv-cache-iswa.cpp +318 -0
  93. package/cpp/llama-kv-cache-iswa.h +135 -0
  94. package/cpp/llama-kv-cache.cpp +2059 -0
  95. package/cpp/llama-kv-cache.h +374 -0
  96. package/cpp/llama-kv-cells.h +491 -0
  97. package/cpp/llama-memory-hybrid.cpp +258 -0
  98. package/cpp/llama-memory-hybrid.h +137 -0
  99. package/cpp/llama-memory-recurrent.cpp +1146 -0
  100. package/cpp/llama-memory-recurrent.h +179 -0
  101. package/cpp/llama-memory.cpp +59 -0
  102. package/cpp/llama-memory.h +119 -0
  103. package/cpp/llama-mmap.cpp +600 -0
  104. package/cpp/llama-mmap.h +68 -0
  105. package/cpp/llama-model-loader.cpp +1164 -0
  106. package/cpp/llama-model-loader.h +170 -0
  107. package/cpp/llama-model-saver.cpp +282 -0
  108. package/cpp/llama-model-saver.h +37 -0
  109. package/cpp/llama-model.cpp +19042 -0
  110. package/cpp/llama-model.h +491 -0
  111. package/cpp/llama-sampling.cpp +2575 -0
  112. package/cpp/llama-sampling.h +32 -0
  113. package/cpp/llama-vocab.cpp +3792 -0
  114. package/cpp/llama-vocab.h +176 -0
  115. package/cpp/llama.cpp +358 -0
  116. package/cpp/llama.h +1373 -0
  117. package/cpp/log.cpp +427 -0
  118. package/cpp/log.h +103 -0
  119. package/cpp/minja/chat-template.hpp +550 -0
  120. package/cpp/minja/minja.hpp +3009 -0
  121. package/cpp/nlohmann/json.hpp +25526 -0
  122. package/cpp/nlohmann/json_fwd.hpp +187 -0
  123. package/cpp/regex-partial.cpp +204 -0
  124. package/cpp/regex-partial.h +56 -0
  125. package/cpp/rn-completion.cpp +681 -0
  126. package/cpp/rn-completion.h +116 -0
  127. package/cpp/rn-llama.cpp +345 -0
  128. package/cpp/rn-llama.h +149 -0
  129. package/cpp/rn-mtmd.hpp +602 -0
  130. package/cpp/rn-tts.cpp +591 -0
  131. package/cpp/rn-tts.h +59 -0
  132. package/cpp/sampling.cpp +579 -0
  133. package/cpp/sampling.h +107 -0
  134. package/cpp/tools/mtmd/clip-impl.h +473 -0
  135. package/cpp/tools/mtmd/clip.cpp +4322 -0
  136. package/cpp/tools/mtmd/clip.h +106 -0
  137. package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
  138. package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
  139. package/cpp/tools/mtmd/mtmd-audio.h +47 -0
  140. package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
  141. package/cpp/tools/mtmd/mtmd-helper.h +91 -0
  142. package/cpp/tools/mtmd/mtmd.cpp +1066 -0
  143. package/cpp/tools/mtmd/mtmd.h +298 -0
  144. package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
  145. package/cpp/unicode-data.cpp +7034 -0
  146. package/cpp/unicode-data.h +20 -0
  147. package/cpp/unicode.cpp +1061 -0
  148. package/cpp/unicode.h +68 -0
  149. package/package.json +2 -1
package/cpp/ggml-backend.cpp
@@ -0,0 +1,2118 @@
1
+ // Note: porting this file to C++ is a work in progress
2
+
3
+ #ifdef _WIN32
4
+ #define WIN32_LEAN_AND_MEAN
5
+ #ifndef NOMINMAX
6
+ # define NOMINMAX
7
+ #endif
8
+ #include <windows.h>
9
+ #endif
10
+
11
+ #include "ggml-backend.h"
12
+ #include "ggml-backend-impl.h"
13
+ #include "ggml-alloc.h"
14
+ #include "ggml-impl.h"
15
+
16
+ #include <assert.h>
17
+ #include <limits.h>
18
+ #include <stdarg.h>
19
+ #include <stdio.h>
20
+ #include <stdlib.h>
21
+ #include <string.h>
22
+ #include <algorithm>
23
+ #include <vector>
24
+
25
+ #ifdef __APPLE__
26
+ #include <sys/types.h>
27
+ #include <sys/sysctl.h>
28
+ #endif
29
+
30
+
31
+ // backend buffer type
32
+
33
+ const char * lm_ggml_backend_buft_name(lm_ggml_backend_buffer_type_t buft) {
34
+ return buft->iface.get_name(buft);
35
+ }
36
+
37
+ lm_ggml_backend_buffer_t lm_ggml_backend_buft_alloc_buffer(lm_ggml_backend_buffer_type_t buft, size_t size) {
38
+ if (size == 0) {
39
+ // return a dummy buffer for zero-sized allocations
40
+ return lm_ggml_backend_buffer_init(buft, {}, NULL, 0);
41
+ }
42
+
43
+ return buft->iface.alloc_buffer(buft, size);
44
+ }
45
+
46
+ size_t lm_ggml_backend_buft_get_alignment(lm_ggml_backend_buffer_type_t buft) {
47
+ return buft->iface.get_alignment(buft);
48
+ }
49
+
50
+ size_t lm_ggml_backend_buft_get_max_size(lm_ggml_backend_buffer_type_t buft) {
51
+ // get_max_size is optional, defaults to SIZE_MAX
52
+ if (buft->iface.get_max_size) {
53
+ return buft->iface.get_max_size(buft);
54
+ }
55
+ return SIZE_MAX;
56
+ }
57
+
58
+ size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor) {
59
+ // get_alloc_size is optional, defaults to lm_ggml_nbytes
60
+ if (buft->iface.get_alloc_size) {
61
+ size_t size = buft->iface.get_alloc_size(buft, tensor);
62
+ assert(size >= lm_ggml_nbytes(tensor));
63
+ return size;
64
+ }
65
+ return lm_ggml_nbytes(tensor);
66
+ }
67
+
68
+ bool lm_ggml_backend_buft_is_host(lm_ggml_backend_buffer_type_t buft) {
69
+ if (buft->iface.is_host) {
70
+ return buft->iface.is_host(buft);
71
+ }
72
+ return false;
73
+ }
74
+
75
+ lm_ggml_backend_dev_t lm_ggml_backend_buft_get_device(lm_ggml_backend_buffer_type_t buft) {
76
+ return buft->device;
77
+ }
78
+
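Annotation (not part of the published diff): a minimal sketch of how the buffer-type accessors above are typically combined when sizing an allocation for a single tensor; buft and tensor are assumed to come from an existing ggml context and backend.

    static lm_ggml_backend_buffer_t alloc_buffer_for_tensor(
            lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor) {
        // padded size required by this buffer type (always >= lm_ggml_nbytes(tensor))
        size_t need = lm_ggml_backend_buft_get_alloc_size(buft, tensor);
        if (need > lm_ggml_backend_buft_get_max_size(buft)) {
            return NULL; // the tensor cannot fit in a single buffer of this type
        }
        return lm_ggml_backend_buft_alloc_buffer(buft, need);
    }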
79
+ // backend buffer
80
+
81
+ lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
82
+ lm_ggml_backend_buffer_type_t buft,
83
+ struct lm_ggml_backend_buffer_i iface,
84
+ void * context,
85
+ size_t size) {
86
+ lm_ggml_backend_buffer_t buffer = new lm_ggml_backend_buffer {
87
+ /* .interface = */ iface,
88
+ /* .buft = */ buft,
89
+ /* .context = */ context,
90
+ /* .size = */ size,
91
+ /* .usage = */ LM_GGML_BACKEND_BUFFER_USAGE_ANY
92
+ };
93
+
94
+ return buffer;
95
+ }
96
+
97
+ const char * lm_ggml_backend_buffer_name(lm_ggml_backend_buffer_t buffer) {
98
+ return lm_ggml_backend_buft_name(lm_ggml_backend_buffer_get_type(buffer));
99
+ }
100
+
101
+ void lm_ggml_backend_buffer_free(lm_ggml_backend_buffer_t buffer) {
102
+ if (buffer == NULL) {
103
+ return;
104
+ }
105
+
106
+ if (buffer->iface.free_buffer != NULL) {
107
+ buffer->iface.free_buffer(buffer);
108
+ }
109
+ delete buffer;
110
+ }
111
+
112
+ size_t lm_ggml_backend_buffer_get_size(lm_ggml_backend_buffer_t buffer) {
113
+ return buffer->size;
114
+ }
115
+
116
+ void * lm_ggml_backend_buffer_get_base(lm_ggml_backend_buffer_t buffer) {
117
+ // get_base is optional if the buffer is zero-sized
118
+ if (buffer->size == 0) {
119
+ return NULL;
120
+ }
121
+
122
+ void * base = buffer->iface.get_base(buffer);
123
+
124
+ LM_GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");
125
+
126
+ return base;
127
+ }
128
+
129
+ enum lm_ggml_status lm_ggml_backend_buffer_init_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor) {
130
+ // init_tensor is optional
131
+ if (buffer->iface.init_tensor) {
132
+ return buffer->iface.init_tensor(buffer, tensor);
133
+ }
134
+ return LM_GGML_STATUS_SUCCESS;
135
+ }
136
+
137
+ void lm_ggml_backend_buffer_clear(lm_ggml_backend_buffer_t buffer, uint8_t value) {
138
+ // clear is optional if the buffer is zero-sized
139
+ if (buffer->size == 0) {
140
+ return;
141
+ }
142
+
143
+ buffer->iface.clear(buffer, value);
144
+ }
145
+
146
+ size_t lm_ggml_backend_buffer_get_alignment(lm_ggml_backend_buffer_t buffer) {
147
+ return lm_ggml_backend_buft_get_alignment(lm_ggml_backend_buffer_get_type(buffer));
148
+ }
149
+
150
+ size_t lm_ggml_backend_buffer_get_max_size(lm_ggml_backend_buffer_t buffer) {
151
+ return lm_ggml_backend_buft_get_max_size(lm_ggml_backend_buffer_get_type(buffer));
152
+ }
153
+
154
+ size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor) {
155
+ return lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_get_type(buffer), tensor);
156
+ }
157
+
158
+ bool lm_ggml_backend_buffer_is_host(lm_ggml_backend_buffer_t buffer) {
159
+ return lm_ggml_backend_buft_is_host(lm_ggml_backend_buffer_get_type(buffer));
160
+ }
161
+
162
+ void lm_ggml_backend_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage) {
163
+ buffer->usage = usage;
164
+
165
+ // FIXME: add a generic callback to the buffer interface
166
+ if (lm_ggml_backend_buffer_is_multi_buffer(buffer)) {
167
+ lm_ggml_backend_multi_buffer_set_usage(buffer, usage);
168
+ }
169
+ }
170
+
171
+ enum lm_ggml_backend_buffer_usage lm_ggml_backend_buffer_get_usage(lm_ggml_backend_buffer_t buffer) {
172
+ return buffer->usage;
173
+ }
174
+
175
+ lm_ggml_backend_buffer_type_t lm_ggml_backend_buffer_get_type(lm_ggml_backend_buffer_t buffer) {
176
+ return buffer->buft;
177
+ }
178
+
179
+ void lm_ggml_backend_buffer_reset(lm_ggml_backend_buffer_t buffer) {
180
+ if (buffer->iface.reset) {
181
+ buffer->iface.reset(buffer);
182
+ }
183
+ }
184
+
185
+ bool lm_ggml_backend_buffer_copy_tensor(const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst) {
186
+ lm_ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer;
187
+ if (dst_buf->iface.cpy_tensor) {
188
+ return dst_buf->iface.cpy_tensor(dst_buf, src, dst);
189
+ }
190
+ return false;
191
+ }
192
+
193
+ // backend
194
+
195
+ lm_ggml_guid_t lm_ggml_backend_guid(lm_ggml_backend_t backend) {
196
+ if (backend == NULL) {
197
+ return NULL;
198
+ }
199
+ return backend->guid;
200
+ }
201
+
202
+ const char * lm_ggml_backend_name(lm_ggml_backend_t backend) {
203
+ if (backend == NULL) {
204
+ return "NULL";
205
+ }
206
+ return backend->iface.get_name(backend);
207
+ }
208
+
209
+ void lm_ggml_backend_free(lm_ggml_backend_t backend) {
210
+ if (backend == NULL) {
211
+ return;
212
+ }
213
+
214
+ backend->iface.free(backend);
215
+ }
216
+
217
+ lm_ggml_backend_buffer_type_t lm_ggml_backend_get_default_buffer_type(lm_ggml_backend_t backend) {
218
+ return lm_ggml_backend_dev_buffer_type(backend->device);
219
+ }
220
+
221
+ lm_ggml_backend_buffer_t lm_ggml_backend_alloc_buffer(lm_ggml_backend_t backend, size_t size) {
222
+ return lm_ggml_backend_buft_alloc_buffer(lm_ggml_backend_get_default_buffer_type(backend), size);
223
+ }
224
+
225
+ size_t lm_ggml_backend_get_alignment(lm_ggml_backend_t backend) {
226
+ return lm_ggml_backend_buft_get_alignment(lm_ggml_backend_get_default_buffer_type(backend));
227
+ }
228
+
229
+ size_t lm_ggml_backend_get_max_size(lm_ggml_backend_t backend) {
230
+ return lm_ggml_backend_buft_get_max_size(lm_ggml_backend_get_default_buffer_type(backend));
231
+ }
232
+
233
+ void lm_ggml_backend_tensor_set_async(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
234
+ LM_GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
235
+ LM_GGML_ASSERT(offset + size <= lm_ggml_nbytes(tensor) && "tensor write out of bounds");
236
+
237
+ if (backend->iface.set_tensor_async == NULL) {
238
+ lm_ggml_backend_tensor_set(tensor, data, offset, size);
239
+ } else {
240
+ backend->iface.set_tensor_async(backend, tensor, data, offset, size);
241
+ }
242
+ }
243
+
244
+ void lm_ggml_backend_tensor_get_async(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
245
+ LM_GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
246
+ LM_GGML_ASSERT(offset + size <= lm_ggml_nbytes(tensor) && "tensor read out of bounds");
247
+
248
+ if (backend->iface.get_tensor_async == NULL) {
249
+ lm_ggml_backend_tensor_get(tensor, data, offset, size);
250
+ } else {
251
+ backend->iface.get_tensor_async(backend, tensor, data, offset, size);
252
+ }
253
+ }
254
+
255
+ void lm_ggml_backend_tensor_set(struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
256
+ LM_GGML_ASSERT(tensor);
257
+ lm_ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
258
+
259
+ if (size == 0) {
260
+ return;
261
+ }
262
+
263
+ LM_GGML_ASSERT(buf != NULL && "tensor buffer not set");
264
+ LM_GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
265
+ LM_GGML_ASSERT(offset + size <= lm_ggml_nbytes(tensor) && "tensor write out of bounds");
266
+
267
+ buf->iface.set_tensor(buf, tensor, data, offset, size);
268
+ }
269
+
270
+ void lm_ggml_backend_tensor_get(const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
271
+ LM_GGML_ASSERT(tensor);
272
+ lm_ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
273
+
274
+ if (size == 0) {
275
+ return;
276
+ }
277
+
278
+ LM_GGML_ASSERT(buf != NULL && "tensor buffer not set");
279
+ LM_GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
280
+ LM_GGML_ASSERT(offset + size <= lm_ggml_nbytes(tensor) && "tensor read out of bounds");
281
+
282
+ buf->iface.get_tensor(buf, tensor, data, offset, size);
283
+ }
284
+
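Annotation (not part of the published diff): a sketch of round-tripping host data through the synchronous helpers above; t is an assumed tensor that has already been allocated in a backend buffer.

    std::vector<uint8_t> host(lm_ggml_nbytes(t));
    lm_ggml_backend_tensor_set(t, host.data(), 0, host.size()); // host -> backend buffer
    lm_ggml_backend_tensor_get(t, host.data(), 0, host.size()); // backend buffer -> host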
285
+ void lm_ggml_backend_tensor_memset(struct lm_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
286
+ lm_ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
287
+
288
+ if (size == 0) {
289
+ return;
290
+ }
291
+
292
+ LM_GGML_ASSERT(buf != NULL && "tensor buffer not set");
293
+ LM_GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
294
+ LM_GGML_ASSERT(offset + size <= lm_ggml_nbytes(tensor) && "tensor write out of bounds");
295
+ LM_GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer");
296
+
297
+ buf->iface.memset_tensor(buf, tensor, value, offset, size);
298
+ }
299
+
300
+ void lm_ggml_backend_synchronize(lm_ggml_backend_t backend) {
301
+ if (backend->iface.synchronize == NULL) {
302
+ return;
303
+ }
304
+
305
+ backend->iface.synchronize(backend);
306
+ }
307
+
308
+ lm_ggml_backend_graph_plan_t lm_ggml_backend_graph_plan_create(lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph) {
309
+ LM_GGML_ASSERT(backend->iface.graph_plan_create != NULL);
310
+
311
+ return backend->iface.graph_plan_create(backend, cgraph);
312
+ }
313
+
314
+ void lm_ggml_backend_graph_plan_free(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan) {
315
+ LM_GGML_ASSERT(backend->iface.graph_plan_free != NULL);
316
+
317
+ backend->iface.graph_plan_free(backend, plan);
318
+ }
319
+
320
+ enum lm_ggml_status lm_ggml_backend_graph_plan_compute(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan) {
321
+ LM_GGML_ASSERT(backend->iface.graph_plan_compute != NULL);
322
+
323
+ return backend->iface.graph_plan_compute(backend, plan);
324
+ }
325
+
326
+ enum lm_ggml_status lm_ggml_backend_graph_compute(lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph) {
327
+ enum lm_ggml_status err = lm_ggml_backend_graph_compute_async(backend, cgraph);
328
+ lm_ggml_backend_synchronize(backend);
329
+ return err;
330
+ }
331
+
332
+ enum lm_ggml_status lm_ggml_backend_graph_compute_async(lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph) {
333
+ return backend->iface.graph_compute(backend, cgraph);
334
+ }
335
+
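Annotation (not part of the published diff): the synchronous wrapper above is just the async call followed by a synchronize, so a caller that wants to overlap host work can issue the two steps itself; backend and cgraph are assumed to exist.

    enum lm_ggml_status st = lm_ggml_backend_graph_compute_async(backend, cgraph);
    // ... unrelated host-side work can run here ...
    lm_ggml_backend_synchronize(backend); // results of cgraph are now visible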
336
+ bool lm_ggml_backend_supports_op(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op) {
337
+ return lm_ggml_backend_dev_supports_op(backend->device, op);
338
+ }
339
+
340
+ bool lm_ggml_backend_supports_buft(lm_ggml_backend_t backend, lm_ggml_backend_buffer_type_t buft) {
341
+ return lm_ggml_backend_dev_supports_buft(backend->device, buft);
342
+ }
343
+
344
+ bool lm_ggml_backend_offload_op(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op) {
345
+ return lm_ggml_backend_dev_offload_op(backend->device, op);
346
+ }
347
+
348
+ lm_ggml_backend_dev_t lm_ggml_backend_get_device(lm_ggml_backend_t backend) {
349
+ return backend->device;
350
+ }
351
+
352
+ // backend copy
353
+
354
+ void lm_ggml_backend_tensor_copy(struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst) {
355
+ LM_GGML_ASSERT(lm_ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
356
+
357
+ if (src == dst) {
358
+ return;
359
+ }
360
+
361
+ if (lm_ggml_backend_buffer_is_host(src->buffer)) {
362
+ lm_ggml_backend_tensor_set(dst, src->data, 0, lm_ggml_nbytes(src));
363
+ } else if (lm_ggml_backend_buffer_is_host(dst->buffer)) {
364
+ lm_ggml_backend_tensor_get(src, dst->data, 0, lm_ggml_nbytes(src));
365
+ } else if (!lm_ggml_backend_buffer_copy_tensor(src, dst)) {
366
+ #ifndef NDEBUG
367
+ LM_GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, lm_ggml_backend_buffer_name(src->buffer), lm_ggml_backend_buffer_name(dst->buffer));
368
+ #endif
369
+ size_t nbytes = lm_ggml_nbytes(src);
370
+ void * data = malloc(nbytes);
371
+ lm_ggml_backend_tensor_get(src, data, 0, nbytes);
372
+ lm_ggml_backend_tensor_set(dst, data, 0, nbytes);
373
+ free(data);
374
+ }
375
+ }
376
+
377
+ void lm_ggml_backend_tensor_copy_async(lm_ggml_backend_t backend_src, lm_ggml_backend_t backend_dst, struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst) {
378
+ LM_GGML_ASSERT(lm_ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
379
+
380
+ if (src == dst) {
381
+ return;
382
+ }
383
+
384
+ if (backend_dst->iface.cpy_tensor_async != NULL) {
385
+ if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) {
386
+ return;
387
+ }
388
+ }
389
+
390
+ // an async copy would normally happen after all the queued operations on both backends are completed
391
+ // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy
392
+ lm_ggml_backend_synchronize(backend_src);
393
+ lm_ggml_backend_synchronize(backend_dst);
394
+ lm_ggml_backend_tensor_copy(src, dst);
395
+ }
396
+
397
+ // events
398
+
399
+ lm_ggml_backend_event_t lm_ggml_backend_event_new(lm_ggml_backend_dev_t device) {
400
+ // null device is allowed for the transition period to the device interface
401
+ if (device == NULL || device->iface.event_new == NULL) {
402
+ return NULL;
403
+ }
404
+ return device->iface.event_new(device);
405
+ }
406
+
407
+ void lm_ggml_backend_event_free(lm_ggml_backend_event_t event) {
408
+ if (event == NULL) {
409
+ return;
410
+ }
411
+ event->device->iface.event_free(event->device, event);
412
+ }
413
+
414
+ void lm_ggml_backend_event_record(lm_ggml_backend_event_t event, lm_ggml_backend_t backend) {
415
+ LM_GGML_ASSERT(backend->iface.event_record != NULL);
416
+
417
+ backend->iface.event_record(backend, event);
418
+ }
419
+
420
+ void lm_ggml_backend_event_synchronize(lm_ggml_backend_event_t event) {
421
+ LM_GGML_ASSERT(event->device->iface.event_synchronize);
422
+
423
+ event->device->iface.event_synchronize(event->device, event);
424
+ }
425
+
426
+ void lm_ggml_backend_event_wait(lm_ggml_backend_t backend, lm_ggml_backend_event_t event) {
427
+ LM_GGML_ASSERT(backend->iface.event_wait != NULL);
428
+
429
+ backend->iface.event_wait(backend, event);
430
+ }
431
+
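Annotation (not part of the published diff): a sketch of the record/wait pattern the event API above is meant for, assuming two backends whose device supports events (event_new may return NULL otherwise).

    lm_ggml_backend_event_t ev = lm_ggml_backend_event_new(dev_a);
    lm_ggml_backend_graph_compute_async(backend_a, graph_a);
    lm_ggml_backend_event_record(ev, backend_a);   // completion point on backend A
    lm_ggml_backend_event_wait(backend_b, ev);     // backend B waits without blocking the host
    lm_ggml_backend_graph_compute_async(backend_b, graph_b);
    lm_ggml_backend_synchronize(backend_b);
    lm_ggml_backend_event_free(ev);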
432
+ // Backend device
433
+
434
+ const char * lm_ggml_backend_dev_name(lm_ggml_backend_dev_t device) {
435
+ return device->iface.get_name(device);
436
+ }
437
+
438
+ const char * lm_ggml_backend_dev_description(lm_ggml_backend_dev_t device) {
439
+ return device->iface.get_description(device);
440
+ }
441
+
442
+ void lm_ggml_backend_dev_memory(lm_ggml_backend_dev_t device, size_t * free, size_t * total) {
443
+ device->iface.get_memory(device, free, total);
444
+ }
445
+
446
+ enum lm_ggml_backend_dev_type lm_ggml_backend_dev_type(lm_ggml_backend_dev_t device) {
447
+ return device->iface.get_type(device);
448
+ }
449
+
450
+ void lm_ggml_backend_dev_get_props(lm_ggml_backend_dev_t device, struct lm_ggml_backend_dev_props * props) {
451
+ memset(props, 0, sizeof(*props));
452
+ device->iface.get_props(device, props);
453
+ }
454
+
455
+ lm_ggml_backend_reg_t lm_ggml_backend_dev_backend_reg(lm_ggml_backend_dev_t device) {
456
+ return device->reg;
457
+ }
458
+
459
+ lm_ggml_backend_t lm_ggml_backend_dev_init(lm_ggml_backend_dev_t device, const char * params) {
460
+ return device->iface.init_backend(device, params);
461
+ }
462
+
463
+ lm_ggml_backend_buffer_type_t lm_ggml_backend_dev_buffer_type(lm_ggml_backend_dev_t device) {
464
+ return device->iface.get_buffer_type(device);
465
+ }
466
+
467
+ lm_ggml_backend_buffer_type_t lm_ggml_backend_dev_host_buffer_type(lm_ggml_backend_dev_t device) {
468
+ if (device->iface.get_host_buffer_type == NULL) {
469
+ return NULL;
470
+ }
471
+
472
+ return device->iface.get_host_buffer_type(device);
473
+ }
474
+
475
+ lm_ggml_backend_buffer_t lm_ggml_backend_dev_buffer_from_host_ptr(lm_ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) {
476
+ return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size);
477
+ }
478
+
479
+ bool lm_ggml_backend_dev_supports_op(lm_ggml_backend_dev_t device, const struct lm_ggml_tensor * op) {
480
+ return device->iface.supports_op(device, op);
481
+ }
482
+
483
+ bool lm_ggml_backend_dev_supports_buft(lm_ggml_backend_dev_t device, lm_ggml_backend_buffer_type_t buft) {
484
+ return device->iface.supports_buft(device, buft);
485
+ }
486
+
487
+ bool lm_ggml_backend_dev_offload_op(lm_ggml_backend_dev_t device, const struct lm_ggml_tensor * op) {
488
+ if (device->iface.offload_op != NULL) {
489
+ return device->iface.offload_op(device, op);
490
+ }
491
+
492
+ return false;
493
+ }
494
+
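Annotation (not part of the published diff): a minimal bring-up sketch using only the device functions above, assuming dev is a valid lm_ggml_backend_dev_t obtained from the backend registry.

    lm_ggml_backend_t backend = lm_ggml_backend_dev_init(dev, /*params=*/NULL);
    lm_ggml_backend_buffer_t buf = lm_ggml_backend_alloc_buffer(backend, 16u * 1024 * 1024);
    // ... place tensors in buf (e.g. via ggml-alloc) and compute graphs ...
    lm_ggml_backend_buffer_free(buf);
    lm_ggml_backend_free(backend);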
495
+ // Backend (reg)
496
+
497
+ const char * lm_ggml_backend_reg_name(lm_ggml_backend_reg_t reg) {
498
+ return reg->iface.get_name(reg);
499
+ }
500
+
501
+ size_t lm_ggml_backend_reg_dev_count(lm_ggml_backend_reg_t reg) {
502
+ return reg->iface.get_device_count(reg);
503
+ }
504
+
505
+ lm_ggml_backend_dev_t lm_ggml_backend_reg_dev_get(lm_ggml_backend_reg_t reg, size_t index) {
506
+ return reg->iface.get_device(reg, index);
507
+ }
508
+
509
+ void * lm_ggml_backend_reg_get_proc_address(lm_ggml_backend_reg_t reg, const char * name) {
510
+ if (!reg->iface.get_proc_address) {
511
+ return NULL;
512
+ }
513
+ return reg->iface.get_proc_address(reg, name);
514
+ }
515
+
516
+ // multi-buffer buffer
517
+
518
+ struct lm_ggml_backend_multi_buffer_context {
519
+ lm_ggml_backend_buffer_t * buffers;
520
+ size_t n_buffers;
521
+ };
522
+
523
+ static void lm_ggml_backend_multi_buffer_free_buffer(lm_ggml_backend_buffer_t buffer) {
524
+ lm_ggml_backend_multi_buffer_context * ctx = (lm_ggml_backend_multi_buffer_context *) buffer->context;
525
+ for (size_t i = 0; i < ctx->n_buffers; i++) {
526
+ lm_ggml_backend_buffer_free(ctx->buffers[i]);
527
+ }
528
+
529
+ free(ctx->buffers);
530
+ free(ctx);
531
+ }
532
+
533
+ static void lm_ggml_backend_multi_buffer_clear(lm_ggml_backend_buffer_t buffer, uint8_t value) {
534
+ lm_ggml_backend_multi_buffer_context * ctx = (lm_ggml_backend_multi_buffer_context *) buffer->context;
535
+ for (size_t i = 0; i < ctx->n_buffers; i++) {
536
+ lm_ggml_backend_buffer_clear(ctx->buffers[i], value);
537
+ }
538
+ }
539
+
540
+ static const struct lm_ggml_backend_buffer_i lm_ggml_backend_multi_buffer_i = {
541
+ /* .free_buffer = */ lm_ggml_backend_multi_buffer_free_buffer,
542
+ /* .get_base = */ NULL,
543
+ /* .init_tensor = */ NULL,
544
+ /* .memset_tensor = */ NULL,
545
+ /* .set_tensor = */ NULL,
546
+ /* .get_tensor = */ NULL,
547
+ /* .cpy_tensor = */ NULL,
548
+ /* .clear = */ lm_ggml_backend_multi_buffer_clear,
549
+ /* .reset = */ NULL,
550
+ };
551
+
552
+ lm_ggml_backend_buffer_t lm_ggml_backend_multi_buffer_alloc_buffer(lm_ggml_backend_buffer_t * buffers, size_t n_buffers) {
553
+ lm_ggml_backend_multi_buffer_context * ctx = (lm_ggml_backend_multi_buffer_context *) malloc(sizeof(struct lm_ggml_backend_multi_buffer_context));
554
+ ctx->n_buffers = n_buffers;
555
+ ctx->buffers = (lm_ggml_backend_buffer_t *) malloc(n_buffers * sizeof(lm_ggml_backend_buffer_t));
556
+
557
+ LM_GGML_ASSERT(ctx->buffers != NULL);
558
+
559
+ size_t total_size = 0;
560
+ for (size_t i = 0; i < n_buffers; i++) {
561
+ ctx->buffers[i] = buffers[i];
562
+ total_size += lm_ggml_backend_buffer_get_size(buffers[i]);
563
+ }
564
+
565
+ return lm_ggml_backend_buffer_init(buffers[0]->buft, lm_ggml_backend_multi_buffer_i, ctx, total_size);
566
+ }
567
+
568
+ bool lm_ggml_backend_buffer_is_multi_buffer(lm_ggml_backend_buffer_t buffer) {
569
+ return buffer->iface.free_buffer == lm_ggml_backend_multi_buffer_free_buffer;
570
+ }
571
+
572
+ void lm_ggml_backend_multi_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage) {
573
+ LM_GGML_ASSERT(lm_ggml_backend_buffer_is_multi_buffer(buffer));
574
+ lm_ggml_backend_multi_buffer_context * ctx = (lm_ggml_backend_multi_buffer_context *) buffer->context;
575
+ for (size_t i = 0; i < ctx->n_buffers; i++) {
576
+ lm_ggml_backend_buffer_set_usage(ctx->buffers[i], usage);
577
+ }
578
+ }
579
+
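Annotation (not part of the published diff): a sketch of grouping two already-allocated buffers into a single handle with the multi-buffer helpers above, so they can be usage-tagged and freed together; buf_a and buf_b are assumed to exist.

    lm_ggml_backend_buffer_t parts[2] = { buf_a, buf_b };
    lm_ggml_backend_buffer_t combined = lm_ggml_backend_multi_buffer_alloc_buffer(parts, 2);
    lm_ggml_backend_buffer_set_usage(combined, LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS); // forwarded to each part
    lm_ggml_backend_buffer_free(combined); // frees both underlying buffers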
580
+ // creates a copy of the tensor with the same memory layout
581
+ static struct lm_ggml_tensor * lm_ggml_dup_tensor_layout(struct lm_ggml_context * ctx, const struct lm_ggml_tensor * tensor) {
582
+ struct lm_ggml_tensor * dup = lm_ggml_dup_tensor(ctx, tensor);
583
+ for (int i = 0; i < LM_GGML_MAX_DIMS; i++) {
584
+ dup->nb[i] = tensor->nb[i];
585
+ }
586
+ return dup;
587
+ }
588
+
589
+ static bool lm_ggml_is_view_op(enum lm_ggml_op op) {
590
+ return op == LM_GGML_OP_VIEW || op == LM_GGML_OP_RESHAPE || op == LM_GGML_OP_PERMUTE || op == LM_GGML_OP_TRANSPOSE;
591
+ }
592
+
593
+ // scheduler
594
+
595
+ #ifndef LM_GGML_SCHED_MAX_BACKENDS
596
+ #define LM_GGML_SCHED_MAX_BACKENDS 16
597
+ #endif
598
+
599
+ #ifndef LM_GGML_SCHED_MAX_SPLIT_INPUTS
600
+ #define LM_GGML_SCHED_MAX_SPLIT_INPUTS LM_GGML_MAX_SRC
601
+ #endif
602
+
603
+ #ifndef LM_GGML_SCHED_MAX_COPIES
604
+ #define LM_GGML_SCHED_MAX_COPIES 4
605
+ #endif
606
+
607
+ struct lm_ggml_backend_sched_split {
608
+ int backend_id;
609
+ int i_start;
610
+ int i_end;
611
+ struct lm_ggml_tensor * inputs[LM_GGML_SCHED_MAX_SPLIT_INPUTS];
612
+ int n_inputs;
613
+ // graph view of this split
614
+ struct lm_ggml_cgraph graph;
615
+ };
616
+
617
+ struct lm_ggml_backend_sched {
618
+ bool is_reset; // true if the scheduler has been reset since the last graph split
619
+ bool is_alloc;
620
+
621
+ int n_backends;
622
+
623
+ lm_ggml_backend_t backends[LM_GGML_SCHED_MAX_BACKENDS];
624
+ lm_ggml_backend_buffer_type_t bufts[LM_GGML_SCHED_MAX_BACKENDS];
625
+ lm_ggml_gallocr_t galloc;
626
+
627
+ // hash map of the nodes in the graph
628
+ struct lm_ggml_hash_set hash_set;
629
+ int * hv_tensor_backend_ids; // [hash_set.size]
630
+ struct lm_ggml_tensor ** hv_tensor_copies; // [hash_set.size][n_backends][n_copies]
631
+
632
+ int * node_backend_ids; // [graph_size]
633
+ int * leaf_backend_ids; // [graph_size]
634
+
635
+ int * prev_node_backend_ids; // [graph_size]
636
+ int * prev_leaf_backend_ids; // [graph_size]
637
+
638
+ // copy of the graph with modified inputs
639
+ struct lm_ggml_cgraph graph;
640
+
641
+ // graph splits
642
+ struct lm_ggml_backend_sched_split * splits;
643
+ int n_splits;
644
+ int splits_capacity;
645
+
646
+ // pipeline parallelism support
647
+ int n_copies;
648
+ int cur_copy;
649
+ int next_copy;
650
+ lm_ggml_backend_event_t events[LM_GGML_SCHED_MAX_BACKENDS][LM_GGML_SCHED_MAX_COPIES];
651
+ struct lm_ggml_tensor * graph_inputs[LM_GGML_SCHED_MAX_SPLIT_INPUTS];
652
+ int n_graph_inputs;
653
+
654
+ struct lm_ggml_context * ctx;
655
+
656
+ lm_ggml_backend_sched_eval_callback callback_eval;
657
+ void * callback_eval_user_data;
658
+
659
+ char * context_buffer;
660
+ size_t context_buffer_size;
661
+
662
+ bool op_offload;
663
+
664
+ int debug;
665
+ };
666
+
667
+ #define hash_id(tensor) lm_ggml_hash_find_or_insert(&sched->hash_set, tensor)
668
+ #define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)]
669
+ #define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)]
670
+ #define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id)
671
+
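Annotation (not part of the published diff): tensor_id_copy flattens the 3-D lookup [hash id][backend][copy] into the 1-D hv_tensor_copies array, i.e. offset = id * n_backends * n_copies + backend_id * n_copies + copy_id, matching the [hash_set.size][n_backends][n_copies] shape noted in the scheduler struct above.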
672
+ // returns the priority of the backend, lower id is higher priority
673
+ static int lm_ggml_backend_sched_backend_id(lm_ggml_backend_sched_t sched, lm_ggml_backend_t backend) {
674
+ for (int i = 0; i < sched->n_backends; i++) {
675
+ if (sched->backends[i] == backend) {
676
+ return i;
677
+ }
678
+ }
679
+ return -1;
680
+ }
681
+
682
+ static int lm_ggml_backend_sched_backend_from_buffer(lm_ggml_backend_sched_t sched, const struct lm_ggml_tensor * tensor, const struct lm_ggml_tensor * op) {
683
+ lm_ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
684
+ if (buffer == NULL) {
685
+ return -1;
686
+ }
687
+
688
+ // find highest prio backend that supports the buffer type and the op
689
+ for (int i = 0; i < sched->n_backends; i++) {
690
+ if (lm_ggml_backend_supports_buft(sched->backends[i], buffer->buft) &&
691
+ lm_ggml_backend_supports_op(sched->backends[i], op)) {
692
+ return i;
693
+ }
694
+ }
695
+
696
+ #ifndef NDEBUG
697
+ LM_GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n",
698
+ __func__, lm_ggml_op_desc(tensor), lm_ggml_backend_buffer_name(buffer), tensor->name);
699
+ #endif
700
+
701
+ return -1;
702
+ }
703
+
704
+ #if 0
705
+ #define LM_GGML_SCHED_MAX_SPLITS_DEBUG 4096
706
+ static char causes[LM_GGML_DEFAULT_GRAPH_SIZE*16 + LM_GGML_SCHED_MAX_SPLITS_DEBUG*LM_GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only
707
+ #define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
708
+ #define GET_CAUSE(node) causes[hash_id(node)]
709
+ #else
710
+ #define SET_CAUSE(node, ...)
711
+ #define GET_CAUSE(node) ""
712
+ #endif
713
+
714
+ // returns the backend that should be used for the node based on the current locations
715
+ static int lm_ggml_backend_sched_backend_id_from_cur(lm_ggml_backend_sched_t sched, struct lm_ggml_tensor * tensor) {
716
+ // assign pre-allocated nodes to their backend
717
+ int cur_backend_id = lm_ggml_backend_sched_backend_from_buffer(sched, tensor, tensor);
718
+ if (cur_backend_id != -1) {
719
+ SET_CAUSE(tensor, "1.dst");
720
+ return cur_backend_id;
721
+ }
722
+
723
+ // view_src
724
+ if (tensor->view_src != NULL) {
725
+ cur_backend_id = lm_ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor);
726
+ if (cur_backend_id != -1) {
727
+ SET_CAUSE(tensor, "1.vsrc");
728
+ return cur_backend_id;
729
+ }
730
+ }
731
+
732
+ if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) {
733
+ // since the tensor is pre-allocated, it cannot be moved to another backend
734
+ lm_ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
735
+ LM_GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, lm_ggml_backend_buffer_name(buffer), lm_ggml_op_name(tensor->op));
736
+ }
737
+
738
+ // graph input
739
+ if (tensor->flags & LM_GGML_TENSOR_FLAG_INPUT) {
740
+ cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU)
741
+ SET_CAUSE(tensor, "1.inp");
742
+ return cur_backend_id;
743
+ }
744
+
745
+ // operations with weights are preferably run on the same backend as the weights
746
+ for (int i = 0; i < LM_GGML_MAX_SRC; i++) {
747
+ const struct lm_ggml_tensor * src = tensor->src[i];
748
+ if (src == NULL) {
749
+ continue;
750
+ }
751
+ // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
752
+ // not an ideal solution
753
+ if (tensor->op != LM_GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
754
+ int src_backend_id = lm_ggml_backend_sched_backend_from_buffer(sched, src, tensor);
755
+ // check if a backend with higher prio wants to offload the op
756
+ if (sched->op_offload && src_backend_id == sched->n_backends - 1 && lm_ggml_backend_buffer_is_host(src->buffer)) {
757
+ for (int b = 0; b < src_backend_id; b++) {
758
+ if (lm_ggml_backend_supports_op(sched->backends[b], tensor) && lm_ggml_backend_offload_op(sched->backends[b], tensor)) {
759
+ SET_CAUSE(tensor, "1.off");
760
+ return b;
761
+ }
762
+ }
763
+ }
764
+ SET_CAUSE(tensor, "1.wgt%d", i);
765
+ return src_backend_id;
766
+ }
767
+ }
768
+
769
+ return -1;
770
+ }
771
+
772
+ static char * fmt_size(size_t size) {
773
+ static char buffer[128];
774
+ if (size >= 1024*1024) {
775
+ snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024);
776
+ } else {
777
+ snprintf(buffer, sizeof(buffer), "%zuK", size/1024);
778
+ }
779
+ return buffer;
780
+ }
781
+
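Annotation (not part of the published diff): fmt_size returns a pointer into a static buffer, so each formatted value is only valid until the next call; the assignment printer below accordingly passes at most one fmt_size result per log statement.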
782
+ static void lm_ggml_backend_sched_print_assignments(lm_ggml_backend_sched_t sched, struct lm_ggml_cgraph * graph) {
783
+ int cur_split = 0;
784
+ for (int i = 0; i < graph->n_nodes; i++) {
785
+ if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
786
+ lm_ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
787
+ LM_GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, lm_ggml_backend_name(split_backend),
788
+ sched->splits[cur_split].n_inputs);
789
+ for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
790
+ if (j == 0) {
791
+ LM_GGML_LOG_DEBUG(": ");
792
+ }
793
+ LM_GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
794
+ fmt_size(lm_ggml_nbytes(sched->splits[cur_split].inputs[j])));
795
+ }
796
+ LM_GGML_LOG_DEBUG("\n");
797
+ cur_split++;
798
+ }
799
+ struct lm_ggml_tensor * node = graph->nodes[i];
800
+ if (lm_ggml_is_view_op(node->op)) {
801
+ continue;
802
+ }
803
+ if (sched->debug > 1) {
804
+ lm_ggml_backend_t tensor_backend = lm_ggml_backend_sched_get_tensor_backend(sched, node);
805
+ LM_GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s] use=%d:", i, lm_ggml_op_name(node->op), node->name,
806
+ fmt_size(lm_ggml_nbytes(node)), tensor_backend ? lm_ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node),
807
+ graph->use_counts[lm_ggml_hash_find(&graph->visited_hash_set, node)]);
808
+ for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
809
+ struct lm_ggml_tensor * src = node->src[j];
810
+ if (src == NULL) {
811
+ continue;
812
+ }
813
+ lm_ggml_backend_t src_backend = lm_ggml_backend_sched_get_tensor_backend(sched, src);
814
+ LM_GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
815
+ fmt_size(lm_ggml_nbytes(src)), src_backend ? lm_ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
816
+ }
817
+ LM_GGML_LOG_DEBUG("\n");
818
+ }
819
+ }
820
+ }
821
+
822
+ static bool lm_ggml_backend_sched_buffer_supported(lm_ggml_backend_sched_t sched, struct lm_ggml_tensor * t, int backend_id) {
823
+ lm_ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer;
824
+ lm_ggml_backend_buffer_type_t buft = NULL;
825
+
826
+ if (buf) {
827
+ // the tensor is already allocated
828
+ buft = buf->buft;
829
+ } else {
830
+ // see if the tensor already has a backend assigned, and use the buffer type of that backend
831
+ int tensor_backend_id = tensor_backend_id(t);
832
+ if (tensor_backend_id == -1 && t->view_src) {
833
+ tensor_backend_id = tensor_backend_id(t->view_src);
834
+ }
835
+ if (tensor_backend_id != -1) {
836
+ buft = sched->bufts[tensor_backend_id];
837
+ }
838
+ }
839
+
840
+ return buft != NULL && lm_ggml_backend_supports_buft(sched->backends[backend_id], buft);
841
+ }
842
+
843
+ static void lm_ggml_backend_sched_set_if_supported(lm_ggml_backend_sched_t sched, struct lm_ggml_tensor * node, int cur_backend_id, int * node_backend_id) {
844
+ if (lm_ggml_backend_supports_op(sched->backends[cur_backend_id], node)) {
845
+ *node_backend_id = cur_backend_id;
846
+ SET_CAUSE(node, "2.sup");
847
+ }
848
+ }
849
+
850
+ // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
851
+ static void lm_ggml_backend_sched_split_graph(lm_ggml_backend_sched_t sched, struct lm_ggml_cgraph * graph) {
852
+ // reset splits
853
+ sched->n_splits = 0;
854
+ sched->n_graph_inputs = 0;
855
+ sched->is_reset = false;
856
+
857
+ struct lm_ggml_init_params params = {
858
+ /* .mem_size = */ sched->context_buffer_size,
859
+ /* .mem_buffer = */ sched->context_buffer,
860
+ /* .no_alloc = */ true
861
+ };
862
+
863
+ lm_ggml_free(sched->ctx);
864
+
865
+ sched->ctx = lm_ggml_init(params);
866
+ if (sched->ctx == NULL) {
867
+ LM_GGML_ABORT("%s: failed to initialize context\n", __func__);
868
+ }
869
+
870
+ // pass 1: assign backends to ops with pre-allocated inputs
871
+ for (int i = 0; i < graph->n_leafs; i++) {
872
+ struct lm_ggml_tensor * leaf = graph->leafs[i];
873
+ int * leaf_backend_id = &tensor_backend_id(leaf);
874
+ // do not overwrite user assignments
875
+ if (*leaf_backend_id == -1) {
876
+ *leaf_backend_id = lm_ggml_backend_sched_backend_id_from_cur(sched, leaf);
877
+ }
878
+ }
879
+
880
+ for (int i = 0; i < graph->n_nodes; i++) {
881
+ struct lm_ggml_tensor * node = graph->nodes[i];
882
+ int * node_backend_id = &tensor_backend_id(node);
883
+ // do not overwrite user assignments
884
+ if (*node_backend_id == -1) {
885
+ *node_backend_id = lm_ggml_backend_sched_backend_id_from_cur(sched, node);
886
+
887
+ #if 0
888
+ // src
889
+ if (node->op == LM_GGML_OP_NONE) {
890
+ continue;
891
+ }
892
+
893
+ for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
894
+ struct lm_ggml_tensor * src = node->src[j];
895
+ if (src == NULL) {
896
+ continue;
897
+ }
898
+ int * src_backend_id = &tensor_backend_id(src);
899
+ if (*src_backend_id == -1) {
900
+ *src_backend_id = lm_ggml_backend_sched_backend_id_from_cur(sched, src);
901
+ }
902
+ }
903
+ #endif
904
+ }
905
+ }
906
+
907
+ // pass 2: expand current backend assignments
908
+ // assign the same backend to adjacent nodes
909
+ // expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend)
910
+ // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
911
+ // ops unsupported by the backend being expanded will be left unassigned so that they can be assigned later when the locations of their inputs are known
912
+ // expand gpu down
913
+ {
914
+ int cur_backend_id = -1;
915
+ for (int i = 0; i < graph->n_nodes; i++) {
916
+ struct lm_ggml_tensor * node = graph->nodes[i];
917
+ if (lm_ggml_is_view_op(node->op)) {
918
+ continue;
919
+ }
920
+ int * node_backend_id = &tensor_backend_id(node);
921
+ if (*node_backend_id != -1) {
922
+ if (*node_backend_id == sched->n_backends - 1) {
923
+ // skip cpu (lowest prio backend)
924
+ cur_backend_id = -1;
925
+ } else {
926
+ cur_backend_id = *node_backend_id;
927
+ }
928
+ } else if (cur_backend_id != -1) {
929
+ lm_ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
930
+ }
931
+ }
932
+ }
933
+ // expand gpu up
934
+ {
935
+ int cur_backend_id = -1;
936
+ for (int i = graph->n_nodes - 1; i >= 0; i--) {
937
+ struct lm_ggml_tensor * node = graph->nodes[i];
938
+ if (lm_ggml_is_view_op(node->op)) {
939
+ continue;
940
+ }
941
+ int * node_backend_id = &tensor_backend_id(node);
942
+ if (*node_backend_id != -1) {
943
+ if (*node_backend_id == sched->n_backends - 1) {
944
+ // skip cpu (lowest prio backend)
945
+ cur_backend_id = -1;
946
+ } else {
947
+ cur_backend_id = *node_backend_id;
948
+ }
949
+ } else if (cur_backend_id != -1) {
950
+ lm_ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
951
+ }
952
+ }
953
+ }
954
+ // expand rest down
955
+ {
956
+ int cur_backend_id = -1;
957
+ for (int i = 0; i < graph->n_nodes; i++) {
958
+ struct lm_ggml_tensor * node = graph->nodes[i];
959
+ if (lm_ggml_is_view_op(node->op)) {
960
+ continue;
961
+ }
962
+ int * node_backend_id = &tensor_backend_id(node);
963
+ if (*node_backend_id != -1) {
964
+ cur_backend_id = *node_backend_id;
965
+ } else if (cur_backend_id != -1) {
966
+ lm_ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
967
+ }
968
+ }
969
+ }
970
+ // expand rest up
971
+ {
972
+ int cur_backend_id = -1;
973
+ for (int i = graph->n_nodes - 1; i >= 0; i--) {
974
+ struct lm_ggml_tensor * node = graph->nodes[i];
975
+ if (lm_ggml_is_view_op(node->op)) {
976
+ continue;
977
+ }
978
+ int * node_backend_id = &tensor_backend_id(node);
979
+ if (*node_backend_id != -1) {
980
+ cur_backend_id = *node_backend_id;
981
+ } else if (cur_backend_id != -1) {
982
+ lm_ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
983
+ }
984
+ }
985
+ }
986
+
987
+ // pass 3: upgrade nodes to higher prio backends with compatible buffer types
988
+ // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there
989
+ // however, we also need to verify that the sources are in compatible buffer types
990
+ // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph
991
+ // however, this is slow to verify, so we have a more strict requirement that the buffer type is the same
992
+ // this is not uncommon since multiple backends can use host memory, with the same buffer type (e.g. BLAS and CPU)
993
+ // additionally, set remaining unassigned nodes to the backend with the most supported inputs
994
+ // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point
995
+ for (int i = 0; i < graph->n_nodes; i++) {
996
+ struct lm_ggml_tensor * node = graph->nodes[i];
997
+ if (lm_ggml_is_view_op(node->op)) {
998
+ continue;
999
+ }
1000
+ int * node_backend_id = &tensor_backend_id(node);
1001
+ if (*node_backend_id == -1) {
1002
+ // unassigned node: find the backend with the most supported inputs
1003
+ int n_supported_best = -1;
1004
+ for (int b = 0; b < sched->n_backends; b++) {
1005
+ if (lm_ggml_backend_supports_op(sched->backends[b], node)) {
1006
+ int n_supported = 0;
1007
+ for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
1008
+ struct lm_ggml_tensor * src = node->src[j];
1009
+ if (src == NULL) {
1010
+ continue;
1011
+ }
1012
+ if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && lm_ggml_backend_sched_buffer_supported(sched, src, b)) {
1013
+ n_supported++;
1014
+ }
1015
+ }
1016
+ if (n_supported > n_supported_best) {
1017
+ n_supported_best = n_supported;
1018
+ *node_backend_id = b;
1019
+ SET_CAUSE(node, "3.best");
1020
+ }
1021
+ }
1022
+ }
1023
+ } else {
1024
+ // assigned node: upgrade to higher prio backend if possible
1025
+ for (int b = 0; b < *node_backend_id; b++) {
1026
+ if (sched->bufts[b] == sched->bufts[*node_backend_id] && lm_ggml_backend_supports_op(sched->backends[b], node)) {
1027
+ bool supported = true;
1028
+ for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
1029
+ struct lm_ggml_tensor * src = node->src[j];
1030
+ if (src == NULL) {
1031
+ continue;
1032
+ }
1033
+ if (!lm_ggml_backend_sched_buffer_supported(sched, src, b)) {
1034
+ supported = false;
1035
+ break;
1036
+ }
1037
+ }
1038
+ if (supported) {
1039
+ *node_backend_id = b;
1040
+ SET_CAUSE(node, "3.upg");
1041
+ break;
1042
+ }
1043
+ }
1044
+ }
1045
+ }
1046
+ }
1047
+
1048
+ // pass 4: assign backends to remaining src from dst and view_src
1049
+ for (int i = 0; i < graph->n_nodes; i++) {
1050
+ struct lm_ggml_tensor * node = graph->nodes[i];
1051
+ int * cur_backend_id = &tensor_backend_id(node);
1052
+ if (node->view_src != NULL && *cur_backend_id == -1) {
1053
+ *cur_backend_id = tensor_backend_id(node->view_src);
1054
+ SET_CAUSE(node, "4.vsrc");
1055
+ }
1056
+ for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
1057
+ struct lm_ggml_tensor * src = node->src[j];
1058
+ if (src == NULL) {
1059
+ continue;
1060
+ }
1061
+ int * src_backend_id = &tensor_backend_id(src);
1062
+ if (*src_backend_id == -1) {
1063
+ if (src->view_src != NULL) {
1064
+ // views are always on the same backend as the source
1065
+ *src_backend_id = tensor_backend_id(src->view_src);
1066
+ SET_CAUSE(src, "4.vsrc");
1067
+ } else {
1068
+ *src_backend_id = *cur_backend_id;
1069
+ SET_CAUSE(src, "4.cur");
1070
+ }
1071
+ }
1072
+ }
1073
+ // if the node is still unassigned, assign it to the first backend that supports it
1074
+ for (int b = 0; b < sched->n_backends && *cur_backend_id == -1; b++) {
1075
+ lm_ggml_backend_sched_set_if_supported(sched, node, b, cur_backend_id);
1076
+ }
1077
+ LM_GGML_ASSERT(*cur_backend_id != -1);
1078
+ }
1079
+
1080
+ // pass 5: split graph, find tensors that need to be copied
1081
+ {
1082
+ int i_split = 0;
1083
+ struct lm_ggml_backend_sched_split * split = &sched->splits[0];
1084
+ // find the backend of the first split, skipping view ops
1085
+ int i = 0;
1086
+ for (; i < graph->n_nodes; i++) {
1087
+ struct lm_ggml_tensor * node = graph->nodes[i];
1088
+ if (!lm_ggml_is_view_op(node->op)) {
1089
+ split->backend_id = tensor_backend_id(node);
1090
+ break;
1091
+ }
1092
+ }
1093
+ split->i_start = 0;
1094
+ split->n_inputs = 0;
1095
+ int cur_backend_id = split->backend_id;
1096
+ for (; i < graph->n_nodes; i++) {
1097
+ struct lm_ggml_tensor * node = graph->nodes[i];
1098
+
1099
+ if (lm_ggml_is_view_op(node->op)) {
1100
+ continue;
1101
+ }
1102
+
1103
+ const int node_backend_id = tensor_backend_id(node);
1104
+
1105
+ LM_GGML_ASSERT(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback
1106
+
1107
+ // check if we should start a new split based on the sources of the current node
1108
+ bool need_new_split = false;
1109
+ if (node_backend_id == cur_backend_id && split->n_inputs > 0) {
1110
+ for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
1111
+ struct lm_ggml_tensor * src = node->src[j];
1112
+ if (src == NULL) {
1113
+ continue;
1114
+ }
1115
+ // check if a weight is on a different and incompatible backend
1116
+ // by starting a new split, the memory of the previously offloaded weights can be reused
1117
+ if (src->buffer != NULL && src->buffer->usage == LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
1118
+ int src_backend_id = tensor_backend_id(src);
1119
+ if (src_backend_id != cur_backend_id && !lm_ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
1120
+ need_new_split = true;
1121
+ break;
1122
+ }
1123
+ }
1124
+ // check if the split has too many inputs
1125
+ // FIXME: count the number of inputs instead of only checking when full
1126
+ if (split->n_inputs == LM_GGML_SCHED_MAX_SPLIT_INPUTS) {
1127
+ const size_t id = hash_id(src);
1128
+ int src_backend_id = sched->hv_tensor_backend_ids[id];
1129
+ bool supported = lm_ggml_backend_sched_buffer_supported(sched, src, cur_backend_id);
1130
+ if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) {
1131
+ need_new_split = true;
1132
+ break;
1133
+ }
1134
+ }
1135
+ }
1136
+ }
1137
+
1138
+ if (node_backend_id != cur_backend_id || need_new_split) {
1139
+ split->i_end = i;
1140
+ i_split++;
1141
+ if (i_split >= sched->splits_capacity) {
1142
+ sched->splits_capacity *= 2;
1143
+ sched->splits = (lm_ggml_backend_sched_split *)
1144
+ realloc(sched->splits, sched->splits_capacity * sizeof(struct lm_ggml_backend_sched_split));
1145
+ LM_GGML_ASSERT(sched->splits != NULL);
1146
+ }
1147
+ split = &sched->splits[i_split];
1148
+ split->backend_id = node_backend_id;
1149
+ split->i_start = i;
1150
+ split->n_inputs = 0;
1151
+ cur_backend_id = node_backend_id;
1152
+ }
1153
+
1154
+ // find inputs that are not on the same backend
1155
+ for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
1156
+ struct lm_ggml_tensor * src = node->src[j];
1157
+ if (src == NULL) {
1158
+ continue;
1159
+ }
1160
+
1161
+ size_t src_id = hash_id(src);
1162
+ const int src_backend_id = sched->hv_tensor_backend_ids[src_id];
1163
+ LM_GGML_ASSERT(src_backend_id != -1); // all inputs should be assigned by now
1164
+
1165
+ if (src->flags & LM_GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) {
1166
+ if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) {
1167
+ lm_ggml_backend_t backend = sched->backends[src_backend_id];
1168
+ for (int c = 0; c < sched->n_copies; c++) {
1169
+ struct lm_ggml_tensor * tensor_copy;
1170
+ if (c == sched->cur_copy) {
1171
+ tensor_copy = src; // use the original tensor as the current copy
1172
+ } else {
1173
+ tensor_copy = lm_ggml_dup_tensor_layout(sched->ctx, src);
1174
+ lm_ggml_format_name(tensor_copy, "%s#%s#%d", lm_ggml_backend_name(backend), src->name, c);
1175
+ }
1176
+ if (sched->n_copies > 1) {
1177
+ lm_ggml_set_input(tensor_copy);
1178
+ lm_ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
1179
+ }
1180
+ tensor_id_copy(src_id, src_backend_id, c) = tensor_copy;
1181
+ SET_CAUSE(tensor_copy, "4.cpy");
1182
+ }
1183
+ int n_graph_inputs = sched->n_graph_inputs++;
1184
+ LM_GGML_ASSERT(n_graph_inputs < LM_GGML_SCHED_MAX_SPLIT_INPUTS);
1185
+ sched->graph_inputs[n_graph_inputs] = src;
1186
+ }
1187
+ }
1188
+
1189
+ if (src_backend_id != cur_backend_id && !lm_ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
1190
+ // create a copy of the input in the split's backend
1191
+ if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) {
1192
+ lm_ggml_backend_t backend = sched->backends[cur_backend_id];
1193
+ for (int c = 0; c < sched->n_copies; c++) {
1194
+ struct lm_ggml_tensor * tensor_copy = lm_ggml_dup_tensor_layout(sched->ctx, src);
1195
+ lm_ggml_format_name(tensor_copy, "%s#%s#%d", lm_ggml_backend_name(backend), src->name, c);
1196
+ if (sched->n_copies > 1) {
1197
+ lm_ggml_set_input(tensor_copy);
1198
+ lm_ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
1199
+ }
1200
+ tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy;
1201
+ SET_CAUSE(tensor_copy, "4.cpy");
1202
+ }
1203
+ int n_inputs = split->n_inputs++;
1204
+ LM_GGML_ASSERT(n_inputs < LM_GGML_SCHED_MAX_SPLIT_INPUTS);
1205
+ split->inputs[n_inputs] = src;
1206
+ }
1207
+ node->src[j] = tensor_id_copy(src_id, cur_backend_id, sched->cur_copy);
1208
+ }
1209
+ }
1210
+ }
1211
+ split->i_end = graph->n_nodes;
1212
+ sched->n_splits = i_split + 1;
1213
+ }
1214
+
1215
+ if (sched->debug) {
1216
+ lm_ggml_backend_sched_print_assignments(sched, graph);
1217
+ }
1218
+
1219
+ // swap node_backend_ids and leaf_backend_ids with prevs
1220
+ {
1221
+ int * tmp = sched->node_backend_ids;
1222
+ sched->node_backend_ids = sched->prev_node_backend_ids;
1223
+ sched->prev_node_backend_ids = tmp;
1224
+
1225
+ tmp = sched->leaf_backend_ids;
1226
+ sched->leaf_backend_ids = sched->prev_leaf_backend_ids;
1227
+ sched->prev_leaf_backend_ids = tmp;
1228
+ }
+
+    int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*LM_GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies;
+    if (sched->graph.size < graph_size) {
+        sched->graph.size = graph_size;
+        sched->graph.nodes = (lm_ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct lm_ggml_tensor *));
+        sched->graph.leafs = (lm_ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct lm_ggml_tensor *));
+        LM_GGML_ASSERT(sched->graph.nodes != NULL);
+        LM_GGML_ASSERT(sched->graph.leafs != NULL);
+    }
+    sched->graph.n_nodes = 0;
+    sched->graph.n_leafs = 0;
+
+    struct lm_ggml_cgraph * graph_copy = &sched->graph;
+
+    for (int i = 0; i < sched->n_splits; i++) {
+        struct lm_ggml_backend_sched_split * split = &sched->splits[i];
+        split->graph = lm_ggml_graph_view(graph, split->i_start, split->i_end);
+
+        // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
+        for (int j = 0; j < split->n_inputs; j++) {
+            assert(graph_copy->size > (graph_copy->n_nodes + 1));
+
+            struct lm_ggml_tensor * input = split->inputs[j];
+            const size_t input_id = hash_id(input);
+            struct lm_ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy);
+
+            // add a dependency to the input source so that it is not freed before the copy is done
+            struct lm_ggml_tensor * input_dep = lm_ggml_view_tensor(sched->ctx, input);
+            input_dep->src[0] = input;
+            sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id];
+            graph_copy->nodes[graph_copy->n_nodes++] = input_dep;
+
+            // add a dependency to the input copy so that it is allocated at the start of the split
+            sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id;
+            graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
+        }
+
+        for (int j = split->i_start; j < split->i_end; j++) {
+            assert(graph_copy->size > graph_copy->n_nodes);
+            sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]);
+            graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
+        }
+    }
+
+    if (sched->n_copies > 1) {
+        // add input copies as leafs so that they are allocated first
+        for (int i = 0; i < sched->n_graph_inputs; i++) {
+            struct lm_ggml_tensor * input = sched->graph_inputs[i];
+            size_t id = hash_id(input);
+            int backend_id = tensor_backend_id(input);
+            for (int c = 0; c < sched->n_copies; c++) {
+                struct lm_ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
+                sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
+                assert(graph_copy->size > graph_copy->n_leafs);
+                graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
+            }
+        }
+
+        for (int i = 0; i < sched->n_splits; i++) {
+            struct lm_ggml_backend_sched_split * split = &sched->splits[i];
+            int backend_id = split->backend_id;
+            for (int j = 0; j < split->n_inputs; j++) {
+                struct lm_ggml_tensor * input = split->inputs[j];
+                size_t id = hash_id(input);
+                for (int c = 0; c < sched->n_copies; c++) {
+                    struct lm_ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
+                    sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
+                    assert(graph_copy->size > graph_copy->n_leafs);
+                    graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
+                }
+            }
+        }
+    }
+
+    // add leafs from the original graph
+    for (int i = 0; i < graph->n_leafs; i++) {
+        struct lm_ggml_tensor * leaf = graph->leafs[i];
+        sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf);
+        assert(graph_copy->size > graph_copy->n_leafs);
+        graph_copy->leafs[graph_copy->n_leafs++] = leaf;
+    }
+ }
+
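+ // allocate the split graphs in the galloc; a new reservation is only made when a node or
+ // leaf was assigned to a different buffer type since the previous run, or when allocating
+ // the graph within the existing reservation fails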
+ static bool lm_ggml_backend_sched_alloc_splits(lm_ggml_backend_sched_t sched) {
+    bool backend_ids_changed = false;
+    for (int i = 0; i < sched->graph.n_nodes; i++) {
+        if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] &&
+            sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) {
+            backend_ids_changed = true;
+            break;
+        }
+    }
+    if (!backend_ids_changed) {
+        for (int i = 0; i < sched->graph.n_leafs; i++) {
+            if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] &&
+                sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) {
+                backend_ids_changed = true;
+                break;
+            }
+        }
+    }
+
+    // allocate graph
+    if (backend_ids_changed || !lm_ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
+        // the re-allocation may cause the split inputs to be moved to a different address
+        // synchronize without lm_ggml_backend_sched_synchronize to avoid changing cur_copy
+        for (int i = 0; i < sched->n_backends; i++) {
+            lm_ggml_backend_synchronize(sched->backends[i]);
+        }
+ #ifndef NDEBUG
+        LM_GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
+ #endif
+        lm_ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids);
+        if (!lm_ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
+            LM_GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__);
+            return false;
+        }
+    }
+
+    return true;
+ }
+
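+ // execute the splits in order: copy the inputs of each split to its backend (only the
+ // experts that are actually used for host-resident MoE weights), compute the split graph
+ // (optionally node-by-node through the eval callback), and record an event per copy so
+ // that input uploads can be overlapped with compute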
+ static enum lm_ggml_status lm_ggml_backend_sched_compute_splits(lm_ggml_backend_sched_t sched) {
+    struct lm_ggml_backend_sched_split * splits = sched->splits;
+
+    lm_ggml_tensor * prev_ids_tensor = nullptr;
+    std::vector<int32_t> ids;
+    std::vector<lm_ggml_bitset_t> used_ids;
+
+    for (int split_id = 0; split_id < sched->n_splits; split_id++) {
+        struct lm_ggml_backend_sched_split * split = &splits[split_id];
+        int split_backend_id = split->backend_id;
+        lm_ggml_backend_t split_backend = sched->backends[split_backend_id];
+
+        // copy the input tensors to the split backend
+        for (int input_id = 0; input_id < split->n_inputs; input_id++) {
+            lm_ggml_backend_t input_backend = lm_ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]);
+            struct lm_ggml_tensor * input = split->inputs[input_id];
+            struct lm_ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);
+
+            if (input->flags & LM_GGML_TENSOR_FLAG_INPUT) {
+                // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
+                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
+                    lm_ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
+                } else {
+                    lm_ggml_backend_synchronize(split_backend);
+                }
+                lm_ggml_backend_tensor_copy(input, input_cpy);
+            } else {
+                // wait for the split backend to finish using the input before overwriting it
+                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
+                    lm_ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
+                } else {
+                    lm_ggml_backend_synchronize(split_backend);
+                }
+
+                // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
+                lm_ggml_tensor * node = split->graph.nodes[0];
+                if (split->graph.n_nodes > 0 &&
+                    lm_ggml_backend_buffer_get_usage(input->buffer) == LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
+                    lm_ggml_backend_buffer_is_host(input->buffer) && (
+                        (node->src[0] == input_cpy && node->op == LM_GGML_OP_MUL_MAT_ID)
+                        //|| (node->src[1] == input_cpy && node->op == LM_GGML_OP_ADD_ID) /* LM_GGML_OP_ADD_ID weights are small and not worth splitting */
+                    )) {
+
+                    const int64_t n_expert    = node->op == LM_GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1];
+                    const size_t  expert_size = node->op == LM_GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1];
+
+                    lm_ggml_backend_synchronize(input_backend);
+
+                    // get the ids
+                    lm_ggml_tensor * ids_tensor = node->src[2];
+                    lm_ggml_backend_t ids_backend = split_backend;
+
+                    // if the ids tensor is also an input of the split, it may not have been copied yet to the split backend
+                    // in that case, we use the original ids tensor
+                    for (int i = input_id + 1; i < split->n_inputs; i++) {
+                        if (ids_tensor == tensor_copy(split->inputs[i], split_backend_id, sched->cur_copy)) {
+                            ids_tensor = split->inputs[i];
+                            ids_backend = lm_ggml_backend_sched_get_tensor_backend(sched, split->inputs[i]);
+                            break;
+                        }
+                    }
+
+                    if (ids_tensor != prev_ids_tensor) {
+                        ids.resize(lm_ggml_nbytes(ids_tensor) / sizeof(int32_t));
+                        lm_ggml_backend_tensor_get_async(ids_backend, ids_tensor, ids.data(), 0, lm_ggml_nbytes(ids_tensor));
+                        lm_ggml_backend_synchronize(ids_backend);
+
+                        // find the used experts
+                        used_ids.clear();
+                        used_ids.resize(lm_ggml_bitset_size(n_expert));
+                        for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) {
+                            for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) {
+                                int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)];
+                                LM_GGML_ASSERT(id >= 0 && id < n_expert);
+                                lm_ggml_bitset_set(used_ids.data(), id);
+                            }
+                        }
+
+                        prev_ids_tensor = ids_tensor;
+                    }
+
+                    // group consecutive experts and copy them together
+                    auto copy_experts = [&](int32_t first_id, int32_t last_id) {
+                        const size_t expert_offset = first_id * expert_size;
+                        const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
+                        const size_t padding = std::min<size_t>(expert_size, 512);
+                        const size_t padding_end = last_id < n_expert - 1 ? padding : 0;
+
+                        lm_ggml_backend_tensor_set_async(split_backend,
+                            input_cpy,
+                            (const uint8_t *)input->data + expert_offset, expert_offset,
+                            // copy a bit extra at the end to ensure there are no NaNs in the padding of the last expert
+                            // this is necessary for MMQ in the CUDA backend
+                            expert_size_copy + padding_end);
+                    };
+
+                    int id = 0;
+                    while (!lm_ggml_bitset_get(used_ids.data(), id)) {
+                        id++;
+                    }
+                    int32_t first_id = id;
+                    int32_t last_id  = first_id;
+
+                    for (++id; id < n_expert; ++id) {
+                        if (!lm_ggml_bitset_get(used_ids.data(), id)) {
+                            continue;
+                        }
+
+                        if (id == last_id + 1) {
+                            last_id = id;
+                            continue;
+                        }
+
+                        copy_experts(first_id, last_id);
+
+                        first_id = id;
+                        last_id = id;
+                    }
+                    copy_experts(first_id, last_id);
+                } else {
+                    // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
+                    // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
+                    if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
+                        lm_ggml_backend_synchronize(input_backend);
+                        if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
+                            lm_ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
+                        } else {
+                            lm_ggml_backend_synchronize(split_backend);
+                        }
+                        lm_ggml_backend_tensor_copy(input, input_cpy);
+                    }
+                }
+            }
+        }
+
+        if (!sched->callback_eval) {
+            enum lm_ggml_status ec = lm_ggml_backend_graph_compute_async(split_backend, &split->graph);
+            if (ec != LM_GGML_STATUS_SUCCESS) {
+                return ec;
+            }
+        } else {
+            // similar to lm_ggml_backend_compare_graph_backend
+            for (int j0 = 0; j0 < split->graph.n_nodes; j0++) {
+                struct lm_ggml_tensor * t = split->graph.nodes[j0];
+
+                // check if the user needs data from this node
+                bool need = sched->callback_eval(t, true, sched->callback_eval_user_data);
+
+                int j1 = j0;
+
+                // determine the range [j0, j1] of nodes that can be computed together
+                while (!need && j1 < split->graph.n_nodes - 1) {
+                    t = split->graph.nodes[++j1];
+                    need = sched->callback_eval(t, true, sched->callback_eval_user_data);
+                }
+
+                struct lm_ggml_cgraph gv = lm_ggml_graph_view(&split->graph, j0, j1 + 1);
+
+                enum lm_ggml_status ec = lm_ggml_backend_graph_compute_async(split_backend, &gv);
+                if (ec != LM_GGML_STATUS_SUCCESS) {
+                    return ec;
+                }
+
+                // TODO: pass backend to the callback, then the user can decide if they want to synchronize
+                lm_ggml_backend_synchronize(split_backend);
+
+                if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) {
+                    break;
+                }
+
+                j0 = j1;
+            }
+        }
+
+        // record the event of this copy
+        if (split->n_inputs > 0) {
+            if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
+                lm_ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend);
+            }
+        }
+    }
+
+    return LM_GGML_STATUS_SUCCESS;
+ }
+
+ lm_ggml_backend_sched_t lm_ggml_backend_sched_new(
+        lm_ggml_backend_t * backends,
+        lm_ggml_backend_buffer_type_t * bufts,
+        int n_backends,
+        size_t graph_size,
+        bool parallel,
+        bool op_offload) {
+    LM_GGML_ASSERT(n_backends > 0);
+    LM_GGML_ASSERT(n_backends <= LM_GGML_SCHED_MAX_BACKENDS);
+    LM_GGML_ASSERT(lm_ggml_backend_dev_type(lm_ggml_backend_get_device(backends[n_backends - 1])) == LM_GGML_BACKEND_DEVICE_TYPE_CPU);
+
+    struct lm_ggml_backend_sched * sched = (lm_ggml_backend_sched *) calloc(1, sizeof(struct lm_ggml_backend_sched));
+
+    const char * LM_GGML_SCHED_DEBUG = getenv("LM_GGML_SCHED_DEBUG");
+    sched->debug = LM_GGML_SCHED_DEBUG ? atoi(LM_GGML_SCHED_DEBUG) : 0;
+    sched->n_backends = n_backends;
+    sched->n_copies = parallel ? LM_GGML_SCHED_MAX_COPIES : 1;
+
+    // initialize hash table
+    // FIXME: needs to be size*2 to account for leafs (do it in graph_split instead)
+    sched->hash_set = lm_ggml_hash_set_new(graph_size);
+    sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
+    sched->hv_tensor_copies = (lm_ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct lm_ggml_tensor *));
+
+    const size_t lm_ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph
+    const size_t nodes_size = graph_size + lm_ggml_sched_max_splits*LM_GGML_SCHED_MAX_SPLIT_INPUTS*2;
+    sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0]));
+    sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0]));
+    sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0]));
+    sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0]));
+
+    sched->context_buffer_size = lm_ggml_sched_max_splits*LM_GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct lm_ggml_tensor) + lm_ggml_graph_overhead_custom(graph_size, false);
+    sched->context_buffer = (char *) malloc(sched->context_buffer_size);
+
+    const int initial_splits_capacity = 16;
+    sched->splits = (lm_ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0]));
+    sched->splits_capacity = initial_splits_capacity;
+
+    for (int b = 0; b < n_backends; b++) {
+        sched->backends[b] = backends[b];
+        sched->bufts[b] = bufts ? bufts[b] : lm_ggml_backend_get_default_buffer_type(backends[b]);
+        LM_GGML_ASSERT(lm_ggml_backend_supports_buft(backends[b], sched->bufts[b]));
+
+        if (sched->n_copies > 1) {
+            for (int c = 0; c < sched->n_copies; c++) {
+                sched->events[b][c] = lm_ggml_backend_event_new(backends[b]->device);
+            }
+        }
+    }
+
+    sched->galloc = lm_ggml_gallocr_new_n(sched->bufts, n_backends);
+    sched->op_offload = op_offload;
+
+    lm_ggml_backend_sched_reset(sched);
+
+    return sched;
+ }
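+ // Illustrative usage sketch (the backend handles below are hypothetical, not defined in
+ // this file): the CPU backend must come last, and passing NULL for bufts selects each
+ // backend's default buffer type.
+ //
+ //     lm_ggml_backend_t backends[2] = { metal_backend, cpu_backend };
+ //     lm_ggml_backend_sched_t sched = lm_ggml_backend_sched_new(
+ //         backends, /*bufts=*/NULL, /*n_backends=*/2,
+ //         /*graph_size=*/LM_GGML_DEFAULT_GRAPH_SIZE, /*parallel=*/false, /*op_offload=*/true);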
+
+ void lm_ggml_backend_sched_free(lm_ggml_backend_sched_t sched) {
+    if (sched == NULL) {
+        return;
+    }
+    for (int b = 0; b < sched->n_backends; b++) {
+        for (int c = 0; c < sched->n_copies; c++) {
+            lm_ggml_backend_event_free(sched->events[b][c]);
+        }
+    }
+    lm_ggml_gallocr_free(sched->galloc);
+    lm_ggml_free(sched->ctx);
+    lm_ggml_hash_set_free(&sched->hash_set);
+    free(sched->splits);
+    free(sched->hv_tensor_backend_ids);
+    free(sched->hv_tensor_copies);
+    free(sched->node_backend_ids);
+    free(sched->leaf_backend_ids);
+    free(sched->prev_node_backend_ids);
+    free(sched->prev_leaf_backend_ids);
+    free(sched->context_buffer);
+    free(sched->graph.nodes);
+    free(sched->graph.leafs);
+    free(sched);
+ }
+
+ void lm_ggml_backend_sched_reset(lm_ggml_backend_sched_t sched) {
+    // reset state for the next run
+    if (!sched->is_reset) {
+        lm_ggml_hash_set_reset(&sched->hash_set);
+        memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
+        memset(sched->hv_tensor_copies, 0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct lm_ggml_tensor *));
+        sched->is_reset = true;
+    }
+    sched->is_alloc = false;
+ }
+
+ bool lm_ggml_backend_sched_reserve(lm_ggml_backend_sched_t sched, struct lm_ggml_cgraph * measure_graph) {
+    LM_GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);
+
+    lm_ggml_backend_sched_synchronize(sched);
+
+    lm_ggml_backend_sched_split_graph(sched, measure_graph);
+
+    if (!lm_ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) {
+        return false;
+    }
+
+    lm_ggml_backend_sched_reset(sched);
+
+    return true;
+ }
+
+ bool lm_ggml_backend_sched_alloc_graph(lm_ggml_backend_sched_t sched, struct lm_ggml_cgraph * graph) {
+    LM_GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs);
+    LM_GGML_ASSERT(!sched->is_alloc);
+
+    sched->cur_copy = sched->next_copy;
+    sched->next_copy = (sched->next_copy + 1) % sched->n_copies;
+
+    lm_ggml_backend_sched_split_graph(sched, graph);
+
+    if (!lm_ggml_backend_sched_alloc_splits(sched)) {
+        return false;
+    }
+
+    sched->is_alloc = true;
+
+    return true;
+ }
+
+ enum lm_ggml_status lm_ggml_backend_sched_graph_compute(lm_ggml_backend_sched_t sched, struct lm_ggml_cgraph * graph) {
+    enum lm_ggml_status err = lm_ggml_backend_sched_graph_compute_async(sched, graph);
+    lm_ggml_backend_sched_synchronize(sched);
+    return err;
+ }
+
+ enum lm_ggml_status lm_ggml_backend_sched_graph_compute_async(lm_ggml_backend_sched_t sched, struct lm_ggml_cgraph * graph) {
+    if (!sched->is_reset && !sched->is_alloc) {
+        lm_ggml_backend_sched_reset(sched);
+    }
+
+    if (!sched->is_alloc) {
+        if (!lm_ggml_backend_sched_alloc_graph(sched, graph)) {
+            return LM_GGML_STATUS_ALLOC_FAILED;
+        }
+    }
+
+    return lm_ggml_backend_sched_compute_splits(sched);
+ }
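+ // Illustrative call sequence (sketch; `sched` and `gf` are assumed to have been created
+ // and built elsewhere). Explicit reset/alloc calls are optional, since graph_compute_async
+ // performs them when needed:
+ //
+ //     lm_ggml_backend_sched_reset(sched);             // clear previous tensor assignments
+ //     lm_ggml_backend_sched_alloc_graph(sched, gf);   // split the graph and allocate it
+ //     lm_ggml_backend_sched_graph_compute(sched, gf); // compute and synchronize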
+
+ void lm_ggml_backend_sched_synchronize(lm_ggml_backend_sched_t sched) {
+    for (int i = 0; i < sched->n_backends; i++) {
+        lm_ggml_backend_synchronize(sched->backends[i]);
+    }
+    if (!sched->is_alloc) {
+        // if the graph is not already allocated, always use copy 0 after a synchronization
+        // this ensures that during generation the same copy is used every time,
+        // which avoids changes in the graph that could cause CUDA or other graphs to be disabled
+        sched->next_copy = 0;
+    }
+ }
+
+ void lm_ggml_backend_sched_set_eval_callback(lm_ggml_backend_sched_t sched, lm_ggml_backend_sched_eval_callback callback, void * user_data) {
+    sched->callback_eval = callback;
+    sched->callback_eval_user_data = user_data;
+ }
+
+ int lm_ggml_backend_sched_get_n_splits(lm_ggml_backend_sched_t sched) {
+    return sched->n_splits;
+ }
+
+ int lm_ggml_backend_sched_get_n_copies(lm_ggml_backend_sched_t sched) {
+    return sched->n_copies;
+ }
+
+ int lm_ggml_backend_sched_get_n_backends(lm_ggml_backend_sched_t sched) {
+    return sched->n_backends;
+ }
+
+ lm_ggml_backend_t lm_ggml_backend_sched_get_backend(lm_ggml_backend_sched_t sched, int i) {
+    LM_GGML_ASSERT(i >= 0 && i < sched->n_backends);
+    return sched->backends[i];
+ }
+
+ size_t lm_ggml_backend_sched_get_buffer_size(lm_ggml_backend_sched_t sched, lm_ggml_backend_t backend) {
+    int backend_index = lm_ggml_backend_sched_backend_id(sched, backend);
+    LM_GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
+
+    return lm_ggml_gallocr_get_buffer_size(sched->galloc, backend_index);
+ }
+
+ void lm_ggml_backend_sched_set_tensor_backend(lm_ggml_backend_sched_t sched, struct lm_ggml_tensor * node, lm_ggml_backend_t backend) {
+    int backend_index = lm_ggml_backend_sched_backend_id(sched, backend);
+    LM_GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
+    tensor_backend_id(node) = backend_index;
+    SET_CAUSE(node, "usr");
+    sched->is_reset = false;
+ }
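+ // Illustrative usage (sketch, hypothetical tensor and backend handles): pin a tensor to a
+ // specific backend before the graph is split, e.g.
+ //     lm_ggml_backend_sched_set_tensor_backend(sched, inp_tokens, cpu_backend);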
+
+ lm_ggml_backend_t lm_ggml_backend_sched_get_tensor_backend(lm_ggml_backend_sched_t sched, struct lm_ggml_tensor * node) {
+    int backend_index = tensor_backend_id(node);
+    if (backend_index == -1) {
+        return NULL;
+    }
+    return sched->backends[backend_index];
+ }
+
+ // utils
+
+ enum lm_ggml_status lm_ggml_backend_view_init(struct lm_ggml_tensor * tensor) {
+    LM_GGML_ASSERT(tensor->buffer == NULL);
+    LM_GGML_ASSERT(tensor->view_src != NULL);
+    LM_GGML_ASSERT(tensor->view_src->buffer != NULL);
+    LM_GGML_ASSERT(tensor->view_src->data != NULL);
+
+    tensor->buffer = tensor->view_src->buffer;
+    tensor->data   = (char *)tensor->view_src->data + tensor->view_offs;
+    return lm_ggml_backend_buffer_init_tensor(tensor->buffer, tensor);
+ }
+
+ enum lm_ggml_status lm_ggml_backend_tensor_alloc(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, void * addr) {
+    LM_GGML_ASSERT(tensor->buffer == NULL);
+    LM_GGML_ASSERT(tensor->data == NULL);
+    LM_GGML_ASSERT(tensor->view_src == NULL);
+    LM_GGML_ASSERT(addr >= lm_ggml_backend_buffer_get_base(buffer));
+    LM_GGML_ASSERT((char *)addr + lm_ggml_backend_buffer_get_alloc_size(buffer, tensor) <=
+                   (char *)lm_ggml_backend_buffer_get_base(buffer) + lm_ggml_backend_buffer_get_size(buffer));
+
+    tensor->buffer = buffer;
+    tensor->data = addr;
+    return lm_ggml_backend_buffer_init_tensor(buffer, tensor);
+ }
+
+ static struct lm_ggml_tensor * graph_copy_dup_tensor(struct lm_ggml_hash_set hash_set, struct lm_ggml_tensor ** node_copies,
+    struct lm_ggml_context * ctx_allocated, struct lm_ggml_context * ctx_unallocated, struct lm_ggml_tensor * src) {
+
+    LM_GGML_ASSERT(src != NULL);
+    LM_GGML_ASSERT(src->data && "graph must be allocated");
+
+    size_t id = lm_ggml_hash_insert(&hash_set, src);
+    if (id == LM_GGML_HASHSET_ALREADY_EXISTS) {
+        return node_copies[lm_ggml_hash_find(&hash_set, src)];
+    }
+
+    struct lm_ggml_tensor * dst = lm_ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src);
+    if (src->view_src != NULL) {
+        dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src);
+        dst->view_offs = src->view_offs;
+    }
+    dst->op = src->op;
+    memcpy(dst->op_params, src->op_params, sizeof(dst->op_params));
+    lm_ggml_set_name(dst, src->name);
+
+    // copy src
+    for (int i = 0; i < LM_GGML_MAX_SRC; i++) {
+        struct lm_ggml_tensor * s = src->src[i];
+        if (s == NULL) {
+            continue;
+        }
+        dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
+    }
+
+    node_copies[id] = dst;
+    return dst;
+ }
+
+ static void graph_copy_init_tensor(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor ** node_copies, bool * node_init, struct lm_ggml_tensor * src) {
+    size_t id = lm_ggml_hash_find(hash_set, src);
+    if (node_init[id]) {
+        return;
+    }
+    node_init[id] = true;
+
+    struct lm_ggml_tensor * dst = node_copies[id];
+    if (dst->view_src != NULL) {
+        graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src);
+        enum lm_ggml_status status = lm_ggml_backend_view_init(dst);
+        LM_GGML_ASSERT(status == LM_GGML_STATUS_SUCCESS);
+    }
+    else {
+        lm_ggml_backend_tensor_copy(src, dst);
+    }
+
+    // init src
+    for (int i = 0; i < LM_GGML_MAX_SRC; i++) {
+        struct lm_ggml_tensor * s = src->src[i];
+        if (s == NULL) {
+            continue;
+        }
+        graph_copy_init_tensor(hash_set, node_copies, node_init, s);
+    }
+ }
+
+ struct lm_ggml_backend_graph_copy lm_ggml_backend_graph_copy(lm_ggml_backend_t backend, struct lm_ggml_cgraph * graph) {
+    struct lm_ggml_hash_set hash_set = lm_ggml_hash_set_new(graph->visited_hash_set.size);
+    struct lm_ggml_tensor ** node_copies = (lm_ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT
+    bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0]));
+
+    struct lm_ggml_init_params params = {
+        /* .mem_size   = */ lm_ggml_tensor_overhead()*hash_set.size + lm_ggml_graph_overhead_custom(graph->size, false),
+        /* .mem_buffer = */ NULL,
+        /* .no_alloc   = */ true
+    };
+
+    struct lm_ggml_context * ctx_allocated = lm_ggml_init(params);
+    struct lm_ggml_context * ctx_unallocated = lm_ggml_init(params);
+
+    if (ctx_allocated == NULL || ctx_unallocated == NULL) {
+        LM_GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__);
+        lm_ggml_hash_set_free(&hash_set);
+        free(node_copies);
+        free(node_init);
+        lm_ggml_free(ctx_allocated);
+        lm_ggml_free(ctx_unallocated);
+        return {
+            /* .buffer           = */ NULL,
+            /* .ctx_allocated    = */ NULL,
+            /* .ctx_unallocated  = */ NULL,
+            /* .graph            = */ NULL,
+        };
+    }
+
+    // dup nodes
+    for (int i = 0; i < graph->n_nodes; i++) {
+        struct lm_ggml_tensor * node = graph->nodes[i];
+        graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node);
+    }
+
+    // allocate nodes
+    lm_ggml_backend_buffer_t buffer = lm_ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
+    if (buffer == NULL) {
+        LM_GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__);
+        lm_ggml_hash_set_free(&hash_set);
+        free(node_copies);
+        free(node_init);
+        lm_ggml_free(ctx_allocated);
+        lm_ggml_free(ctx_unallocated);
+        return {
+            /* .buffer           = */ NULL,
+            /* .ctx_allocated    = */ NULL,
+            /* .ctx_unallocated  = */ NULL,
+            /* .graph            = */ NULL,
+        };
+    }
+
+    //printf("copy buffer size: %zu MB\n", lm_ggml_backend_buffer_get_size(buffer) / 1024 / 1024);
+
+    // copy data and init views
+    for (int i = 0; i < graph->n_nodes; i++) {
+        struct lm_ggml_tensor * node = graph->nodes[i];
+        graph_copy_init_tensor(&hash_set, node_copies, node_init, node);
+    }
+
+    // build graph copy
+    struct lm_ggml_cgraph * graph_copy = lm_ggml_new_graph_custom(ctx_allocated, graph->size, false);
+    for (int i = 0; i < graph->n_nodes; i++) {
+        struct lm_ggml_tensor * node = graph->nodes[i];
+        struct lm_ggml_tensor * node_copy = node_copies[lm_ggml_hash_find(&hash_set, node)];
+        graph_copy->nodes[i] = node_copy;
+    }
+    graph_copy->n_nodes = graph->n_nodes;
+
+    lm_ggml_hash_set_free(&hash_set);
+    free(node_copies);
+    free(node_init);
+
+    return {
+        /* .buffer           = */ buffer,
+        /* .ctx_allocated    = */ ctx_allocated,
+        /* .ctx_unallocated  = */ ctx_unallocated,
+        /* .graph            = */ graph_copy,
+    };
+ }
+
+ void lm_ggml_backend_graph_copy_free(struct lm_ggml_backend_graph_copy copy) {
+    lm_ggml_backend_buffer_free(copy.buffer);
+    lm_ggml_free(copy.ctx_allocated);
+    lm_ggml_free(copy.ctx_unallocated);
+ }
+
+ bool lm_ggml_backend_compare_graph_backend(lm_ggml_backend_t backend1, lm_ggml_backend_t backend2, struct lm_ggml_cgraph * graph, lm_ggml_backend_eval_callback callback, void * user_data, struct lm_ggml_tensor * test_node) {
+    struct lm_ggml_backend_graph_copy copy = lm_ggml_backend_graph_copy(backend2, graph);
+    if (copy.buffer == NULL) {
+        return false;
+    }
+
+    struct lm_ggml_cgraph * g1 = graph;
+    struct lm_ggml_cgraph * g2 = copy.graph;
+
+    assert(g1->n_nodes == g2->n_nodes);
+
+    if (test_node != nullptr) {
+        // Compute the whole graph and only test the output for a specific tensor
+        lm_ggml_backend_graph_compute(backend1, g1);
+        lm_ggml_backend_graph_compute(backend2, g2);
+
+        int test_node_idx = -1;
+        for (int i = 0; i < g1->n_nodes; i++) {
+            struct lm_ggml_tensor * t1 = g1->nodes[i];
+            if (t1 == test_node) {
+                test_node_idx = i;
+                break;
+            }
+        }
+        LM_GGML_ASSERT(test_node_idx != -1);
+
+        callback(test_node_idx, g1->nodes[test_node_idx], g2->nodes[test_node_idx], user_data);
+    } else {
+        for (int i = 0; i < g1->n_nodes; i++) {
+            struct lm_ggml_tensor * t1 = g1->nodes[i];
+            struct lm_ggml_tensor * t2 = g2->nodes[i];
+
+            assert(t1->op == t2->op && lm_ggml_are_same_layout(t1, t2));
+
+            struct lm_ggml_cgraph g1v = lm_ggml_graph_view(g1, i, i + 1);
+            struct lm_ggml_cgraph g2v = lm_ggml_graph_view(g2, i, i + 1);
+
+            lm_ggml_backend_graph_compute(backend1, &g1v);
+            lm_ggml_backend_graph_compute(backend2, &g2v);
+
+            if (lm_ggml_is_view_op(t1->op)) {
+                continue;
+            }
+
+            // compare results, calculate rms etc
+            if (!callback(i, t1, t2, user_data)) {
+                break;
+            }
+        }
+    }
+    lm_ggml_backend_graph_copy_free(copy);
+
+    return true;
+ }
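+ // Illustrative callback sketch (names are hypothetical): the callback receives the node
+ // index and the two computed tensors and returns false to stop the comparison early.
+ //
+ //     static bool cmp_cb(int i, lm_ggml_tensor * t1, lm_ggml_tensor * t2, void * ud) {
+ //         (void) i; (void) ud;
+ //         return lm_ggml_nbytes(t1) == lm_ggml_nbytes(t2); // e.g. compare contents here
+ //     }
+ //     lm_ggml_backend_compare_graph_backend(backend_a, backend_b, gf, cmp_cb, NULL, NULL);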
+
+ // CPU backend - buffer
+
+ static void * lm_ggml_backend_cpu_buffer_get_base(lm_ggml_backend_buffer_t buffer) {
+    uintptr_t data = (uintptr_t)buffer->context;
+
+    // align the buffer
+    if (data % TENSOR_ALIGNMENT != 0) {
+        data = LM_GGML_PAD(data, TENSOR_ALIGNMENT);
+    }
+
+    return (void *)data;
+ }
+
+ static void lm_ggml_backend_cpu_buffer_free_buffer(lm_ggml_backend_buffer_t buffer) {
+    lm_ggml_aligned_free(buffer->context, buffer->size);
+ }
+
+ static void lm_ggml_backend_cpu_buffer_memset_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+    memset((char *)tensor->data + offset, value, size);
+
+    LM_GGML_UNUSED(buffer);
+ }
+
+ static void lm_ggml_backend_cpu_buffer_set_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    memcpy((char *)tensor->data + offset, data, size);
+
+    LM_GGML_UNUSED(buffer);
+ }
+
+ static void lm_ggml_backend_cpu_buffer_get_tensor(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    memcpy(data, (const char *)tensor->data + offset, size);
+
+    LM_GGML_UNUSED(buffer);
+ }
+
+ static bool lm_ggml_backend_cpu_buffer_cpy_tensor(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst) {
+    if (lm_ggml_backend_buffer_is_host(src->buffer)) {
+        memcpy(dst->data, src->data, lm_ggml_nbytes(src));
+        return true;
+    }
+    return false;
+
+    LM_GGML_UNUSED(buffer);
+ }
+
+ static void lm_ggml_backend_cpu_buffer_clear(lm_ggml_backend_buffer_t buffer, uint8_t value) {
+    memset(buffer->context, value, buffer->size);
+ }
+
+ static const struct lm_ggml_backend_buffer_i lm_ggml_backend_cpu_buffer_i = {
+    /* .free_buffer     = */ lm_ggml_backend_cpu_buffer_free_buffer,
+    /* .get_base        = */ lm_ggml_backend_cpu_buffer_get_base,
+    /* .init_tensor     = */ NULL, // no initialization required
+    /* .memset_tensor   = */ lm_ggml_backend_cpu_buffer_memset_tensor,
+    /* .set_tensor      = */ lm_ggml_backend_cpu_buffer_set_tensor,
+    /* .get_tensor      = */ lm_ggml_backend_cpu_buffer_get_tensor,
+    /* .cpy_tensor      = */ lm_ggml_backend_cpu_buffer_cpy_tensor,
+    /* .clear           = */ lm_ggml_backend_cpu_buffer_clear,
+    /* .reset           = */ NULL,
+ };
+
+ static const struct lm_ggml_backend_buffer_i lm_ggml_backend_cpu_buffer_from_ptr_i = {
+    /* .free_buffer     = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
+    /* .get_base        = */ lm_ggml_backend_cpu_buffer_get_base,
+    /* .init_tensor     = */ NULL, // no initialization required
+    /* .memset_tensor   = */ lm_ggml_backend_cpu_buffer_memset_tensor,
+    /* .set_tensor      = */ lm_ggml_backend_cpu_buffer_set_tensor,
+    /* .get_tensor      = */ lm_ggml_backend_cpu_buffer_get_tensor,
+    /* .cpy_tensor      = */ lm_ggml_backend_cpu_buffer_cpy_tensor,
+    /* .clear           = */ lm_ggml_backend_cpu_buffer_clear,
+    /* .reset           = */ NULL,
+ };
+
+ // CPU backend buffer type
+
+ // this buffer type is defined here to make it available to all backends
+
+ static const char * lm_ggml_backend_cpu_buffer_type_get_name(lm_ggml_backend_buffer_type_t buft) {
+    return "CPU";
+
+    LM_GGML_UNUSED(buft);
+ }
+
+ static lm_ggml_backend_buffer_t lm_ggml_backend_cpu_buffer_type_alloc_buffer(lm_ggml_backend_buffer_type_t buft, size_t size) {
+    void * data = lm_ggml_aligned_malloc(size);
+
+    if (data == NULL) {
+        LM_GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size);
+        return NULL;
+    }
+
+    return lm_ggml_backend_buffer_init(buft, lm_ggml_backend_cpu_buffer_i, data, size);
+ }
+
+ static size_t lm_ggml_backend_cpu_buffer_type_get_alignment(lm_ggml_backend_buffer_type_t buft) {
+    return TENSOR_ALIGNMENT;
+
+    LM_GGML_UNUSED(buft);
+ }
+
+ static bool lm_ggml_backend_cpu_buffer_type_is_host(lm_ggml_backend_buffer_type_t buft) {
+    return true;
+
+    LM_GGML_UNUSED(buft);
+ }
+
+ lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_buffer_type(void) {
+    static struct lm_ggml_backend_buffer_type lm_ggml_backend_cpu_buffer_type = {
+        /* .iface   = */ {
+            /* .get_name         = */ lm_ggml_backend_cpu_buffer_type_get_name,
+            /* .alloc_buffer     = */ lm_ggml_backend_cpu_buffer_type_alloc_buffer,
+            /* .get_alignment    = */ lm_ggml_backend_cpu_buffer_type_get_alignment,
+            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
+            /* .get_alloc_size   = */ NULL, // defaults to lm_ggml_nbytes
+            /* .is_host          = */ lm_ggml_backend_cpu_buffer_type_is_host,
+        },
+        /* .device  = */ NULL, // FIXME lm_ggml_backend_reg_dev_get(lm_ggml_backend_cpu_reg(), 0),
+        /* .context = */ NULL,
+    };
+
+    return &lm_ggml_backend_cpu_buffer_type;
+ }
+
+ static const char * lm_ggml_backend_cpu_buffer_from_ptr_type_get_name(lm_ggml_backend_buffer_type_t buft) {
+    return "CPU_Mapped";
+
+    LM_GGML_UNUSED(buft);
+ }
+
+ static lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_buffer_from_ptr_type(void) {
+    static struct lm_ggml_backend_buffer_type lm_ggml_backend_cpu_buffer_type = {
+        /* .iface   = */ {
+            /* .get_name         = */ lm_ggml_backend_cpu_buffer_from_ptr_type_get_name,
+            /* .alloc_buffer     = */ lm_ggml_backend_cpu_buffer_type_alloc_buffer,
+            /* .get_alignment    = */ lm_ggml_backend_cpu_buffer_type_get_alignment,
+            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
+            /* .get_alloc_size   = */ NULL, // defaults to lm_ggml_nbytes
+            /* .is_host          = */ lm_ggml_backend_cpu_buffer_type_is_host,
+        },
+        /* .device  = */ NULL, // FIXME lm_ggml_backend_reg_dev_get(lm_ggml_backend_cpu_reg(), 0),
+        /* .context = */ NULL,
+    };
+
+    return &lm_ggml_backend_cpu_buffer_type;
+ }
+
+ lm_ggml_backend_buffer_t lm_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
+    LM_GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned");
+    return lm_ggml_backend_buffer_init(lm_ggml_backend_cpu_buffer_from_ptr_type(), lm_ggml_backend_cpu_buffer_from_ptr_i, ptr, size);
+ }
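+ // Illustrative usage (sketch): wrap an existing, suitably aligned host allocation without
+ // transferring ownership; the from_ptr interface above never frees the pointer.
+ //
+ //     size_t size = 16*1024*1024;
+ //     void * base = lm_ggml_aligned_malloc(size);  // caller keeps ownership of this memory
+ //     lm_ggml_backend_buffer_t buf = lm_ggml_backend_cpu_buffer_from_ptr(base, size);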