cui-llama.rn 1.3.0 → 1.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/android/src/main/CMakeLists.txt +9 -6
  2. package/android/src/main/java/com/rnllama/LlamaContext.java +4 -4
  3. package/android/src/main/jni.cpp +15 -15
  4. package/cpp/common.cpp +1962 -1682
  5. package/cpp/common.h +645 -600
  6. package/cpp/ggml-alloc.c +1038 -1040
  7. package/cpp/ggml-alloc.h +76 -76
  8. package/cpp/ggml-backend-impl.h +256 -216
  9. package/cpp/ggml-backend-reg.cpp +552 -195
  10. package/cpp/ggml-backend.cpp +1999 -1997
  11. package/cpp/ggml-backend.h +352 -328
  12. package/cpp/ggml-common.h +1853 -1853
  13. package/cpp/ggml-cpp.h +38 -38
  14. package/cpp/{ggml-cpu-aarch64.c → ggml-cpu-aarch64.cpp} +4262 -3560
  15. package/cpp/ggml-cpu-aarch64.h +8 -30
  16. package/cpp/ggml-cpu-impl.h +386 -371
  17. package/cpp/ggml-cpu-quants.c +10835 -10822
  18. package/cpp/ggml-cpu-quants.h +63 -63
  19. package/cpp/ggml-cpu-traits.cpp +36 -0
  20. package/cpp/ggml-cpu-traits.h +38 -0
  21. package/cpp/ggml-cpu.c +14122 -13975
  22. package/cpp/ggml-cpu.cpp +618 -663
  23. package/cpp/ggml-cpu.h +135 -177
  24. package/cpp/ggml-impl.h +556 -550
  25. package/cpp/ggml-metal.h +66 -66
  26. package/cpp/ggml-metal.m +4884 -4294
  27. package/cpp/ggml-quants.c +5238 -5247
  28. package/cpp/ggml-quants.h +100 -100
  29. package/cpp/ggml-threading.cpp +12 -12
  30. package/cpp/ggml-threading.h +14 -12
  31. package/cpp/ggml.c +7707 -8180
  32. package/cpp/ggml.h +2286 -2411
  33. package/cpp/json-schema-to-grammar.cpp +1045 -0
  34. package/cpp/json-schema-to-grammar.h +8 -0
  35. package/cpp/json.hpp +24766 -0
  36. package/cpp/llama-grammar.cpp +1138 -1138
  37. package/cpp/llama-grammar.h +144 -144
  38. package/cpp/llama-impl.h +181 -181
  39. package/cpp/llama-sampling.cpp +2293 -2348
  40. package/cpp/llama-sampling.h +48 -48
  41. package/cpp/llama-vocab.cpp +1985 -1984
  42. package/cpp/llama-vocab.h +170 -170
  43. package/cpp/llama.cpp +22836 -22132
  44. package/cpp/llama.h +1263 -1253
  45. package/cpp/log.cpp +401 -401
  46. package/cpp/log.h +121 -121
  47. package/cpp/rn-llama.hpp +6 -6
  48. package/cpp/sampling.cpp +500 -466
  49. package/cpp/sampling.h +22 -1
  50. package/cpp/sgemm.cpp +1884 -1884
  51. package/cpp/speculative.cpp +274 -0
  52. package/cpp/speculative.h +28 -0
  53. package/cpp/unicode.cpp +62 -51
  54. package/cpp/unicode.h +9 -10
  55. package/ios/RNLlamaContext.mm +13 -0
  56. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  57. package/lib/commonjs/grammar.js +4 -2
  58. package/lib/commonjs/grammar.js.map +1 -1
  59. package/lib/commonjs/index.js +38 -1
  60. package/lib/commonjs/index.js.map +1 -1
  61. package/lib/module/NativeRNLlama.js.map +1 -1
  62. package/lib/module/grammar.js +2 -1
  63. package/lib/module/grammar.js.map +1 -1
  64. package/lib/module/index.js +36 -0
  65. package/lib/module/index.js.map +1 -1
  66. package/lib/typescript/NativeRNLlama.d.ts +95 -6
  67. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  68. package/lib/typescript/grammar.d.ts +5 -6
  69. package/lib/typescript/grammar.d.ts.map +1 -1
  70. package/lib/typescript/index.d.ts +40 -4
  71. package/lib/typescript/index.d.ts.map +1 -1
  72. package/package.json +2 -1
  73. package/src/NativeRNLlama.ts +99 -12
  74. package/src/grammar.ts +10 -8
  75. package/src/index.ts +68 -3
  76. package/cpp/ggml-aarch64.c +0 -129
  77. package/cpp/ggml-aarch64.h +0 -19
package/cpp/ggml-alloc.c CHANGED
@@ -1,1040 +1,1038 @@
- #include "ggml-alloc.h"
- #include "ggml-backend-impl.h"
- #include "ggml.h"
- #include "ggml-impl.h"
- #include <assert.h>
- #include <limits.h>
- #include <stdarg.h>
- #include <stdio.h>
- #include <stdlib.h>
- #include <string.h>
-
- #define MAX(a, b) ((a) > (b) ? (a) : (b))
- #define MAX_FREE_BLOCKS 256
-
- //#define LM_GGML_ALLOCATOR_DEBUG
-
- //#define AT_PRINTF(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
- #define AT_PRINTF(...)
-
-
- static bool lm_ggml_is_view(const struct lm_ggml_tensor * t) {
- return t->view_src != NULL;
- }
-
- static bool lm_ggml_are_same_layout(const struct lm_ggml_tensor * a, const struct lm_ggml_tensor * b) {
- if (a->type != b->type) {
- return false;
- }
- for (int i = 0; i < LM_GGML_MAX_DIMS; i++) {
- if (a->ne[i] != b->ne[i]) {
- return false;
- }
- if (a->nb[i] != b->nb[i]) {
- return false;
- }
- }
- return true;
- }
-
- static bool lm_ggml_op_can_inplace(enum lm_ggml_op op) {
- switch (op) {
- case LM_GGML_OP_SCALE:
- case LM_GGML_OP_DIAG_MASK_ZERO:
- case LM_GGML_OP_DIAG_MASK_INF:
- case LM_GGML_OP_ADD:
- case LM_GGML_OP_ADD1:
- case LM_GGML_OP_SUB:
- case LM_GGML_OP_MUL:
- case LM_GGML_OP_DIV:
- case LM_GGML_OP_SQR:
- case LM_GGML_OP_SQRT:
- case LM_GGML_OP_LOG:
- case LM_GGML_OP_UNARY:
- case LM_GGML_OP_ROPE:
- case LM_GGML_OP_RMS_NORM:
- case LM_GGML_OP_SOFT_MAX:
- return true;
-
- default:
- return false;
- }
- }
-
- static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
- assert(alignment && !(alignment & (alignment - 1))); // power of 2
- size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
- return offset + align;
- }
-
- // tallocr
-
- struct lm_ggml_tallocr lm_ggml_tallocr_new(lm_ggml_backend_buffer_t buffer) {
- void * base = lm_ggml_backend_buffer_get_base(buffer);
- size_t align = lm_ggml_backend_buffer_get_alignment(buffer);
-
- assert(align && !(align & (align - 1))); // power of 2
-
- struct lm_ggml_tallocr talloc = (struct lm_ggml_tallocr) {
- /*.buffer = */ buffer,
- /*.base = */ base,
- /*.alignment = */ align,
- /*.offset = */ aligned_offset(base, 0, align),
- };
- return talloc;
- }
-
- void lm_ggml_tallocr_alloc(struct lm_ggml_tallocr * talloc, struct lm_ggml_tensor * tensor) {
- size_t size = lm_ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
- size = LM_GGML_PAD(size, talloc->alignment);
-
- if (talloc->offset + size > lm_ggml_backend_buffer_get_size(talloc->buffer)) {
- LM_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
- __func__, tensor->name, size, lm_ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
- LM_GGML_ABORT("not enough space in the buffer");
- }
-
- void * addr = (char *)lm_ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
- talloc->offset += size;
-
- assert(((uintptr_t)addr % talloc->alignment) == 0);
-
- lm_ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
- }
-
- // dynamic tensor allocator
-
- struct free_block {
- size_t offset;
- size_t size;
- };
-
- struct lm_ggml_dyn_tallocr {
- size_t alignment;
- int n_free_blocks;
- struct free_block free_blocks[MAX_FREE_BLOCKS];
- size_t max_size;
-
- #ifdef LM_GGML_ALLOCATOR_DEBUG
- struct {
- const struct lm_ggml_tensor * tensor;
- size_t offset;
- } allocated_tensors[1024];
- #endif
- };
-
- #ifdef LM_GGML_ALLOCATOR_DEBUG
- static void add_allocated_tensor(struct lm_ggml_dyn_tallocr * alloc, size_t offset, const struct lm_ggml_tensor * tensor) {
- for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i].tensor == NULL) {
- alloc->allocated_tensors[i].tensor = tensor;
- alloc->allocated_tensors[i].offset = offset;
- return;
- }
- }
- LM_GGML_ABORT("out of allocated_tensors");
- }
- static void remove_allocated_tensor(struct lm_ggml_dyn_tallocr * alloc, size_t offset, const struct lm_ggml_tensor * tensor) {
- for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i].offset == offset) {
- alloc->allocated_tensors[i].tensor = NULL;
- return;
- }
- }
- LM_GGML_ABORT("tried to free tensor %s not found\n", tensor->name);
- }
- #endif
-
- static size_t lm_ggml_dyn_tallocr_alloc(struct lm_ggml_dyn_tallocr * alloc, size_t size, const struct lm_ggml_tensor * tensor) {
- size = aligned_offset(NULL, size, alloc->alignment);
-
- AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
-
- size_t max_avail = 0;
-
- // find the best fitting free block besides the last block
- int best_fit_block = -1;
- size_t best_fit_size = SIZE_MAX;
- for (int i = 0; i < alloc->n_free_blocks - 1; i++) {
- struct free_block * block = &alloc->free_blocks[i];
- max_avail = MAX(max_avail, block->size);
- if (block->size >= size && block->size <= best_fit_size) {
- best_fit_block = i;
- best_fit_size = block->size;
- }
- }
-
- if (best_fit_block == -1) {
- // the last block is our last resort
- struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
- max_avail = MAX(max_avail, block->size);
- if (block->size >= size) {
- best_fit_block = alloc->n_free_blocks - 1;
- } else {
- // this should never happen
- LM_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
- __func__, size, max_avail);
- LM_GGML_ABORT("not enough space in the buffer");
- }
- }
-
- struct free_block * block = &alloc->free_blocks[best_fit_block];
- size_t offset = block->offset;
- block->offset = offset + size;
- block->size -= size;
- if (block->size == 0) {
- // remove block if empty
- alloc->n_free_blocks--;
- for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
- alloc->free_blocks[j] = alloc->free_blocks[j+1];
- }
- }
-
- AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset);
-
- #ifdef LM_GGML_ALLOCATOR_DEBUG
- add_allocated_tensor(alloc, offset, tensor);
- size_t cur_max = offset + size;
- if (cur_max > alloc->max_size) {
- // sort allocated_tensors by offset
- for (int i = 0; i < 1024; i++) {
- for (int j = i + 1; j < 1024; j++) {
- if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) {
- const struct lm_ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
- size_t tmp_offset = alloc->allocated_tensors[i].offset;
- alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
- alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset;
- alloc->allocated_tensors[j].tensor = tmp_tensor;
- alloc->allocated_tensors[j].offset = tmp_offset;
- }
- }
- }
- LM_GGML_LOG_DEBUG("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
- for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i].tensor) {
- LM_GGML_LOG_DEBUG("%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
- alloc->allocated_tensors[i].offset,
- alloc->allocated_tensors[i].offset + lm_ggml_nbytes(alloc->allocated_tensors[i].tensor),
- lm_ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
- }
- }
- LM_GGML_LOG_DEBUG("\n");
- }
- #endif
-
- alloc->max_size = MAX(alloc->max_size, offset + size);
-
- return offset;
-
- LM_GGML_UNUSED(tensor);
- }
-
- // this is a very naive implementation, but for our case the number of free blocks should be very small
- static void lm_ggml_dyn_tallocr_free_tensor(struct lm_ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct lm_ggml_tensor * tensor) {
- size = aligned_offset(NULL, size, alloc->alignment);
-
- AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks);
-
- #ifdef LM_GGML_ALLOCATOR_DEBUG
- remove_allocated_tensor(alloc, offset, tensor);
- #endif
-
- // see if we can merge with an existing block
- for (int i = 0; i < alloc->n_free_blocks; i++) {
- struct free_block * block = &alloc->free_blocks[i];
- // check if ptr is at the end of the block
- if (block->offset + block->size == offset) {
- block->size += size;
- // check if we can merge with the next block
- if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) {
- block->size += alloc->free_blocks[i+1].size;
- alloc->n_free_blocks--;
- for (int j = i+1; j < alloc->n_free_blocks; j++) {
- alloc->free_blocks[j] = alloc->free_blocks[j+1];
- }
- }
- return;
- }
- // check if ptr is at the beginning of the block
- if (offset + size == block->offset) {
- block->offset = offset;
- block->size += size;
- // check if we can merge with the previous block
- if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) {
- alloc->free_blocks[i-1].size += block->size;
- alloc->n_free_blocks--;
- for (int j = i; j < alloc->n_free_blocks; j++) {
- alloc->free_blocks[j] = alloc->free_blocks[j+1];
- }
- }
- return;
- }
- }
- // otherwise, add a new block
- LM_GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
- // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
- int insert_pos = 0;
- while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) {
- insert_pos++;
- }
- // shift all blocks from insert_pos onward to make room for the new block
- for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
- alloc->free_blocks[i] = alloc->free_blocks[i-1];
- }
- // insert the new block
- alloc->free_blocks[insert_pos].offset = offset;
- alloc->free_blocks[insert_pos].size = size;
- alloc->n_free_blocks++;
-
- LM_GGML_UNUSED(tensor);
- }
-
- static void lm_ggml_dyn_tallocr_reset(struct lm_ggml_dyn_tallocr * alloc) {
- alloc->n_free_blocks = 1;
- alloc->free_blocks[0].offset = 0;
- alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
- alloc->max_size = 0;
-
- #ifdef LM_GGML_ALLOCATOR_DEBUG
- for (int i = 0; i < 1024; i++) {
- alloc->allocated_tensors[i].tensor = NULL;
- }
- #endif
- }
-
- static struct lm_ggml_dyn_tallocr * lm_ggml_dyn_tallocr_new(size_t alignment) {
- struct lm_ggml_dyn_tallocr * alloc = (struct lm_ggml_dyn_tallocr *)malloc(sizeof(struct lm_ggml_dyn_tallocr));
-
- *alloc = (struct lm_ggml_dyn_tallocr) {
- /*.alignment = */ alignment,
- /*.n_free_blocks = */ 0,
- /*.free_blocks = */ {{0}},
- /*.max_size = */ 0,
- #ifdef LM_GGML_ALLOCATOR_DEBUG
- /*.allocated_tensors = */ {{0}},
- #endif
- };
-
- lm_ggml_dyn_tallocr_reset(alloc);
-
- return alloc;
- }
-
- static void lm_ggml_dyn_tallocr_free(struct lm_ggml_dyn_tallocr * alloc) {
- free(alloc);
- }
-
- static size_t lm_ggml_dyn_tallocr_max_size(struct lm_ggml_dyn_tallocr * alloc) {
- return alloc->max_size;
- }
-
-
- /////////////////////////////////////
-
- // graph allocator
-
- struct hash_node {
- int n_children;
- int n_views;
- int buffer_id;
- size_t offset; // offset within the buffer
- bool allocated;
- };
-
- struct tensor_alloc {
- int buffer_id;
- size_t offset;
- size_t size_max; // 0 = pre-allocated, unused, or view
- };
-
- struct leaf_alloc {
- struct tensor_alloc leaf;
- };
-
- struct node_alloc {
- struct tensor_alloc dst;
- struct tensor_alloc src[LM_GGML_MAX_SRC];
- };
-
- struct lm_ggml_gallocr {
- lm_ggml_backend_buffer_type_t * bufts; // [n_buffers]
- lm_ggml_backend_buffer_t * buffers; // [n_buffers]
- struct lm_ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
- int n_buffers;
-
- struct lm_ggml_hash_set hash_set;
- struct hash_node * hash_values; // [hash_set.size]
-
- struct node_alloc * node_allocs; // [n_nodes]
- int n_nodes;
-
- struct leaf_alloc * leaf_allocs; // [n_leafs]
- int n_leafs;
- };
-
- lm_ggml_gallocr_t lm_ggml_gallocr_new_n(lm_ggml_backend_buffer_type_t * bufts, int n_bufs) {
- lm_ggml_gallocr_t galloc = (lm_ggml_gallocr_t)calloc(1, sizeof(struct lm_ggml_gallocr));
- LM_GGML_ASSERT(galloc != NULL);
-
- galloc->bufts = calloc(n_bufs, sizeof(lm_ggml_backend_buffer_type_t));
- LM_GGML_ASSERT(galloc->bufts != NULL);
-
- galloc->buffers = calloc(n_bufs, sizeof(lm_ggml_backend_buffer_t));
- LM_GGML_ASSERT(galloc->buffers != NULL);
-
- galloc->buf_tallocs = calloc(n_bufs, sizeof(struct lm_ggml_dyn_tallocr *));
- LM_GGML_ASSERT(galloc->buf_tallocs != NULL);
-
- for (int i = 0; i < n_bufs; i++) {
- galloc->bufts[i] = bufts[i];
- galloc->buffers[i] = NULL;
-
- // check if the same buffer type is used multiple times and reuse the same allocator
- for (int j = 0; j < i; j++) {
- if (bufts[i] == bufts[j]) {
- galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
- break;
- }
- }
-
- if (galloc->buf_tallocs[i] == NULL) {
- size_t alignment = lm_ggml_backend_buft_get_alignment(bufts[i]);
- galloc->buf_tallocs[i] = lm_ggml_dyn_tallocr_new(alignment);
- }
- }
- galloc->n_buffers = n_bufs;
-
- return galloc;
- }
-
- lm_ggml_gallocr_t lm_ggml_gallocr_new(lm_ggml_backend_buffer_type_t buft) {
- return lm_ggml_gallocr_new_n(&buft, 1);
- }
-
- void lm_ggml_gallocr_free(lm_ggml_gallocr_t galloc) {
- if (galloc == NULL) {
- return;
- }
-
- for (int i = 0; i < galloc->n_buffers; i++) {
- if (galloc->buffers != NULL) {
- // skip if already freed
- bool freed = false;
- for (int j = 0; j < i; j++) {
- if (galloc->buffers[j] == galloc->buffers[i]) {
- freed = true;
- break;
- }
- }
- if (!freed) {
- lm_ggml_backend_buffer_free(galloc->buffers[i]);
- }
- }
- if (galloc->buf_tallocs != NULL) {
- // skip if already freed
- bool freed = false;
- for (int j = 0; j < i; j++) {
- if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
- freed = true;
- break;
- }
- }
- if (!freed) {
- lm_ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
- }
- }
- }
-
- lm_ggml_hash_set_free(&galloc->hash_set);
- free(galloc->hash_values);
- free(galloc->bufts);
- free(galloc->buffers);
- free(galloc->buf_tallocs);
- free(galloc->node_allocs);
- free(galloc->leaf_allocs);
- free(galloc);
- }
-
- typedef struct lm_ggml_gallocr * lm_ggml_gallocr_t;
-
- static struct hash_node * lm_ggml_gallocr_hash_get(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * t) {
- size_t i = lm_ggml_hash_find_or_insert(&galloc->hash_set, t);
- return &galloc->hash_values[i];
- }
-
- static bool lm_ggml_gallocr_is_own(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * t) {
- return lm_ggml_gallocr_hash_get(galloc, t)->allocated;
- }
-
- static void lm_ggml_gallocr_set_node_offset(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * node, int buffer_id, size_t offset) {
- struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, node);
- hn->buffer_id = buffer_id;
- hn->offset = offset;
- hn->allocated = true;
- }
-
- static bool lm_ggml_gallocr_is_allocated(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * t) {
- return t->data != NULL || lm_ggml_gallocr_hash_get(galloc, t)->allocated;
- }
-
- static void lm_ggml_gallocr_allocate_node(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * node, int buffer_id) {
- struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, node);
-
- if (!lm_ggml_gallocr_is_allocated(galloc, node) && !lm_ggml_is_view(node)) {
- hn->allocated = true;
- assert(hn->offset == 0);
-
- // try to reuse a parent's buffer (inplace)
- if (lm_ggml_op_can_inplace(node->op)) {
- for (int i = 0; i < LM_GGML_MAX_SRC; i++) {
- struct lm_ggml_tensor * parent = node->src[i];
- if (parent == NULL) {
- continue;
- }
-
- // if the node's data is external, then we cannot re-use it
- if (!lm_ggml_gallocr_is_own(galloc, parent)) {
- AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
- continue;
- }
-
- // outputs cannot be reused
- if (parent->flags & LM_GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & LM_GGML_TENSOR_FLAG_OUTPUT)) {
- AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
- continue;
- }
-
- if (!lm_ggml_are_same_layout(node, parent)) {
- AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
- continue;
- }
-
- struct hash_node * p_hn = lm_ggml_gallocr_hash_get(galloc, parent);
- if (p_hn->n_children == 1 && p_hn->n_views == 0) {
- if (lm_ggml_is_view(parent)) {
- struct lm_ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = lm_ggml_gallocr_hash_get(galloc, view_src);
- if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
- AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
- assert(view_src_hn->offset == p_hn->offset);
- hn->buffer_id = p_hn->buffer_id;
- hn->offset = p_hn->offset;
- p_hn->allocated = false; // avoid freeing the parent
- view_src_hn->allocated = false;
- return;
- }
- } else {
- AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
- hn->buffer_id = p_hn->buffer_id;
- hn->offset = p_hn->offset;
- p_hn->allocated = false; // avoid freeing the parent
- return;
- }
- }
- }
- }
- // allocate tensor from the buffer
- struct lm_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
- lm_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
- size_t size = lm_ggml_backend_buft_get_alloc_size(buft, node);
- size_t offset = lm_ggml_dyn_tallocr_alloc(alloc, size, node);
- hn->buffer_id = buffer_id;
- hn->offset = offset;
- return;
- }
- }
-
- static void lm_ggml_gallocr_free_node(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * node) {
- // graph outputs are never freed
- if (node->flags & LM_GGML_TENSOR_FLAG_OUTPUT) {
- AT_PRINTF("not freeing output %s\n", node->name);
- return;
- }
-
- struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, node);
- size_t offset = hn->offset;
- int buffer_id = hn->buffer_id;
- struct lm_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
- lm_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
- size_t size = lm_ggml_backend_buft_get_alloc_size(buft, node);
- lm_ggml_dyn_tallocr_free_tensor(alloc, offset, size, node);
- hn->allocated = false;
- }
-
- static int get_node_buffer_id(const int * node_buffer_ids, int i) {
- return node_buffer_ids ? node_buffer_ids[i] : 0;
- }
-
- static void lm_ggml_gallocr_alloc_graph_impl(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
- // clear hash tables
- lm_ggml_hash_set_reset(&galloc->hash_set);
- memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);
-
- // allocate leafs
- // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
- for (int i = 0; i < graph->n_leafs; i++) {
- struct lm_ggml_tensor * leaf = graph->leafs[i];
- lm_ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
- }
-
- // count number of children and views
- // allocate other graph inputs and leafs first to avoid overwriting them
- for (int i = 0; i < graph->n_nodes; i++) {
- struct lm_ggml_tensor * node = graph->nodes[i];
-
- // TODO: better way to add external dependencies
- // LM_GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
- // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
- // itself is never used and should not be considered a dependency
- if (lm_ggml_is_view(node) && node->op != LM_GGML_OP_NONE) {
- struct lm_ggml_tensor * view_src = node->view_src;
- lm_ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
- }
-
- if (node->flags & LM_GGML_TENSOR_FLAG_INPUT) {
- lm_ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
- }
-
- for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
- struct lm_ggml_tensor * src = node->src[j];
- if (src == NULL) {
- continue;
- }
-
- lm_ggml_gallocr_hash_get(galloc, src)->n_children += 1;
-
- // allocate explicit inputs
- if (src->flags & LM_GGML_TENSOR_FLAG_INPUT) {
- lm_ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
- }
- }
- }
-
- // allocate tensors
- for (int i = 0; i < graph->n_nodes; i++) {
- struct lm_ggml_tensor * node = graph->nodes[i];
- int buffer_id = get_node_buffer_id(node_buffer_ids, i);
-
- // allocate parents (only leafs need to be allocated at this point)
- for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
- struct lm_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- continue;
- }
- lm_ggml_gallocr_allocate_node(galloc, parent, buffer_id);
- }
-
- // allocate node
- lm_ggml_gallocr_allocate_node(galloc, node, buffer_id);
-
- AT_PRINTF("exec: %s (%s) <= ", lm_ggml_op_desc(node), node->name);
- for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
- struct lm_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- continue;
- }
- AT_PRINTF("%s", parent->name);
- if (j < LM_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
- AT_PRINTF(", ");
- }
- }
- AT_PRINTF("\n");
-
- // update parents
- for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
- struct lm_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- continue;
- }
- struct hash_node * p_hn = lm_ggml_gallocr_hash_get(galloc, parent);
- p_hn->n_children -= 1;
-
- AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
- parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);
-
- if (p_hn->n_children == 0 && p_hn->n_views == 0) {
- if (lm_ggml_is_view(parent)) {
- struct lm_ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = lm_ggml_gallocr_hash_get(galloc, view_src);
- view_src_hn->n_views -= 1;
- AT_PRINTF("view_src %s: %d children, %d views\n",
- view_src->name, view_src_hn->n_children, view_src_hn->n_views);
- if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
- lm_ggml_gallocr_free_node(galloc, view_src);
- }
- }
- else if (p_hn->allocated) {
- lm_ggml_gallocr_free_node(galloc, parent);
- }
- }
- AT_PRINTF("\n");
- }
- }
- }
-
- bool lm_ggml_gallocr_reserve_n(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
- size_t min_hash_size = graph->n_nodes + graph->n_leafs;
- // add 25% margin to avoid hash collisions
- min_hash_size += min_hash_size / 4;
-
- // initialize hash table
- if (galloc->hash_set.size < min_hash_size) {
- lm_ggml_hash_set_free(&galloc->hash_set);
- galloc->hash_set = lm_ggml_hash_set_new(min_hash_size);
- LM_GGML_ASSERT(galloc->hash_set.keys != NULL);
-
- free(galloc->hash_values);
- galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
- LM_GGML_ASSERT(galloc->hash_values != NULL);
- }
-
- // reset allocators
- for (int i = 0; i < galloc->n_buffers; i++) {
- lm_ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
- }
-
- // allocate in hash table
- lm_ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);
-
- // set the node_allocs from the hash table
- if (galloc->n_nodes < graph->n_nodes) {
- free(galloc->node_allocs);
- galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
- LM_GGML_ASSERT(galloc->node_allocs != NULL);
- }
- galloc->n_nodes = graph->n_nodes;
- for (int i = 0; i < graph->n_nodes; i++) {
- struct lm_ggml_tensor * node = graph->nodes[i];
- struct node_alloc * node_alloc = &galloc->node_allocs[i];
- if (node->view_src || node->data) {
- node_alloc->dst.buffer_id = -1;
- node_alloc->dst.offset = SIZE_MAX;
- node_alloc->dst.size_max = 0;
- } else {
- struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, node);
- node_alloc->dst.buffer_id = hn->buffer_id;
- node_alloc->dst.offset = hn->offset;
- node_alloc->dst.size_max = lm_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
- }
- for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
- struct lm_ggml_tensor * src = node->src[j];
- if (!src || src->view_src || src->data) {
- node_alloc->src[j].buffer_id = -1;
- node_alloc->src[j].offset = SIZE_MAX;
- node_alloc->src[j].size_max = 0;
- } else {
- struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, src);
- node_alloc->src[j].buffer_id = hn->buffer_id;
- node_alloc->src[j].offset = hn->offset;
- node_alloc->src[j].size_max = lm_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
- }
- }
- }
- if (galloc->n_leafs < graph->n_leafs) {
- free(galloc->leaf_allocs);
- galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
- LM_GGML_ASSERT(galloc->leaf_allocs != NULL);
- }
- galloc->n_leafs = graph->n_leafs;
- for (int i = 0; i < graph->n_leafs; i++) {
- struct lm_ggml_tensor * leaf = graph->leafs[i];
- struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, leaf);
- if (leaf->view_src || leaf->data) {
- galloc->leaf_allocs[i].leaf.buffer_id = -1;
- galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
- galloc->leaf_allocs[i].leaf.size_max = 0;
- } else {
- galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
- galloc->leaf_allocs[i].leaf.offset = hn->offset;
- galloc->leaf_allocs[i].leaf.size_max = lm_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
- }
- }
-
- // reallocate buffers if needed
- for (int i = 0; i < galloc->n_buffers; i++) {
- // if the buffer type is used multiple times, we reuse the same buffer
- for (int j = 0; j < i; j++) {
- if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
- galloc->buffers[i] = galloc->buffers[j];
- break;
- }
- }
-
- size_t cur_size = galloc->buffers[i] ? lm_ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
- size_t new_size = lm_ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);
-
- // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
- if (new_size > cur_size || galloc->buffers[i] == NULL) {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, lm_ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
- #endif
-
- lm_ggml_backend_buffer_free(galloc->buffers[i]);
- galloc->buffers[i] = lm_ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
- if (galloc->buffers[i] == NULL) {
- LM_GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, lm_ggml_backend_buft_name(galloc->bufts[i]), new_size);
- return false;
- }
- lm_ggml_backend_buffer_set_usage(galloc->buffers[i], LM_GGML_BACKEND_BUFFER_USAGE_COMPUTE);
- }
- }
-
- return true;
- }
-
- bool lm_ggml_gallocr_reserve(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph *graph) {
- return lm_ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
- }
-
- static void lm_ggml_gallocr_init_tensor(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
- int buffer_id = tensor_alloc->buffer_id;
- assert(tensor->data || tensor->view_src || lm_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
-
- if (tensor->view_src != NULL) {
- if (tensor->buffer == NULL) {
- assert(tensor_alloc->offset == SIZE_MAX);
- if (tensor->view_src->buffer == NULL) {
- // this tensor was allocated without ggml-backend
- return;
- }
- lm_ggml_backend_view_init(tensor);
- }
- } else {
- if (tensor->data == NULL) {
- assert(tensor_alloc->offset != SIZE_MAX);
- assert(lm_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
- void * base = lm_ggml_backend_buffer_get_base(galloc->buffers[buffer_id]);
- void * addr = (char *)base + tensor_alloc->offset;
- lm_ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr);
- } else {
- if (tensor->buffer == NULL) {
- // this tensor was allocated without ggml-backend
- return;
- }
- }
- }
- }
-
- static bool lm_ggml_gallocr_node_needs_realloc(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * node, struct tensor_alloc * talloc) {
- size_t node_size = (node->data || node->view_src) ? 0 : lm_ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
- return talloc->size_max >= node_size;
- }
-
- static bool lm_ggml_gallocr_needs_realloc(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph) {
- if (galloc->n_nodes != graph->n_nodes) {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
- #endif
- return true;
- }
-
- if (galloc->n_leafs != graph->n_leafs) {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
- #endif
- return true;
- }
-
- for (int i = 0; i < graph->n_nodes; i++) {
- struct lm_ggml_tensor * node = graph->nodes[i];
- struct node_alloc * node_alloc = &galloc->node_allocs[i];
-
- if (!lm_ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
- #endif
- return true;
- }
-
- for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
- struct lm_ggml_tensor * src = node->src[j];
- if (src == NULL) {
- continue;
- }
- if (!lm_ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
- #endif
- return true;
- }
- }
- }
-
- return false;
- }
-
- bool lm_ggml_gallocr_alloc_graph(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph) {
- if (lm_ggml_gallocr_needs_realloc(galloc, graph)) {
- if (galloc->n_buffers == 1) {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
- #endif
- if (!lm_ggml_gallocr_reserve(galloc, graph)) {
- return false;
- }
- } else {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
- #endif
- return false;
- }
- }
-
- // reset buffers
- for (int i = 0; i < galloc->n_buffers; i++) {
- if (galloc->buffers[i] != NULL) {
- lm_ggml_backend_buffer_reset(galloc->buffers[i]);
- }
- }
-
- // allocate the graph tensors from the previous assignments
- // leafs
- for (int i = 0; i < graph->n_leafs; i++) {
- struct lm_ggml_tensor * leaf = graph->leafs[i];
- struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
- lm_ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
- }
- // nodes
- for (int i = 0; i < graph->n_nodes; i++) {
- struct lm_ggml_tensor * node = graph->nodes[i];
- struct node_alloc * node_alloc = &galloc->node_allocs[i];
- for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
- struct lm_ggml_tensor * src = node->src[j];
- if (src == NULL) {
- continue;
- }
- lm_ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
- }
- lm_ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
- }
-
- return true;
- }
-
- size_t lm_ggml_gallocr_get_buffer_size(lm_ggml_gallocr_t galloc, int buffer_id) {
- LM_GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);
-
- if (galloc->buffers[buffer_id] == NULL) {
- return 0;
- }
-
- for (int i = 0; i < buffer_id; i++) {
- if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
- // this buffer is the same as a previous one due to the same buffer type being used multiple times
- // only return the buffer size the first time it appears to avoid double counting
- return 0;
- }
- }
-
- return lm_ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
- }
-
- // utils
-
- static bool alloc_tensor_range(struct lm_ggml_context * ctx,
- struct lm_ggml_tensor * first, struct lm_ggml_tensor * last,
- lm_ggml_backend_buffer_type_t buft, size_t size,
- lm_ggml_backend_buffer_t ** buffers, size_t * n_buffers) {
- lm_ggml_backend_buffer_t buffer = lm_ggml_backend_buft_alloc_buffer(buft, size);
- if (buffer == NULL) {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: failed to allocate %s buffer of size %zu\n", __func__, lm_ggml_backend_buft_name(buft), size);
- #endif
- for (size_t i = 0; i < *n_buffers; i++) {
- lm_ggml_backend_buffer_free((*buffers)[i]);
- }
- free(*buffers);
- return false;
- }
-
- struct lm_ggml_tallocr tallocr = lm_ggml_tallocr_new(buffer);
-
- for (struct lm_ggml_tensor * t = first; t != last; t = lm_ggml_get_next_tensor(ctx, t)) {
- if (t->data == NULL) {
- if (t->view_src == NULL) {
- lm_ggml_tallocr_alloc(&tallocr, t);
- } else if (t->buffer == NULL) {
- lm_ggml_backend_view_init(t);
- }
- } else {
- if (t->view_src != NULL && t->buffer == NULL) {
- // view of a pre-allocated tensor
- lm_ggml_backend_view_init(t);
- }
- }
- }
-
- *buffers = realloc(*buffers, sizeof(lm_ggml_backend_buffer_t) * (*n_buffers + 1));
- (*buffers)[(*n_buffers)++] = buffer;
-
- return true;
- }
-
- lm_ggml_backend_buffer_t lm_ggml_backend_alloc_ctx_tensors_from_buft(struct lm_ggml_context * ctx, lm_ggml_backend_buffer_type_t buft) {
- LM_GGML_ASSERT(lm_ggml_get_no_alloc(ctx) == true);
-
- size_t alignment = lm_ggml_backend_buft_get_alignment(buft);
- size_t max_size = lm_ggml_backend_buft_get_max_size(buft);
-
- lm_ggml_backend_buffer_t * buffers = NULL;
- size_t n_buffers = 0;
-
- size_t cur_buf_size = 0;
- struct lm_ggml_tensor * first = lm_ggml_get_first_tensor(ctx);
- for (struct lm_ggml_tensor * t = first; t != NULL; t = lm_ggml_get_next_tensor(ctx, t)) {
- size_t this_size = 0;
- if (t->data == NULL && t->view_src == NULL) {
- this_size = LM_GGML_PAD(lm_ggml_backend_buft_get_alloc_size(buft, t), alignment);
- }
-
- if (this_size > max_size) {
- LM_GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
- __func__, t->name,
- lm_ggml_backend_buft_name(buft),
- this_size, max_size);
- for (size_t i = 0; i < n_buffers; i++) {
- lm_ggml_backend_buffer_free(buffers[i]);
- }
- free(buffers);
- return NULL;
- }
-
- if ((cur_buf_size + this_size) > max_size) {
- // allocate tensors in the current buffer
- if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
- return NULL;
- }
- first = t;
- cur_buf_size = this_size;
- } else {
- cur_buf_size += this_size;
- }
- }
-
- // allocate remaining tensors
- if (cur_buf_size > 0) {
- if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
- return NULL;
- }
- }
-
- if (n_buffers == 0) {
- #ifndef NDEBUG
- LM_GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
- #endif
- return NULL;
- }
-
- lm_ggml_backend_buffer_t buffer;
- if (n_buffers == 1) {
- buffer = buffers[0];
- } else {
- buffer = lm_ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
- }
- free(buffers);
- return buffer;
- }
-
- lm_ggml_backend_buffer_t lm_ggml_backend_alloc_ctx_tensors(struct lm_ggml_context * ctx, lm_ggml_backend_t backend) {
- return lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx, lm_ggml_backend_get_default_buffer_type(backend));
- }
1
+ #include "ggml-alloc.h"
2
+ #include "ggml-backend-impl.h"
3
+ #include "ggml.h"
4
+ #include "ggml-impl.h"
5
+ #include <assert.h>
6
+ #include <limits.h>
7
+ #include <stdarg.h>
8
+ #include <stdio.h>
9
+ #include <stdlib.h>
10
+ #include <string.h>
11
+
12
+ #define MAX(a, b) ((a) > (b) ? (a) : (b))
13
+ #define MAX_FREE_BLOCKS 256
14
+
15
+ //#define LM_GGML_ALLOCATOR_DEBUG
16
+
17
+ //#define AT_PRINTF(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
18
+ #define AT_PRINTF(...)
19
+
20
+
21
+ static bool lm_ggml_is_view(const struct lm_ggml_tensor * t) {
22
+ return t->view_src != NULL;
23
+ }
24
+
25
+ static bool lm_ggml_are_same_layout(const struct lm_ggml_tensor * a, const struct lm_ggml_tensor * b) {
26
+ if (a->type != b->type) {
27
+ return false;
28
+ }
29
+ for (int i = 0; i < LM_GGML_MAX_DIMS; i++) {
30
+ if (a->ne[i] != b->ne[i]) {
31
+ return false;
32
+ }
33
+ if (a->nb[i] != b->nb[i]) {
34
+ return false;
35
+ }
36
+ }
37
+ return true;
38
+ }
39
+
40
+ static bool lm_ggml_op_can_inplace(enum lm_ggml_op op) {
41
+ switch (op) {
42
+ case LM_GGML_OP_SCALE:
43
+ case LM_GGML_OP_DIAG_MASK_ZERO:
44
+ case LM_GGML_OP_DIAG_MASK_INF:
45
+ case LM_GGML_OP_ADD:
46
+ case LM_GGML_OP_ADD1:
47
+ case LM_GGML_OP_SUB:
48
+ case LM_GGML_OP_MUL:
49
+ case LM_GGML_OP_DIV:
50
+ case LM_GGML_OP_SQR:
51
+ case LM_GGML_OP_SQRT:
52
+ case LM_GGML_OP_LOG:
53
+ case LM_GGML_OP_UNARY:
54
+ case LM_GGML_OP_ROPE:
55
+ case LM_GGML_OP_RMS_NORM:
56
+ case LM_GGML_OP_SOFT_MAX:
57
+ return true;
58
+
59
+ default:
60
+ return false;
61
+ }
62
+ }
63
+
64
+ static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
65
+ assert(alignment && !(alignment & (alignment - 1))); // power of 2
66
+ size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
67
+ return offset + align;
68
+ }
69
+
70
+ // tallocr
71
+
72
+ struct lm_ggml_tallocr lm_ggml_tallocr_new(lm_ggml_backend_buffer_t buffer) {
73
+ void * base = lm_ggml_backend_buffer_get_base(buffer);
74
+ size_t align = lm_ggml_backend_buffer_get_alignment(buffer);
75
+
76
+ assert(align && !(align & (align - 1))); // power of 2
77
+
78
+ struct lm_ggml_tallocr talloc = (struct lm_ggml_tallocr) {
79
+ /*.buffer = */ buffer,
80
+ /*.base = */ base,
81
+ /*.alignment = */ align,
82
+ /*.offset = */ aligned_offset(base, 0, align),
83
+ };
84
+ return talloc;
85
+ }
86
+
87
+ void lm_ggml_tallocr_alloc(struct lm_ggml_tallocr * talloc, struct lm_ggml_tensor * tensor) {
88
+ size_t size = lm_ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
89
+ size = LM_GGML_PAD(size, talloc->alignment);
90
+
91
+ if (talloc->offset + size > lm_ggml_backend_buffer_get_size(talloc->buffer)) {
92
+ LM_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
93
+ __func__, tensor->name, size, lm_ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
94
+ LM_GGML_ABORT("not enough space in the buffer");
95
+ }
96
+
97
+ void * addr = (char *)lm_ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
98
+ talloc->offset += size;
99
+
100
+ assert(((uintptr_t)addr % talloc->alignment) == 0);
101
+
102
+ lm_ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
103
+ }
104
+
105
+ // dynamic tensor allocator
106
+
107
+ struct free_block {
108
+ size_t offset;
109
+ size_t size;
110
+ };
111
+
112
+ struct lm_ggml_dyn_tallocr {
113
+ size_t alignment;
114
+ int n_free_blocks;
115
+ struct free_block free_blocks[MAX_FREE_BLOCKS];
116
+ size_t max_size;
117
+
118
+ #ifdef LM_GGML_ALLOCATOR_DEBUG
119
+ struct {
120
+ const struct lm_ggml_tensor * tensor;
121
+ size_t offset;
122
+ } allocated_tensors[1024];
123
+ #endif
124
+ };
125
+
126
+ #ifdef LM_GGML_ALLOCATOR_DEBUG
127
+ static void add_allocated_tensor(struct lm_ggml_dyn_tallocr * alloc, size_t offset, const struct lm_ggml_tensor * tensor) {
128
+ for (int i = 0; i < 1024; i++) {
129
+ if (alloc->allocated_tensors[i].tensor == NULL) {
130
+ alloc->allocated_tensors[i].tensor = tensor;
131
+ alloc->allocated_tensors[i].offset = offset;
132
+ return;
133
+ }
134
+ }
135
+ LM_GGML_ABORT("out of allocated_tensors");
136
+ }
137
+ static void remove_allocated_tensor(struct lm_ggml_dyn_tallocr * alloc, size_t offset, const struct lm_ggml_tensor * tensor) {
138
+ for (int i = 0; i < 1024; i++) {
139
+ if (alloc->allocated_tensors[i].offset == offset) {
140
+ alloc->allocated_tensors[i].tensor = NULL;
141
+ return;
142
+ }
143
+ }
144
+ LM_GGML_ABORT("tried to free tensor %s not found\n", tensor->name);
145
+ }
146
+ #endif
147
+
148
+ static size_t lm_ggml_dyn_tallocr_alloc(struct lm_ggml_dyn_tallocr * alloc, size_t size, const struct lm_ggml_tensor * tensor) {
149
+ size = aligned_offset(NULL, size, alloc->alignment);
150
+
151
+ AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
152
+
153
+ size_t max_avail = 0;
154
+
155
+ // find the best fitting free block besides the last block
156
+ int best_fit_block = -1;
157
+ size_t best_fit_size = SIZE_MAX;
158
+ for (int i = 0; i < alloc->n_free_blocks - 1; i++) {
159
+ struct free_block * block = &alloc->free_blocks[i];
160
+ max_avail = MAX(max_avail, block->size);
161
+ if (block->size >= size && block->size <= best_fit_size) {
162
+ best_fit_block = i;
163
+ best_fit_size = block->size;
164
+ }
165
+ }
166
+
167
+ if (best_fit_block == -1) {
168
+ // the last block is our last resort
169
+ struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
170
+ max_avail = MAX(max_avail, block->size);
171
+ if (block->size >= size) {
172
+ best_fit_block = alloc->n_free_blocks - 1;
173
+ } else {
174
+ // this should never happen
175
+ LM_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
176
+ __func__, size, max_avail);
177
+ LM_GGML_ABORT("not enough space in the buffer");
178
+ }
179
+ }
180
+
181
+ struct free_block * block = &alloc->free_blocks[best_fit_block];
182
+ size_t offset = block->offset;
183
+ block->offset = offset + size;
184
+ block->size -= size;
185
+ if (block->size == 0) {
186
+ // remove block if empty
187
+ alloc->n_free_blocks--;
188
+ for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
189
+ alloc->free_blocks[j] = alloc->free_blocks[j+1];
190
+ }
191
+ }
192
+
193
+ AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset);
194
+
195
+ #ifdef LM_GGML_ALLOCATOR_DEBUG
196
+ add_allocated_tensor(alloc, offset, tensor);
197
+ size_t cur_max = offset + size;
198
+ if (cur_max > alloc->max_size) {
199
+ // sort allocated_tensors by offset
200
+ for (int i = 0; i < 1024; i++) {
201
+ for (int j = i + 1; j < 1024; j++) {
202
+ if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) {
203
+ const struct lm_ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
204
+ size_t tmp_offset = alloc->allocated_tensors[i].offset;
205
+ alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
206
+ alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset;
207
+ alloc->allocated_tensors[j].tensor = tmp_tensor;
208
+ alloc->allocated_tensors[j].offset = tmp_offset;
209
+ }
210
+ }
211
+ }
212
+ LM_GGML_LOG_DEBUG("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
213
+ for (int i = 0; i < 1024; i++) {
214
+ if (alloc->allocated_tensors[i].tensor) {
215
+ LM_GGML_LOG_DEBUG("%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
216
+ alloc->allocated_tensors[i].offset,
217
+ alloc->allocated_tensors[i].offset + lm_ggml_nbytes(alloc->allocated_tensors[i].tensor),
218
+ lm_ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
219
+ }
220
+ }
221
+ LM_GGML_LOG_DEBUG("\n");
222
+ }
223
+ #endif
224
+
225
+ alloc->max_size = MAX(alloc->max_size, offset + size);
226
+
227
+ return offset;
228
+
229
+ LM_GGML_UNUSED(tensor);
230
+ }
231
+
232
+ // this is a very naive implementation, but for our case the number of free blocks should be very small
233
+ static void lm_ggml_dyn_tallocr_free_tensor(struct lm_ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct lm_ggml_tensor * tensor) {
234
+ size = aligned_offset(NULL, size, alloc->alignment);
235
+
236
+ AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks);
237
+
238
+ #ifdef LM_GGML_ALLOCATOR_DEBUG
239
+ remove_allocated_tensor(alloc, offset, tensor);
240
+ #endif
241
+
242
+ // see if we can merge with an existing block
243
+ for (int i = 0; i < alloc->n_free_blocks; i++) {
244
+ struct free_block * block = &alloc->free_blocks[i];
245
+ // check if ptr is at the end of the block
246
+ if (block->offset + block->size == offset) {
247
+ block->size += size;
248
+ // check if we can merge with the next block
249
+ if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) {
250
+ block->size += alloc->free_blocks[i+1].size;
251
+ alloc->n_free_blocks--;
252
+ for (int j = i+1; j < alloc->n_free_blocks; j++) {
253
+ alloc->free_blocks[j] = alloc->free_blocks[j+1];
254
+ }
255
+ }
256
+ return;
257
+ }
258
+ // check if ptr is at the beginning of the block
259
+ if (offset + size == block->offset) {
260
+ block->offset = offset;
261
+ block->size += size;
262
+ // check if we can merge with the previous block
263
+ if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) {
264
+ alloc->free_blocks[i-1].size += block->size;
265
+ alloc->n_free_blocks--;
266
+ for (int j = i; j < alloc->n_free_blocks; j++) {
267
+ alloc->free_blocks[j] = alloc->free_blocks[j+1];
268
+ }
269
+ }
270
+ return;
271
+ }
272
+ }
273
+ // otherwise, add a new block
274
+ LM_GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
275
+ // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
276
+ int insert_pos = 0;
277
+ while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) {
278
+ insert_pos++;
279
+ }
280
+ // shift all blocks from insert_pos onward to make room for the new block
281
+ for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
282
+ alloc->free_blocks[i] = alloc->free_blocks[i-1];
283
+ }
284
+ // insert the new block
285
+ alloc->free_blocks[insert_pos].offset = offset;
286
+ alloc->free_blocks[insert_pos].size = size;
287
+ alloc->n_free_blocks++;
288
+
289
+ LM_GGML_UNUSED(tensor);
290
+ }
291
+
292
+ static void lm_ggml_dyn_tallocr_reset(struct lm_ggml_dyn_tallocr * alloc) {
293
+ alloc->n_free_blocks = 1;
294
+ alloc->free_blocks[0].offset = 0;
295
+ alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
296
+ alloc->max_size = 0;
297
+
298
+ #ifdef LM_GGML_ALLOCATOR_DEBUG
299
+ for (int i = 0; i < 1024; i++) {
300
+ alloc->allocated_tensors[i].tensor = NULL;
301
+ }
302
+ #endif
303
+ }
304
+
305
+ static struct lm_ggml_dyn_tallocr * lm_ggml_dyn_tallocr_new(size_t alignment) {
306
+ struct lm_ggml_dyn_tallocr * alloc = (struct lm_ggml_dyn_tallocr *)malloc(sizeof(struct lm_ggml_dyn_tallocr));
307
+
308
+ *alloc = (struct lm_ggml_dyn_tallocr) {
309
+ /*.alignment = */ alignment,
310
+ /*.n_free_blocks = */ 0,
311
+ /*.free_blocks = */ {{0}},
312
+ /*.max_size = */ 0,
313
+ #ifdef LM_GGML_ALLOCATOR_DEBUG
314
+ /*.allocated_tensors = */ {{0}},
315
+ #endif
316
+ };
317
+
318
+ lm_ggml_dyn_tallocr_reset(alloc);
319
+
320
+ return alloc;
321
+ }
322
+
323
+ static void lm_ggml_dyn_tallocr_free(struct lm_ggml_dyn_tallocr * alloc) {
324
+ free(alloc);
325
+ }
326
+
327
+ static size_t lm_ggml_dyn_tallocr_max_size(struct lm_ggml_dyn_tallocr * alloc) {
328
+ return alloc->max_size;
329
+ }
330
+
331
+
332
+ /////////////////////////////////////
333
+
334
+ // graph allocator
335
+
336
+ struct hash_node {
337
+ int n_children;
338
+ int n_views;
339
+ int buffer_id;
340
+ size_t offset; // offset within the buffer
341
+ bool allocated;
342
+ };
343
+
344
+ struct tensor_alloc {
345
+ int buffer_id;
346
+ size_t offset;
347
+ size_t size_max; // 0 = pre-allocated, unused, or view
348
+ };
349
+
350
+ struct leaf_alloc {
351
+ struct tensor_alloc leaf;
352
+ };
353
+
354
+ struct node_alloc {
355
+ struct tensor_alloc dst;
356
+ struct tensor_alloc src[LM_GGML_MAX_SRC];
357
+ };
358
+
359
+ struct lm_ggml_gallocr {
360
+ lm_ggml_backend_buffer_type_t * bufts; // [n_buffers]
361
+ lm_ggml_backend_buffer_t * buffers; // [n_buffers]
362
+ struct lm_ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
363
+ int n_buffers;
364
+
365
+ struct lm_ggml_hash_set hash_set;
366
+ struct hash_node * hash_values; // [hash_set.size]
367
+
368
+ struct node_alloc * node_allocs; // [n_nodes]
369
+ int n_nodes;
370
+
371
+ struct leaf_alloc * leaf_allocs; // [n_leafs]
372
+ int n_leafs;
373
+ };
374
+
375
+ lm_ggml_gallocr_t lm_ggml_gallocr_new_n(lm_ggml_backend_buffer_type_t * bufts, int n_bufs) {
376
+ lm_ggml_gallocr_t galloc = (lm_ggml_gallocr_t)calloc(1, sizeof(struct lm_ggml_gallocr));
377
+ LM_GGML_ASSERT(galloc != NULL);
378
+
379
+ galloc->bufts = calloc(n_bufs, sizeof(lm_ggml_backend_buffer_type_t));
380
+ LM_GGML_ASSERT(galloc->bufts != NULL);
381
+
382
+ galloc->buffers = calloc(n_bufs, sizeof(lm_ggml_backend_buffer_t));
383
+ LM_GGML_ASSERT(galloc->buffers != NULL);
384
+
385
+ galloc->buf_tallocs = calloc(n_bufs, sizeof(struct lm_ggml_dyn_tallocr *));
386
+ LM_GGML_ASSERT(galloc->buf_tallocs != NULL);
387
+
388
+ for (int i = 0; i < n_bufs; i++) {
389
+ galloc->bufts[i] = bufts[i];
390
+ galloc->buffers[i] = NULL;
391
+
392
+ // check if the same buffer type is used multiple times and reuse the same allocator
393
+ for (int j = 0; j < i; j++) {
394
+ if (bufts[i] == bufts[j]) {
395
+ galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
396
+ break;
397
+ }
398
+ }
399
+
400
+ if (galloc->buf_tallocs[i] == NULL) {
401
+ size_t alignment = lm_ggml_backend_buft_get_alignment(bufts[i]);
402
+ galloc->buf_tallocs[i] = lm_ggml_dyn_tallocr_new(alignment);
403
+ }
404
+ }
405
+ galloc->n_buffers = n_bufs;
406
+
407
+ return galloc;
408
+ }
+
+ lm_ggml_gallocr_t lm_ggml_gallocr_new(lm_ggml_backend_buffer_type_t buft) {
+     return lm_ggml_gallocr_new_n(&buft, 1);
+ }
+
+ void lm_ggml_gallocr_free(lm_ggml_gallocr_t galloc) {
+     if (galloc == NULL) {
+         return;
+     }
+
+     for (int i = 0; i < galloc->n_buffers; i++) {
+         if (galloc->buffers != NULL) {
+             // skip if already freed
+             bool freed = false;
+             for (int j = 0; j < i; j++) {
+                 if (galloc->buffers[j] == galloc->buffers[i]) {
+                     freed = true;
+                     break;
+                 }
+             }
+             if (!freed) {
+                 lm_ggml_backend_buffer_free(galloc->buffers[i]);
+             }
+         }
+         if (galloc->buf_tallocs != NULL) {
+             // skip if already freed
+             bool freed = false;
+             for (int j = 0; j < i; j++) {
+                 if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+                     freed = true;
+                     break;
+                 }
+             }
+             if (!freed) {
+                 lm_ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
+             }
+         }
+     }
+
+     lm_ggml_hash_set_free(&galloc->hash_set);
+     free(galloc->hash_values);
+     free(galloc->bufts);
+     free(galloc->buffers);
+     free(galloc->buf_tallocs);
+     free(galloc->node_allocs);
+     free(galloc->leaf_allocs);
+     free(galloc);
+ }
+
+ typedef struct lm_ggml_gallocr * lm_ggml_gallocr_t;
+
+ static struct hash_node * lm_ggml_gallocr_hash_get(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * t) {
+     size_t i = lm_ggml_hash_find_or_insert(&galloc->hash_set, t);
+     return &galloc->hash_values[i];
+ }
+
+ static bool lm_ggml_gallocr_is_own(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * t) {
+     return lm_ggml_gallocr_hash_get(galloc, t)->allocated;
+ }
+
+ static bool lm_ggml_gallocr_is_allocated(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * t) {
+     return t->data != NULL || lm_ggml_gallocr_hash_get(galloc, t)->allocated;
+ }
+
+ static void lm_ggml_gallocr_allocate_node(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * node, int buffer_id) {
+     LM_GGML_ASSERT(buffer_id >= 0);
+     struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, node);
+
+     if (!lm_ggml_gallocr_is_allocated(galloc, node) && !lm_ggml_is_view(node)) {
+         hn->allocated = true;
+         assert(hn->offset == 0);
+
+         // try to reuse a parent's buffer (inplace)
+         if (lm_ggml_op_can_inplace(node->op)) {
+             for (int i = 0; i < LM_GGML_MAX_SRC; i++) {
+                 struct lm_ggml_tensor * parent = node->src[i];
+                 if (parent == NULL) {
+                     continue;
+                 }
+
+                 // if the node's data is external, then we cannot re-use it
+                 if (!lm_ggml_gallocr_is_own(galloc, parent)) {
+                     AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
+                     continue;
+                 }
+
+                 // outputs cannot be reused
+                 if (parent->flags & LM_GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & LM_GGML_TENSOR_FLAG_OUTPUT)) {
+                     AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
+                     continue;
+                 }
+
+                 if (!lm_ggml_are_same_layout(node, parent)) {
+                     AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
+                     continue;
+                 }
+
+                 struct hash_node * p_hn = lm_ggml_gallocr_hash_get(galloc, parent);
+                 if (p_hn->n_children == 1 && p_hn->n_views == 0) {
+                     if (lm_ggml_is_view(parent)) {
+                         struct lm_ggml_tensor * view_src = parent->view_src;
+                         struct hash_node * view_src_hn = lm_ggml_gallocr_hash_get(galloc, view_src);
+                         if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
+                             AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
+                             assert(view_src_hn->offset == p_hn->offset);
+                             hn->buffer_id = p_hn->buffer_id;
+                             hn->offset = p_hn->offset;
+                             p_hn->allocated = false; // avoid freeing the parent
+                             view_src_hn->allocated = false;
+                             return;
+                         }
+                     } else {
+                         AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
+                         hn->buffer_id = p_hn->buffer_id;
+                         hn->offset = p_hn->offset;
+                         p_hn->allocated = false; // avoid freeing the parent
+                         return;
+                     }
+                 }
+             }
+         }
+         // allocate tensor from the buffer
+         struct lm_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+         lm_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
+         size_t size = lm_ggml_backend_buft_get_alloc_size(buft, node);
+         size_t offset = lm_ggml_dyn_tallocr_alloc(alloc, size, node);
+         hn->buffer_id = buffer_id;
+         hn->offset = offset;
+         return;
+     }
+ }
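
In `lm_ggml_gallocr_allocate_node`, a node steals its parent's block (an in-place allocation) only when every condition holds: the op supports in-place computation, the parent's data lives in this allocator rather than being external, the parent is not a graph output, the layouts match byte for byte, and the parent has exactly one remaining consumer and no live views. The checks condense to a predicate like the following sketch (hypothetical `toy_can_inherit`; the real code additionally follows view parents through to their `view_src`):

```c
#include <stdbool.h>

// Illustrative condensation of the in-place reuse test above; each
// parameter mirrors a condition the real allocator computes.
static bool toy_can_inherit(bool op_can_inplace, bool parent_owned,
                            bool parent_is_output, bool same_layout,
                            int parent_children, int parent_views) {
    return op_can_inplace        // op can write over one of its inputs
        && parent_owned          // parent's block belongs to this allocator
        && !parent_is_output     // graph outputs must keep their storage
        && same_layout           // identical sizes and strides
        && parent_children == 1  // this node is the parent's last consumer
        && parent_views == 0;    // no views still alias the parent
}
```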
+
+ static void lm_ggml_gallocr_free_node(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * node) {
+     // graph outputs are never freed
+     if (node->flags & LM_GGML_TENSOR_FLAG_OUTPUT) {
+         AT_PRINTF("not freeing output %s\n", node->name);
+         return;
+     }
+
+     struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, node);
+     size_t offset = hn->offset;
+     int buffer_id = hn->buffer_id;
+     struct lm_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+     lm_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
+     size_t size = lm_ggml_backend_buft_get_alloc_size(buft, node);
+     lm_ggml_dyn_tallocr_free_tensor(alloc, offset, size, node);
+     hn->allocated = false;
+ }
+
+ static int get_node_buffer_id(const int * node_buffer_ids, int i) {
+     return node_buffer_ids ? node_buffer_ids[i] : 0;
+ }
+
+ static void lm_ggml_gallocr_alloc_graph_impl(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
+     // clear hash tables
+     lm_ggml_hash_set_reset(&galloc->hash_set);
+     memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);
+
+     // allocate leafs
+     // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
+     for (int i = 0; i < graph->n_leafs; i++) {
+         struct lm_ggml_tensor * leaf = graph->leafs[i];
+         lm_ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
+     }
+
+     // count number of children and views
+     // allocate other graph inputs and leafs first to avoid overwriting them
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct lm_ggml_tensor * node = graph->nodes[i];
+
+         // TODO: better way to add external dependencies
+         // LM_GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
+         // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
+         // itself is never used and should not be considered a dependency
+         if (lm_ggml_is_view(node) && node->op != LM_GGML_OP_NONE) {
+             struct lm_ggml_tensor * view_src = node->view_src;
+             lm_ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
+         }
+
+         if (node->flags & LM_GGML_TENSOR_FLAG_INPUT) {
+             lm_ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
+         }
+
+         for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
+             struct lm_ggml_tensor * src = node->src[j];
+             if (src == NULL) {
+                 continue;
+             }
+
+             lm_ggml_gallocr_hash_get(galloc, src)->n_children += 1;
+
+             // allocate explicit inputs
+             if (src->flags & LM_GGML_TENSOR_FLAG_INPUT) {
+                 lm_ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
+             }
+         }
+     }
+
+     // allocate tensors
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct lm_ggml_tensor * node = graph->nodes[i];
+         int buffer_id = get_node_buffer_id(node_buffer_ids, i);
+
+         // allocate parents (only leafs need to be allocated at this point)
+         for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
+             struct lm_ggml_tensor * parent = node->src[j];
+             if (parent == NULL) {
+                 continue;
+             }
+             lm_ggml_gallocr_allocate_node(galloc, parent, buffer_id);
+         }
+
+         // allocate node
+         lm_ggml_gallocr_allocate_node(galloc, node, buffer_id);
+
+         AT_PRINTF("exec: %s (%s) <= ", lm_ggml_op_desc(node), node->name);
+         for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
+             struct lm_ggml_tensor * parent = node->src[j];
+             if (parent == NULL) {
+                 continue;
+             }
+             AT_PRINTF("%s", parent->name);
+             if (j < LM_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
+                 AT_PRINTF(", ");
+             }
+         }
+         AT_PRINTF("\n");
+
+         // update parents
+         for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
+             struct lm_ggml_tensor * parent = node->src[j];
+             if (parent == NULL) {
+                 continue;
+             }
+             struct hash_node * p_hn = lm_ggml_gallocr_hash_get(galloc, parent);
+             p_hn->n_children -= 1;
+
+             AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
+                 parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);
+
+             if (p_hn->n_children == 0 && p_hn->n_views == 0) {
+                 if (lm_ggml_is_view(parent)) {
+                     struct lm_ggml_tensor * view_src = parent->view_src;
+                     struct hash_node * view_src_hn = lm_ggml_gallocr_hash_get(galloc, view_src);
+                     view_src_hn->n_views -= 1;
+                     AT_PRINTF("view_src %s: %d children, %d views\n",
+                         view_src->name, view_src_hn->n_children, view_src_hn->n_views);
+                     if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
+                         lm_ggml_gallocr_free_node(galloc, view_src);
+                     }
+                 }
+                 else if (p_hn->allocated) {
+                     lm_ggml_gallocr_free_node(galloc, parent);
+                 }
+             }
+             AT_PRINTF("\n");
+         }
+     }
+ }
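
`lm_ggml_gallocr_alloc_graph_impl` plans memory with simple reference counting: the first pass records how many consumers (`n_children`) and views (`n_views`) each tensor has, and the second pass, walking nodes in execution order, decrements a parent's count after each use and returns its block to the free list the moment both counts reach zero, so later nodes can reuse the space. A standalone toy trace of that rule (hypothetical names, plain C):

```c
#include <stdio.h>

// toy graph: t2 = f(t0, t1); t3 = g(t2, t0) -- t0 has two consumers
int main(void) {
    int n_children[4] = {2, 1, 1, 0};     // counts from the first pass
    int src[2][2]     = {{0, 1}, {2, 0}}; // sources of t2 and t3
    for (int node = 2; node <= 3; node++) {
        printf("exec t%d\n", node);
        for (int j = 0; j < 2; j++) {
            int p = src[node - 2][j];
            if (--n_children[p] == 0) {
                // the real pass calls lm_ggml_gallocr_free_node here
                printf("  free t%d (last consumer done)\n", p);
            }
        }
    }
    return 0; // prints: free t1 after t2, then free t2 and t0 after t3
}
```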
+
+ bool lm_ggml_gallocr_reserve_n(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
+     size_t min_hash_size = graph->n_nodes + graph->n_leafs;
+     // add 25% margin to avoid hash collisions
+     min_hash_size += min_hash_size / 4;
+
+     // initialize hash table
+     if (galloc->hash_set.size < min_hash_size) {
+         lm_ggml_hash_set_free(&galloc->hash_set);
+         galloc->hash_set = lm_ggml_hash_set_new(min_hash_size);
+         LM_GGML_ASSERT(galloc->hash_set.keys != NULL);
+
+         free(galloc->hash_values);
+         galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
+         LM_GGML_ASSERT(galloc->hash_values != NULL);
+     }
+
+     // reset allocators
+     for (int i = 0; i < galloc->n_buffers; i++) {
+         lm_ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
+     }
+
+     // allocate in hash table
+     lm_ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);
+
+     // set the node_allocs from the hash table
+     if (galloc->n_nodes < graph->n_nodes) {
+         free(galloc->node_allocs);
+         galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
+         LM_GGML_ASSERT(galloc->node_allocs != NULL);
+     }
+     galloc->n_nodes = graph->n_nodes;
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct lm_ggml_tensor * node = graph->nodes[i];
+         struct node_alloc * node_alloc = &galloc->node_allocs[i];
+         if (node->view_src || node->data) {
+             node_alloc->dst.buffer_id = -1;
+             node_alloc->dst.offset = SIZE_MAX;
+             node_alloc->dst.size_max = 0;
+         } else {
+             struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, node);
+             node_alloc->dst.buffer_id = hn->buffer_id;
+             node_alloc->dst.offset = hn->offset;
+             node_alloc->dst.size_max = lm_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
+         }
+         for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
+             struct lm_ggml_tensor * src = node->src[j];
+             if (!src || src->view_src || src->data) {
+                 node_alloc->src[j].buffer_id = -1;
+                 node_alloc->src[j].offset = SIZE_MAX;
+                 node_alloc->src[j].size_max = 0;
+             } else {
+                 struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, src);
+                 node_alloc->src[j].buffer_id = hn->buffer_id;
+                 node_alloc->src[j].offset = hn->offset;
+                 node_alloc->src[j].size_max = lm_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
+             }
+         }
+     }
+     if (galloc->n_leafs < graph->n_leafs) {
+         free(galloc->leaf_allocs);
+         galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
+         LM_GGML_ASSERT(galloc->leaf_allocs != NULL);
+     }
+     galloc->n_leafs = graph->n_leafs;
+     for (int i = 0; i < graph->n_leafs; i++) {
+         struct lm_ggml_tensor * leaf = graph->leafs[i];
+         struct hash_node * hn = lm_ggml_gallocr_hash_get(galloc, leaf);
+         if (leaf->view_src || leaf->data) {
+             galloc->leaf_allocs[i].leaf.buffer_id = -1;
+             galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
+             galloc->leaf_allocs[i].leaf.size_max = 0;
+         } else {
+             galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
+             galloc->leaf_allocs[i].leaf.offset = hn->offset;
+             galloc->leaf_allocs[i].leaf.size_max = lm_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
+         }
+     }
+
+     // reallocate buffers if needed
+     for (int i = 0; i < galloc->n_buffers; i++) {
+         // if the buffer type is used multiple times, we reuse the same buffer
+         for (int j = 0; j < i; j++) {
+             if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+                 galloc->buffers[i] = galloc->buffers[j];
+                 break;
+             }
+         }
+
+         size_t cur_size = galloc->buffers[i] ? lm_ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
+         size_t new_size = lm_ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);
+
+         // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
+         if (new_size > cur_size || galloc->buffers[i] == NULL) {
+ #ifndef NDEBUG
+             LM_GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, lm_ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
+ #endif
+
+             lm_ggml_backend_buffer_free(galloc->buffers[i]);
+             galloc->buffers[i] = lm_ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
+             if (galloc->buffers[i] == NULL) {
+                 LM_GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, lm_ggml_backend_buft_name(galloc->bufts[i]), new_size);
+                 return false;
+             }
+             lm_ggml_backend_buffer_set_usage(galloc->buffers[i], LM_GGML_BACKEND_BUFFER_USAGE_COMPUTE);
+         }
+     }
+
+     return true;
+ }
+
+ bool lm_ggml_gallocr_reserve(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph) {
+     return lm_ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
+ }
+
+ static void lm_ggml_gallocr_init_tensor(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
+     int buffer_id = tensor_alloc->buffer_id;
+     assert(tensor->data || tensor->view_src || lm_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
+
+     if (tensor->view_src != NULL) {
+         if (tensor->buffer == NULL) {
+             assert(tensor_alloc->offset == SIZE_MAX);
+             if (tensor->view_src->buffer == NULL) {
+                 // this tensor was allocated without ggml-backend
+                 return;
+             }
+             lm_ggml_backend_view_init(tensor);
+         }
+     } else {
+         if (tensor->data == NULL) {
+             assert(tensor_alloc->offset != SIZE_MAX);
+             assert(lm_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
+             void * base = lm_ggml_backend_buffer_get_base(galloc->buffers[buffer_id]);
+             void * addr = (char *)base + tensor_alloc->offset;
+             lm_ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr);
+         } else {
+             if (tensor->buffer == NULL) {
+                 // this tensor was allocated without ggml-backend
+                 return;
+             }
+         }
+     }
+ }
+
+ static bool lm_ggml_gallocr_node_needs_realloc(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * node, struct tensor_alloc * talloc) {
+     size_t node_size = 0;
+     if (!node->data && !node->view_src) {
+         LM_GGML_ASSERT(talloc->buffer_id >= 0); // prevent segfault when misusing the API
+         node_size = lm_ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
+     }
+     return talloc->size_max >= node_size;
+ }
+
+ static bool lm_ggml_gallocr_needs_realloc(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph) {
+     if (galloc->n_nodes != graph->n_nodes) {
+ #ifndef NDEBUG
+         LM_GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
+ #endif
+         return true;
+     }
+
+     if (galloc->n_leafs != graph->n_leafs) {
+ #ifndef NDEBUG
+         LM_GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
+ #endif
+         return true;
+     }
+
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct lm_ggml_tensor * node = graph->nodes[i];
+         struct node_alloc * node_alloc = &galloc->node_allocs[i];
+
+         if (!lm_ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
+ #ifndef NDEBUG
+             LM_GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
+ #endif
+             return true;
+         }
+
+         for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
+             struct lm_ggml_tensor * src = node->src[j];
+             if (src == NULL) {
+                 continue;
+             }
+             if (!lm_ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
+ #ifndef NDEBUG
+                 LM_GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
+ #endif
+                 return true;
+             }
+         }
+     }
+
+     return false;
+ }
+
+ bool lm_ggml_gallocr_alloc_graph(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph) {
+     if (lm_ggml_gallocr_needs_realloc(galloc, graph)) {
+         if (galloc->n_buffers == 1) {
+ #ifndef NDEBUG
+             LM_GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
+ #endif
+             if (!lm_ggml_gallocr_reserve(galloc, graph)) {
+                 return false;
+             }
+         } else {
+ #ifndef NDEBUG
+             LM_GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
+ #endif
+             return false;
+         }
+     }
+
+     // reset buffers
+     for (int i = 0; i < galloc->n_buffers; i++) {
+         if (galloc->buffers[i] != NULL) {
+             lm_ggml_backend_buffer_reset(galloc->buffers[i]);
+         }
+     }
+
+     // allocate the graph tensors from the previous assignments
+     // leafs
+     for (int i = 0; i < graph->n_leafs; i++) {
+         struct lm_ggml_tensor * leaf = graph->leafs[i];
+         struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
+         lm_ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
+     }
+     // nodes
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct lm_ggml_tensor * node = graph->nodes[i];
+         struct node_alloc * node_alloc = &galloc->node_allocs[i];
+         for (int j = 0; j < LM_GGML_MAX_SRC; j++) {
+             struct lm_ggml_tensor * src = node->src[j];
+             if (src == NULL) {
+                 continue;
+             }
+             lm_ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
+         }
+         lm_ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
+     }
+
+     return true;
+ }
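
Taken together, `lm_ggml_gallocr_reserve` and `lm_ggml_gallocr_alloc_graph` support the usual ggml pattern: reserve once with a worst-case graph, then cheaply re-bind tensors for each concrete graph; note from the code above that a multi-buffer allocator is never regrown automatically. A hedged usage sketch, where `build_graph` is a hypothetical application helper returning a `struct lm_ggml_cgraph *`:

```c
lm_ggml_gallocr_t galloc = lm_ggml_gallocr_new(lm_ggml_backend_cpu_buffer_type());

// reserve once with the worst-case graph so later calls never reallocate
lm_ggml_gallocr_reserve(galloc, build_graph(/*n_batch=*/max_batch));

for (int step = 0; step < n_steps; step++) {
    struct lm_ggml_cgraph * graph = build_graph(/*n_batch=*/cur_batch);
    if (!lm_ggml_gallocr_alloc_graph(galloc, graph)) {
        break; // allocation failed, or a multi-buffer graph needs an explicit reserve
    }
    // ... compute the graph with the backend ...
}
lm_ggml_gallocr_free(galloc);
```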
+
+ size_t lm_ggml_gallocr_get_buffer_size(lm_ggml_gallocr_t galloc, int buffer_id) {
+     LM_GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);
+
+     if (galloc->buffers[buffer_id] == NULL) {
+         return 0;
+     }
+
+     for (int i = 0; i < buffer_id; i++) {
+         if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
+             // this buffer is the same as a previous one due to the same buffer type being used multiple times
+             // only return the buffer size the first time it appears to avoid double counting
+             return 0;
+         }
+     }
+
+     return lm_ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
+ }
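
Because `lm_ggml_gallocr_get_buffer_size` reports a shared buffer only at its first index, summing it over all ids yields the true total without double counting. A short hedged sketch:

```c
// n_bufs is whatever was passed to lm_ggml_gallocr_new_n (1 for lm_ggml_gallocr_new)
size_t total = 0;
for (int i = 0; i < n_bufs; i++) {
    total += lm_ggml_gallocr_get_buffer_size(galloc, i);
}
```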
+
+ // utils
+
+ static bool alloc_tensor_range(struct lm_ggml_context * ctx,
+         struct lm_ggml_tensor * first, struct lm_ggml_tensor * last,
+         lm_ggml_backend_buffer_type_t buft, size_t size,
+         lm_ggml_backend_buffer_t ** buffers, size_t * n_buffers) {
+     lm_ggml_backend_buffer_t buffer = lm_ggml_backend_buft_alloc_buffer(buft, size);
+     if (buffer == NULL) {
+ #ifndef NDEBUG
+         LM_GGML_LOG_DEBUG("%s: failed to allocate %s buffer of size %zu\n", __func__, lm_ggml_backend_buft_name(buft), size);
+ #endif
+         for (size_t i = 0; i < *n_buffers; i++) {
+             lm_ggml_backend_buffer_free((*buffers)[i]);
+         }
+         free(*buffers);
+         return false;
+     }
+
+     struct lm_ggml_tallocr tallocr = lm_ggml_tallocr_new(buffer);
+
+     for (struct lm_ggml_tensor * t = first; t != last; t = lm_ggml_get_next_tensor(ctx, t)) {
+         if (t->data == NULL) {
+             if (t->view_src == NULL) {
+                 lm_ggml_tallocr_alloc(&tallocr, t);
+             } else if (t->buffer == NULL) {
+                 lm_ggml_backend_view_init(t);
+             }
+         } else {
+             if (t->view_src != NULL && t->buffer == NULL) {
+                 // view of a pre-allocated tensor
+                 lm_ggml_backend_view_init(t);
+             }
+         }
+     }
+
+     *buffers = realloc(*buffers, sizeof(lm_ggml_backend_buffer_t) * (*n_buffers + 1));
+     (*buffers)[(*n_buffers)++] = buffer;
+
+     return true;
+ }
+
+ lm_ggml_backend_buffer_t lm_ggml_backend_alloc_ctx_tensors_from_buft(struct lm_ggml_context * ctx, lm_ggml_backend_buffer_type_t buft) {
+     LM_GGML_ASSERT(lm_ggml_get_no_alloc(ctx) == true);
+
+     size_t alignment = lm_ggml_backend_buft_get_alignment(buft);
+     size_t max_size = lm_ggml_backend_buft_get_max_size(buft);
+
+     lm_ggml_backend_buffer_t * buffers = NULL;
+     size_t n_buffers = 0;
+
+     size_t cur_buf_size = 0;
+     struct lm_ggml_tensor * first = lm_ggml_get_first_tensor(ctx);
+     for (struct lm_ggml_tensor * t = first; t != NULL; t = lm_ggml_get_next_tensor(ctx, t)) {
+         size_t this_size = 0;
+         if (t->data == NULL && t->view_src == NULL) {
+             this_size = LM_GGML_PAD(lm_ggml_backend_buft_get_alloc_size(buft, t), alignment);
+         }
+
+         if (this_size > max_size) {
+             LM_GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
+                 __func__, t->name,
+                 lm_ggml_backend_buft_name(buft),
+                 this_size, max_size);
+             for (size_t i = 0; i < n_buffers; i++) {
+                 lm_ggml_backend_buffer_free(buffers[i]);
+             }
+             free(buffers);
+             return NULL;
+         }
+
+         if ((cur_buf_size + this_size) > max_size) {
+             // allocate tensors in the current buffer
+             if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
+                 return NULL;
+             }
+             first = t;
+             cur_buf_size = this_size;
+         } else {
+             cur_buf_size += this_size;
+         }
+     }
+
+     // allocate remaining tensors
+     if (cur_buf_size > 0) {
+         if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
+             return NULL;
+         }
+     }
+
+     if (n_buffers == 0) {
+ #ifndef NDEBUG
+         LM_GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
+ #endif
+         return NULL;
+     }
+
+     lm_ggml_backend_buffer_t buffer;
+     if (n_buffers == 1) {
+         buffer = buffers[0];
+     } else {
+         buffer = lm_ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
+     }
+     free(buffers);
+     return buffer;
+ }
+
+ lm_ggml_backend_buffer_t lm_ggml_backend_alloc_ctx_tensors(struct lm_ggml_context * ctx, lm_ggml_backend_t backend) {
+     return lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx, lm_ggml_backend_get_default_buffer_type(backend));
+ }
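
`lm_ggml_backend_alloc_ctx_tensors` is the convenience entry point for allocating every tensor of a `no_alloc` context in one backend buffer, transparently splitting into a multi-buffer when the total exceeds the buffer type's max size. A hedged usage sketch, assuming the public `lm_ggml_init`/`lm_ggml_new_tensor_2d` API from this package's ggml.h and `lm_ggml_backend_cpu_init()` from its CPU backend:

```c
struct lm_ggml_init_params params = {
    /*.mem_size   =*/ 2 * lm_ggml_tensor_overhead(), // metadata only
    /*.mem_buffer =*/ NULL,
    /*.no_alloc   =*/ true, // required by the LM_GGML_ASSERT above
};
struct lm_ggml_context * ctx = lm_ggml_init(params);

struct lm_ggml_tensor * a = lm_ggml_new_tensor_2d(ctx, LM_GGML_TYPE_F32, 64, 64);
struct lm_ggml_tensor * b = lm_ggml_new_tensor_2d(ctx, LM_GGML_TYPE_F32, 64, 64);

lm_ggml_backend_t backend = lm_ggml_backend_cpu_init();
lm_ggml_backend_buffer_t buf = lm_ggml_backend_alloc_ctx_tensors(ctx, backend);
// a->data and b->data now point into buf

lm_ggml_backend_buffer_free(buf);
lm_ggml_backend_free(backend);
lm_ggml_free(ctx);
```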