whisper.rn 0.4.0-rc.8 → 0.4.0-rc.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/android/src/main/CMakeLists.txt +2 -1
  2. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -12
  3. package/android/src/main/java/com/rnwhisper/RNWhisper.java +75 -34
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +20 -3
  5. package/android/src/main/jni.cpp +29 -1
  6. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  7. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  8. package/cpp/ggml-aarch64.c +3209 -0
  9. package/cpp/ggml-aarch64.h +39 -0
  10. package/cpp/ggml-alloc.c +725 -517
  11. package/cpp/ggml-alloc.h +47 -65
  12. package/cpp/ggml-backend-impl.h +166 -55
  13. package/cpp/ggml-backend.cpp +2635 -0
  14. package/cpp/ggml-backend.h +202 -85
  15. package/cpp/ggml-common.h +1853 -0
  16. package/cpp/ggml-cpu-impl.h +614 -0
  17. package/cpp/ggml-impl.h +143 -180
  18. package/cpp/ggml-metal.h +13 -11
  19. package/cpp/ggml-metal.m +2955 -1632
  20. package/cpp/ggml-quants.c +9824 -3263
  21. package/cpp/ggml-quants.h +133 -248
  22. package/cpp/ggml-whisper.metallib +0 -0
  23. package/cpp/ggml.c +8482 -5142
  24. package/cpp/ggml.h +633 -349
  25. package/cpp/rn-whisper.cpp +91 -0
  26. package/cpp/rn-whisper.h +2 -0
  27. package/cpp/whisper.cpp +1427 -658
  28. package/cpp/whisper.h +84 -28
  29. package/ios/RNWhisper.mm +124 -37
  30. package/ios/RNWhisperAudioUtils.h +1 -0
  31. package/ios/RNWhisperAudioUtils.m +20 -13
  32. package/ios/RNWhisperContext.h +3 -2
  33. package/ios/RNWhisperContext.mm +39 -7
  34. package/jest/mock.js +9 -1
  35. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  36. package/lib/commonjs/index.js +48 -19
  37. package/lib/commonjs/index.js.map +1 -1
  38. package/lib/commonjs/version.json +1 -1
  39. package/lib/module/NativeRNWhisper.js.map +1 -1
  40. package/lib/module/index.js +48 -19
  41. package/lib/module/index.js.map +1 -1
  42. package/lib/module/version.json +1 -1
  43. package/lib/typescript/NativeRNWhisper.d.ts +6 -3
  44. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  45. package/lib/typescript/index.d.ts +25 -3
  46. package/lib/typescript/index.d.ts.map +1 -1
  47. package/package.json +6 -5
  48. package/src/NativeRNWhisper.ts +12 -3
  49. package/src/index.ts +63 -24
  50. package/src/version.json +1 -1
  51. package/whisper-rn.podspec +9 -2
  52. package/cpp/ggml-backend.c +0 -1718
  53. package/cpp/ggml-metal-whisper.metal +0 -5820
package/cpp/ggml-alloc.c CHANGED
@@ -14,76 +14,138 @@
 
  //#define WSP_GGML_ALLOCATOR_DEBUG
 
- //#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__)
+ //#define AT_PRINTF(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
  #define AT_PRINTF(...)
 
- // TODO: WSP_GGML_PAD ?
+
+ static bool wsp_ggml_is_view(const struct wsp_ggml_tensor * t) {
+ return t->view_src != NULL;
+ }
+
+ static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
+ if (a->type != b->type) {
+ return false;
+ }
+ for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
+ if (a->ne[i] != b->ne[i]) {
+ return false;
+ }
+ if (a->nb[i] != b->nb[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
+ switch (op) {
+ case WSP_GGML_OP_SCALE:
+ case WSP_GGML_OP_DIAG_MASK_ZERO:
+ case WSP_GGML_OP_DIAG_MASK_INF:
+ case WSP_GGML_OP_ADD:
+ case WSP_GGML_OP_ADD1:
+ case WSP_GGML_OP_SUB:
+ case WSP_GGML_OP_MUL:
+ case WSP_GGML_OP_DIV:
+ case WSP_GGML_OP_SQR:
+ case WSP_GGML_OP_SQRT:
+ case WSP_GGML_OP_LOG:
+ case WSP_GGML_OP_UNARY:
+ case WSP_GGML_OP_ROPE:
+ case WSP_GGML_OP_RMS_NORM:
+ case WSP_GGML_OP_SOFT_MAX:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
  static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
  assert(alignment && !(alignment & (alignment - 1))); // power of 2
  size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
  return offset + align;
  }
 
+ // tallocr
+
+ struct wsp_ggml_tallocr wsp_ggml_tallocr_new(wsp_ggml_backend_buffer_t buffer) {
+ void * base = wsp_ggml_backend_buffer_get_base(buffer);
+ size_t align = wsp_ggml_backend_buffer_get_alignment(buffer);
+
+ assert(align && !(align & (align - 1))); // power of 2
+
+ struct wsp_ggml_tallocr talloc = (struct wsp_ggml_tallocr) {
+ /*.buffer = */ buffer,
+ /*.base = */ base,
+ /*.alignment = */ align,
+ /*.offset = */ aligned_offset(base, 0, align),
+ };
+ return talloc;
+ }
+
+ void wsp_ggml_tallocr_alloc(struct wsp_ggml_tallocr * talloc, struct wsp_ggml_tensor * tensor) {
+ size_t size = wsp_ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
+ size = WSP_GGML_PAD(size, talloc->alignment);
+
+ if (talloc->offset + size > wsp_ggml_backend_buffer_get_size(talloc->buffer)) {
+ WSP_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
+ __func__, tensor->name, size, wsp_ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
+ WSP_GGML_ABORT("not enough space in the buffer");
+ }
+
+ void * addr = (char *)wsp_ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
+ talloc->offset += size;
+
+ assert(((uintptr_t)addr % talloc->alignment) == 0);
+
+ wsp_ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
+ }
+
+ // dynamic tensor allocator
+
  struct free_block {
- void * addr;
+ size_t offset;
  size_t size;
  };
 
- struct wsp_ggml_tallocr {
- struct wsp_ggml_backend_buffer * buffer;
- bool buffer_owned;
- void * base;
+ struct wsp_ggml_dyn_tallocr {
  size_t alignment;
-
  int n_free_blocks;
  struct free_block free_blocks[MAX_FREE_BLOCKS];
-
  size_t max_size;
 
- bool measure;
-
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- struct wsp_ggml_tensor * allocated_tensors[1024];
+ struct {
+ const struct wsp_ggml_tensor * tensor;
+ size_t offset;
+ } allocated_tensors[1024];
  #endif
  };
 
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- static void add_allocated_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
+ static void add_allocated_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, const struct wsp_ggml_tensor * tensor) {
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i] == NULL) {
- alloc->allocated_tensors[i] = tensor;
+ if (alloc->allocated_tensors[i].tensor == NULL) {
+ alloc->allocated_tensors[i].tensor = tensor;
+ alloc->allocated_tensors[i].offset = offset;
  return;
  }
  }
- WSP_GGML_ASSERT(!"out of allocated_tensors");
+ WSP_GGML_ABORT("out of allocated_tensors");
  }
- static void remove_allocated_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
+ static void remove_allocated_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, const struct wsp_ggml_tensor * tensor) {
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i] == tensor ||
- (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
- alloc->allocated_tensors[i] = NULL;
+ if (alloc->allocated_tensors[i].offset == offset) {
+ alloc->allocated_tensors[i].tensor = NULL;
  return;
  }
  }
- printf("tried to free tensor %s not found\n", tensor->name);
- WSP_GGML_ASSERT(!"tensor not found");
+ WSP_GGML_ABORT("tried to free tensor %s not found\n", tensor->name);
  }
  #endif
 
- // check if a tensor is allocated by this buffer
- static bool wsp_ggml_tallocr_is_own(wsp_ggml_tallocr_t alloc, const struct wsp_ggml_tensor * tensor) {
- return tensor->buffer == alloc->buffer && (!tensor->view_src || tensor->view_src->buffer == alloc->buffer);
- }
-
- static bool wsp_ggml_is_view(struct wsp_ggml_tensor * t) {
- return t->view_src != NULL;
- }
-
- void wsp_ggml_tallocr_alloc(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
- WSP_GGML_ASSERT(!wsp_ggml_is_view(tensor)); // views generally get data pointer from one of their sources
- WSP_GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
-
- size_t size = wsp_ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
+ static size_t wsp_ggml_dyn_tallocr_alloc(struct wsp_ggml_dyn_tallocr * alloc, size_t size, const struct wsp_ggml_tensor * tensor) {
  size = aligned_offset(NULL, size, alloc->alignment);
 
  AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
@@ -109,16 +171,16 @@ void wsp_ggml_tallocr_alloc(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * t
  if (block->size >= size) {
  best_fit_block = alloc->n_free_blocks - 1;
  } else {
- fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
+ // this should never happen
+ WSP_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
  __func__, size, max_avail);
- WSP_GGML_ASSERT(!"not enough space in the buffer");
- return;
+ WSP_GGML_ABORT("not enough space in the buffer");
  }
  }
 
  struct free_block * block = &alloc->free_blocks[best_fit_block];
- void * addr = block->addr;
- block->addr = (char*)block->addr + size;
+ size_t offset = block->offset;
+ block->offset = offset + size;
  block->size -= size;
  if (block->size == 0) {
  // remove block if empty
@@ -128,59 +190,63 @@ void wsp_ggml_tallocr_alloc(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * t
  }
  }
 
- AT_PRINTF("block %d, addr %p\n", best_fit_block, addr);
-
- tensor->data = addr;
- tensor->buffer = alloc->buffer;
- if (!alloc->measure) {
- wsp_ggml_backend_buffer_init_tensor(alloc->buffer, tensor);
- }
+ AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset);
 
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- add_allocated_tensor(alloc, tensor);
- size_t cur_max = (char*)addr - (char*)alloc->base + size;
+ add_allocated_tensor(alloc, offset, tensor);
+ size_t cur_max = offset + size;
  if (cur_max > alloc->max_size) {
- printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
+ // sort allocated_tensors by offset
+ for (int i = 0; i < 1024; i++) {
+ for (int j = i + 1; j < 1024; j++) {
+ if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) {
+ const struct wsp_ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
+ size_t tmp_offset = alloc->allocated_tensors[i].offset;
+ alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
+ alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset;
+ alloc->allocated_tensors[j].tensor = tmp_tensor;
+ alloc->allocated_tensors[j].offset = tmp_offset;
+ }
+ }
+ }
+ WSP_GGML_LOG_DEBUG("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i]) {
- printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, wsp_ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
+ if (alloc->allocated_tensors[i].tensor) {
+ WSP_GGML_LOG_DEBUG("%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
+ alloc->allocated_tensors[i].offset,
+ alloc->allocated_tensors[i].offset + wsp_ggml_nbytes(alloc->allocated_tensors[i].tensor),
+ wsp_ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
  }
  }
- printf("\n");
+ WSP_GGML_LOG_DEBUG("\n");
  }
  #endif
 
- alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->base + size);
- }
+ alloc->max_size = MAX(alloc->max_size, offset + size);
 
- // this is a very naive implementation, but for our case the number of free blocks should be very small
- static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
- if (wsp_ggml_tallocr_is_own(alloc, tensor) == false) {
- // the tensor was not allocated in this buffer
- // this can happen because the graph allocator will try to free weights and other tensors from different buffers
- // the easiest way to deal with this is just to ignore it
- // AT_PRINTF("ignoring %s (their buffer: %p, our buffer: %p)\n", tensor->name, (void *)tensor->buffer, (void *)alloc->buffer);
- return;
- }
+ return offset;
 
- void * ptr = tensor->data;
+ WSP_GGML_UNUSED(tensor);
+ }
 
- size_t size = wsp_ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
+ // this is a very naive implementation, but for our case the number of free blocks should be very small
+ static void wsp_ggml_dyn_tallocr_free_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct wsp_ggml_tensor * tensor) {
  size = aligned_offset(NULL, size, alloc->alignment);
- AT_PRINTF("%s: freeing %s at %p (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, ptr, size, alloc->n_free_blocks);
+
+ AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks);
 
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- remove_allocated_tensor(alloc, tensor);
+ remove_allocated_tensor(alloc, offset, tensor);
  #endif
 
  // see if we can merge with an existing block
  for (int i = 0; i < alloc->n_free_blocks; i++) {
  struct free_block * block = &alloc->free_blocks[i];
  // check if ptr is at the end of the block
- if ((char*)block->addr + block->size == ptr) {
+ if (block->offset + block->size == offset) {
  block->size += size;
  // check if we can merge with the next block
- if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
+ if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) {
  block->size += alloc->free_blocks[i+1].size;
  alloc->n_free_blocks--;
  for (int j = i+1; j < alloc->n_free_blocks; j++) {
@@ -190,11 +256,11 @@ static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_gg
  return;
  }
  // check if ptr is at the beginning of the block
- if ((char*)ptr + size == block->addr) {
- block->addr = ptr;
+ if (offset + size == block->offset) {
+ block->offset = offset;
  block->size += size;
  // check if we can merge with the previous block
- if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
+ if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) {
  alloc->free_blocks[i-1].size += block->size;
  alloc->n_free_blocks--;
  for (int j = i; j < alloc->n_free_blocks; j++) {
@@ -208,7 +274,7 @@ static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_gg
  WSP_GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
  // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
  int insert_pos = 0;
- while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
+ while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) {
  insert_pos++;
  }
  // shift all blocks from insert_pos onward to make room for the new block
@@ -216,614 +282,756 @@ static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_gg
  alloc->free_blocks[i] = alloc->free_blocks[i-1];
  }
  // insert the new block
- alloc->free_blocks[insert_pos].addr = ptr;
+ alloc->free_blocks[insert_pos].offset = offset;
  alloc->free_blocks[insert_pos].size = size;
  alloc->n_free_blocks++;
+
+ WSP_GGML_UNUSED(tensor);
  }
 
- void wsp_ggml_tallocr_reset(wsp_ggml_tallocr_t alloc) {
+ static void wsp_ggml_dyn_tallocr_reset(struct wsp_ggml_dyn_tallocr * alloc) {
  alloc->n_free_blocks = 1;
- size_t align_offset = aligned_offset(alloc->base, 0, alloc->alignment);
- alloc->free_blocks[0].addr = (char *)alloc->base + align_offset;
+ alloc->free_blocks[0].offset = 0;
+ alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
+ alloc->max_size = 0;
 
- if (alloc->measure) {
- alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
- } else {
- alloc->free_blocks[0].size = wsp_ggml_backend_buffer_get_size(alloc->buffer) - align_offset;
- wsp_ggml_backend_buffer_reset(alloc->buffer);
+ #ifdef WSP_GGML_ALLOCATOR_DEBUG
+ for (int i = 0; i < 1024; i++) {
+ alloc->allocated_tensors[i].tensor = NULL;
  }
+ #endif
  }
 
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new(void * data, size_t size, size_t alignment) {
- struct wsp_ggml_backend_buffer * buffer = wsp_ggml_backend_cpu_buffer_from_ptr(data, size);
+ static struct wsp_ggml_dyn_tallocr * wsp_ggml_dyn_tallocr_new(size_t alignment) {
+ struct wsp_ggml_dyn_tallocr * alloc = (struct wsp_ggml_dyn_tallocr *)malloc(sizeof(struct wsp_ggml_dyn_tallocr));
 
- wsp_ggml_tallocr_t alloc = (wsp_ggml_tallocr_t)malloc(sizeof(struct wsp_ggml_tallocr));
-
- *alloc = (struct wsp_ggml_tallocr) {
- /*.buffer = */ buffer,
- /*.buffer_owned = */ true,
- /*.base = */ wsp_ggml_backend_buffer_get_base(buffer),
+ *alloc = (struct wsp_ggml_dyn_tallocr) {
  /*.alignment = */ alignment,
  /*.n_free_blocks = */ 0,
  /*.free_blocks = */ {{0}},
  /*.max_size = */ 0,
- /*.measure = */ false,
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- /*.allocated_tensors = */ {0},
+ /*.allocated_tensors = */ {{0}},
  #endif
  };
 
- wsp_ggml_tallocr_reset(alloc);
+ wsp_ggml_dyn_tallocr_reset(alloc);
 
  return alloc;
  }
 
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure(size_t alignment) {
- wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new((void *)0x1000, SIZE_MAX/2, alignment);
- alloc->measure = true;
+ static void wsp_ggml_dyn_tallocr_free(struct wsp_ggml_dyn_tallocr * alloc) {
+ free(alloc);
+ }
 
- return alloc;
+ static size_t wsp_ggml_dyn_tallocr_max_size(struct wsp_ggml_dyn_tallocr * alloc) {
+ return alloc->max_size;
  }
 
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure_from_buft(struct wsp_ggml_backend_buffer_type * buft) {
- // create a backend buffer to get the correct tensor allocation sizes
- wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_buft_alloc_buffer(buft, 1);
 
- // TODO: move alloc initialization to a common wsp_ggml_tallocr_new_impl function
- wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new_from_buffer(buffer);
- alloc->buffer_owned = true;
- alloc->measure = true;
- wsp_ggml_tallocr_reset(alloc);
- return alloc;
- }
+ /////////////////////////////////////
 
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure_from_backend(struct wsp_ggml_backend * backend) {
- return wsp_ggml_tallocr_new_measure_from_buft(wsp_ggml_backend_get_default_buffer_type(backend));
- }
+ // graph allocator
 
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_buft(struct wsp_ggml_backend_buffer_type * buft, size_t size) {
- // create a backend buffer to get the correct tensor allocation sizes
- wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_buft_alloc_buffer(buft, size);
- wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new_from_buffer(buffer);
- alloc->buffer_owned = true;
- return alloc;
- }
+ struct hash_node {
+ int n_children;
+ int n_views;
+ int buffer_id;
+ size_t offset; // offset within the buffer
+ bool allocated;
+ };
 
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size) {
- return wsp_ggml_tallocr_new_from_buft(wsp_ggml_backend_get_default_buffer_type(backend), size);
- }
+ struct tensor_alloc {
+ int buffer_id;
+ size_t offset;
+ size_t size_max; // 0 = pre-allocated, unused, or view
+ };
 
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer) {
- wsp_ggml_tallocr_t alloc = (wsp_ggml_tallocr_t)malloc(sizeof(struct wsp_ggml_tallocr));
+ struct leaf_alloc {
+ struct tensor_alloc leaf;
+ };
 
- *alloc = (struct wsp_ggml_tallocr) {
- /*.buffer = */ buffer,
- /*.buffer_owned = */ false,
- /*.base = */ wsp_ggml_backend_buffer_get_base(buffer),
- /*.alignment = */ wsp_ggml_backend_buffer_get_alignment(buffer),
- /*.n_free_blocks = */ 0,
- /*.free_blocks = */ {{0}},
- /*.max_size = */ 0,
- /*.measure = */ false,
- #ifdef WSP_GGML_ALLOCATOR_DEBUG
- /*.allocated_tensors = */ {0},
- #endif
- };
+ struct node_alloc {
+ struct tensor_alloc dst;
+ struct tensor_alloc src[WSP_GGML_MAX_SRC];
+ };
 
- wsp_ggml_tallocr_reset(alloc);
+ struct wsp_ggml_gallocr {
+ wsp_ggml_backend_buffer_type_t * bufts; // [n_buffers]
+ wsp_ggml_backend_buffer_t * buffers; // [n_buffers]
+ struct wsp_ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
+ int n_buffers;
 
- return alloc;
- }
+ struct wsp_ggml_hash_set hash_set;
+ struct hash_node * hash_values; // [hash_set.size]
 
- struct wsp_ggml_backend_buffer * wsp_ggml_tallocr_get_buffer(wsp_ggml_tallocr_t alloc) {
- return alloc->buffer;
- }
+ struct node_alloc * node_allocs; // [n_nodes]
+ int n_nodes;
 
- void wsp_ggml_tallocr_free(wsp_ggml_tallocr_t alloc) {
- if (alloc == NULL) {
- return;
- }
+ struct leaf_alloc * leaf_allocs; // [n_leafs]
+ int n_leafs;
+ };
 
- if (alloc->buffer_owned) {
- wsp_ggml_backend_buffer_free(alloc->buffer);
- }
- free(alloc);
- }
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new_n(wsp_ggml_backend_buffer_type_t * bufts, int n_bufs) {
+ wsp_ggml_gallocr_t galloc = (wsp_ggml_gallocr_t)calloc(1, sizeof(struct wsp_ggml_gallocr));
+ WSP_GGML_ASSERT(galloc != NULL);
 
- bool wsp_ggml_tallocr_is_measure(wsp_ggml_tallocr_t alloc) {
- return alloc->measure;
- }
+ galloc->bufts = calloc(n_bufs, sizeof(wsp_ggml_backend_buffer_type_t));
+ WSP_GGML_ASSERT(galloc->bufts != NULL);
 
- size_t wsp_ggml_tallocr_max_size(wsp_ggml_tallocr_t alloc) {
- return alloc->max_size;
- }
+ galloc->buffers = calloc(n_bufs, sizeof(wsp_ggml_backend_buffer_t));
+ WSP_GGML_ASSERT(galloc->buffers != NULL);
 
- // graph allocator
+ galloc->buf_tallocs = calloc(n_bufs, sizeof(struct wsp_ggml_dyn_tallocr *));
+ WSP_GGML_ASSERT(galloc->buf_tallocs != NULL);
 
- struct hash_node {
- int n_children;
- int n_views;
- };
+ for (int i = 0; i < n_bufs; i++) {
+ galloc->bufts[i] = bufts[i];
+ galloc->buffers[i] = NULL;
 
- struct wsp_ggml_gallocr {
- wsp_ggml_tallocr_t talloc;
- struct wsp_ggml_hash_set hash_set;
- struct hash_node * hash_values;
- size_t hash_values_size;
- wsp_ggml_tallocr_t * hash_allocs;
- int * parse_seq;
- int parse_seq_len;
- };
+ // check if the same buffer type is used multiple times and reuse the same allocator
+ for (int j = 0; j < i; j++) {
+ if (bufts[i] == bufts[j]) {
+ galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
+ break;
+ }
+ }
 
- wsp_ggml_gallocr_t wsp_ggml_gallocr_new(void) {
- wsp_ggml_gallocr_t galloc = (wsp_ggml_gallocr_t)malloc(sizeof(struct wsp_ggml_gallocr));
-
- *galloc = (struct wsp_ggml_gallocr) {
- /*.talloc = */ NULL,
- /*.hash_set = */ {0},
- /*.hash_values = */ NULL,
- /*.hash_values_size = */ 0,
- /*.hash_allocs = */ NULL,
- /*.parse_seq = */ NULL,
- /*.parse_seq_len = */ 0,
- };
+ if (galloc->buf_tallocs[i] == NULL) {
+ size_t alignment = wsp_ggml_backend_buft_get_alignment(bufts[i]);
+ galloc->buf_tallocs[i] = wsp_ggml_dyn_tallocr_new(alignment);
+ }
+ }
+ galloc->n_buffers = n_bufs;
 
  return galloc;
  }
 
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new(wsp_ggml_backend_buffer_type_t buft) {
+ return wsp_ggml_gallocr_new_n(&buft, 1);
+ }
+
  void wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc) {
  if (galloc == NULL) {
  return;
  }
 
- if (galloc->hash_set.keys != NULL) {
- free(galloc->hash_set.keys);
- }
- if (galloc->hash_values != NULL) {
- free(galloc->hash_values);
- }
- if (galloc->hash_allocs != NULL) {
- free(galloc->hash_allocs);
- }
- if (galloc->parse_seq != NULL) {
- free(galloc->parse_seq);
+ for (int i = 0; i < galloc->n_buffers; i++) {
+ if (galloc->buffers != NULL) {
+ // skip if already freed
+ bool freed = false;
+ for (int j = 0; j < i; j++) {
+ if (galloc->buffers[j] == galloc->buffers[i]) {
+ freed = true;
+ break;
+ }
+ }
+ if (!freed) {
+ wsp_ggml_backend_buffer_free(galloc->buffers[i]);
+ }
+ }
+ if (galloc->buf_tallocs != NULL) {
+ // skip if already freed
+ bool freed = false;
+ for (int j = 0; j < i; j++) {
+ if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+ freed = true;
+ break;
+ }
+ }
+ if (!freed) {
+ wsp_ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
+ }
+ }
  }
+
+ wsp_ggml_hash_set_free(&galloc->hash_set);
+ free(galloc->hash_values);
+ free(galloc->bufts);
+ free(galloc->buffers);
+ free(galloc->buf_tallocs);
+ free(galloc->node_allocs);
+ free(galloc->leaf_allocs);
  free(galloc);
  }
 
- void wsp_ggml_gallocr_set_parse_seq(wsp_ggml_gallocr_t galloc, const int * list, int n) {
- free(galloc->parse_seq);
- galloc->parse_seq = malloc(sizeof(int) * n);
-
- for (int i = 0; i < n; i++) {
- galloc->parse_seq[i] = list[i];
- }
- galloc->parse_seq_len = n;
- }
+ typedef struct wsp_ggml_gallocr * wsp_ggml_gallocr_t;
 
- static struct hash_node * hash_get(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
- size_t i = wsp_ggml_hash_find_or_insert(galloc->hash_set, t);
+ static struct hash_node * wsp_ggml_gallocr_hash_get(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+ size_t i = wsp_ggml_hash_find_or_insert(&galloc->hash_set, t);
  return &galloc->hash_values[i];
  }
 
- static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
- if (a->type != b->type) {
- return false;
- }
- for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
- if (a->ne[i] != b->ne[i]) {
- return false;
- }
- if (a->nb[i] != b->nb[i]) {
- return false;
- }
- }
- return true;
+ static bool wsp_ggml_gallocr_is_own(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+ return wsp_ggml_gallocr_hash_get(galloc, t)->allocated;
  }
 
- static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
- switch (op) {
- case WSP_GGML_OP_SCALE:
- case WSP_GGML_OP_DIAG_MASK_ZERO:
- case WSP_GGML_OP_DIAG_MASK_INF:
- case WSP_GGML_OP_ADD:
- case WSP_GGML_OP_ADD1:
- case WSP_GGML_OP_SUB:
- case WSP_GGML_OP_MUL:
- case WSP_GGML_OP_DIV:
- case WSP_GGML_OP_SQR:
- case WSP_GGML_OP_SQRT:
- case WSP_GGML_OP_LOG:
- case WSP_GGML_OP_UNARY:
- case WSP_GGML_OP_ROPE:
- case WSP_GGML_OP_RMS_NORM:
- case WSP_GGML_OP_SOFT_MAX:
- return true;
-
- default:
- return false;
- }
+ static void wsp_ggml_gallocr_set_node_offset(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, int buffer_id, size_t offset) {
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+ hn->buffer_id = buffer_id;
+ hn->offset = offset;
+ hn->allocated = true;
  }
 
- static wsp_ggml_tallocr_t node_tallocr(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
- if (galloc->talloc != NULL) {
- return galloc->talloc;
- }
-
- return galloc->hash_allocs[wsp_ggml_hash_find_or_insert(galloc->hash_set, node)];
+ static bool wsp_ggml_gallocr_is_allocated(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+ return t->data != NULL || wsp_ggml_gallocr_hash_get(galloc, t)->allocated;
  }
 
- static void init_view(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * view, bool update_backend) {
- wsp_ggml_tallocr_t alloc = node_tallocr(galloc, view);
+ static void wsp_ggml_gallocr_allocate_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, int buffer_id) {
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
 
- WSP_GGML_ASSERT(view->view_src != NULL && view->view_src->data != NULL);
- if (update_backend) {
- view->backend = view->view_src->backend;
- }
- // views are initialized in the alloc buffer rather than the view_src buffer
- view->buffer = alloc->buffer;
- view->data = (char *)view->view_src->data + view->view_offs;
-
- assert(wsp_ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->buft == alloc->buffer->buft);
+ if (!wsp_ggml_gallocr_is_allocated(galloc, node) && !wsp_ggml_is_view(node)) {
+ hn->allocated = true;
+ assert(hn->offset == 0);
 
- if (!alloc->measure) {
- wsp_ggml_backend_buffer_init_tensor(alloc->buffer, view);
- }
- }
+ // try to reuse a parent's buffer (inplace)
+ if (wsp_ggml_op_can_inplace(node->op)) {
+ for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
+ struct wsp_ggml_tensor * parent = node->src[i];
+ if (parent == NULL) {
+ continue;
+ }
 
- static void allocate_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
- wsp_ggml_tallocr_t alloc = node_tallocr(galloc, node);
+ // if the node's data is external, then we cannot re-use it
+ if (!wsp_ggml_gallocr_is_own(galloc, parent)) {
+ AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
+ continue;
+ }
 
- if (node->data == NULL) {
- if (wsp_ggml_is_view(node)) {
- init_view(galloc, node, true);
- } else {
- // see if we can reuse a parent's buffer (inplace)
- if (wsp_ggml_op_can_inplace(node->op)) {
- for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
- struct wsp_ggml_tensor * parent = node->src[i];
- if (parent == NULL) {
- break;
- }
+ // outputs cannot be reused
+ if (parent->flags & WSP_GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & WSP_GGML_TENSOR_FLAG_OUTPUT)) {
+ AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
+ continue;
+ }
 
- // if the node's data is external, then we cannot re-use it
- if (wsp_ggml_tallocr_is_own(alloc, parent) == false) {
- AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
- continue;
- }
+ if (!wsp_ggml_are_same_layout(node, parent)) {
+ AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
+ continue;
+ }
 
- struct hash_node * p_hn = hash_get(galloc, parent);
- if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && wsp_ggml_are_same_layout(node, parent)) {
- if (wsp_ggml_is_view(parent)) {
- struct wsp_ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = hash_get(galloc, view_src);
- if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
- // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
- // the parent's data that it will need later (same layout requirement). the problem is that then
- // we cannot free the tensor because the original address of the allocation is lost.
- // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
- // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
- AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
- node->view_src = view_src;
- view_src_hn->n_views += 1;
- init_view(galloc, node, false);
- return;
- }
- } else {
- AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
- node->view_src = parent;
- p_hn->n_views += 1;
- init_view(galloc, node, false);
+ struct hash_node * p_hn = wsp_ggml_gallocr_hash_get(galloc, parent);
+ if (p_hn->n_children == 1 && p_hn->n_views == 0) {
+ if (wsp_ggml_is_view(parent)) {
+ struct wsp_ggml_tensor * view_src = parent->view_src;
+ struct hash_node * view_src_hn = wsp_ggml_gallocr_hash_get(galloc, view_src);
+ if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
+ AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
+ assert(view_src_hn->offset == p_hn->offset);
+ hn->buffer_id = p_hn->buffer_id;
+ hn->offset = p_hn->offset;
+ p_hn->allocated = false; // avoid freeing the parent
+ view_src_hn->allocated = false;
  return;
  }
+ } else {
+ AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
+ hn->buffer_id = p_hn->buffer_id;
+ hn->offset = p_hn->offset;
+ p_hn->allocated = false; // avoid freeing the parent
+ return;
  }
  }
  }
- wsp_ggml_tallocr_alloc(alloc, node);
  }
+ // allocate tensor from the buffer
+ struct wsp_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+ wsp_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
+ size_t size = wsp_ggml_backend_buft_get_alloc_size(buft, node);
+ size_t offset = wsp_ggml_dyn_tallocr_alloc(alloc, size, node);
+ hn->buffer_id = buffer_id;
+ hn->offset = offset;
+ return;
  }
  }
 
- static void free_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
- wsp_ggml_tallocr_t alloc = node_tallocr(galloc, node);
+ static void wsp_ggml_gallocr_free_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
+ // graph outputs are never freed
+ if (node->flags & WSP_GGML_TENSOR_FLAG_OUTPUT) {
+ AT_PRINTF("not freeing output %s\n", node->name);
+ return;
+ }
+
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+ size_t offset = hn->offset;
+ int buffer_id = hn->buffer_id;
+ struct wsp_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+ wsp_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
+ size_t size = wsp_ggml_backend_buft_get_alloc_size(buft, node);
+ wsp_ggml_dyn_tallocr_free_tensor(alloc, offset, size, node);
+ hn->allocated = false;
+ }
 
- wsp_ggml_tallocr_free_tensor(alloc, node);
+ static int get_node_buffer_id(const int * node_buffer_ids, int i) {
+ return node_buffer_ids ? node_buffer_ids[i] : 0;
  }
 
- static void wsp_ggml_tallocr_alloc_graph_impl(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * gf) {
- const int * parse_seq = galloc->parse_seq;
- int parse_seq_len = galloc->parse_seq_len;
+ static void wsp_ggml_gallocr_alloc_graph_impl(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
+ // clear hash tables
+ wsp_ggml_hash_set_reset(&galloc->hash_set);
+ memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);
 
- // count number of children and views
- for (int i = 0; i < gf->n_nodes; i++) {
- struct wsp_ggml_tensor * node = gf->nodes[i];
+ // allocate leafs
+ // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
+ for (int i = 0; i < graph->n_leafs; i++) {
+ struct wsp_ggml_tensor * leaf = graph->leafs[i];
+ wsp_ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
+ }
 
- if (wsp_ggml_is_view(node)) {
+ // count number of children and views
+ // allocate other graph inputs and leafs first to avoid overwriting them
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+
+ // TODO: better way to add external dependencies
+ // WSP_GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
+ // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
+ // itself is never used and should not be considered a dependency
+ if (wsp_ggml_is_view(node) && node->op != WSP_GGML_OP_NONE) {
  struct wsp_ggml_tensor * view_src = node->view_src;
- hash_get(galloc, view_src)->n_views += 1;
- if (node->buffer == NULL && node->data != NULL) {
- // view of a pre-allocated tensor, didn't call init_view() yet
- init_view(galloc, node, true);
- }
+ wsp_ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
+ }
+
+ if (node->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
+ wsp_ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
  }
 
  for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
+ struct wsp_ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ continue;
  }
- hash_get(galloc, parent)->n_children += 1;
- if (wsp_ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) {
- init_view(galloc, parent, true);
+
+ wsp_ggml_gallocr_hash_get(galloc, src)->n_children += 1;
+
+ // allocate explicit inputs
+ if (src->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
+ wsp_ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
  }
  }
- }
+ }
 
  // allocate tensors
- // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
- int last_barrier_pos = 0;
- int n_nodes = parse_seq_len ? parse_seq_len : gf->n_nodes;
-
- for (int ind = 0; ind < n_nodes; ind++) {
- // allocate a node if there is no parse_seq or this is not a barrier
- if (parse_seq_len == 0 || parse_seq[ind] != -1) {
- int i = parse_seq_len ? parse_seq[ind] : ind;
- struct wsp_ggml_tensor * node = gf->nodes[i];
-
- // allocate parents (leafs)
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- allocate_node(galloc, parent);
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+ int buffer_id = get_node_buffer_id(node_buffer_ids, i);
+
+ // allocate parents (only leafs need to be allocated at this point)
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ continue;
  }
+ wsp_ggml_gallocr_allocate_node(galloc, parent, buffer_id);
+ }
 
- // allocate node
- allocate_node(galloc, node);
+ // allocate node
+ wsp_ggml_gallocr_allocate_node(galloc, node, buffer_id);
 
- AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_name(node->op), node->name);
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- AT_PRINTF("%s", parent->name);
- if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
- AT_PRINTF(", ");
- }
+ AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_desc(node), node->name);
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ continue;
+ }
+ AT_PRINTF("%s", parent->name);
+ if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
+ AT_PRINTF(", ");
  }
- AT_PRINTF("\n");
  }
+ AT_PRINTF("\n");
 
  // update parents
- // update immediately if there is no parse_seq
- // update only at barriers if there is parse_seq
- if ((parse_seq_len == 0) || parse_seq[ind] == -1) {
- int update_start = parse_seq_len ? last_barrier_pos : ind;
- int update_end = parse_seq_len ? ind : ind + 1;
- for (int i = update_start; i < update_end; i++) {
- int node_i = parse_seq_len ? parse_seq[i] : i;
- struct wsp_ggml_tensor * node = gf->nodes[node_i];
-
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- struct hash_node * p_hn = hash_get(galloc, parent);
- p_hn->n_children -= 1;
-
- //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
-
- if (p_hn->n_children == 0 && p_hn->n_views == 0) {
- if (wsp_ggml_is_view(parent)) {
- struct wsp_ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = hash_get(galloc, view_src);
- view_src_hn->n_views -= 1;
- AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
- if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0) {
- free_node(galloc, view_src);
- }
- }
- else {
- free_node(galloc, parent);
- }
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ continue;
+ }
+ struct hash_node * p_hn = wsp_ggml_gallocr_hash_get(galloc, parent);
+ p_hn->n_children -= 1;
+
+ AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
+ parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);
+
+ if (p_hn->n_children == 0 && p_hn->n_views == 0) {
+ if (wsp_ggml_is_view(parent)) {
+ struct wsp_ggml_tensor * view_src = parent->view_src;
+ struct hash_node * view_src_hn = wsp_ggml_gallocr_hash_get(galloc, view_src);
+ view_src_hn->n_views -= 1;
+ AT_PRINTF("view_src %s: %d children, %d views\n",
+ view_src->name, view_src_hn->n_children, view_src_hn->n_views);
+ if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
+ wsp_ggml_gallocr_free_node(galloc, view_src);
  }
  }
+ else if (p_hn->allocated) {
+ wsp_ggml_gallocr_free_node(galloc, parent);
+ }
  }
  AT_PRINTF("\n");
- if (parse_seq_len) {
- last_barrier_pos = ind + 1;
- }
  }
  }
  }
 
- size_t wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, wsp_ggml_tallocr_t talloc, struct wsp_ggml_cgraph * graph) {
- size_t hash_size = graph->visited_hash_table.size;
+ bool wsp_ggml_gallocr_reserve_n(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
+ size_t min_hash_size = graph->n_nodes + graph->n_leafs;
+ // add 25% margin to avoid hash collisions
+ min_hash_size += min_hash_size / 4;
+
+ // initialize hash table
+ if (galloc->hash_set.size < min_hash_size) {
+ wsp_ggml_hash_set_free(&galloc->hash_set);
+ galloc->hash_set = wsp_ggml_hash_set_new(min_hash_size);
+ WSP_GGML_ASSERT(galloc->hash_set.keys != NULL);
+
+ free(galloc->hash_values);
+ galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
+ WSP_GGML_ASSERT(galloc->hash_values != NULL);
+ }
+
+ // reset allocators
+ for (int i = 0; i < galloc->n_buffers; i++) {
+ wsp_ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
+ }
+
+ // allocate in hash table
+ wsp_ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);
 
- // check if the hash table is initialized and large enough
- if (galloc->hash_set.size < hash_size) {
- if (galloc->hash_set.keys != NULL) {
- free(galloc->hash_set.keys);
+ // set the node_allocs from the hash table
+ if (galloc->n_nodes < graph->n_nodes) {
+ free(galloc->node_allocs);
+ galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
+ WSP_GGML_ASSERT(galloc->node_allocs != NULL);
+ }
+ galloc->n_nodes = graph->n_nodes;
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+ struct node_alloc * node_alloc = &galloc->node_allocs[i];
+ if (node->view_src || node->data) {
+ node_alloc->dst.buffer_id = -1;
+ node_alloc->dst.offset = SIZE_MAX;
+ node_alloc->dst.size_max = 0;
+ } else {
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+ node_alloc->dst.buffer_id = hn->buffer_id;
+ node_alloc->dst.offset = hn->offset;
+ node_alloc->dst.size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
+ }
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * src = node->src[j];
+ if (!src || src->view_src || src->data) {
+ node_alloc->src[j].buffer_id = -1;
+ node_alloc->src[j].offset = SIZE_MAX;
+ node_alloc->src[j].size_max = 0;
+ } else {
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, src);
+ node_alloc->src[j].buffer_id = hn->buffer_id;
+ node_alloc->src[j].offset = hn->offset;
+ node_alloc->src[j].size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
+ }
  }
- if (galloc->hash_values != NULL) {
- free(galloc->hash_values);
+ }
+ if (galloc->n_leafs < graph->n_leafs) {
+ free(galloc->leaf_allocs);
+ galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
+ WSP_GGML_ASSERT(galloc->leaf_allocs != NULL);
+ }
+ galloc->n_leafs = graph->n_leafs;
+ for (int i = 0; i < graph->n_leafs; i++) {
+ struct wsp_ggml_tensor * leaf = graph->leafs[i];
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, leaf);
+ if (leaf->view_src || leaf->data) {
+ galloc->leaf_allocs[i].leaf.buffer_id = -1;
+ galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
+ galloc->leaf_allocs[i].leaf.size_max = 0;
+ } else {
+ galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
+ galloc->leaf_allocs[i].leaf.offset = hn->offset;
+ galloc->leaf_allocs[i].leaf.size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
  }
- galloc->hash_set.keys = malloc(sizeof(struct wsp_ggml_tensor *) * hash_size);
- galloc->hash_set.size = hash_size;
- galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size);
  }
 
- // reset hash table
- memset(galloc->hash_set.keys, 0, sizeof(struct wsp_ggml_tensor *) * hash_size);
- memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);
+ // reallocate buffers if needed
+ for (int i = 0; i < galloc->n_buffers; i++) {
+ // if the buffer type is used multiple times, we reuse the same buffer
+ for (int j = 0; j < i; j++) {
+ if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+ galloc->buffers[i] = galloc->buffers[j];
+ break;
+ }
+ }
+
+ size_t cur_size = galloc->buffers[i] ? wsp_ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
+ size_t new_size = wsp_ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);
 
- galloc->talloc = talloc;
- wsp_ggml_tallocr_alloc_graph_impl(galloc, graph);
- galloc->talloc = NULL;
+ // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
+ if (new_size > cur_size || galloc->buffers[i] == NULL) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, wsp_ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
+ #endif
+
+ wsp_ggml_backend_buffer_free(galloc->buffers[i]);
+ galloc->buffers[i] = wsp_ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
+ if (galloc->buffers[i] == NULL) {
+ WSP_GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, wsp_ggml_backend_buft_name(galloc->bufts[i]), new_size);
+ return false;
+ }
+ wsp_ggml_backend_buffer_set_usage(galloc->buffers[i], WSP_GGML_BACKEND_BUFFER_USAGE_COMPUTE);
+ }
+ }
 
- size_t max_size = wsp_ggml_tallocr_max_size(talloc);
+ return true;
+ }
 
- return max_size;
+ bool wsp_ggml_gallocr_reserve(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph *graph) {
+ return wsp_ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
  }
 
- void wsp_ggml_gallocr_alloc_graph_n(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, struct wsp_ggml_hash_set hash_set, wsp_ggml_tallocr_t * hash_node_talloc) {
- const size_t hash_size = hash_set.size;
+ static void wsp_ggml_gallocr_init_tensor(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
+ int buffer_id = tensor_alloc->buffer_id;
+ assert(tensor->data || tensor->view_src || wsp_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
 
- WSP_GGML_ASSERT(hash_size >= (size_t)(graph->n_nodes + graph->n_leafs));
+ if (tensor->view_src != NULL) {
+ if (tensor->buffer == NULL) {
+ assert(tensor_alloc->offset == SIZE_MAX);
+ if (tensor->view_src->buffer == NULL) {
+ // this tensor was allocated without ggml-backend
+ return;
+ }
+ wsp_ggml_backend_view_init(tensor);
+ }
+ } else {
+ if (tensor->data == NULL) {
+ assert(tensor_alloc->offset != SIZE_MAX);
+ assert(wsp_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
+ void * base = wsp_ggml_backend_buffer_get_base(galloc->buffers[buffer_id]);
+ void * addr = (char *)base + tensor_alloc->offset;
+ wsp_ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr);
+ } else {
+ if (tensor->buffer == NULL) {
+ // this tensor was allocated without ggml-backend
+ return;
+ }
+ }
+ }
+ }
 
- galloc->talloc = NULL;
+ static bool wsp_ggml_gallocr_node_needs_realloc(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, struct tensor_alloc * talloc) {
+ size_t node_size = (node->data || node->view_src) ? 0 : wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
+ return talloc->size_max >= node_size;
+ }
 
- // alloc hash_values if needed
- if (galloc->hash_values == NULL || galloc->hash_values_size < hash_size) {
- free(galloc->hash_values);
- galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size);
- galloc->hash_values_size = hash_size;
+ static bool wsp_ggml_gallocr_needs_realloc(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph) {
+ if (galloc->n_nodes != graph->n_nodes) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
+ #endif
+ return true;
  }
 
- // free hash_set.keys if needed
- if (galloc->hash_set.keys != NULL) {
- free(galloc->hash_set.keys);
+ if (galloc->n_leafs != graph->n_leafs) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
+ #endif
+ return true;
  }
- galloc->hash_set = hash_set;
 
- // reset hash values
- memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+ struct node_alloc * node_alloc = &galloc->node_allocs[i];
 
- galloc->hash_allocs = hash_node_talloc;
+ if (!wsp_ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
+ #endif
+ return true;
+ }
 
- wsp_ggml_tallocr_alloc_graph_impl(galloc, graph);
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ continue;
+ }
+ if (!wsp_ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
+ #endif
+ return true;
+ }
+ }
+ }
 
- // remove unowned resources
- galloc->hash_set.keys = NULL;
- galloc->hash_allocs = NULL;
+ return false;
  }
 
704
- // legacy API wrapper
705
-
706
- struct wsp_ggml_allocr {
707
- wsp_ggml_tallocr_t talloc;
708
- wsp_ggml_gallocr_t galloc;
709
- };
866
+ bool wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph) {
867
+ if (wsp_ggml_gallocr_needs_realloc(galloc, graph)) {
868
+ if (galloc->n_buffers == 1) {
869
+ #ifndef NDEBUG
870
+ WSP_GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
871
+ #endif
872
+ if (!wsp_ggml_gallocr_reserve(galloc, graph)) {
873
+ return false;
874
+ }
875
+ } else {
876
+ #ifndef NDEBUG
877
+ WSP_GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
878
+ #endif
879
+ return false;
880
+ }
881
+ }
710
882
 
711
- static wsp_ggml_allocr_t wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_t talloc) {
712
- wsp_ggml_allocr_t alloc = (wsp_ggml_allocr_t)malloc(sizeof(struct wsp_ggml_allocr));
713
- *alloc = (struct wsp_ggml_allocr) {
714
- /*.talloc = */ talloc,
715
- /*.galloc = */ wsp_ggml_gallocr_new(),
716
- };
717
- return alloc;
718
- }
883
+ // reset buffers
884
+ for (int i = 0; i < galloc->n_buffers; i++) {
885
+ if (galloc->buffers[i] != NULL) {
886
+ wsp_ggml_backend_buffer_reset(galloc->buffers[i]);
887
+ }
888
+ }
719
889
 
720
- wsp_ggml_allocr_t wsp_ggml_allocr_new(void * data, size_t size, size_t alignment) {
721
- return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new(data, size, alignment));
722
- }
890
+ // allocate the graph tensors from the previous assignments
891
+ // leafs
892
+ for (int i = 0; i < graph->n_leafs; i++) {
893
+ struct wsp_ggml_tensor * leaf = graph->leafs[i];
894
+ struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
895
+ wsp_ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
896
+ }
897
+ // nodes
898
+ for (int i = 0; i < graph->n_nodes; i++) {
899
+ struct wsp_ggml_tensor * node = graph->nodes[i];
900
+ struct node_alloc * node_alloc = &galloc->node_allocs[i];
901
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
902
+ struct wsp_ggml_tensor * src = node->src[j];
903
+ if (src == NULL) {
904
+ continue;
905
+ }
906
+ wsp_ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
907
+ }
908
+ wsp_ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
909
+ }
723
910
 
724
- wsp_ggml_allocr_t wsp_ggml_allocr_new_measure(size_t alignment) {
725
- return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_measure(alignment));
911
+ return true;
726
912
  }
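// [editor's sketch — illustrative usage only, NOT part of this diff. It assumes
// the gallocr API from this package's ggml-alloc.h (wsp_ggml_gallocr_new,
// wsp_ggml_gallocr_free) and a backend handle; build_graph() is a hypothetical
// helper that rebuilds the compute graph for a given token count.]
static void example_eval_loop(wsp_ggml_backend_t backend) {
    // single-buffer allocator: wsp_ggml_gallocr_alloc_graph() above can then
    // re-reserve automatically when the graph topology or shapes change
    wsp_ggml_gallocr_t galloc =
        wsp_ggml_gallocr_new(wsp_ggml_backend_get_default_buffer_type(backend));

    for (int n_tokens = 1; n_tokens <= 512; n_tokens *= 2) {
        struct wsp_ggml_cgraph * gf = build_graph(n_tokens); // hypothetical
        if (!wsp_ggml_gallocr_alloc_graph(galloc, gf)) {
            break; // multi-buffer graphs would need an explicit reserve first
        }
        wsp_ggml_backend_graph_compute(backend, gf);
    }

    wsp_ggml_gallocr_free(galloc);
}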
 
-wsp_ggml_allocr_t wsp_ggml_allocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer) {
-    return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_from_buffer(buffer));
-}
+size_t wsp_ggml_gallocr_get_buffer_size(wsp_ggml_gallocr_t galloc, int buffer_id) {
+    WSP_GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);
 
-wsp_ggml_allocr_t wsp_ggml_allocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size) {
-    return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_from_backend(backend, size));
-}
+    if (galloc->buffers[buffer_id] == NULL) {
+        return 0;
+    }
 
-wsp_ggml_allocr_t wsp_ggml_allocr_new_measure_from_backend(struct wsp_ggml_backend * backend) {
-    return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_measure_from_backend(backend));
-}
+    for (int i = 0; i < buffer_id; i++) {
+        if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
+            // this buffer is the same as a previous one due to the same buffer type being used multiple times
+            // only return the buffer size the first time it appears to avoid double counting
+            return 0;
+        }
+    }
 
-struct wsp_ggml_backend_buffer * wsp_ggml_allocr_get_buffer(wsp_ggml_allocr_t alloc) {
-    return wsp_ggml_tallocr_get_buffer(alloc->talloc);
+    return wsp_ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
 }
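// [editor's sketch — illustrative usage only, NOT part of this diff.]
// Because duplicate buffers report 0 (see the loop above), summing over all
// buffer ids gives the total memory held by the allocator without double
// counting. `n_buffers` must match the count the gallocr was created with.
static size_t example_total_alloc_size(wsp_ggml_gallocr_t galloc, int n_buffers) {
    size_t total = 0;
    for (int i = 0; i < n_buffers; i++) {
        total += wsp_ggml_gallocr_get_buffer_size(galloc, i);
    }
    return total;
}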
 
-void wsp_ggml_allocr_set_parse_seq(wsp_ggml_allocr_t alloc, const int * list, int n) {
-    wsp_ggml_gallocr_set_parse_seq(alloc->galloc, list, n);
-}
+// utils
 
-void wsp_ggml_allocr_free(wsp_ggml_allocr_t alloc) {
-    if (alloc == NULL) {
-        return;
+static bool alloc_tensor_range(struct wsp_ggml_context * ctx,
+        struct wsp_ggml_tensor * first, struct wsp_ggml_tensor * last,
+        wsp_ggml_backend_buffer_type_t buft, size_t size,
+        wsp_ggml_backend_buffer_t ** buffers, size_t * n_buffers) {
+    wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_buft_alloc_buffer(buft, size);
+    if (buffer == NULL) {
+#ifndef NDEBUG
+        WSP_GGML_LOG_DEBUG("%s: failed to allocate %s buffer of size %zu\n", __func__, wsp_ggml_backend_buft_name(buft), size);
+#endif
+        for (size_t i = 0; i < *n_buffers; i++) {
+            wsp_ggml_backend_buffer_free((*buffers)[i]);
+        }
+        free(*buffers);
+        return false;
     }
 
-    wsp_ggml_gallocr_free(alloc->galloc);
-    wsp_ggml_tallocr_free(alloc->talloc);
-    free(alloc);
-}
-
-bool wsp_ggml_allocr_is_measure(wsp_ggml_allocr_t alloc) {
-    return wsp_ggml_tallocr_is_measure(alloc->talloc);
-}
-
-void wsp_ggml_allocr_reset(wsp_ggml_allocr_t alloc) {
-    wsp_ggml_tallocr_reset(alloc->talloc);
-}
+    struct wsp_ggml_tallocr tallocr = wsp_ggml_tallocr_new(buffer);
 
-void wsp_ggml_allocr_alloc(wsp_ggml_allocr_t alloc, struct wsp_ggml_tensor * tensor) {
-    wsp_ggml_tallocr_alloc(alloc->talloc, tensor);
-}
+    for (struct wsp_ggml_tensor * t = first; t != last; t = wsp_ggml_get_next_tensor(ctx, t)) {
+        if (t->data == NULL) {
+            if (t->view_src == NULL) {
+                wsp_ggml_tallocr_alloc(&tallocr, t);
+            } else if (t->buffer == NULL) {
+                wsp_ggml_backend_view_init(t);
+            }
+        } else {
+            if (t->view_src != NULL && t->buffer == NULL) {
+                // view of a pre-allocated tensor
+                wsp_ggml_backend_view_init(t);
+            }
+        }
+    }
 
-size_t wsp_ggml_allocr_max_size(wsp_ggml_allocr_t alloc) {
-    return wsp_ggml_tallocr_max_size(alloc->talloc);
-}
+    *buffers = realloc(*buffers, sizeof(wsp_ggml_backend_buffer_t) * (*n_buffers + 1));
+    (*buffers)[(*n_buffers)++] = buffer;
 
-size_t wsp_ggml_allocr_alloc_graph(wsp_ggml_allocr_t alloc, struct wsp_ggml_cgraph * graph) {
-    return wsp_ggml_gallocr_alloc_graph(alloc->galloc, alloc->talloc, graph);
+    return true;
 }
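// [editor's note — worked example, not part of the package diff]
// alloc_tensor_range() allocates the half-open tensor range [first, last) into
// one backend buffer of the given size. The per-tensor sizes it is fed are
// padded to the buffer alignment in the caller below; assuming the usual
// WSP_GGML_PAD(x, n) == ((x + n - 1) & ~(n - 1)) from ggml.h:
//     WSP_GGML_PAD(1000, 32) == 1024
//     WSP_GGML_PAD(1024, 32) == 1024
// So with a hypothetical max buffer size of 1024 bytes, padded tensors of
// 512, 512 and 256 bytes land in two buffers: [512, 512] and [256], because
// 512 + 512 + 256 exceeds the maximum.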
 
-// utils
 wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, wsp_ggml_backend_buffer_type_t buft) {
     WSP_GGML_ASSERT(wsp_ggml_get_no_alloc(ctx) == true);
 
     size_t alignment = wsp_ggml_backend_buft_get_alignment(buft);
+    size_t max_size = wsp_ggml_backend_buft_get_max_size(buft);
+
+    wsp_ggml_backend_buffer_t * buffers = NULL;
+    size_t n_buffers = 0;
 
-    size_t nbytes = 0;
-    for (struct wsp_ggml_tensor * t = wsp_ggml_get_first_tensor(ctx); t != NULL; t = wsp_ggml_get_next_tensor(ctx, t)) {
+    size_t cur_buf_size = 0;
+    struct wsp_ggml_tensor * first = wsp_ggml_get_first_tensor(ctx);
+    for (struct wsp_ggml_tensor * t = first; t != NULL; t = wsp_ggml_get_next_tensor(ctx, t)) {
+        size_t this_size = 0;
         if (t->data == NULL && t->view_src == NULL) {
-            nbytes += WSP_GGML_PAD(wsp_ggml_backend_buft_get_alloc_size(buft, t), alignment);
+            this_size = WSP_GGML_PAD(wsp_ggml_backend_buft_get_alloc_size(buft, t), alignment);
+        }
+
+        if (this_size > max_size) {
+            WSP_GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
+                    __func__, t->name,
+                    wsp_ggml_backend_buft_name(buft),
+                    this_size, max_size);
+            for (size_t i = 0; i < n_buffers; i++) {
+                wsp_ggml_backend_buffer_free(buffers[i]);
+            }
+            free(buffers);
+            return NULL;
+        }
+
+        if ((cur_buf_size + this_size) > max_size) {
+            // allocate tensors in the current buffer
+            if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
+                return NULL;
+            }
+            first = t;
+            cur_buf_size = this_size;
+        } else {
+            cur_buf_size += this_size;
         }
     }
 
-    if (nbytes == 0) {
-        // all the tensors in the context are already allocated
-#ifndef NDEBUG
-        fprintf(stderr, "%s: all tensors in the context are already allocated\n", __func__);
-#endif
-        return NULL;
+    // allocate remaining tensors
+    if (cur_buf_size > 0) {
+        if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
+            return NULL;
+        }
     }
 
-    wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_buft_alloc_buffer(buft, nbytes);
-    if (buffer == NULL) {
-        // failed to allocate buffer
+    if (n_buffers == 0) {
 #ifndef NDEBUG
-        fprintf(stderr, "%s: failed to allocate buffer\n", __func__);
+        WSP_GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
 #endif
         return NULL;
     }
 
-    wsp_ggml_tallocr_t tallocr = wsp_ggml_tallocr_new_from_buffer(buffer);
-
-    for (struct wsp_ggml_tensor * t = wsp_ggml_get_first_tensor(ctx); t != NULL; t = wsp_ggml_get_next_tensor(ctx, t)) {
-        if (t->data == NULL) {
-            if (t->view_src == NULL) {
-                wsp_ggml_tallocr_alloc(tallocr, t);
-            } else {
-                wsp_ggml_backend_view_init(buffer, t);
-            }
-        } else {
-            if (t->view_src != NULL) {
-                // view of a pre-allocated tensor
-                wsp_ggml_backend_view_init(buffer, t);
-            }
-        }
+    wsp_ggml_backend_buffer_t buffer;
+    if (n_buffers == 1) {
+        buffer = buffers[0];
+    } else {
+        buffer = wsp_ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
     }
-
-    wsp_ggml_tallocr_free(tallocr);
-
+    free(buffers);
     return buffer;
 }
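// [editor's sketch — illustrative usage only, NOT part of this diff. It shows
// the intended calling pattern for the function above: a no_alloc context
// holding only tensor metadata, allocated here in one shot. The CPU buffer
// type stands in for any wsp_ggml_backend_buffer_type_t; tensor sizes are
// made up.]
static wsp_ggml_backend_buffer_t example_alloc_ctx_tensors(void) {
    struct wsp_ggml_init_params params = {
        /*.mem_size   =*/ 2 * wsp_ggml_tensor_overhead(), // metadata only, no tensor data
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true, // required by the WSP_GGML_ASSERT above
    };
    struct wsp_ggml_context * ctx = wsp_ggml_init(params);

    wsp_ggml_new_tensor_1d(ctx, WSP_GGML_TYPE_F32, 1024);
    wsp_ggml_new_tensor_1d(ctx, WSP_GGML_TYPE_F32, 4096);

    // returns a single buffer, or a multi-buffer when the type's max_size
    // forces the tensors to be split across several allocations
    return wsp_ggml_backend_alloc_ctx_tensors_from_buft(
            ctx, wsp_ggml_backend_cpu_buffer_type());
}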