whisper.rn 0.4.0-rc.7 → 0.4.0-rc.9

Files changed (54)
  1. package/android/src/main/CMakeLists.txt +2 -1
  2. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -12
  3. package/android/src/main/java/com/rnwhisper/RNWhisper.java +75 -34
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +20 -3
  5. package/android/src/main/jni.cpp +29 -1
  6. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  7. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  8. package/cpp/coreml/whisper-encoder.mm +1 -1
  9. package/cpp/ggml-aarch64.c +3209 -0
  10. package/cpp/ggml-aarch64.h +39 -0
  11. package/cpp/ggml-alloc.c +732 -494
  12. package/cpp/ggml-alloc.h +47 -63
  13. package/cpp/ggml-backend-impl.h +162 -47
  14. package/cpp/ggml-backend.cpp +2635 -0
  15. package/cpp/ggml-backend.h +216 -71
  16. package/cpp/ggml-common.h +1853 -0
  17. package/cpp/ggml-cpu-impl.h +614 -0
  18. package/cpp/ggml-impl.h +144 -178
  19. package/cpp/ggml-metal.h +14 -60
  20. package/cpp/ggml-metal.m +3437 -2097
  21. package/cpp/ggml-quants.c +12559 -4189
  22. package/cpp/ggml-quants.h +135 -212
  23. package/cpp/ggml-whisper.metallib +0 -0
  24. package/cpp/ggml.c +9029 -5219
  25. package/cpp/ggml.h +673 -338
  26. package/cpp/rn-whisper.cpp +91 -0
  27. package/cpp/rn-whisper.h +2 -0
  28. package/cpp/whisper.cpp +1476 -675
  29. package/cpp/whisper.h +84 -28
  30. package/ios/RNWhisper.mm +124 -37
  31. package/ios/RNWhisperAudioUtils.h +1 -0
  32. package/ios/RNWhisperAudioUtils.m +20 -13
  33. package/ios/RNWhisperContext.h +3 -2
  34. package/ios/RNWhisperContext.mm +41 -8
  35. package/jest/mock.js +9 -1
  36. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  37. package/lib/commonjs/index.js +48 -19
  38. package/lib/commonjs/index.js.map +1 -1
  39. package/lib/commonjs/version.json +1 -1
  40. package/lib/module/NativeRNWhisper.js.map +1 -1
  41. package/lib/module/index.js +48 -19
  42. package/lib/module/index.js.map +1 -1
  43. package/lib/module/version.json +1 -1
  44. package/lib/typescript/NativeRNWhisper.d.ts +6 -3
  45. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  46. package/lib/typescript/index.d.ts +25 -3
  47. package/lib/typescript/index.d.ts.map +1 -1
  48. package/package.json +6 -5
  49. package/src/NativeRNWhisper.ts +12 -3
  50. package/src/index.ts +63 -24
  51. package/src/version.json +1 -1
  52. package/whisper-rn.podspec +9 -2
  53. package/cpp/ggml-backend.c +0 -1357
  54. package/cpp/ggml-metal-whisper.metal +0 -4908
package/cpp/ggml-alloc.c CHANGED
@@ -14,76 +14,138 @@

  //#define WSP_GGML_ALLOCATOR_DEBUG

- //#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__)
+ //#define AT_PRINTF(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
  #define AT_PRINTF(...)

- // TODO: WSP_GGML_PAD ?
+
+ static bool wsp_ggml_is_view(const struct wsp_ggml_tensor * t) {
+ return t->view_src != NULL;
+ }
+
+ static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
+ if (a->type != b->type) {
+ return false;
+ }
+ for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
+ if (a->ne[i] != b->ne[i]) {
+ return false;
+ }
+ if (a->nb[i] != b->nb[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
+ switch (op) {
+ case WSP_GGML_OP_SCALE:
+ case WSP_GGML_OP_DIAG_MASK_ZERO:
+ case WSP_GGML_OP_DIAG_MASK_INF:
+ case WSP_GGML_OP_ADD:
+ case WSP_GGML_OP_ADD1:
+ case WSP_GGML_OP_SUB:
+ case WSP_GGML_OP_MUL:
+ case WSP_GGML_OP_DIV:
+ case WSP_GGML_OP_SQR:
+ case WSP_GGML_OP_SQRT:
+ case WSP_GGML_OP_LOG:
+ case WSP_GGML_OP_UNARY:
+ case WSP_GGML_OP_ROPE:
+ case WSP_GGML_OP_RMS_NORM:
+ case WSP_GGML_OP_SOFT_MAX:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
  static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
  assert(alignment && !(alignment & (alignment - 1))); // power of 2
  size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
  return offset + align;
  }

+ // tallocr
+
+ struct wsp_ggml_tallocr wsp_ggml_tallocr_new(wsp_ggml_backend_buffer_t buffer) {
+ void * base = wsp_ggml_backend_buffer_get_base(buffer);
+ size_t align = wsp_ggml_backend_buffer_get_alignment(buffer);
+
+ assert(align && !(align & (align - 1))); // power of 2
+
+ struct wsp_ggml_tallocr talloc = (struct wsp_ggml_tallocr) {
+ /*.buffer = */ buffer,
+ /*.base = */ base,
+ /*.alignment = */ align,
+ /*.offset = */ aligned_offset(base, 0, align),
+ };
+ return talloc;
+ }
+
+ void wsp_ggml_tallocr_alloc(struct wsp_ggml_tallocr * talloc, struct wsp_ggml_tensor * tensor) {
+ size_t size = wsp_ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
+ size = WSP_GGML_PAD(size, talloc->alignment);
+
+ if (talloc->offset + size > wsp_ggml_backend_buffer_get_size(talloc->buffer)) {
+ WSP_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
+ __func__, tensor->name, size, wsp_ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
+ WSP_GGML_ABORT("not enough space in the buffer");
+ }
+
+ void * addr = (char *)wsp_ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
+ talloc->offset += size;
+
+ assert(((uintptr_t)addr % talloc->alignment) == 0);
+
+ wsp_ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
+ }
+
+ // dynamic tensor allocator
+
  struct free_block {
- void * addr;
+ size_t offset;
  size_t size;
  };

- struct wsp_ggml_tallocr {
- struct wsp_ggml_backend_buffer * buffer;
- bool buffer_owned;
- void * base;
+ struct wsp_ggml_dyn_tallocr {
  size_t alignment;
-
  int n_free_blocks;
  struct free_block free_blocks[MAX_FREE_BLOCKS];
-
  size_t max_size;

- bool measure;
-
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- struct wsp_ggml_tensor * allocated_tensors[1024];
+ struct {
+ const struct wsp_ggml_tensor * tensor;
+ size_t offset;
+ } allocated_tensors[1024];
  #endif
  };

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- static void add_allocated_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
+ static void add_allocated_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, const struct wsp_ggml_tensor * tensor) {
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i] == NULL) {
- alloc->allocated_tensors[i] = tensor;
+ if (alloc->allocated_tensors[i].tensor == NULL) {
+ alloc->allocated_tensors[i].tensor = tensor;
+ alloc->allocated_tensors[i].offset = offset;
  return;
  }
  }
- WSP_GGML_ASSERT(!"out of allocated_tensors");
+ WSP_GGML_ABORT("out of allocated_tensors");
  }
- static void remove_allocated_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
+ static void remove_allocated_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, const struct wsp_ggml_tensor * tensor) {
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i] == tensor ||
- (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
- alloc->allocated_tensors[i] = NULL;
+ if (alloc->allocated_tensors[i].offset == offset) {
+ alloc->allocated_tensors[i].tensor = NULL;
  return;
  }
  }
- printf("tried to free tensor %s not found\n", tensor->name);
- WSP_GGML_ASSERT(!"tensor not found");
+ WSP_GGML_ABORT("tried to free tensor %s not found\n", tensor->name);
  }
  #endif

- // check if a tensor is allocated by this buffer
- static bool wsp_ggml_tallocr_is_own(wsp_ggml_tallocr_t alloc, const struct wsp_ggml_tensor * tensor) {
- return tensor->buffer == alloc->buffer;
- }
-
- static bool wsp_ggml_is_view(struct wsp_ggml_tensor * t) {
- return t->view_src != NULL;
- }
-
- void wsp_ggml_tallocr_alloc(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
- WSP_GGML_ASSERT(!wsp_ggml_is_view(tensor)); // views generally get data pointer from one of their sources
- WSP_GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
-
- size_t size = wsp_ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
+ static size_t wsp_ggml_dyn_tallocr_alloc(struct wsp_ggml_dyn_tallocr * alloc, size_t size, const struct wsp_ggml_tensor * tensor) {
  size = aligned_offset(NULL, size, alloc->alignment);

  AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
@@ -102,8 +164,6 @@ void wsp_ggml_tallocr_alloc(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * t
  }
  }

- AT_PRINTF("block %d\n", best_fit_block);
-
  if (best_fit_block == -1) {
  // the last block is our last resort
  struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
@@ -111,15 +171,16 @@ void wsp_ggml_tallocr_alloc(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * t
  if (block->size >= size) {
  best_fit_block = alloc->n_free_blocks - 1;
  } else {
- fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
+ // this should never happen
+ WSP_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
  __func__, size, max_avail);
- WSP_GGML_ASSERT(!"not enough space in the buffer");
- return;
+ WSP_GGML_ABORT("not enough space in the buffer");
  }
  }
+
  struct free_block * block = &alloc->free_blocks[best_fit_block];
- void * addr = block->addr;
- block->addr = (char*)block->addr + size;
+ size_t offset = block->offset;
+ block->offset = offset + size;
  block->size -= size;
  if (block->size == 0) {
  // remove block if empty
@@ -129,57 +190,63 @@ void wsp_ggml_tallocr_alloc(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * t
  }
  }

- tensor->data = addr;
- tensor->buffer = alloc->buffer;
- if (!alloc->measure) {
- wsp_ggml_backend_buffer_init_tensor(alloc->buffer, tensor);
- }
+ AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset);

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- add_allocated_tensor(alloc, tensor);
- size_t cur_max = (char*)addr - (char*)alloc->base + size;
+ add_allocated_tensor(alloc, offset, tensor);
+ size_t cur_max = offset + size;
  if (cur_max > alloc->max_size) {
- printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
+ // sort allocated_tensors by offset
+ for (int i = 0; i < 1024; i++) {
+ for (int j = i + 1; j < 1024; j++) {
+ if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) {
+ const struct wsp_ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
+ size_t tmp_offset = alloc->allocated_tensors[i].offset;
+ alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
+ alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset;
+ alloc->allocated_tensors[j].tensor = tmp_tensor;
+ alloc->allocated_tensors[j].offset = tmp_offset;
+ }
+ }
+ }
+ WSP_GGML_LOG_DEBUG("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i]) {
- printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, wsp_ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
+ if (alloc->allocated_tensors[i].tensor) {
+ WSP_GGML_LOG_DEBUG("%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
+ alloc->allocated_tensors[i].offset,
+ alloc->allocated_tensors[i].offset + wsp_ggml_nbytes(alloc->allocated_tensors[i].tensor),
+ wsp_ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
  }
  }
- printf("\n");
+ WSP_GGML_LOG_DEBUG("\n");
  }
  #endif

- alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->base + size);
- }
+ alloc->max_size = MAX(alloc->max_size, offset + size);

- // this is a very naive implementation, but for our case the number of free blocks should be very small
- static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
- if (wsp_ggml_tallocr_is_own(alloc, tensor) == false) {
- // the tensor was not allocated in this buffer
- // this can happen because the graph allocator will try to free weights and other tensors from different buffers
- // the easiest way to deal with this is just to ignore it
- // AT_PRINTF("ignoring %s (their buffer: %p, our buffer: %p)\n", tensor->name, (void *)tensor->buffer, (void *)alloc->buffer);
- return;
- }
+ return offset;

- void * ptr = tensor->data;
+ WSP_GGML_UNUSED(tensor);
+ }

- size_t size = wsp_ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
+ // this is a very naive implementation, but for our case the number of free blocks should be very small
+ static void wsp_ggml_dyn_tallocr_free_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct wsp_ggml_tensor * tensor) {
  size = aligned_offset(NULL, size, alloc->alignment);
- AT_PRINTF("%s: freeing %s at %p (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, ptr, size, alloc->n_free_blocks);
+
+ AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks);

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- remove_allocated_tensor(alloc, tensor);
+ remove_allocated_tensor(alloc, offset, tensor);
  #endif

  // see if we can merge with an existing block
  for (int i = 0; i < alloc->n_free_blocks; i++) {
  struct free_block * block = &alloc->free_blocks[i];
  // check if ptr is at the end of the block
- if ((char*)block->addr + block->size == ptr) {
+ if (block->offset + block->size == offset) {
  block->size += size;
  // check if we can merge with the next block
- if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
+ if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) {
  block->size += alloc->free_blocks[i+1].size;
  alloc->n_free_blocks--;
  for (int j = i+1; j < alloc->n_free_blocks; j++) {
@@ -189,11 +256,11 @@ static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_gg
  return;
  }
  // check if ptr is at the beginning of the block
- if ((char*)ptr + size == block->addr) {
- block->addr = ptr;
+ if (offset + size == block->offset) {
+ block->offset = offset;
  block->size += size;
  // check if we can merge with the previous block
- if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
+ if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) {
  alloc->free_blocks[i-1].size += block->size;
  alloc->n_free_blocks--;
  for (int j = i; j < alloc->n_free_blocks; j++) {
@@ -207,7 +274,7 @@ static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_gg
  WSP_GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
  // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
  int insert_pos = 0;
- while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
+ while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) {
  insert_pos++;
  }
  // shift all blocks from insert_pos onward to make room for the new block
@@ -215,585 +282,756 @@ static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_gg
215
282
  alloc->free_blocks[i] = alloc->free_blocks[i-1];
216
283
  }
217
284
  // insert the new block
218
- alloc->free_blocks[insert_pos].addr = ptr;
285
+ alloc->free_blocks[insert_pos].offset = offset;
219
286
  alloc->free_blocks[insert_pos].size = size;
220
287
  alloc->n_free_blocks++;
288
+
289
+ WSP_GGML_UNUSED(tensor);
221
290
  }
222
291
 
223
- void wsp_ggml_tallocr_reset(wsp_ggml_tallocr_t alloc) {
292
+ static void wsp_ggml_dyn_tallocr_reset(struct wsp_ggml_dyn_tallocr * alloc) {
224
293
  alloc->n_free_blocks = 1;
225
- size_t align_offset = aligned_offset(alloc->base, 0, alloc->alignment);
226
- alloc->free_blocks[0].addr = (char *)alloc->base + align_offset;
294
+ alloc->free_blocks[0].offset = 0;
295
+ alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
296
+ alloc->max_size = 0;
227
297
 
228
- if (alloc->measure) {
229
- alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
230
- } else {
231
- alloc->free_blocks[0].size = wsp_ggml_backend_buffer_get_size(alloc->buffer) - align_offset;
298
+ #ifdef WSP_GGML_ALLOCATOR_DEBUG
299
+ for (int i = 0; i < 1024; i++) {
300
+ alloc->allocated_tensors[i].tensor = NULL;
232
301
  }
302
+ #endif
233
303
  }
234
304
 
235
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new(void * data, size_t size, size_t alignment) {
236
- struct wsp_ggml_backend_buffer * buffer = wsp_ggml_backend_cpu_buffer_from_ptr(data, size);
237
-
238
- wsp_ggml_tallocr_t alloc = (wsp_ggml_tallocr_t)malloc(sizeof(struct wsp_ggml_tallocr));
305
+ static struct wsp_ggml_dyn_tallocr * wsp_ggml_dyn_tallocr_new(size_t alignment) {
306
+ struct wsp_ggml_dyn_tallocr * alloc = (struct wsp_ggml_dyn_tallocr *)malloc(sizeof(struct wsp_ggml_dyn_tallocr));
239
307
 
240
- *alloc = (struct wsp_ggml_tallocr) {
241
- /*.buffer = */ buffer,
242
- /*.buffer_owned = */ true,
243
- /*.base = */ wsp_ggml_backend_buffer_get_base(buffer),
308
+ *alloc = (struct wsp_ggml_dyn_tallocr) {
244
309
  /*.alignment = */ alignment,
245
310
  /*.n_free_blocks = */ 0,
246
311
  /*.free_blocks = */ {{0}},
247
312
  /*.max_size = */ 0,
248
- /*.measure = */ false,
249
313
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
250
- /*.allocated_tensors = */ {0},
314
+ /*.allocated_tensors = */ {{0}},
251
315
  #endif
252
316
  };
253
317
 
254
- wsp_ggml_tallocr_reset(alloc);
318
+ wsp_ggml_dyn_tallocr_reset(alloc);
255
319
 
256
320
  return alloc;
257
321
  }
258
322
 
259
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure(size_t alignment) {
260
- wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new((void *)0x1000, SIZE_MAX/2, alignment);
261
- alloc->measure = true;
323
+ static void wsp_ggml_dyn_tallocr_free(struct wsp_ggml_dyn_tallocr * alloc) {
324
+ free(alloc);
325
+ }
262
326
 
263
- return alloc;
327
+ static size_t wsp_ggml_dyn_tallocr_max_size(struct wsp_ggml_dyn_tallocr * alloc) {
328
+ return alloc->max_size;
264
329
  }
265
330
 
266
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure_from_backend(struct wsp_ggml_backend * backend) {
267
- // create a backend buffer to get the correct tensor allocation sizes
268
- wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_alloc_buffer(backend, 1);
269
331
 
270
- // TODO: move alloc initialization to a common wsp_ggml_tallocr_new_impl function
271
- wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new_from_buffer(buffer);
272
- alloc->buffer_owned = true;
273
- alloc->measure = true;
274
- wsp_ggml_tallocr_reset(alloc);
275
- return alloc;
276
- }
332
+ /////////////////////////////////////
277
333
 
278
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size) {
279
- wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_alloc_buffer(backend, size);
280
- wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new_from_buffer(buffer);
281
- alloc->buffer_owned = true;
282
- return alloc;
283
- }
334
+ // graph allocator
284
335
 
285
- wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer) {
286
- wsp_ggml_tallocr_t alloc = (wsp_ggml_tallocr_t)malloc(sizeof(struct wsp_ggml_tallocr));
336
+ struct hash_node {
337
+ int n_children;
338
+ int n_views;
339
+ int buffer_id;
340
+ size_t offset; // offset within the buffer
341
+ bool allocated;
342
+ };
287
343
 
288
- *alloc = (struct wsp_ggml_tallocr) {
289
- /*.buffer = */ buffer,
290
- /*.buffer_owned = */ false,
291
- /*.base = */ wsp_ggml_backend_buffer_get_base(buffer),
292
- /*.alignment = */ wsp_ggml_backend_buffer_get_alignment(buffer),
293
- /*.n_free_blocks = */ 0,
294
- /*.free_blocks = */ {{0}},
295
- /*.max_size = */ 0,
296
- /*.measure = */ false,
297
- #ifdef WSP_GGML_ALLOCATOR_DEBUG
298
- /*.allocated_tensors = */ {0},
299
- #endif
300
- };
344
+ struct tensor_alloc {
345
+ int buffer_id;
346
+ size_t offset;
347
+ size_t size_max; // 0 = pre-allocated, unused, or view
348
+ };
301
349
 
302
- wsp_ggml_tallocr_reset(alloc);
350
+ struct leaf_alloc {
351
+ struct tensor_alloc leaf;
352
+ };
303
353
 
304
- return alloc;
305
- }
354
+ struct node_alloc {
355
+ struct tensor_alloc dst;
356
+ struct tensor_alloc src[WSP_GGML_MAX_SRC];
357
+ };
306
358
 
307
- struct wsp_ggml_backend_buffer * wsp_ggml_tallocr_get_buffer(wsp_ggml_tallocr_t alloc) {
308
- return alloc->buffer;
309
- }
359
+ struct wsp_ggml_gallocr {
360
+ wsp_ggml_backend_buffer_type_t * bufts; // [n_buffers]
361
+ wsp_ggml_backend_buffer_t * buffers; // [n_buffers]
362
+ struct wsp_ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
363
+ int n_buffers;
310
364
 
311
- void wsp_ggml_tallocr_free(wsp_ggml_tallocr_t alloc) {
312
- if (alloc == NULL) {
313
- return;
314
- }
365
+ struct wsp_ggml_hash_set hash_set;
366
+ struct hash_node * hash_values; // [hash_set.size]
315
367
 
316
- if (alloc->buffer_owned) {
317
- wsp_ggml_backend_buffer_free(alloc->buffer);
318
- }
319
- free(alloc);
320
- }
368
+ struct node_alloc * node_allocs; // [n_nodes]
369
+ int n_nodes;
321
370
 
322
- bool wsp_ggml_tallocr_is_measure(wsp_ggml_tallocr_t alloc) {
323
- return alloc->measure;
324
- }
371
+ struct leaf_alloc * leaf_allocs; // [n_leafs]
372
+ int n_leafs;
373
+ };
325
374
 
326
- size_t wsp_ggml_tallocr_max_size(wsp_ggml_tallocr_t alloc) {
327
- return alloc->max_size;
328
- }
375
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new_n(wsp_ggml_backend_buffer_type_t * bufts, int n_bufs) {
376
+ wsp_ggml_gallocr_t galloc = (wsp_ggml_gallocr_t)calloc(1, sizeof(struct wsp_ggml_gallocr));
377
+ WSP_GGML_ASSERT(galloc != NULL);
329
378
 
330
- // graph allocator
379
+ galloc->bufts = calloc(n_bufs, sizeof(wsp_ggml_backend_buffer_type_t));
380
+ WSP_GGML_ASSERT(galloc->bufts != NULL);
331
381
 
332
- struct hash_node {
333
- int n_children;
334
- int n_views;
335
- };
382
+ galloc->buffers = calloc(n_bufs, sizeof(wsp_ggml_backend_buffer_t));
383
+ WSP_GGML_ASSERT(galloc->buffers != NULL);
336
384
 
337
- struct wsp_ggml_gallocr {
338
- wsp_ggml_tallocr_t talloc;
339
- struct wsp_ggml_hash_set hash_set;
340
- struct hash_node * hash_values;
341
- size_t hash_values_size;
342
- wsp_ggml_tallocr_t * hash_allocs;
343
- int * parse_seq;
344
- int parse_seq_len;
345
- };
385
+ galloc->buf_tallocs = calloc(n_bufs, sizeof(struct wsp_ggml_dyn_tallocr *));
386
+ WSP_GGML_ASSERT(galloc->buf_tallocs != NULL);
346
387
 
347
- wsp_ggml_gallocr_t wsp_ggml_gallocr_new(void) {
348
- wsp_ggml_gallocr_t galloc = (wsp_ggml_gallocr_t)malloc(sizeof(struct wsp_ggml_gallocr));
349
-
350
- *galloc = (struct wsp_ggml_gallocr) {
351
- /*.talloc = */ NULL,
352
- /*.hash_set = */ {0},
353
- /*.hash_values = */ NULL,
354
- /*.hash_values_size = */ 0,
355
- /*.hash_allocs = */ NULL,
356
- /*.parse_seq = */ NULL,
357
- /*.parse_seq_len = */ 0,
358
- };
388
+ for (int i = 0; i < n_bufs; i++) {
389
+ galloc->bufts[i] = bufts[i];
390
+ galloc->buffers[i] = NULL;
391
+
392
+ // check if the same buffer type is used multiple times and reuse the same allocator
393
+ for (int j = 0; j < i; j++) {
394
+ if (bufts[i] == bufts[j]) {
395
+ galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
396
+ break;
397
+ }
398
+ }
399
+
400
+ if (galloc->buf_tallocs[i] == NULL) {
401
+ size_t alignment = wsp_ggml_backend_buft_get_alignment(bufts[i]);
402
+ galloc->buf_tallocs[i] = wsp_ggml_dyn_tallocr_new(alignment);
403
+ }
404
+ }
405
+ galloc->n_buffers = n_bufs;
359
406
 
360
407
  return galloc;
361
408
  }
362
409
 
410
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new(wsp_ggml_backend_buffer_type_t buft) {
411
+ return wsp_ggml_gallocr_new_n(&buft, 1);
412
+ }
413
+
363
414
  void wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc) {
364
415
  if (galloc == NULL) {
365
416
  return;
366
417
  }
367
418
 
368
- if (galloc->hash_set.keys != NULL) {
369
- free(galloc->hash_set.keys);
370
- }
371
- if (galloc->hash_values != NULL) {
372
- free(galloc->hash_values);
373
- }
374
- if (galloc->hash_allocs != NULL) {
375
- free(galloc->hash_allocs);
376
- }
377
- if (galloc->parse_seq != NULL) {
378
- free(galloc->parse_seq);
419
+ for (int i = 0; i < galloc->n_buffers; i++) {
420
+ if (galloc->buffers != NULL) {
421
+ // skip if already freed
422
+ bool freed = false;
423
+ for (int j = 0; j < i; j++) {
424
+ if (galloc->buffers[j] == galloc->buffers[i]) {
425
+ freed = true;
426
+ break;
427
+ }
428
+ }
429
+ if (!freed) {
430
+ wsp_ggml_backend_buffer_free(galloc->buffers[i]);
431
+ }
432
+ }
433
+ if (galloc->buf_tallocs != NULL) {
434
+ // skip if already freed
435
+ bool freed = false;
436
+ for (int j = 0; j < i; j++) {
437
+ if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
438
+ freed = true;
439
+ break;
440
+ }
441
+ }
442
+ if (!freed) {
443
+ wsp_ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
444
+ }
445
+ }
379
446
  }
447
+
448
+ wsp_ggml_hash_set_free(&galloc->hash_set);
449
+ free(galloc->hash_values);
450
+ free(galloc->bufts);
451
+ free(galloc->buffers);
452
+ free(galloc->buf_tallocs);
453
+ free(galloc->node_allocs);
454
+ free(galloc->leaf_allocs);
380
455
  free(galloc);
381
456
  }
382
457
 
383
- void wsp_ggml_gallocr_set_parse_seq(wsp_ggml_gallocr_t galloc, const int * list, int n) {
384
- free(galloc->parse_seq);
385
- galloc->parse_seq = malloc(sizeof(int) * n);
458
+ typedef struct wsp_ggml_gallocr * wsp_ggml_gallocr_t;
386
459
 
387
- for (int i = 0; i < n; i++) {
388
- galloc->parse_seq[i] = list[i];
389
- }
390
- galloc->parse_seq_len = n;
391
- }
392
-
393
- static struct hash_node * hash_get(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
394
- size_t i = wsp_ggml_hash_find_or_insert(galloc->hash_set, t);
460
+ static struct hash_node * wsp_ggml_gallocr_hash_get(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
461
+ size_t i = wsp_ggml_hash_find_or_insert(&galloc->hash_set, t);
395
462
  return &galloc->hash_values[i];
396
463
  }
397
464
 
398
- static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
399
- if (a->type != b->type) {
400
- return false;
401
- }
402
- for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
403
- if (a->ne[i] != b->ne[i]) {
404
- return false;
405
- }
406
- if (a->nb[i] != b->nb[i]) {
407
- return false;
408
- }
409
- }
410
- return true;
465
+ static bool wsp_ggml_gallocr_is_own(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
466
+ return wsp_ggml_gallocr_hash_get(galloc, t)->allocated;
411
467
  }
412
468
 
413
- static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
414
- switch (op) {
415
- case WSP_GGML_OP_SCALE:
416
- case WSP_GGML_OP_DIAG_MASK_ZERO:
417
- case WSP_GGML_OP_DIAG_MASK_INF:
418
- case WSP_GGML_OP_ADD:
419
- case WSP_GGML_OP_ADD1:
420
- case WSP_GGML_OP_SUB:
421
- case WSP_GGML_OP_MUL:
422
- case WSP_GGML_OP_DIV:
423
- case WSP_GGML_OP_SQR:
424
- case WSP_GGML_OP_SQRT:
425
- case WSP_GGML_OP_LOG:
426
- case WSP_GGML_OP_UNARY:
427
- case WSP_GGML_OP_ROPE:
428
- case WSP_GGML_OP_RMS_NORM:
429
- case WSP_GGML_OP_SOFT_MAX:
430
- return true;
431
-
432
- default:
433
- return false;
434
- }
469
+ static void wsp_ggml_gallocr_set_node_offset(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, int buffer_id, size_t offset) {
470
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
471
+ hn->buffer_id = buffer_id;
472
+ hn->offset = offset;
473
+ hn->allocated = true;
435
474
  }
436
475
 
437
- static wsp_ggml_tallocr_t node_tallocr(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
438
- if (galloc->talloc != NULL) {
439
- return galloc->talloc;
440
- }
441
-
442
- return galloc->hash_allocs[wsp_ggml_hash_find_or_insert(galloc->hash_set, node)];
476
+ static bool wsp_ggml_gallocr_is_allocated(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
477
+ return t->data != NULL || wsp_ggml_gallocr_hash_get(galloc, t)->allocated;
443
478
  }
444
479
 
445
- static void init_view(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * view, bool update_backend) {
446
- wsp_ggml_tallocr_t alloc = node_tallocr(galloc, view);
480
+ static void wsp_ggml_gallocr_allocate_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, int buffer_id) {
481
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
447
482
 
448
- WSP_GGML_ASSERT(view->view_src != NULL && view->view_src->data != NULL);
449
- if (update_backend) {
450
- view->backend = view->view_src->backend;
451
- }
452
- view->buffer = view->view_src->buffer;
453
- view->data = (char *)view->view_src->data + view->view_offs;
454
-
455
- // FIXME: the view should be initialized by the owning buffer, but currently this breaks the CUDA backend
456
- // due to the wsp_ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras
457
- assert(wsp_ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->buft == alloc->buffer->buft);
483
+ if (!wsp_ggml_gallocr_is_allocated(galloc, node) && !wsp_ggml_is_view(node)) {
484
+ hn->allocated = true;
485
+ assert(hn->offset == 0);
458
486
 
459
- if (!alloc->measure) {
460
- wsp_ggml_backend_buffer_init_tensor(alloc->buffer, view);
461
- }
462
- }
487
+ // try to reuse a parent's buffer (inplace)
488
+ if (wsp_ggml_op_can_inplace(node->op)) {
489
+ for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
490
+ struct wsp_ggml_tensor * parent = node->src[i];
491
+ if (parent == NULL) {
492
+ continue;
493
+ }
463
494
 
464
- static void allocate_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
465
- wsp_ggml_tallocr_t alloc = node_tallocr(galloc, node);
495
+ // if the node's data is external, then we cannot re-use it
496
+ if (!wsp_ggml_gallocr_is_own(galloc, parent)) {
497
+ AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
498
+ continue;
499
+ }
466
500
 
467
- if (node->data == NULL) {
468
- if (wsp_ggml_is_view(node)) {
469
- init_view(galloc, node, true);
470
- } else {
471
- // see if we can reuse a parent's buffer (inplace)
472
- if (wsp_ggml_op_can_inplace(node->op)) {
473
- for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
474
- struct wsp_ggml_tensor * parent = node->src[i];
475
- if (parent == NULL) {
476
- break;
477
- }
501
+ // outputs cannot be reused
502
+ if (parent->flags & WSP_GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & WSP_GGML_TENSOR_FLAG_OUTPUT)) {
503
+ AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
504
+ continue;
505
+ }
478
506
 
479
- // if the node's data is external, then we cannot re-use it
480
- if (wsp_ggml_tallocr_is_own(alloc, parent) == false) {
481
- AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
482
- continue;
483
- }
507
+ if (!wsp_ggml_are_same_layout(node, parent)) {
508
+ AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
509
+ continue;
510
+ }
484
511
 
485
- struct hash_node * p_hn = hash_get(galloc, parent);
486
- if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && wsp_ggml_are_same_layout(node, parent)) {
487
- if (wsp_ggml_is_view(parent)) {
488
- struct wsp_ggml_tensor * view_src = parent->view_src;
489
- struct hash_node * view_src_hn = hash_get(galloc, view_src);
490
- if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
491
- // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
492
- // the parent's data that it will need later (same layout requirement). the problem is that then
493
- // we cannot free the tensor because the original address of the allocation is lost.
494
- // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
495
- // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
496
- AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
497
- node->view_src = view_src;
498
- view_src_hn->n_views += 1;
499
- init_view(galloc, node, false);
500
- return;
501
- }
502
- } else {
503
- AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
504
- node->view_src = parent;
505
- p_hn->n_views += 1;
506
- init_view(galloc, node, false);
512
+ struct hash_node * p_hn = wsp_ggml_gallocr_hash_get(galloc, parent);
513
+ if (p_hn->n_children == 1 && p_hn->n_views == 0) {
514
+ if (wsp_ggml_is_view(parent)) {
515
+ struct wsp_ggml_tensor * view_src = parent->view_src;
516
+ struct hash_node * view_src_hn = wsp_ggml_gallocr_hash_get(galloc, view_src);
517
+ if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
518
+ AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
519
+ assert(view_src_hn->offset == p_hn->offset);
520
+ hn->buffer_id = p_hn->buffer_id;
521
+ hn->offset = p_hn->offset;
522
+ p_hn->allocated = false; // avoid freeing the parent
523
+ view_src_hn->allocated = false;
507
524
  return;
508
525
  }
526
+ } else {
527
+ AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
528
+ hn->buffer_id = p_hn->buffer_id;
529
+ hn->offset = p_hn->offset;
530
+ p_hn->allocated = false; // avoid freeing the parent
531
+ return;
509
532
  }
510
533
  }
511
534
  }
512
- wsp_ggml_tallocr_alloc(alloc, node);
513
535
  }
536
+ // allocate tensor from the buffer
537
+ struct wsp_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
538
+ wsp_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
539
+ size_t size = wsp_ggml_backend_buft_get_alloc_size(buft, node);
540
+ size_t offset = wsp_ggml_dyn_tallocr_alloc(alloc, size, node);
541
+ hn->buffer_id = buffer_id;
542
+ hn->offset = offset;
543
+ return;
514
544
  }
515
545
  }
516
546
 
517
- static void free_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
518
- wsp_ggml_tallocr_t alloc = node_tallocr(galloc, node);
547
+ static void wsp_ggml_gallocr_free_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
548
+ // graph outputs are never freed
549
+ if (node->flags & WSP_GGML_TENSOR_FLAG_OUTPUT) {
550
+ AT_PRINTF("not freeing output %s\n", node->name);
551
+ return;
552
+ }
519
553
 
520
- wsp_ggml_tallocr_free_tensor(alloc, node);
554
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
555
+ size_t offset = hn->offset;
556
+ int buffer_id = hn->buffer_id;
557
+ struct wsp_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
558
+ wsp_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
559
+ size_t size = wsp_ggml_backend_buft_get_alloc_size(buft, node);
560
+ wsp_ggml_dyn_tallocr_free_tensor(alloc, offset, size, node);
561
+ hn->allocated = false;
521
562
  }
522
563
 
523
- static void wsp_ggml_tallocr_alloc_graph_impl(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * gf) {
524
- const int * parse_seq = galloc->parse_seq;
525
- int parse_seq_len = galloc->parse_seq_len;
564
+ static int get_node_buffer_id(const int * node_buffer_ids, int i) {
565
+ return node_buffer_ids ? node_buffer_ids[i] : 0;
566
+ }
526
567
 
527
- // count number of children and views
528
- for (int i = 0; i < gf->n_nodes; i++) {
529
- struct wsp_ggml_tensor * node = gf->nodes[i];
568
+ static void wsp_ggml_gallocr_alloc_graph_impl(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
569
+ // clear hash tables
570
+ wsp_ggml_hash_set_reset(&galloc->hash_set);
571
+ memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);
530
572
 
531
- if (wsp_ggml_is_view(node)) {
573
+ // allocate leafs
574
+ // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
575
+ for (int i = 0; i < graph->n_leafs; i++) {
576
+ struct wsp_ggml_tensor * leaf = graph->leafs[i];
577
+ wsp_ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
578
+ }
579
+
580
+ // count number of children and views
581
+ // allocate other graph inputs and leafs first to avoid overwriting them
582
+ for (int i = 0; i < graph->n_nodes; i++) {
583
+ struct wsp_ggml_tensor * node = graph->nodes[i];
584
+
585
+ // TODO: better way to add external dependencies
586
+ // WSP_GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
587
+ // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
588
+ // itself is never used and should not be considered a dependency
589
+ if (wsp_ggml_is_view(node) && node->op != WSP_GGML_OP_NONE) {
532
590
  struct wsp_ggml_tensor * view_src = node->view_src;
533
- hash_get(galloc, view_src)->n_views += 1;
534
- if (node->buffer == NULL && node->data != NULL) {
535
- // view of a pre-allocated tensor, didn't call init_view() yet
536
- init_view(galloc, node, true);
537
- }
591
+ wsp_ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
592
+ }
593
+
594
+ if (node->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
595
+ wsp_ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
538
596
  }
539
597
 
540
598
  for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
541
- struct wsp_ggml_tensor * parent = node->src[j];
542
- if (parent == NULL) {
543
- break;
599
+ struct wsp_ggml_tensor * src = node->src[j];
600
+ if (src == NULL) {
601
+ continue;
544
602
  }
545
- hash_get(galloc, parent)->n_children += 1;
546
- if (wsp_ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) {
547
- init_view(galloc, parent, true);
603
+
604
+ wsp_ggml_gallocr_hash_get(galloc, src)->n_children += 1;
605
+
606
+ // allocate explicit inputs
607
+ if (src->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
608
+ wsp_ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
548
609
  }
549
610
  }
550
- }
611
+ }
551
612
 
552
613
  // allocate tensors
553
- // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
554
- int last_barrier_pos = 0;
555
- int n_nodes = parse_seq_len ? parse_seq_len : gf->n_nodes;
556
-
557
- for (int ind = 0; ind < n_nodes; ind++) {
558
- // allocate a node if there is no parse_seq or this is not a barrier
559
- if (parse_seq_len == 0 || parse_seq[ind] != -1) {
560
- int i = parse_seq_len ? parse_seq[ind] : ind;
561
- struct wsp_ggml_tensor * node = gf->nodes[i];
562
-
563
- // allocate parents (leafs)
564
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
565
- struct wsp_ggml_tensor * parent = node->src[j];
566
- if (parent == NULL) {
567
- break;
568
- }
569
- allocate_node(galloc, parent);
614
+ for (int i = 0; i < graph->n_nodes; i++) {
615
+ struct wsp_ggml_tensor * node = graph->nodes[i];
616
+ int buffer_id = get_node_buffer_id(node_buffer_ids, i);
617
+
618
+ // allocate parents (only leafs need to be allocated at this point)
619
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
620
+ struct wsp_ggml_tensor * parent = node->src[j];
621
+ if (parent == NULL) {
622
+ continue;
570
623
  }
624
+ wsp_ggml_gallocr_allocate_node(galloc, parent, buffer_id);
625
+ }
571
626
 
572
- // allocate node
573
- allocate_node(galloc, node);
627
+ // allocate node
628
+ wsp_ggml_gallocr_allocate_node(galloc, node, buffer_id);
574
629
 
575
- AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_name(node->op), node->name);
576
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
577
- struct wsp_ggml_tensor * parent = node->src[j];
578
- if (parent == NULL) {
579
- break;
580
- }
581
- AT_PRINTF("%s", parent->name);
582
- if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
583
- AT_PRINTF(", ");
584
- }
630
+ AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_desc(node), node->name);
631
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
632
+ struct wsp_ggml_tensor * parent = node->src[j];
633
+ if (parent == NULL) {
634
+ continue;
635
+ }
636
+ AT_PRINTF("%s", parent->name);
637
+ if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
638
+ AT_PRINTF(", ");
585
639
  }
586
- AT_PRINTF("\n");
587
640
  }
641
+ AT_PRINTF("\n");
588
642
 
589
643
  // update parents
590
- // update immediately if there is no parse_seq
591
- // update only at barriers if there is parse_seq
592
- if ((parse_seq_len == 0) || parse_seq[ind] == -1) {
593
- int update_start = parse_seq_len ? last_barrier_pos : ind;
594
- int update_end = parse_seq_len ? ind : ind + 1;
595
- for (int i = update_start; i < update_end; i++) {
596
- int node_i = parse_seq_len ? parse_seq[i] : i;
597
- struct wsp_ggml_tensor * node = gf->nodes[node_i];
598
-
599
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
600
- struct wsp_ggml_tensor * parent = node->src[j];
601
- if (parent == NULL) {
602
- break;
603
- }
604
- struct hash_node * p_hn = hash_get(galloc, parent);
605
- p_hn->n_children -= 1;
606
-
607
- //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
608
-
609
- if (p_hn->n_children == 0 && p_hn->n_views == 0) {
610
- if (wsp_ggml_is_view(parent)) {
611
- struct wsp_ggml_tensor * view_src = parent->view_src;
612
- struct hash_node * view_src_hn = hash_get(galloc, view_src);
613
- view_src_hn->n_views -= 1;
614
- AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
615
- if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0) {
616
- free_node(galloc, view_src);
617
- }
618
- }
619
- else {
620
- free_node(galloc, parent);
621
- }
644
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
645
+ struct wsp_ggml_tensor * parent = node->src[j];
646
+ if (parent == NULL) {
647
+ continue;
648
+ }
649
+ struct hash_node * p_hn = wsp_ggml_gallocr_hash_get(galloc, parent);
650
+ p_hn->n_children -= 1;
651
+
652
+ AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
653
+ parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);
654
+
655
+ if (p_hn->n_children == 0 && p_hn->n_views == 0) {
656
+ if (wsp_ggml_is_view(parent)) {
657
+ struct wsp_ggml_tensor * view_src = parent->view_src;
658
+ struct hash_node * view_src_hn = wsp_ggml_gallocr_hash_get(galloc, view_src);
659
+ view_src_hn->n_views -= 1;
660
+ AT_PRINTF("view_src %s: %d children, %d views\n",
661
+ view_src->name, view_src_hn->n_children, view_src_hn->n_views);
662
+ if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
663
+ wsp_ggml_gallocr_free_node(galloc, view_src);
622
664
  }
623
665
  }
666
+ else if (p_hn->allocated) {
667
+ wsp_ggml_gallocr_free_node(galloc, parent);
668
+ }
624
669
  }
625
670
  AT_PRINTF("\n");
626
- if (parse_seq_len) {
627
- last_barrier_pos = ind + 1;
628
- }
629
671
  }
630
672
  }
631
673
  }
632
674
 
633
- size_t wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, wsp_ggml_tallocr_t talloc, struct wsp_ggml_cgraph * graph) {
634
- size_t hash_size = graph->visited_hash_table.size;
675
+ bool wsp_ggml_gallocr_reserve_n(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
676
+ size_t min_hash_size = graph->n_nodes + graph->n_leafs;
677
+ // add 25% margin to avoid hash collisions
678
+ min_hash_size += min_hash_size / 4;
679
+
680
+ // initialize hash table
681
+ if (galloc->hash_set.size < min_hash_size) {
682
+ wsp_ggml_hash_set_free(&galloc->hash_set);
683
+ galloc->hash_set = wsp_ggml_hash_set_new(min_hash_size);
684
+ WSP_GGML_ASSERT(galloc->hash_set.keys != NULL);
685
+
686
+ free(galloc->hash_values);
687
+ galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
688
+ WSP_GGML_ASSERT(galloc->hash_values != NULL);
689
+ }
690
+
691
+ // reset allocators
692
+ for (int i = 0; i < galloc->n_buffers; i++) {
693
+ wsp_ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
694
+ }
695
+
696
+ // allocate in hash table
697
+ wsp_ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);
635
698
 
636
- // check if the hash table is initialized and large enough
637
- if (galloc->hash_set.size < hash_size) {
638
- if (galloc->hash_set.keys != NULL) {
639
- free(galloc->hash_set.keys);
699
+ // set the node_allocs from the hash table
700
+ if (galloc->n_nodes < graph->n_nodes) {
701
+ free(galloc->node_allocs);
702
+ galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
703
+ WSP_GGML_ASSERT(galloc->node_allocs != NULL);
704
+ }
705
+ galloc->n_nodes = graph->n_nodes;
706
+ for (int i = 0; i < graph->n_nodes; i++) {
707
+ struct wsp_ggml_tensor * node = graph->nodes[i];
708
+ struct node_alloc * node_alloc = &galloc->node_allocs[i];
709
+ if (node->view_src || node->data) {
710
+ node_alloc->dst.buffer_id = -1;
711
+ node_alloc->dst.offset = SIZE_MAX;
712
+ node_alloc->dst.size_max = 0;
713
+ } else {
714
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
715
+ node_alloc->dst.buffer_id = hn->buffer_id;
716
+ node_alloc->dst.offset = hn->offset;
717
+ node_alloc->dst.size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
640
718
  }
641
- if (galloc->hash_values != NULL) {
642
- free(galloc->hash_values);
719
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
720
+ struct wsp_ggml_tensor * src = node->src[j];
721
+ if (!src || src->view_src || src->data) {
722
+ node_alloc->src[j].buffer_id = -1;
723
+ node_alloc->src[j].offset = SIZE_MAX;
724
+ node_alloc->src[j].size_max = 0;
725
+ } else {
726
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, src);
727
+ node_alloc->src[j].buffer_id = hn->buffer_id;
728
+ node_alloc->src[j].offset = hn->offset;
729
+ node_alloc->src[j].size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
730
+ }
731
+ }
732
+ }
733
+ if (galloc->n_leafs < graph->n_leafs) {
734
+ free(galloc->leaf_allocs);
735
+ galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
736
+ WSP_GGML_ASSERT(galloc->leaf_allocs != NULL);
737
+ }
738
+ galloc->n_leafs = graph->n_leafs;
739
+ for (int i = 0; i < graph->n_leafs; i++) {
740
+ struct wsp_ggml_tensor * leaf = graph->leafs[i];
741
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, leaf);
742
+ if (leaf->view_src || leaf->data) {
743
+ galloc->leaf_allocs[i].leaf.buffer_id = -1;
744
+ galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
745
+ galloc->leaf_allocs[i].leaf.size_max = 0;
746
+ } else {
747
+ galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
748
+ galloc->leaf_allocs[i].leaf.offset = hn->offset;
749
+ galloc->leaf_allocs[i].leaf.size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
643
750
  }
644
- galloc->hash_set.keys = malloc(sizeof(struct wsp_ggml_tensor *) * hash_size);
645
- galloc->hash_set.size = hash_size;
646
- galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size);
647
751
  }
648
752
 
649
- // reset hash table
650
- memset(galloc->hash_set.keys, 0, sizeof(struct wsp_ggml_tensor *) * hash_size);
651
- memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);
753
+ // reallocate buffers if needed
754
+ for (int i = 0; i < galloc->n_buffers; i++) {
755
+ // if the buffer type is used multiple times, we reuse the same buffer
756
+ for (int j = 0; j < i; j++) {
757
+ if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
758
+ galloc->buffers[i] = galloc->buffers[j];
759
+ break;
760
+ }
761
+ }
652
762
 
653
- galloc->talloc = talloc;
654
- wsp_ggml_tallocr_alloc_graph_impl(galloc, graph);
655
- galloc->talloc = NULL;
763
+ size_t cur_size = galloc->buffers[i] ? wsp_ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
764
+ size_t new_size = wsp_ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);
656
765
 
657
- size_t max_size = wsp_ggml_tallocr_max_size(talloc);
766
+ // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
767
+ if (new_size > cur_size || galloc->buffers[i] == NULL) {
768
+ #ifndef NDEBUG
769
+ WSP_GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, wsp_ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
770
+ #endif
658
771
 
659
- return max_size;
772
+ wsp_ggml_backend_buffer_free(galloc->buffers[i]);
773
+ galloc->buffers[i] = wsp_ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
774
+ if (galloc->buffers[i] == NULL) {
775
+ WSP_GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, wsp_ggml_backend_buft_name(galloc->bufts[i]), new_size);
776
+ return false;
777
+ }
778
+ wsp_ggml_backend_buffer_set_usage(galloc->buffers[i], WSP_GGML_BACKEND_BUFFER_USAGE_COMPUTE);
779
+ }
780
+ }
781
+
782
+ return true;
660
783
  }
661
784
 
662
- void wsp_ggml_gallocr_alloc_graph_n(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, struct wsp_ggml_hash_set hash_set, wsp_ggml_tallocr_t * hash_node_talloc) {
663
- const size_t hash_size = hash_set.size;
785
+ bool wsp_ggml_gallocr_reserve(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph *graph) {
786
+ return wsp_ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
787
+ }
788
+
789
+ static void wsp_ggml_gallocr_init_tensor(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
790
+ int buffer_id = tensor_alloc->buffer_id;
791
+ assert(tensor->data || tensor->view_src || wsp_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
664
792
 
665
- WSP_GGML_ASSERT(hash_size >= (size_t)(graph->n_nodes + graph->n_leafs));
793
+ if (tensor->view_src != NULL) {
794
+ if (tensor->buffer == NULL) {
795
+ assert(tensor_alloc->offset == SIZE_MAX);
796
+ if (tensor->view_src->buffer == NULL) {
797
+ // this tensor was allocated without ggml-backend
798
+ return;
799
+ }
800
+ wsp_ggml_backend_view_init(tensor);
801
+ }
802
+ } else {
803
+ if (tensor->data == NULL) {
804
+ assert(tensor_alloc->offset != SIZE_MAX);
805
+ assert(wsp_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
806
+ void * base = wsp_ggml_backend_buffer_get_base(galloc->buffers[buffer_id]);
807
+ void * addr = (char *)base + tensor_alloc->offset;
808
+ wsp_ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr);
809
+ } else {
810
+ if (tensor->buffer == NULL) {
811
+ // this tensor was allocated without ggml-backend
812
+ return;
813
+ }
814
+ }
815
+ }
816
+ }
666
817
 
667
- galloc->talloc = NULL;
818
+ static bool wsp_ggml_gallocr_node_needs_realloc(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, struct tensor_alloc * talloc) {
819
+ size_t node_size = (node->data || node->view_src) ? 0 : wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
820
+ return talloc->size_max >= node_size;
821
+ }
668
822
 
669
- // alloc hash_values if needed
670
- if (galloc->hash_values == NULL || galloc->hash_values_size < hash_size) {
671
- free(galloc->hash_values);
672
- galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size);
673
- galloc->hash_values_size = hash_size;
823
+ static bool wsp_ggml_gallocr_needs_realloc(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph) {
824
+ if (galloc->n_nodes != graph->n_nodes) {
825
+ #ifndef NDEBUG
826
+ WSP_GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
827
+ #endif
828
+ return true;
674
829
  }
675
830
 
676
- // free hash_set.keys if needed
677
- if (galloc->hash_set.keys != NULL) {
678
- free(galloc->hash_set.keys);
831
+ if (galloc->n_leafs != graph->n_leafs) {
832
+ #ifndef NDEBUG
833
+ WSP_GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
834
+ #endif
835
+ return true;
679
836
  }
680
- galloc->hash_set = hash_set;
681
837
 
682
- // reset hash values
683
- memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);
838
+ for (int i = 0; i < graph->n_nodes; i++) {
839
+ struct wsp_ggml_tensor * node = graph->nodes[i];
840
+ struct node_alloc * node_alloc = &galloc->node_allocs[i];
684
841
 
685
- galloc->hash_allocs = hash_node_talloc;
842
+ if (!wsp_ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
843
+ #ifndef NDEBUG
844
+ WSP_GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
845
+ #endif
846
+ return true;
847
+ }
686
848
 
687
- wsp_ggml_tallocr_alloc_graph_impl(galloc, graph);
849
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
850
+ struct wsp_ggml_tensor * src = node->src[j];
851
+ if (src == NULL) {
852
+ continue;
853
+ }
854
+ if (!wsp_ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
855
+ #ifndef NDEBUG
856
+ WSP_GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
857
+ #endif
858
+ return true;
859
+ }
860
+ }
861
+ }
688
862
 
689
- // remove unowned resources
690
- galloc->hash_set.keys = NULL;
691
- galloc->hash_allocs = NULL;
863
+ return false;
692
864
  }
693
865
 
694
-// legacy API wrapper
-
-struct wsp_ggml_allocr {
-    wsp_ggml_tallocr_t talloc;
-    wsp_ggml_gallocr_t galloc;
-};
+bool wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph) {
+    if (wsp_ggml_gallocr_needs_realloc(galloc, graph)) {
+        if (galloc->n_buffers == 1) {
+#ifndef NDEBUG
+            WSP_GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
+#endif
+            if (!wsp_ggml_gallocr_reserve(galloc, graph)) {
+                return false;
+            }
+        } else {
+#ifndef NDEBUG
+            WSP_GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
+#endif
+            return false;
+        }
+    }

-static wsp_ggml_allocr_t wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_t talloc) {
-    wsp_ggml_allocr_t alloc = (wsp_ggml_allocr_t)malloc(sizeof(struct wsp_ggml_allocr));
-    *alloc = (struct wsp_ggml_allocr) {
-        /*.talloc = */ talloc,
-        /*.galloc = */ wsp_ggml_gallocr_new(),
-    };
-    return alloc;
-}
+    // reset buffers
+    for (int i = 0; i < galloc->n_buffers; i++) {
+        if (galloc->buffers[i] != NULL) {
+            wsp_ggml_backend_buffer_reset(galloc->buffers[i]);
+        }
+    }

-wsp_ggml_allocr_t wsp_ggml_allocr_new(void * data, size_t size, size_t alignment) {
-    return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new(data, size, alignment));
-}
+    // allocate the graph tensors from the previous assignments
+    // leafs
+    for (int i = 0; i < graph->n_leafs; i++) {
+        struct wsp_ggml_tensor * leaf = graph->leafs[i];
+        struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
+        wsp_ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
+    }
+    // nodes
+    for (int i = 0; i < graph->n_nodes; i++) {
+        struct wsp_ggml_tensor * node = graph->nodes[i];
+        struct node_alloc * node_alloc = &galloc->node_allocs[i];
+        for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+            struct wsp_ggml_tensor * src = node->src[j];
+            if (src == NULL) {
+                continue;
+            }
+            wsp_ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
+        }
+        wsp_ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
+    }

-wsp_ggml_allocr_t wsp_ggml_allocr_new_measure(size_t alignment) {
-    return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_measure(alignment));
+    return true;
 }

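The hunks above retire the legacy wsp_ggml_allocr wrapper in favour of the graph allocator: wsp_ggml_gallocr_alloc_graph() re-runs wsp_ggml_gallocr_reserve() automatically for a single-buffer allocator when wsp_ggml_gallocr_needs_realloc() detects a changed graph layout, and otherwise replays the recorded per-node assignments. A minimal caller-side sketch follows; wsp_ggml_gallocr_new() taking a buffer type and wsp_ggml_backend_get_default_buffer_type() do not appear in this hunk and are assumptions about the bundled ggml-backend API.

    // Illustrative sketch only, not part of the package sources.
    // Assumed: wsp_ggml_gallocr_new(buft) and wsp_ggml_backend_get_default_buffer_type().
    static bool prepare_graph(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * graph) {
        wsp_ggml_gallocr_t galloc = wsp_ggml_gallocr_new(wsp_ggml_backend_get_default_buffer_type(backend));

        // record worst-case tensor assignments once ...
        if (!wsp_ggml_gallocr_reserve(galloc, graph)) {
            wsp_ggml_gallocr_free(galloc);
            return false;
        }
        // ... then (re)apply them before each evaluation; a single-buffer
        // allocator re-reserves itself if the graph layout has changed
        bool ok = wsp_ggml_gallocr_alloc_graph(galloc, graph);

        wsp_ggml_gallocr_free(galloc);
        return ok;
    }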
-wsp_ggml_allocr_t wsp_ggml_allocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer) {
-    return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_from_buffer(buffer));
-}
+size_t wsp_ggml_gallocr_get_buffer_size(wsp_ggml_gallocr_t galloc, int buffer_id) {
+    WSP_GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);

-wsp_ggml_allocr_t wsp_ggml_allocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size) {
-    return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_from_backend(backend, size));
-}
+    if (galloc->buffers[buffer_id] == NULL) {
+        return 0;
+    }

-wsp_ggml_allocr_t wsp_ggml_allocr_new_measure_from_backend(struct wsp_ggml_backend * backend) {
-    return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_measure_from_backend(backend));
-}
+    for (int i = 0; i < buffer_id; i++) {
+        if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
+            // this buffer is the same as a previous one due to the same buffer type being used multiple times
+            // only return the buffer size the first time it appears to avoid double counting
+            return 0;
+        }
+    }

-struct wsp_ggml_backend_buffer * wsp_ggml_allocr_get_buffer(wsp_ggml_allocr_t alloc) {
-    return wsp_ggml_tallocr_get_buffer(alloc->talloc);
+    return wsp_ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
 }

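wsp_ggml_gallocr_get_buffer_size() reports a buffer only the first time it appears, so summing it over all buffer ids yields the total memory held by the allocator without double counting shared buffers. A small sketch, assuming the caller tracks how many buffer types the allocator was created with (n_bufs is the caller's own count, not a field exposed by this file):

    // Illustrative sketch only, not part of the package sources.
    static size_t gallocr_total_size(wsp_ggml_gallocr_t galloc, int n_bufs) {
        size_t total = 0;
        for (int i = 0; i < n_bufs; i++) {
            // duplicated buffers report 0 after their first appearance
            total += wsp_ggml_gallocr_get_buffer_size(galloc, i);
        }
        return total;
    }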
-void wsp_ggml_allocr_set_parse_seq(wsp_ggml_allocr_t alloc, const int * list, int n) {
-    wsp_ggml_gallocr_set_parse_seq(alloc->galloc, list, n);
-}
+// utils

-void wsp_ggml_allocr_free(wsp_ggml_allocr_t alloc) {
-    wsp_ggml_gallocr_free(alloc->galloc);
-    wsp_ggml_tallocr_free(alloc->talloc);
-    free(alloc);
-}
+static bool alloc_tensor_range(struct wsp_ggml_context * ctx,
+        struct wsp_ggml_tensor * first, struct wsp_ggml_tensor * last,
+        wsp_ggml_backend_buffer_type_t buft, size_t size,
+        wsp_ggml_backend_buffer_t ** buffers, size_t * n_buffers) {
+    wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_buft_alloc_buffer(buft, size);
+    if (buffer == NULL) {
+#ifndef NDEBUG
+        WSP_GGML_LOG_DEBUG("%s: failed to allocate %s buffer of size %zu\n", __func__, wsp_ggml_backend_buft_name(buft), size);
+#endif
+        for (size_t i = 0; i < *n_buffers; i++) {
+            wsp_ggml_backend_buffer_free((*buffers)[i]);
+        }
+        free(*buffers);
+        return false;
+    }

-bool wsp_ggml_allocr_is_measure(wsp_ggml_allocr_t alloc) {
-    return wsp_ggml_tallocr_is_measure(alloc->talloc);
-}
+    struct wsp_ggml_tallocr tallocr = wsp_ggml_tallocr_new(buffer);

-void wsp_ggml_allocr_reset(wsp_ggml_allocr_t alloc) {
-    wsp_ggml_tallocr_reset(alloc->talloc);
-}
+    for (struct wsp_ggml_tensor * t = first; t != last; t = wsp_ggml_get_next_tensor(ctx, t)) {
+        if (t->data == NULL) {
+            if (t->view_src == NULL) {
+                wsp_ggml_tallocr_alloc(&tallocr, t);
+            } else if (t->buffer == NULL) {
+                wsp_ggml_backend_view_init(t);
+            }
+        } else {
+            if (t->view_src != NULL && t->buffer == NULL) {
+                // view of a pre-allocated tensor
+                wsp_ggml_backend_view_init(t);
+            }
+        }
+    }

-void wsp_ggml_allocr_alloc(wsp_ggml_allocr_t alloc, struct wsp_ggml_tensor * tensor) {
-    wsp_ggml_tallocr_alloc(alloc->talloc, tensor);
-}
+    *buffers = realloc(*buffers, sizeof(wsp_ggml_backend_buffer_t) * (*n_buffers + 1));
+    (*buffers)[(*n_buffers)++] = buffer;

-size_t wsp_ggml_allocr_max_size(wsp_ggml_allocr_t alloc) {
-    return wsp_ggml_tallocr_max_size(alloc->talloc);
-}
-
-size_t wsp_ggml_allocr_alloc_graph(wsp_ggml_allocr_t alloc, struct wsp_ggml_cgraph * graph) {
-    return wsp_ggml_gallocr_alloc_graph(alloc->galloc, alloc->talloc, graph);
+    return true;
 }

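alloc_tensor_range() above is the per-chunk worker for the rewritten wsp_ggml_backend_alloc_ctx_tensors_from_buft() that follows: tensors are packed greedily into buffers no larger than the buffer type's maximum size. A stripped-down sketch of that grouping rule, using plain padded sizes instead of tensors (hypothetical helper, not in the sources):

    // Illustrative sketch only: how many buffers the greedy grouping would create
    // for already-padded allocation sizes, given a per-buffer maximum.
    static size_t count_buffers(const size_t * sizes, size_t n, size_t max_size) {
        size_t n_buffers = 0;
        size_t cur = 0;
        for (size_t i = 0; i < n; i++) {
            if (sizes[i] > max_size) {
                return 0; // a single tensor larger than max_size is an error
            }
            if (cur + sizes[i] > max_size) {
                n_buffers++;   // close the current chunk, start a new one
                cur = sizes[i];
            } else {
                cur += sizes[i];
            }
        }
        if (cur > 0) {
            n_buffers++;       // remaining tensors
        }
        return n_buffers;
    }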
-// utils

 wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, wsp_ggml_backend_buffer_type_t buft) {
     WSP_GGML_ASSERT(wsp_ggml_get_no_alloc(ctx) == true);

     size_t alignment = wsp_ggml_backend_buft_get_alignment(buft);
+    size_t max_size = wsp_ggml_backend_buft_get_max_size(buft);

-    size_t nbytes = 0;
-    for (struct wsp_ggml_tensor * t = wsp_ggml_get_first_tensor(ctx); t != NULL; t = wsp_ggml_get_next_tensor(ctx, t)) {
+    wsp_ggml_backend_buffer_t * buffers = NULL;
+    size_t n_buffers = 0;
+
+    size_t cur_buf_size = 0;
+    struct wsp_ggml_tensor * first = wsp_ggml_get_first_tensor(ctx);
+    for (struct wsp_ggml_tensor * t = first; t != NULL; t = wsp_ggml_get_next_tensor(ctx, t)) {
+        size_t this_size = 0;
         if (t->data == NULL && t->view_src == NULL) {
-            nbytes += WSP_GGML_PAD(wsp_ggml_backend_buft_get_alloc_size(buft, t), alignment);
+            this_size = WSP_GGML_PAD(wsp_ggml_backend_buft_get_alloc_size(buft, t), alignment);
         }
-    }

-    if (nbytes == 0) {
-        fprintf(stderr, "%s: no tensors to allocate\n", __func__);
-        return NULL;
-    }
-
-    wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_buft_alloc_buffer(buft, nbytes);
-    wsp_ggml_tallocr_t tallocr = wsp_ggml_tallocr_new_from_buffer(buffer);
+        if (this_size > max_size) {
+            WSP_GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
+                    __func__, t->name,
+                    wsp_ggml_backend_buft_name(buft),
+                    this_size, max_size);
+            for (size_t i = 0; i < n_buffers; i++) {
+                wsp_ggml_backend_buffer_free(buffers[i]);
+            }
+            free(buffers);
+            return NULL;
+        }

-    for (struct wsp_ggml_tensor * t = wsp_ggml_get_first_tensor(ctx); t != NULL; t = wsp_ggml_get_next_tensor(ctx, t)) {
-        if (t->data == NULL) {
-            if (t->view_src == NULL) {
-                wsp_ggml_tallocr_alloc(tallocr, t);
-            } else {
-                wsp_ggml_backend_view_init(buffer, t);
+        if ((cur_buf_size + this_size) > max_size) {
+            // allocate tensors in the current buffer
+            if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
+                return NULL;
             }
+            first = t;
+            cur_buf_size = this_size;
+        } else {
+            cur_buf_size += this_size;
         }
     }

-    wsp_ggml_tallocr_free(tallocr);
+    // allocate remaining tensors
+    if (cur_buf_size > 0) {
+        if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
+            return NULL;
+        }
+    }

+    if (n_buffers == 0) {
+#ifndef NDEBUG
+        WSP_GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
+#endif
+        return NULL;
+    }
+
+    wsp_ggml_backend_buffer_t buffer;
+    if (n_buffers == 1) {
+        buffer = buffers[0];
+    } else {
+        buffer = wsp_ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
+    }
+    free(buffers);
     return buffer;
 }
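With those pieces in place, the helper now returns a single buffer, or a multi-buffer wrapper when the context's tensors exceed the buffer type's maximum size. A minimal usage sketch; wsp_ggml_init(), wsp_ggml_tensor_overhead(), wsp_ggml_new_tensor_1d() and wsp_ggml_backend_get_default_buffer_type() are assumed from the bundled ggml/ggml-backend headers rather than this file:

    // Illustrative sketch only, not part of the package sources.
    static wsp_ggml_backend_buffer_t alloc_weights(wsp_ggml_backend_t backend) {
        struct wsp_ggml_init_params params = {
            /*.mem_size   =*/ wsp_ggml_tensor_overhead() * 8,  // metadata only
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,  // required: the helper asserts wsp_ggml_get_no_alloc(ctx)
        };
        struct wsp_ggml_context * ctx = wsp_ggml_init(params);
        (void) wsp_ggml_new_tensor_1d(ctx, WSP_GGML_TYPE_F32, 1024);

        // ctx must outlive the returned buffer; release with
        // wsp_ggml_backend_buffer_free() and wsp_ggml_free() when done
        return wsp_ggml_backend_alloc_ctx_tensors_from_buft(
            ctx, wsp_ggml_backend_get_default_buffer_type(backend));
    }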