whisper.rn 0.4.0-rc.1 → 0.4.0-rc.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/README.md +6 -6
  2. package/android/build.gradle +4 -0
  3. package/android/src/main/CMakeLists.txt +21 -1
  4. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -92
  5. package/android/src/main/java/com/rnwhisper/RNWhisper.java +86 -40
  6. package/android/src/main/java/com/rnwhisper/WhisperContext.java +85 -131
  7. package/android/src/main/jni-utils.h +76 -0
  8. package/android/src/main/jni.cpp +226 -109
  9. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  10. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  11. package/cpp/coreml/whisper-encoder-impl.h +1 -1
  12. package/cpp/coreml/whisper-encoder.h +4 -0
  13. package/cpp/coreml/whisper-encoder.mm +5 -3
  14. package/cpp/ggml-alloc.c +797 -400
  15. package/cpp/ggml-alloc.h +60 -10
  16. package/cpp/ggml-backend-impl.h +255 -0
  17. package/cpp/ggml-backend-reg.cpp +582 -0
  18. package/cpp/ggml-backend.cpp +2002 -0
  19. package/cpp/ggml-backend.h +354 -0
  20. package/cpp/ggml-common.h +1851 -0
  21. package/cpp/ggml-cpp.h +39 -0
  22. package/cpp/ggml-cpu-aarch64.cpp +4247 -0
  23. package/cpp/ggml-cpu-aarch64.h +8 -0
  24. package/cpp/ggml-cpu-impl.h +531 -0
  25. package/cpp/ggml-cpu-quants.c +12245 -0
  26. package/cpp/ggml-cpu-quants.h +63 -0
  27. package/cpp/ggml-cpu-traits.cpp +36 -0
  28. package/cpp/ggml-cpu-traits.h +38 -0
  29. package/cpp/ggml-cpu.c +14792 -0
  30. package/cpp/ggml-cpu.cpp +653 -0
  31. package/cpp/ggml-cpu.h +137 -0
  32. package/cpp/ggml-impl.h +567 -0
  33. package/cpp/ggml-metal-impl.h +288 -0
  34. package/cpp/ggml-metal.h +24 -43
  35. package/cpp/ggml-metal.m +4867 -1080
  36. package/cpp/ggml-opt.cpp +854 -0
  37. package/cpp/ggml-opt.h +216 -0
  38. package/cpp/ggml-quants.c +5238 -0
  39. package/cpp/ggml-quants.h +100 -0
  40. package/cpp/ggml-threading.cpp +12 -0
  41. package/cpp/ggml-threading.h +14 -0
  42. package/cpp/ggml-whisper.metallib +0 -0
  43. package/cpp/ggml.c +5106 -19431
  44. package/cpp/ggml.h +847 -669
  45. package/cpp/gguf.cpp +1329 -0
  46. package/cpp/gguf.h +202 -0
  47. package/cpp/rn-audioutils.cpp +68 -0
  48. package/cpp/rn-audioutils.h +14 -0
  49. package/cpp/rn-whisper-log.h +11 -0
  50. package/cpp/rn-whisper.cpp +221 -52
  51. package/cpp/rn-whisper.h +50 -15
  52. package/cpp/whisper.cpp +3174 -1533
  53. package/cpp/whisper.h +176 -44
  54. package/ios/RNWhisper.mm +139 -46
  55. package/ios/RNWhisperAudioUtils.h +1 -2
  56. package/ios/RNWhisperAudioUtils.m +18 -67
  57. package/ios/RNWhisperContext.h +11 -8
  58. package/ios/RNWhisperContext.mm +195 -150
  59. package/jest/mock.js +15 -2
  60. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  61. package/lib/commonjs/index.js +76 -28
  62. package/lib/commonjs/index.js.map +1 -1
  63. package/lib/commonjs/version.json +1 -1
  64. package/lib/module/NativeRNWhisper.js.map +1 -1
  65. package/lib/module/index.js +76 -28
  66. package/lib/module/index.js.map +1 -1
  67. package/lib/module/version.json +1 -1
  68. package/lib/typescript/NativeRNWhisper.d.ts +13 -4
  69. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  70. package/lib/typescript/index.d.ts +37 -5
  71. package/lib/typescript/index.d.ts.map +1 -1
  72. package/package.json +9 -7
  73. package/src/NativeRNWhisper.ts +20 -4
  74. package/src/index.ts +98 -42
  75. package/src/version.json +1 -1
  76. package/whisper-rn.podspec +13 -20
  77. package/cpp/README.md +0 -4
  78. package/cpp/ggml-metal.metal +0 -2353
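Note on the allocator rewrite (package/cpp/ggml-alloc.c, item 14 above): the old measure-based wsp_ggml_allocr API is replaced by a fixed-buffer tensor allocator (wsp_ggml_tallocr), an offset-based dynamic allocator (wsp_ggml_dyn_tallocr), and a graph allocator (wsp_ggml_gallocr) built on ggml-backend buffers. The sketch below is illustrative only, not code from the package; it shows one plausible way to drive the new graph-allocator API, assumes a `wsp_ggml_backend_t backend` and a built `struct wsp_ggml_cgraph * graph` created elsewhere, and uses only functions that appear in the diff that follows.

#include "ggml-alloc.h"
#include "ggml-backend.h"

// Illustrative sketch of the new graph-allocator lifecycle.
// `backend` and `graph` are assumed to exist (hypothetical setup code).
static bool example_allocate_graph(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * graph) {
    // one allocator per backend buffer type; here, the backend's default type
    wsp_ggml_gallocr_t galloc =
        wsp_ggml_gallocr_new(wsp_ggml_backend_get_default_buffer_type(backend));

    // plan the graph and size/allocate the backing buffer; this replaces the
    // old wsp_ggml_allocr_new_measure() virtual-memory "measure" pass
    if (!wsp_ggml_gallocr_reserve(galloc, graph)) {
        wsp_ggml_gallocr_free(galloc);
        return false;
    }

    // bind tensor data pointers from the recorded (buffer_id, offset) pairs;
    // with a single buffer this also re-reserves automatically if the graph
    // outgrew the previous reservation
    if (!wsp_ggml_gallocr_alloc_graph(galloc, graph)) {
        wsp_ggml_gallocr_free(galloc);
        return false;
    }

    // e.g. report the compute buffer size of buffer 0
    size_t buf_size = wsp_ggml_gallocr_get_buffer_size(galloc, 0);
    (void)buf_size;

    // ... compute the graph with the backend, then clean up ...
    wsp_ggml_gallocr_free(galloc);
    return true;
}

The design change worth noting in the diff: the dynamic allocator plans allocations as offsets into a virtual SIZE_MAX/2 space instead of raw pointers, so graph memory can be sized without the platform-specific virtual-memory reservation code (mmap/VirtualAlloc) that this diff removes.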
package/cpp/ggml-alloc.c CHANGED
@@ -1,146 +1,156 @@
  #include "ggml-alloc.h"
+ #include "ggml-backend-impl.h"
  #include "ggml.h"
+ #include "ggml-impl.h"
  #include <assert.h>
+ #include <limits.h>
  #include <stdarg.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

- #ifdef __has_include
- #if __has_include(<unistd.h>)
- #include <unistd.h>
- #if defined(_POSIX_MAPPED_FILES)
- #include <sys/types.h>
- #include <sys/mman.h>
- #endif
- #endif
- #endif
-
- #if defined(_WIN32)
- #define WIN32_LEAN_AND_MEAN
- #ifndef NOMINMAX
- #define NOMINMAX
- #endif
- #include <windows.h>
- #include <memoryapi.h>
- #endif
-
-
- #define UNUSED(x) (void)(x)
  #define MAX(a, b) ((a) > (b) ? (a) : (b))
- #define WSP_GGML_MAX_CONCUR (2*WSP_GGML_MAX_NODES)
+ #define MAX_FREE_BLOCKS 256

  //#define WSP_GGML_ALLOCATOR_DEBUG

- //#define AT_PRINTF printf
- #define AT_PRINTF(...) ((void)0)
+ //#define AT_PRINTF(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
+ #define AT_PRINTF(...)

- struct hash_node {
-     struct wsp_ggml_tensor * t;
-     int n_children;
-     int n_views;
- };

- static size_t hash(void * p) {
-     return (size_t)p % WSP_GGML_GRAPH_HASHTABLE_SIZE;
+ static bool wsp_ggml_is_view(const struct wsp_ggml_tensor * t) {
+     return t->view_src != NULL;
  }

- static struct hash_node * hash_get(struct hash_node hash_table[], struct wsp_ggml_tensor * t) {
-     size_t h = hash(t);
-
-     // linear probing
-     size_t i = h;
-     while (hash_table[i].t != NULL) {
-         if (hash_table[i].t == t) {
-             return &hash_table[i];
+ static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
+     if (a->type != b->type) {
+         return false;
+     }
+     for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
+         if (a->ne[i] != b->ne[i]) {
+             return false;
          }
-         i = (i + 1) % WSP_GGML_GRAPH_HASHTABLE_SIZE;
-         if (i == h) {
-             // hash table is full
-             WSP_GGML_ASSERT(false);
+         if (a->nb[i] != b->nb[i]) {
+             return false;
          }
      }
+     return true;
+ }

-     hash_table[i].t = t;
-     return &hash_table[i];
+ // ops that return true for this function must not use restrict pointers for their backend implementations
+ static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
+     switch (op) {
+         case WSP_GGML_OP_SCALE:
+         case WSP_GGML_OP_DIAG_MASK_ZERO:
+         case WSP_GGML_OP_DIAG_MASK_INF:
+         case WSP_GGML_OP_ADD:
+         case WSP_GGML_OP_ADD1:
+         case WSP_GGML_OP_SUB:
+         case WSP_GGML_OP_MUL:
+         case WSP_GGML_OP_DIV:
+         case WSP_GGML_OP_SQR:
+         case WSP_GGML_OP_SQRT:
+         case WSP_GGML_OP_LOG:
+         case WSP_GGML_OP_UNARY:
+         case WSP_GGML_OP_ROPE:
+         case WSP_GGML_OP_ROPE_BACK:
+         case WSP_GGML_OP_SILU_BACK:
+         case WSP_GGML_OP_RMS_NORM:
+         case WSP_GGML_OP_RMS_NORM_BACK:
+         case WSP_GGML_OP_SOFT_MAX:
+         case WSP_GGML_OP_SOFT_MAX_BACK:
+             return true;
+
+         default:
+             return false;
+     }
  }

- // TODO: WSP_GGML_PAD ?
  static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
      assert(alignment && !(alignment & (alignment - 1))); // power of 2
      size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
      return offset + align;
  }

+ // tallocr
+
+ struct wsp_ggml_tallocr wsp_ggml_tallocr_new(wsp_ggml_backend_buffer_t buffer) {
+     void * base = wsp_ggml_backend_buffer_get_base(buffer);
+     size_t align = wsp_ggml_backend_buffer_get_alignment(buffer);
+
+     assert(align && !(align & (align - 1))); // power of 2
+
+     struct wsp_ggml_tallocr talloc = (struct wsp_ggml_tallocr) {
+         /*.buffer    = */ buffer,
+         /*.base      = */ base,
+         /*.alignment = */ align,
+         /*.offset    = */ aligned_offset(base, 0, align),
+     };
+     return talloc;
+ }
+
+ void wsp_ggml_tallocr_alloc(struct wsp_ggml_tallocr * talloc, struct wsp_ggml_tensor * tensor) {
+     size_t size = wsp_ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
+     size = WSP_GGML_PAD(size, talloc->alignment);
+
+     if (talloc->offset + size > wsp_ggml_backend_buffer_get_size(talloc->buffer)) {
+         WSP_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
+                 __func__, tensor->name, size, wsp_ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
+         WSP_GGML_ABORT("not enough space in the buffer");
+     }
+
+     void * addr = (char *)wsp_ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
+     talloc->offset += size;
+
+     assert(((uintptr_t)addr % talloc->alignment) == 0);
+
+     wsp_ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
+ }
+
+ // dynamic tensor allocator
+
  struct free_block {
-     void * addr;
+     size_t offset;
      size_t size;
  };

- #define MAX_FREE_BLOCKS 128
-
- struct wsp_ggml_allocr {
-     void * data;
-     size_t size;
+ struct wsp_ggml_dyn_tallocr {
      size_t alignment;
      int n_free_blocks;
      struct free_block free_blocks[MAX_FREE_BLOCKS];
-     struct hash_node hash_table[WSP_GGML_GRAPH_HASHTABLE_SIZE];
      size_t max_size;
-     bool measure;
-     int parse_seq[WSP_GGML_MAX_CONCUR];
-     int parse_seq_len;

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
-     struct wsp_ggml_tensor * allocated_tensors[1024];
+     struct {
+         const struct wsp_ggml_tensor * tensor;
+         size_t offset;
+     } allocated_tensors[1024];
  #endif
  };

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- static void add_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+ static void add_allocated_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, const struct wsp_ggml_tensor * tensor) {
      for (int i = 0; i < 1024; i++) {
-         if (alloc->allocated_tensors[i] == NULL) {
-             alloc->allocated_tensors[i] = tensor;
+         if (alloc->allocated_tensors[i].tensor == NULL) {
+             alloc->allocated_tensors[i].tensor = tensor;
+             alloc->allocated_tensors[i].offset = offset;
              return;
          }
      }
-     WSP_GGML_ASSERT(!"out of allocated_tensors");
+     WSP_GGML_ABORT("out of allocated_tensors");
  }
- static void remove_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+ static void remove_allocated_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, const struct wsp_ggml_tensor * tensor) {
      for (int i = 0; i < 1024; i++) {
-         if (alloc->allocated_tensors[i] == tensor ||
-             (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
-             alloc->allocated_tensors[i] = NULL;
+         if (alloc->allocated_tensors[i].offset == offset) {
+             alloc->allocated_tensors[i].tensor = NULL;
              return;
          }
      }
-     printf("tried to free tensor %s not found\n", tensor->name);
-     WSP_GGML_ASSERT(!"tensor not found");
+     WSP_GGML_ABORT("tried to free tensor %s not found\n", tensor->name);
  }
  #endif

- static size_t wsp_ggml_allocr_get_alloc_size(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
-     return wsp_ggml_nbytes(tensor);
-
-     UNUSED(alloc);
- }
-
- // check if a tensor is allocated by this buffer
- static bool wsp_ggml_allocr_is_own(struct wsp_ggml_allocr * alloc, const struct wsp_ggml_tensor * tensor) {
-     void * ptr = tensor->data;
-     return ptr >= alloc->data && (char *)ptr < (char *)alloc->data + alloc->max_size;
- }
-
- static bool wsp_ggml_is_view(struct wsp_ggml_tensor * t) {
-     return t->view_src != NULL;
- }
-
- void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
- #ifdef WSP_GGML_ALLOCATOR_DEBUG
-     WSP_GGML_ASSERT(!wsp_ggml_is_view(tensor)); // views generally get data pointer from one of their sources
-     WSP_GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
- #endif
-     size_t size = wsp_ggml_allocr_get_alloc_size(alloc, tensor);
+ static size_t wsp_ggml_dyn_tallocr_alloc(struct wsp_ggml_dyn_tallocr * alloc, size_t size, const struct wsp_ggml_tensor * tensor) {
      size = aligned_offset(NULL, size, alloc->alignment);

      AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
@@ -159,8 +169,6 @@ void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tenso
          }
      }

-     AT_PRINTF("block %d\n", best_fit_block);
-
      if (best_fit_block == -1) {
          // the last block is our last resort
          struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
@@ -168,15 +176,16 @@ void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tenso
          if (block->size >= size) {
              best_fit_block = alloc->n_free_blocks - 1;
          } else {
-             fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
+             // this should never happen
+             WSP_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
                      __func__, size, max_avail);
-             WSP_GGML_ASSERT(!"not enough space in the buffer");
-             return;
+             WSP_GGML_ABORT("not enough space in the buffer");
          }
      }
+
      struct free_block * block = &alloc->free_blocks[best_fit_block];
-     void * addr = block->addr;
-     block->addr = (char*)block->addr + size;
+     size_t offset = block->offset;
+     block->offset = offset + size;
      block->size -= size;
      if (block->size == 0) {
          // remove block if empty
@@ -186,52 +195,63 @@ void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tenso
          }
      }

-     tensor->data = addr;
+     AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset);

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
-     add_allocated_tensor(alloc, tensor);
-     size_t cur_max = (char*)addr - (char*)alloc->data + size;
+     add_allocated_tensor(alloc, offset, tensor);
+     size_t cur_max = offset + size;
      if (cur_max > alloc->max_size) {
-         printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
+         // sort allocated_tensors by offset
          for (int i = 0; i < 1024; i++) {
-             if (alloc->allocated_tensors[i]) {
-                 printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, wsp_ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
+             for (int j = i + 1; j < 1024; j++) {
+                 if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) {
+                     const struct wsp_ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
+                     size_t tmp_offset = alloc->allocated_tensors[i].offset;
+                     alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
+                     alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset;
+                     alloc->allocated_tensors[j].tensor = tmp_tensor;
+                     alloc->allocated_tensors[j].offset = tmp_offset;
+                 }
              }
          }
-         printf("\n");
+         WSP_GGML_LOG_DEBUG("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
+         for (int i = 0; i < 1024; i++) {
+             if (alloc->allocated_tensors[i].tensor) {
+                 WSP_GGML_LOG_DEBUG("%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
+                     alloc->allocated_tensors[i].offset,
+                     alloc->allocated_tensors[i].offset + wsp_ggml_nbytes(alloc->allocated_tensors[i].tensor),
+                     wsp_ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
+             }
+         }
+         WSP_GGML_LOG_DEBUG("\n");
      }
  #endif

-     alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
- }
+     alloc->max_size = MAX(alloc->max_size, offset + size);

- // this is a very naive implementation, but for our case the number of free blocks should be very small
- static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
-     void * ptr = tensor->data;
+     return offset;

-     if (wsp_ggml_allocr_is_own(alloc, tensor) == false) {
-         // the tensor was not allocated in this buffer
-         // this can happen because the graph allocator will try to free weights and other tensors from different buffers
-         // the easiest way to deal with this is just to ignore it
-         return;
-     }
+     WSP_GGML_UNUSED(tensor);
+ }

-     size_t size = wsp_ggml_allocr_get_alloc_size(alloc, tensor);
+ // this is a very naive implementation, but for our case the number of free blocks should be very small
+ static void wsp_ggml_dyn_tallocr_free_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct wsp_ggml_tensor * tensor) {
      size = aligned_offset(NULL, size, alloc->alignment);
-     AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);
+
+     AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks);

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
-     remove_allocated_tensor(alloc, tensor);
+     remove_allocated_tensor(alloc, offset, tensor);
  #endif

      // see if we can merge with an existing block
      for (int i = 0; i < alloc->n_free_blocks; i++) {
          struct free_block * block = &alloc->free_blocks[i];
          // check if ptr is at the end of the block
-         if ((char*)block->addr + block->size == ptr) {
+         if (block->offset + block->size == offset) {
              block->size += size;
              // check if we can merge with the next block
-             if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
+             if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) {
                  block->size += alloc->free_blocks[i+1].size;
                  alloc->n_free_blocks--;
                  for (int j = i+1; j < alloc->n_free_blocks; j++) {
@@ -241,11 +261,11 @@ static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct w
              return;
          }
          // check if ptr is at the beginning of the block
-         if ((char*)ptr + size == block->addr) {
-             block->addr = ptr;
+         if (offset + size == block->offset) {
+             block->offset = offset;
              block->size += size;
              // check if we can merge with the previous block
-             if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
+             if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) {
                  alloc->free_blocks[i-1].size += block->size;
                  alloc->n_free_blocks--;
                  for (int j = i; j < alloc->n_free_blocks; j++) {
@@ -259,7 +279,7 @@ static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct w
      WSP_GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
      // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
      int insert_pos = 0;
-     while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
+     while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) {
          insert_pos++;
      }
      // shift all blocks from insert_pos onward to make room for the new block
@@ -267,367 +287,744 @@ static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct w
          alloc->free_blocks[i] = alloc->free_blocks[i-1];
      }
      // insert the new block
-     alloc->free_blocks[insert_pos].addr = ptr;
+     alloc->free_blocks[insert_pos].offset = offset;
      alloc->free_blocks[insert_pos].size = size;
      alloc->n_free_blocks++;
- }

- void wsp_ggml_allocr_set_parse_seq(struct wsp_ggml_allocr * alloc, const int * list, int n) {
-     for (int i = 0; i < n; i++) {
-         alloc->parse_seq[i] = list[i];
-     }
-     alloc->parse_seq_len = n;
+     WSP_GGML_UNUSED(tensor);
  }

- void wsp_ggml_allocr_reset(struct wsp_ggml_allocr * alloc) {
+ static void wsp_ggml_dyn_tallocr_reset(struct wsp_ggml_dyn_tallocr * alloc) {
      alloc->n_free_blocks = 1;
-     size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
-     alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
-     alloc->free_blocks[0].size = alloc->size - align_offset;
+     alloc->free_blocks[0].offset = 0;
+     alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
+     alloc->max_size = 0;
+
+ #ifdef WSP_GGML_ALLOCATOR_DEBUG
+     for (int i = 0; i < 1024; i++) {
+         alloc->allocated_tensors[i].tensor = NULL;
+     }
+ #endif
  }

- struct wsp_ggml_allocr * wsp_ggml_allocr_new(void * data, size_t size, size_t alignment) {
-     struct wsp_ggml_allocr * alloc = (struct wsp_ggml_allocr *)malloc(sizeof(struct wsp_ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+ static struct wsp_ggml_dyn_tallocr * wsp_ggml_dyn_tallocr_new(size_t alignment) {
+     struct wsp_ggml_dyn_tallocr * alloc = (struct wsp_ggml_dyn_tallocr *)malloc(sizeof(struct wsp_ggml_dyn_tallocr));

-     *alloc = (struct wsp_ggml_allocr){
-         /*.data          = */ data,
-         /*.size          = */ size,
+     *alloc = (struct wsp_ggml_dyn_tallocr) {
          /*.alignment     = */ alignment,
          /*.n_free_blocks = */ 0,
          /*.free_blocks   = */ {{0}},
-         /*.hash_table    = */ {{0}},
          /*.max_size      = */ 0,
-         /*.measure       = */ false,
-         /*.parse_seq     = */ {0},
-         /*.parse_seq_len = */ 0,
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
-         /*.allocated_tensors = */ {0},
+         /*.allocated_tensors = */ {{0}},
  #endif
      };

-     wsp_ggml_allocr_reset(alloc);
+     wsp_ggml_dyn_tallocr_reset(alloc);

      return alloc;
  }

- // OS specific functions to allocate and free uncommitted virtual memory
- static void * alloc_vmem(size_t size) {
- #if defined(_WIN32)
-     return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- #elif defined(_POSIX_MAPPED_FILES)
-     void * ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
-     if (ptr == MAP_FAILED) {
-         return NULL;
-     }
-     return ptr;
- #else
-     // use a fixed address for other platforms
-     uintptr_t base_addr = (uintptr_t)-size - 0x100;
-     return (void *)base_addr;
- #endif
+ static void wsp_ggml_dyn_tallocr_free(struct wsp_ggml_dyn_tallocr * alloc) {
+     free(alloc);
  }

- static void free_vmem(void * base_addr, size_t size) {
- #if defined(_WIN32)
-     VirtualFree(base_addr, 0, MEM_RELEASE);
-     UNUSED(size);
- #elif defined(_POSIX_MAPPED_FILES)
-     munmap(base_addr, size);
- #else
-     // nothing to do
-     UNUSED(base_addr);
-     UNUSED(size);
- #endif
+ static size_t wsp_ggml_dyn_tallocr_max_size(struct wsp_ggml_dyn_tallocr * alloc) {
+     return alloc->max_size;
  }

- // allocate uncommitted virtual memory to measure the size of the graph
- static void alloc_measure_vmem(void ** base_addr, size_t * size) {
-     // 128GB for 64-bit, 1GB for 32-bit
-     *size = sizeof(void *) == 4 ? 1ULL<<30 : 1ULL<<37;
-     do {
-         *base_addr = alloc_vmem(*size);
-         if (*base_addr != NULL) {
-             AT_PRINTF("allocated %.2f GB of virtual memory for measure buffer at %p\n", *size / 1024.0 / 1024.0 / 1024.0, *base_addr);
-             return;
-         }
-         // try again with half the size
-         *size /= 2;
-     } while (*size > 0);

-     WSP_GGML_ASSERT(!"failed to allocate virtual memory for measure buffer");
- }
+ /////////////////////////////////////

- static void free_measure_vmem(void * base_addr, size_t size) {
-     free_vmem(base_addr, size);
- }
+ // graph allocator

- struct wsp_ggml_allocr * wsp_ggml_allocr_new_measure(size_t alignment) {
-     struct wsp_ggml_allocr * alloc = (struct wsp_ggml_allocr *)malloc(sizeof(struct wsp_ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+ struct hash_node {
+     int n_children;
+     int n_views;
+     int buffer_id;
+     size_t offset; // offset within the buffer
+     bool allocated;
+ };

-     void * base_addr;
-     size_t size;
+ struct tensor_alloc {
+     int buffer_id;
+     size_t offset;
+     size_t size_max; // 0 = pre-allocated, unused, or view
+ };

-     alloc_measure_vmem(&base_addr, &size);
+ struct leaf_alloc {
+     struct tensor_alloc leaf;
+ };

-     *alloc = (struct wsp_ggml_allocr){
-         /*.data          = */ base_addr,
-         /*.size          = */ size,
-         /*.alignment     = */ alignment,
-         /*.n_free_blocks = */ 0,
-         /*.free_blocks   = */ {{0}},
-         /*.hash_table    = */ {{0}},
-         /*.max_size      = */ 0,
-         /*.measure       = */ true,
-         /*.parse_seq     = */ {0},
-         /*.parse_seq_len = */ 0,
- #ifdef WSP_GGML_ALLOCATOR_DEBUG
-         /*.allocated_tensors = */ {0},
- #endif
-     };
+ struct node_alloc {
+     struct tensor_alloc dst;
+     struct tensor_alloc src[WSP_GGML_MAX_SRC];
+ };

-     wsp_ggml_allocr_reset(alloc);
+ struct wsp_ggml_gallocr {
+     wsp_ggml_backend_buffer_type_t * bufts; // [n_buffers]
+     wsp_ggml_backend_buffer_t * buffers; // [n_buffers]
+     struct wsp_ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
+     int n_buffers;

-     return alloc;
- }
+     struct wsp_ggml_hash_set hash_set;
+     struct hash_node * hash_values; // [hash_set.size]
+
+     struct node_alloc * node_allocs; // [n_nodes]
+     int n_nodes;
+
+     struct leaf_alloc * leaf_allocs; // [n_leafs]
+     int n_leafs;
+ };
+
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new_n(wsp_ggml_backend_buffer_type_t * bufts, int n_bufs) {
+     wsp_ggml_gallocr_t galloc = (wsp_ggml_gallocr_t)calloc(1, sizeof(struct wsp_ggml_gallocr));
+     WSP_GGML_ASSERT(galloc != NULL);
+
+     galloc->bufts = calloc(n_bufs, sizeof(wsp_ggml_backend_buffer_type_t));
+     WSP_GGML_ASSERT(galloc->bufts != NULL);
+
+     galloc->buffers = calloc(n_bufs, sizeof(wsp_ggml_backend_buffer_t));
+     WSP_GGML_ASSERT(galloc->buffers != NULL);
+
+     galloc->buf_tallocs = calloc(n_bufs, sizeof(struct wsp_ggml_dyn_tallocr *));
+     WSP_GGML_ASSERT(galloc->buf_tallocs != NULL);
+
+     for (int i = 0; i < n_bufs; i++) {
+         galloc->bufts[i] = bufts[i];
+         galloc->buffers[i] = NULL;
+
+         // check if the same buffer type is used multiple times and reuse the same allocator
+         for (int j = 0; j < i; j++) {
+             if (bufts[i] == bufts[j]) {
+                 galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
+                 break;
+             }
+         }

- void wsp_ggml_allocr_free(struct wsp_ggml_allocr * alloc) {
-     if (alloc->measure) {
-         free_measure_vmem(alloc->data, alloc->size);
+         if (galloc->buf_tallocs[i] == NULL) {
+             size_t alignment = wsp_ggml_backend_buft_get_alignment(bufts[i]);
+             galloc->buf_tallocs[i] = wsp_ggml_dyn_tallocr_new(alignment);
+         }
      }
-     free(alloc);
- }
+     galloc->n_buffers = n_bufs;

- bool wsp_ggml_allocr_is_measure(struct wsp_ggml_allocr * alloc) {
-     return alloc->measure;
+     return galloc;
  }

- //////////// compute graph allocator
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new(wsp_ggml_backend_buffer_type_t buft) {
+     return wsp_ggml_gallocr_new_n(&buft, 1);
+ }

- static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
-     if (a->type != b->type) {
-         return false;
+ void wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc) {
+     if (galloc == NULL) {
+         return;
      }
-     for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
-         if (a->ne[i] != b->ne[i]) {
-             return false;
+
+     for (int i = 0; i < galloc->n_buffers; i++) {
+         if (galloc->buffers != NULL) {
+             // skip if already freed
+             bool freed = false;
+             for (int j = 0; j < i; j++) {
+                 if (galloc->buffers[j] == galloc->buffers[i]) {
+                     freed = true;
+                     break;
+                 }
+             }
+             if (!freed) {
+                 wsp_ggml_backend_buffer_free(galloc->buffers[i]);
+             }
          }
-         if (a->nb[i] != b->nb[i]) {
-             return false;
+         if (galloc->buf_tallocs != NULL) {
+             // skip if already freed
+             bool freed = false;
+             for (int j = 0; j < i; j++) {
+                 if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+                     freed = true;
+                     break;
+                 }
+             }
+             if (!freed) {
+                 wsp_ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
+             }
          }
      }
-     return true;
+
+     wsp_ggml_hash_set_free(&galloc->hash_set);
+     free(galloc->hash_values);
+     free(galloc->bufts);
+     free(galloc->buffers);
+     free(galloc->buf_tallocs);
+     free(galloc->node_allocs);
+     free(galloc->leaf_allocs);
+     free(galloc);
  }

- static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
-     switch (op) {
-         case WSP_GGML_OP_SCALE:
-         case WSP_GGML_OP_DIAG_MASK_ZERO:
-         case WSP_GGML_OP_DIAG_MASK_INF:
-         case WSP_GGML_OP_ADD:
-         case WSP_GGML_OP_ADD1:
-         case WSP_GGML_OP_SUB:
-         case WSP_GGML_OP_MUL:
-         case WSP_GGML_OP_DIV:
-         case WSP_GGML_OP_SQR:
-         case WSP_GGML_OP_SQRT:
-         case WSP_GGML_OP_LOG:
-         case WSP_GGML_OP_UNARY:
-         case WSP_GGML_OP_ROPE:
-         case WSP_GGML_OP_RMS_NORM:
-         case WSP_GGML_OP_SOFT_MAX:
-         case WSP_GGML_OP_CONT:
-             return true;
+ typedef struct wsp_ggml_gallocr * wsp_ggml_gallocr_t;

-         default:
-             return false;
-     }
+ static struct hash_node * wsp_ggml_gallocr_hash_get(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+     size_t i = wsp_ggml_hash_find_or_insert(&galloc->hash_set, t);
+     return &galloc->hash_values[i];
  }

- static void allocate_node(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * node) {
-     struct hash_node * ht = alloc->hash_table;
-     if (node->data == NULL) {
-         if (wsp_ggml_is_view(node)) {
-             assert(node->view_src->data != NULL);
-             node->data = (char *)node->view_src->data + node->view_offs;
-         } else {
-             // see if we can reuse a parent's buffer (inplace)
-             if (wsp_ggml_op_can_inplace(node->op)) {
-                 for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
-                     struct wsp_ggml_tensor * parent = node->src[i];
-                     if (parent == NULL) {
-                         break;
-                     }
+ static bool wsp_ggml_gallocr_is_own(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+     return wsp_ggml_gallocr_hash_get(galloc, t)->allocated;
+ }

-                     // if the node's data is external, then we cannot re-use it
-                     if (wsp_ggml_allocr_is_own(alloc, parent) == false) {
-                         AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
-                         continue;
-                     }
+ static bool wsp_ggml_gallocr_is_allocated(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+     return t->data != NULL || wsp_ggml_gallocr_hash_get(galloc, t)->allocated;
+ }

-                     struct hash_node * p_hn = hash_get(ht, parent);
-                     if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && wsp_ggml_are_same_layout(node, parent)) {
-                         if (wsp_ggml_is_view(parent)) {
-                             struct wsp_ggml_tensor * view_src = parent->view_src;
-                             struct hash_node * view_src_hn = hash_get(ht, view_src);
-                             if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
-                                 // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
-                                 // the parent's data that it will need later (same layout requirement). the problem is that then
-                                 // we cannot free the tensor because the original address of the allocation is lost.
-                                 // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
-                                 // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
-                                 AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
-                                 node->data = parent->data;
-                                 return;
-                             }
-                         }
-                         else {
-                             AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
-                             node->data = parent->data;
+ static void wsp_ggml_gallocr_allocate_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, int buffer_id) {
+     WSP_GGML_ASSERT(buffer_id >= 0);
+     struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+
+     if (!wsp_ggml_gallocr_is_allocated(galloc, node) && !wsp_ggml_is_view(node)) {
+         hn->allocated = true;
+         assert(hn->offset == 0);
+
+         // try to reuse a parent's buffer (inplace)
+         if (wsp_ggml_op_can_inplace(node->op)) {
+             for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
+                 struct wsp_ggml_tensor * parent = node->src[i];
+                 if (parent == NULL) {
+                     continue;
+                 }
+
+                 // if the node's data is external, then we cannot re-use it
+                 if (!wsp_ggml_gallocr_is_own(galloc, parent)) {
+                     AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
+                     continue;
+                 }
+
+                 // outputs cannot be reused
+                 if (parent->flags & WSP_GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & WSP_GGML_TENSOR_FLAG_OUTPUT)) {
+                     AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
+                     continue;
+                 }
+
+                 if (!wsp_ggml_are_same_layout(node, parent)) {
+                     AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
+                     continue;
+                 }
+
+                 struct hash_node * p_hn = wsp_ggml_gallocr_hash_get(galloc, parent);
+                 if (p_hn->n_children == 1 && p_hn->n_views == 0) {
+                     if (wsp_ggml_is_view(parent)) {
+                         struct wsp_ggml_tensor * view_src = parent->view_src;
+                         struct hash_node * view_src_hn = wsp_ggml_gallocr_hash_get(galloc, view_src);
+                         if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
+                             AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
+                             assert(view_src_hn->offset == p_hn->offset);
+                             hn->buffer_id = p_hn->buffer_id;
+                             hn->offset = p_hn->offset;
+                             p_hn->allocated = false; // avoid freeing the parent
+                             view_src_hn->allocated = false;
                              return;
                          }
+                     } else {
+                         AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
+                         hn->buffer_id = p_hn->buffer_id;
+                         hn->offset = p_hn->offset;
+                         p_hn->allocated = false; // avoid freeing the parent
+                         return;
                      }
                  }
             }
-             wsp_ggml_allocr_alloc(alloc, node);
         }
+         // allocate tensor from the buffer
+         struct wsp_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+         wsp_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
+         size_t size = wsp_ggml_backend_buft_get_alloc_size(buft, node);
+         size_t offset = wsp_ggml_dyn_tallocr_alloc(alloc, size, node);
+         hn->buffer_id = buffer_id;
+         hn->offset = offset;
      }
  }

- static size_t wsp_ggml_allocr_alloc_graph_tensors_n(
-     struct wsp_ggml_allocr * alloc,
-     struct wsp_ggml_cgraph ** graphs, int n_graphs,
-     struct wsp_ggml_tensor *** inputs, struct wsp_ggml_tensor *** outputs) {
+ static void wsp_ggml_gallocr_free_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
+     // graph outputs are never freed
+     if (node->flags & WSP_GGML_TENSOR_FLAG_OUTPUT) {
+         AT_PRINTF("not freeing output %s\n", node->name);
+         return;
+     }

-     // reset hash table
-     struct hash_node * ht = alloc->hash_table;
-     memset(ht, 0, sizeof(struct hash_node) * WSP_GGML_GRAPH_HASHTABLE_SIZE);
+     struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+     size_t offset = hn->offset;
+     int buffer_id = hn->buffer_id;
+     struct wsp_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+     wsp_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
+     size_t size = wsp_ggml_backend_buft_get_alloc_size(buft, node);
+     wsp_ggml_dyn_tallocr_free_tensor(alloc, offset, size, node);
+     hn->allocated = false;
+ }
+
+ static int get_node_buffer_id(const int * node_buffer_ids, int i) {
+     return node_buffer_ids ? node_buffer_ids[i] : 0;
+ }
+
+ static void wsp_ggml_gallocr_alloc_graph_impl(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
+     // clear hash tables
+     wsp_ggml_hash_set_reset(&galloc->hash_set);
+     memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);
+
+     // allocate leafs
+     // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
+     for (int i = 0; i < graph->n_leafs; i++) {
+         struct wsp_ggml_tensor * leaf = graph->leafs[i];
+         wsp_ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
+     }

      // count number of children and views
-     for (int g = 0; g < n_graphs; g++) {
-         struct wsp_ggml_cgraph * gf = graphs[g];
-         for (int i = 0; i < gf->n_nodes; i++) {
-             struct wsp_ggml_tensor * node = gf->nodes[i];
-
-             if (wsp_ggml_is_view(node)) {
-                 struct wsp_ggml_tensor * view_src = node->view_src;
-                 hash_get(ht, view_src)->n_views += 1;
+     // allocate other graph inputs and leafs first to avoid overwriting them
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct wsp_ggml_tensor * node = graph->nodes[i];
+
+         // TODO: better way to add external dependencies
+         // WSP_GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
+         // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
+         // itself is never used and should not be considered a dependency
+         if (wsp_ggml_is_view(node) && node->op != WSP_GGML_OP_NONE) {
+             struct wsp_ggml_tensor * view_src = node->view_src;
+             wsp_ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
+         }
+
+         if (node->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
+             wsp_ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
+         }
+
+         for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+             struct wsp_ggml_tensor * src = node->src[j];
+             if (src == NULL) {
+                 continue;
              }

-             for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
-                 struct wsp_ggml_tensor * parent = node->src[j];
-                 if (parent == NULL) {
-                     break;
-                 }
-                 hash_get(ht, parent)->n_children += 1;
+             wsp_ggml_gallocr_hash_get(galloc, src)->n_children += 1;
+
+             // allocate explicit inputs
+             if (src->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
+                 wsp_ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
              }
          }
      }

      // allocate tensors
-     for (int g = 0; g < n_graphs; g++) {
-         struct wsp_ggml_cgraph * gf = graphs[g];
-         AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
-         // graph inputs are allocated first to ensure that they are not overwritten by each other
-         if (inputs != NULL && inputs[g] != NULL) {
-             for (int i = 0; inputs[g][i] != NULL; i++) {
-                 struct wsp_ggml_tensor * input = inputs[g][i];
-                 AT_PRINTF("input: %s\n", input->name);
-                 allocate_node(alloc, input);
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct wsp_ggml_tensor * node = graph->nodes[i];
+         int buffer_id = get_node_buffer_id(node_buffer_ids, i);
+
+         // allocate parents (only leafs need to be allocated at this point)
+         for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+             struct wsp_ggml_tensor * parent = node->src[j];
+             if (parent == NULL) {
+                 continue;
             }
+             wsp_ggml_gallocr_allocate_node(galloc, parent, buffer_id);
         }
-         // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
-         int last_barrier_pos = 0;
-         int n_nodes = alloc->parse_seq_len ? alloc->parse_seq_len : gf->n_nodes;
-
-         for (int ind = 0; ind < n_nodes; ind++) {
-             // allocate a node if there is no parse_seq or this is not a barrier
-             if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] != -1) {
-                 int i = alloc->parse_seq_len ? alloc->parse_seq[ind] : ind;
-                 struct wsp_ggml_tensor * node = gf->nodes[i];
-
-                 // allocate parents (leafs)
-                 for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
-                     struct wsp_ggml_tensor * parent = node->src[j];
-                     if (parent == NULL) {
-                         break;
-                     }
-                     allocate_node(alloc, parent);
-                 }

-                 // allocate node
-                 allocate_node(alloc, node);
+         // allocate node
+         wsp_ggml_gallocr_allocate_node(galloc, node, buffer_id);

-                 AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_name(node->op), node->name);
-                 for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
-                     struct wsp_ggml_tensor * parent = node->src[j];
-                     if (parent == NULL) {
-                         break;
-                     }
-                     AT_PRINTF("%s", parent->name);
-                     if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
-                         AT_PRINTF(", ");
-                     }
-                 }
-                 AT_PRINTF("\n");
+         AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_desc(node), node->name);
+         for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+             struct wsp_ggml_tensor * parent = node->src[j];
+             if (parent == NULL) {
+                 continue;
+             }
+             AT_PRINTF("%s", parent->name);
+             if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
+                 AT_PRINTF(", ");
             }
+         }
+         AT_PRINTF("\n");

-                 // update parents
-                 // update immediately if there is no parse_seq
-                 // update only at barriers if there is parse_seq
-                 if ((alloc->parse_seq_len == 0) || alloc->parse_seq[ind] == -1) {
-                     int update_start = alloc->parse_seq_len ? last_barrier_pos : ind;
-                     int update_end   = alloc->parse_seq_len ? ind              : ind + 1;
-                     for (int i = update_start; i < update_end; i++) {
-                         int node_i = alloc->parse_seq_len ? alloc->parse_seq[i] : i;
-                         struct wsp_ggml_tensor * node = gf->nodes[node_i];
-
-                         for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
-                             struct wsp_ggml_tensor * parent = node->src[j];
-                             if (parent == NULL) {
-                                 break;
-                             }
-                             struct hash_node * p_hn = hash_get(ht, parent);
-                             p_hn->n_children -= 1;
-
-                             //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
-
-                             if (p_hn->n_children == 0 && p_hn->n_views == 0) {
-                                 if (wsp_ggml_is_view(parent)) {
-                                     struct wsp_ggml_tensor * view_src = parent->view_src;
-                                     struct hash_node * view_src_hn = hash_get(ht, view_src);
-                                     view_src_hn->n_views -= 1;
-                                     AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
-                                     if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
-                                         wsp_ggml_allocr_free_tensor(alloc, view_src);
-                                     }
-                                 }
-                                 else {
-                                     if (parent->data != node->data) {
-                                         wsp_ggml_allocr_free_tensor(alloc, parent);
-                                     }
-                                 }
-                             }
+         // update parents
+         for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+             struct wsp_ggml_tensor * parent = node->src[j];
+             if (parent == NULL) {
+                 continue;
+             }
+             struct hash_node * p_hn = wsp_ggml_gallocr_hash_get(galloc, parent);
+             p_hn->n_children -= 1;
+
+             AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
+                 parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);
+
+             if (p_hn->n_children == 0 && p_hn->n_views == 0) {
+                 if (wsp_ggml_is_view(parent)) {
+                     struct wsp_ggml_tensor * view_src = parent->view_src;
+                     struct hash_node * view_src_hn = wsp_ggml_gallocr_hash_get(galloc, view_src);
+                     view_src_hn->n_views -= 1;
+                     AT_PRINTF("view_src %s: %d children, %d views\n",
+                         view_src->name, view_src_hn->n_children, view_src_hn->n_views);
+                     if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
+                         wsp_ggml_gallocr_free_node(galloc, view_src);
                      }
                  }
-                 AT_PRINTF("\n");
-                 if (alloc->parse_seq_len) {
-                     last_barrier_pos = ind + 1;
+                 else if (p_hn->allocated) {
+                     wsp_ggml_gallocr_free_node(galloc, parent);
                  }
              }
+             AT_PRINTF("\n");
+         }
+     }
+ }
+
+ bool wsp_ggml_gallocr_reserve_n(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
+     size_t min_hash_size = graph->n_nodes + graph->n_leafs;
+     // add 25% margin to avoid hash collisions
+     min_hash_size += min_hash_size / 4;
+
+     // initialize hash table
+     if (galloc->hash_set.size < min_hash_size) {
+         wsp_ggml_hash_set_free(&galloc->hash_set);
+         galloc->hash_set = wsp_ggml_hash_set_new(min_hash_size);
+         WSP_GGML_ASSERT(galloc->hash_set.keys != NULL);
+
+         free(galloc->hash_values);
+         galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
+         WSP_GGML_ASSERT(galloc->hash_values != NULL);
+     }
+
+     // reset allocators
+     for (int i = 0; i < galloc->n_buffers; i++) {
+         wsp_ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
+     }
+
+     // allocate in hash table
+     wsp_ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);
+
+     // set the node_allocs from the hash table
+     if (galloc->n_nodes < graph->n_nodes) {
+         free(galloc->node_allocs);
+         galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
+         WSP_GGML_ASSERT(galloc->node_allocs != NULL);
+     }
+     galloc->n_nodes = graph->n_nodes;
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct wsp_ggml_tensor * node = graph->nodes[i];
+         struct node_alloc * node_alloc = &galloc->node_allocs[i];
+         if (node->view_src || node->data) {
+             node_alloc->dst.buffer_id = -1;
+             node_alloc->dst.offset = SIZE_MAX;
+             node_alloc->dst.size_max = 0;
+         } else {
+             struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+             node_alloc->dst.buffer_id = hn->buffer_id;
+             node_alloc->dst.offset = hn->offset;
+             node_alloc->dst.size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
          }
-         // free graph outputs here that wouldn't be freed otherwise because they have no children
-         if (outputs != NULL && outputs[g] != NULL) {
-             for (int i = 0; outputs[g][i] != NULL; i++) {
-                 struct wsp_ggml_tensor * output = outputs[g][i];
-                 AT_PRINTF("output: %s\n", output->name);
-                 wsp_ggml_allocr_free_tensor(alloc, output);
+         for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+             struct wsp_ggml_tensor * src = node->src[j];
+             if (!src || src->view_src || src->data) {
+                 node_alloc->src[j].buffer_id = -1;
+                 node_alloc->src[j].offset = SIZE_MAX;
+                 node_alloc->src[j].size_max = 0;
+             } else {
+                 struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, src);
+                 node_alloc->src[j].buffer_id = hn->buffer_id;
+                 node_alloc->src[j].offset = hn->offset;
+                 node_alloc->src[j].size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
              }
         }
      }
+     if (galloc->n_leafs < graph->n_leafs) {
+         free(galloc->leaf_allocs);
+         galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
+         WSP_GGML_ASSERT(galloc->leaf_allocs != NULL);
+     }
+     galloc->n_leafs = graph->n_leafs;
+     for (int i = 0; i < graph->n_leafs; i++) {
+         struct wsp_ggml_tensor * leaf = graph->leafs[i];
+         struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, leaf);
+         if (leaf->view_src || leaf->data) {
+             galloc->leaf_allocs[i].leaf.buffer_id = -1;
+             galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
+             galloc->leaf_allocs[i].leaf.size_max = 0;
+         } else {
+             galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
+             galloc->leaf_allocs[i].leaf.offset = hn->offset;
+             galloc->leaf_allocs[i].leaf.size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
+         }
+     }

-     return alloc->max_size;
+     // reallocate buffers if needed
+     for (int i = 0; i < galloc->n_buffers; i++) {
+         // if the buffer type is used multiple times, we reuse the same buffer
+         for (int j = 0; j < i; j++) {
+             if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+                 galloc->buffers[i] = galloc->buffers[j];
+                 break;
+             }
+         }
+
+         size_t cur_size = galloc->buffers[i] ? wsp_ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
+         size_t new_size = wsp_ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);
+
+         // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
+         if (new_size > cur_size || galloc->buffers[i] == NULL) {
+ #ifndef NDEBUG
+             WSP_GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, wsp_ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
+ #endif
+
+             wsp_ggml_backend_buffer_free(galloc->buffers[i]);
+             galloc->buffers[i] = wsp_ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
+             if (galloc->buffers[i] == NULL) {
+                 WSP_GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, wsp_ggml_backend_buft_name(galloc->bufts[i]), new_size);
+                 return false;
+             }
+             wsp_ggml_backend_buffer_set_usage(galloc->buffers[i], WSP_GGML_BACKEND_BUFFER_USAGE_COMPUTE);
+         }
+     }
+
+     return true;
+ }
+
+ bool wsp_ggml_gallocr_reserve(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph *graph) {
+     return wsp_ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
+ }
+
+ static void wsp_ggml_gallocr_init_tensor(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
+     int buffer_id = tensor_alloc->buffer_id;
+     assert(tensor->data || tensor->view_src || wsp_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
+
+     if (tensor->view_src != NULL) {
+         if (tensor->buffer == NULL) {
+             assert(tensor_alloc->offset == SIZE_MAX);
+             if (tensor->view_src->buffer == NULL) {
+                 // this tensor was allocated without ggml-backend
+                 return;
+             }
+             wsp_ggml_backend_view_init(tensor);
+         }
+     } else {
+         if (tensor->data == NULL) {
+             assert(tensor_alloc->offset != SIZE_MAX);
+             assert(wsp_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
+             void * base = wsp_ggml_backend_buffer_get_base(galloc->buffers[buffer_id]);
+             void * addr = (char *)base + tensor_alloc->offset;
+             wsp_ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr);
+         } else {
+             if (tensor->buffer == NULL) {
+                 // this tensor was allocated without ggml-backend
+                 return;
+             }
+         }
+     }
+ }
+
+ static bool wsp_ggml_gallocr_node_needs_realloc(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, struct tensor_alloc * talloc) {
+     size_t node_size = 0;
+     if (!node->data && !node->view_src) {
+         WSP_GGML_ASSERT(talloc->buffer_id >= 0); // prevent segfault when misusing the API
+         node_size = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
+     }
+     return talloc->size_max >= node_size;
+ }
+
+ static bool wsp_ggml_gallocr_needs_realloc(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph) {
+     if (galloc->n_nodes != graph->n_nodes) {
+ #ifndef NDEBUG
+         WSP_GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
+ #endif
+         return true;
+     }
+
+     if (galloc->n_leafs != graph->n_leafs) {
+ #ifndef NDEBUG
+         WSP_GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
+ #endif
+         return true;
+     }
+
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct wsp_ggml_tensor * node = graph->nodes[i];
+         struct node_alloc * node_alloc = &galloc->node_allocs[i];
+
+         if (!wsp_ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
+ #ifndef NDEBUG
+             WSP_GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
+ #endif
+             return true;
+         }
+
+         for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+             struct wsp_ggml_tensor * src = node->src[j];
+             if (src == NULL) {
+                 continue;
+             }
+             if (!wsp_ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
+ #ifndef NDEBUG
+                 WSP_GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
+ #endif
+                 return true;
+             }
+         }
+     }
+
+     return false;
+ }
+
+ bool wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph) {
+     if (wsp_ggml_gallocr_needs_realloc(galloc, graph)) {
+         if (galloc->n_buffers == 1) {
+ #ifndef NDEBUG
+             WSP_GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
+ #endif
+             if (!wsp_ggml_gallocr_reserve(galloc, graph)) {
+                 return false;
+             }
+         } else {
+ #ifndef NDEBUG
+             WSP_GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
+ #endif
+             return false;
+         }
+     }
+
+     // reset buffers
+     for (int i = 0; i < galloc->n_buffers; i++) {
+         if (galloc->buffers[i] != NULL) {
+             wsp_ggml_backend_buffer_reset(galloc->buffers[i]);
+         }
+     }
+
+     // allocate the graph tensors from the previous assignments
+     // leafs
+     for (int i = 0; i < graph->n_leafs; i++) {
+         struct wsp_ggml_tensor * leaf = graph->leafs[i];
+         struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
+         wsp_ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
+     }
+     // nodes
+     for (int i = 0; i < graph->n_nodes; i++) {
+         struct wsp_ggml_tensor * node = graph->nodes[i];
+         struct node_alloc * node_alloc = &galloc->node_allocs[i];
+         for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+             struct wsp_ggml_tensor * src = node->src[j];
+             if (src == NULL) {
+                 continue;
+             }
+             wsp_ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
+         }
+         wsp_ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
+     }
+
+     return true;
+ }
+
+ size_t wsp_ggml_gallocr_get_buffer_size(wsp_ggml_gallocr_t galloc, int buffer_id) {
+     WSP_GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);
+
+     if (galloc->buffers[buffer_id] == NULL) {
+         return 0;
+     }
+
+     for (int i = 0; i < buffer_id; i++) {
+         if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
+             // this buffer is the same as a previous one due to the same buffer type being used multiple times
+             // only return the buffer size the first time it appears to avoid double counting
+             return 0;
+         }
+     }
+
+     return wsp_ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
+ }
+
+ // utils
+
+ static bool alloc_tensor_range(struct wsp_ggml_context * ctx,
+         struct wsp_ggml_tensor * first, struct wsp_ggml_tensor * last,
+         wsp_ggml_backend_buffer_type_t buft, size_t size,
+         wsp_ggml_backend_buffer_t ** buffers, size_t * n_buffers) {
+     wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_buft_alloc_buffer(buft, size);
+     if (buffer == NULL) {
+ #ifndef NDEBUG
+         WSP_GGML_LOG_DEBUG("%s: failed to allocate %s buffer of size %zu\n", __func__, wsp_ggml_backend_buft_name(buft), size);
+ #endif
+         for (size_t i = 0; i < *n_buffers; i++) {
+             wsp_ggml_backend_buffer_free((*buffers)[i]);
+         }
+         free(*buffers);
+         return false;
+     }
+
+     struct wsp_ggml_tallocr tallocr = wsp_ggml_tallocr_new(buffer);
+
+     for (struct wsp_ggml_tensor * t = first; t != last; t = wsp_ggml_get_next_tensor(ctx, t)) {
+         if (t->data == NULL) {
+             if (t->view_src == NULL) {
+                 wsp_ggml_tallocr_alloc(&tallocr, t);
+             } else if (t->buffer == NULL) {
+                 wsp_ggml_backend_view_init(t);
+             }
+         } else {
+             if (t->view_src != NULL && t->buffer == NULL) {
+                 // view of a pre-allocated tensor
+                 wsp_ggml_backend_view_init(t);
+             }
+         }
+     }
+
+     *buffers = realloc(*buffers, sizeof(wsp_ggml_backend_buffer_t) * (*n_buffers + 1));
+     (*buffers)[(*n_buffers)++] = buffer;
+
+     return true;
+ }
+
+ wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, wsp_ggml_backend_buffer_type_t buft) {
+     WSP_GGML_ASSERT(wsp_ggml_get_no_alloc(ctx) == true);
+
+     size_t alignment = wsp_ggml_backend_buft_get_alignment(buft);
+     size_t max_size = wsp_ggml_backend_buft_get_max_size(buft);
+
+     wsp_ggml_backend_buffer_t * buffers = NULL;
+     size_t n_buffers = 0;
+
+     size_t cur_buf_size = 0;
+     struct wsp_ggml_tensor * first = wsp_ggml_get_first_tensor(ctx);
+     for (struct wsp_ggml_tensor * t = first; t != NULL; t = wsp_ggml_get_next_tensor(ctx, t)) {
+         size_t this_size = 0;
+         if (t->data == NULL && t->view_src == NULL) {
+             this_size = WSP_GGML_PAD(wsp_ggml_backend_buft_get_alloc_size(buft, t), alignment);
+         }
+
+         if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) {
+             // allocate tensors in the current buffer
+             if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
+                 return NULL;
+             }
+             first = t;
+             cur_buf_size = this_size;
+         } else {
+             cur_buf_size += this_size;
+         }
+     }
+
+     // allocate remaining tensors
+     if (cur_buf_size > 0) {
+         if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
+             return NULL;
+         }
+     }
+
+     if (n_buffers == 0) {
+ #ifndef NDEBUG
+         WSP_GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
+ #endif
+         return NULL;
+     }
+
+     wsp_ggml_backend_buffer_t buffer;
+     if (n_buffers == 1) {
+         buffer = buffers[0];
+     } else {
+         buffer = wsp_ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
+     }
+     free(buffers);
+     return buffer;
  }

- size_t wsp_ggml_allocr_alloc_graph(struct wsp_ggml_allocr * alloc, struct wsp_ggml_cgraph * graph) {
-     return wsp_ggml_allocr_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
+ wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_ctx_tensors(struct wsp_ggml_context * ctx, wsp_ggml_backend_t backend) {
+     return wsp_ggml_backend_alloc_ctx_tensors_from_buft(ctx, wsp_ggml_backend_get_default_buffer_type(backend));