whisper.rn 0.4.0-rc.1 → 0.4.0-rc.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/README.md +6 -6
  2. package/android/build.gradle +4 -0
  3. package/android/src/main/CMakeLists.txt +14 -0
  4. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -92
  5. package/android/src/main/java/com/rnwhisper/RNWhisper.java +86 -40
  6. package/android/src/main/java/com/rnwhisper/WhisperContext.java +85 -131
  7. package/android/src/main/jni-utils.h +76 -0
  8. package/android/src/main/jni.cpp +226 -109
  9. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  10. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  11. package/cpp/README.md +1 -1
  12. package/cpp/coreml/whisper-encoder-impl.h +1 -1
  13. package/cpp/coreml/whisper-encoder.h +4 -0
  14. package/cpp/coreml/whisper-encoder.mm +5 -3
  15. package/cpp/ggml-aarch64.c +129 -0
  16. package/cpp/ggml-aarch64.h +19 -0
  17. package/cpp/ggml-alloc.c +805 -400
  18. package/cpp/ggml-alloc.h +60 -10
  19. package/cpp/ggml-backend-impl.h +216 -0
  20. package/cpp/ggml-backend-reg.cpp +204 -0
  21. package/cpp/ggml-backend.cpp +1996 -0
  22. package/cpp/ggml-backend.cpp.rej +12 -0
  23. package/cpp/ggml-backend.h +336 -0
  24. package/cpp/ggml-common.h +1853 -0
  25. package/cpp/ggml-cpp.h +38 -0
  26. package/cpp/ggml-cpu-aarch64.c +3560 -0
  27. package/cpp/ggml-cpu-aarch64.h +30 -0
  28. package/cpp/ggml-cpu-impl.h +371 -0
  29. package/cpp/ggml-cpu-quants.c +10822 -0
  30. package/cpp/ggml-cpu-quants.h +63 -0
  31. package/cpp/ggml-cpu.c +13970 -0
  32. package/cpp/ggml-cpu.cpp +663 -0
  33. package/cpp/ggml-cpu.h +177 -0
  34. package/cpp/ggml-impl.h +551 -0
  35. package/cpp/ggml-metal-impl.h +249 -0
  36. package/cpp/ggml-metal.h +24 -43
  37. package/cpp/ggml-metal.m +4190 -1075
  38. package/cpp/ggml-quants.c +5247 -0
  39. package/cpp/ggml-quants.h +100 -0
  40. package/cpp/ggml-threading.cpp +12 -0
  41. package/cpp/ggml-threading.h +12 -0
  42. package/cpp/ggml-whisper.metallib +0 -0
  43. package/cpp/ggml.c +5474 -18763
  44. package/cpp/ggml.h +833 -628
  45. package/cpp/rn-audioutils.cpp +68 -0
  46. package/cpp/rn-audioutils.h +14 -0
  47. package/cpp/rn-whisper-log.h +11 -0
  48. package/cpp/rn-whisper.cpp +221 -52
  49. package/cpp/rn-whisper.h +50 -15
  50. package/cpp/whisper.cpp +2872 -1371
  51. package/cpp/whisper.h +170 -41
  52. package/ios/RNWhisper.mm +139 -46
  53. package/ios/RNWhisperAudioUtils.h +1 -2
  54. package/ios/RNWhisperAudioUtils.m +18 -67
  55. package/ios/RNWhisperContext.h +11 -8
  56. package/ios/RNWhisperContext.mm +195 -150
  57. package/jest/mock.js +15 -2
  58. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  59. package/lib/commonjs/index.js +76 -28
  60. package/lib/commonjs/index.js.map +1 -1
  61. package/lib/commonjs/version.json +1 -1
  62. package/lib/module/NativeRNWhisper.js.map +1 -1
  63. package/lib/module/index.js +76 -28
  64. package/lib/module/index.js.map +1 -1
  65. package/lib/module/version.json +1 -1
  66. package/lib/typescript/NativeRNWhisper.d.ts +13 -4
  67. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  68. package/lib/typescript/index.d.ts +37 -5
  69. package/lib/typescript/index.d.ts.map +1 -1
  70. package/package.json +9 -7
  71. package/src/NativeRNWhisper.ts +20 -4
  72. package/src/index.ts +98 -42
  73. package/src/version.json +1 -1
  74. package/whisper-rn.podspec +11 -18
  75. package/cpp/ggml-metal.metal +0 -2353
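For context on the largest change in this release, ggml-alloc.c replaces the old wsp_ggml_allocr API with a fixed-buffer tensor allocator (wsp_ggml_tallocr), an offset-based dynamic allocator (wsp_ggml_dyn_tallocr), and a graph allocator (wsp_ggml_gallocr) that plans buffer offsets per backend buffer type. The following is a minimal usage sketch of that new graph-allocator API as it appears in the ggml-alloc.c diff below; `backend` and `graph` are assumed placeholders for an already-initialized backend and a built compute graph, not code shipped in this package.

// sketch: plan and allocate compute buffers for a graph with the new gallocr API
wsp_ggml_backend_buffer_type_t buft = wsp_ggml_backend_get_default_buffer_type(backend);
wsp_ggml_gallocr_t galloc = wsp_ggml_gallocr_new(buft);

// reserve once with a worst-case graph, then allocate before each evaluation;
// with a single buffer type, wsp_ggml_gallocr_alloc_graph can also re-reserve automatically
if (!wsp_ggml_gallocr_reserve(galloc, graph))     { /* handle allocation failure */ }
if (!wsp_ggml_gallocr_alloc_graph(galloc, graph)) { /* handle allocation failure */ }

size_t compute_buf_size = wsp_ggml_gallocr_get_buffer_size(galloc, 0);

wsp_ggml_gallocr_free(galloc);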
package/cpp/ggml-alloc.c CHANGED
@@ -1,146 +1,151 @@
  #include "ggml-alloc.h"
+ #include "ggml-backend-impl.h"
  #include "ggml.h"
+ #include "ggml-impl.h"
  #include <assert.h>
+ #include <limits.h>
  #include <stdarg.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
 
- #ifdef __has_include
- #if __has_include(<unistd.h>)
- #include <unistd.h>
- #if defined(_POSIX_MAPPED_FILES)
- #include <sys/types.h>
- #include <sys/mman.h>
- #endif
- #endif
- #endif
-
- #if defined(_WIN32)
- #define WIN32_LEAN_AND_MEAN
- #ifndef NOMINMAX
- #define NOMINMAX
- #endif
- #include <windows.h>
- #include <memoryapi.h>
- #endif
-
-
- #define UNUSED(x) (void)(x)
  #define MAX(a, b) ((a) > (b) ? (a) : (b))
- #define WSP_GGML_MAX_CONCUR (2*WSP_GGML_MAX_NODES)
+ #define MAX_FREE_BLOCKS 256
 
  //#define WSP_GGML_ALLOCATOR_DEBUG
 
- //#define AT_PRINTF printf
- #define AT_PRINTF(...) ((void)0)
+ //#define AT_PRINTF(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
+ #define AT_PRINTF(...)
 
- struct hash_node {
- struct wsp_ggml_tensor * t;
- int n_children;
- int n_views;
- };
 
- static size_t hash(void * p) {
- return (size_t)p % WSP_GGML_GRAPH_HASHTABLE_SIZE;
+ static bool wsp_ggml_is_view(const struct wsp_ggml_tensor * t) {
+ return t->view_src != NULL;
  }
 
- static struct hash_node * hash_get(struct hash_node hash_table[], struct wsp_ggml_tensor * t) {
- size_t h = hash(t);
-
- // linear probing
- size_t i = h;
- while (hash_table[i].t != NULL) {
- if (hash_table[i].t == t) {
- return &hash_table[i];
+ static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
+ if (a->type != b->type) {
+ return false;
+ }
+ for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
+ if (a->ne[i] != b->ne[i]) {
+ return false;
  }
- i = (i + 1) % WSP_GGML_GRAPH_HASHTABLE_SIZE;
- if (i == h) {
- // hash table is full
- WSP_GGML_ASSERT(false);
+ if (a->nb[i] != b->nb[i]) {
+ return false;
  }
  }
+ return true;
+ }
 
- hash_table[i].t = t;
- return &hash_table[i];
+ static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
+ switch (op) {
+ case WSP_GGML_OP_SCALE:
+ case WSP_GGML_OP_DIAG_MASK_ZERO:
+ case WSP_GGML_OP_DIAG_MASK_INF:
+ case WSP_GGML_OP_ADD:
+ case WSP_GGML_OP_ADD1:
+ case WSP_GGML_OP_SUB:
+ case WSP_GGML_OP_MUL:
+ case WSP_GGML_OP_DIV:
+ case WSP_GGML_OP_SQR:
+ case WSP_GGML_OP_SQRT:
+ case WSP_GGML_OP_LOG:
+ case WSP_GGML_OP_UNARY:
+ case WSP_GGML_OP_ROPE:
+ case WSP_GGML_OP_RMS_NORM:
+ case WSP_GGML_OP_SOFT_MAX:
+ return true;
+
+ default:
+ return false;
+ }
  }
 
- // TODO: WSP_GGML_PAD ?
  static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
  assert(alignment && !(alignment & (alignment - 1))); // power of 2
  size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
  return offset + align;
  }
 
+ // tallocr
+
+ struct wsp_ggml_tallocr wsp_ggml_tallocr_new(wsp_ggml_backend_buffer_t buffer) {
+ void * base = wsp_ggml_backend_buffer_get_base(buffer);
+ size_t align = wsp_ggml_backend_buffer_get_alignment(buffer);
+
+ assert(align && !(align & (align - 1))); // power of 2
+
+ struct wsp_ggml_tallocr talloc = (struct wsp_ggml_tallocr) {
+ /*.buffer    = */ buffer,
+ /*.base      = */ base,
+ /*.alignment = */ align,
+ /*.offset    = */ aligned_offset(base, 0, align),
+ };
+ return talloc;
+ }
+
+ void wsp_ggml_tallocr_alloc(struct wsp_ggml_tallocr * talloc, struct wsp_ggml_tensor * tensor) {
+ size_t size = wsp_ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
+ size = WSP_GGML_PAD(size, talloc->alignment);
+
+ if (talloc->offset + size > wsp_ggml_backend_buffer_get_size(talloc->buffer)) {
+ WSP_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
+ __func__, tensor->name, size, wsp_ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
+ WSP_GGML_ABORT("not enough space in the buffer");
+ }
+
+ void * addr = (char *)wsp_ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
+ talloc->offset += size;
+
+ assert(((uintptr_t)addr % talloc->alignment) == 0);
+
+ wsp_ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
+ }
+
+ // dynamic tensor allocator
+
  struct free_block {
- void * addr;
+ size_t offset;
  size_t size;
  };
 
- #define MAX_FREE_BLOCKS 128
-
- struct wsp_ggml_allocr {
- void * data;
- size_t size;
+ struct wsp_ggml_dyn_tallocr {
  size_t alignment;
  int n_free_blocks;
  struct free_block free_blocks[MAX_FREE_BLOCKS];
- struct hash_node hash_table[WSP_GGML_GRAPH_HASHTABLE_SIZE];
  size_t max_size;
- bool measure;
- int parse_seq[WSP_GGML_MAX_CONCUR];
- int parse_seq_len;
 
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- struct wsp_ggml_tensor * allocated_tensors[1024];
+ struct {
+ const struct wsp_ggml_tensor * tensor;
+ size_t offset;
+ } allocated_tensors[1024];
  #endif
  };
 
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- static void add_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+ static void add_allocated_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, const struct wsp_ggml_tensor * tensor) {
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i] == NULL) {
- alloc->allocated_tensors[i] = tensor;
+ if (alloc->allocated_tensors[i].tensor == NULL) {
+ alloc->allocated_tensors[i].tensor = tensor;
+ alloc->allocated_tensors[i].offset = offset;
  return;
  }
  }
- WSP_GGML_ASSERT(!"out of allocated_tensors");
+ WSP_GGML_ABORT("out of allocated_tensors");
  }
- static void remove_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+ static void remove_allocated_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, const struct wsp_ggml_tensor * tensor) {
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i] == tensor ||
- (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
- alloc->allocated_tensors[i] = NULL;
+ if (alloc->allocated_tensors[i].offset == offset) {
+ alloc->allocated_tensors[i].tensor = NULL;
  return;
  }
  }
- printf("tried to free tensor %s not found\n", tensor->name);
- WSP_GGML_ASSERT(!"tensor not found");
+ WSP_GGML_ABORT("tried to free tensor %s not found\n", tensor->name);
  }
  #endif
 
- static size_t wsp_ggml_allocr_get_alloc_size(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
- return wsp_ggml_nbytes(tensor);
-
- UNUSED(alloc);
- }
-
- // check if a tensor is allocated by this buffer
- static bool wsp_ggml_allocr_is_own(struct wsp_ggml_allocr * alloc, const struct wsp_ggml_tensor * tensor) {
- void * ptr = tensor->data;
- return ptr >= alloc->data && (char *)ptr < (char *)alloc->data + alloc->max_size;
- }
-
- static bool wsp_ggml_is_view(struct wsp_ggml_tensor * t) {
- return t->view_src != NULL;
- }
-
- void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
- #ifdef WSP_GGML_ALLOCATOR_DEBUG
- WSP_GGML_ASSERT(!wsp_ggml_is_view(tensor)); // views generally get data pointer from one of their sources
- WSP_GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
- #endif
- size_t size = wsp_ggml_allocr_get_alloc_size(alloc, tensor);
+ static size_t wsp_ggml_dyn_tallocr_alloc(struct wsp_ggml_dyn_tallocr * alloc, size_t size, const struct wsp_ggml_tensor * tensor) {
  size = aligned_offset(NULL, size, alloc->alignment);
 
  AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
@@ -159,8 +164,6 @@ void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tenso
  }
  }
 
- AT_PRINTF("block %d\n", best_fit_block);
-
  if (best_fit_block == -1) {
  // the last block is our last resort
  struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
@@ -168,15 +171,16 @@ void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tenso
  if (block->size >= size) {
  best_fit_block = alloc->n_free_blocks - 1;
  } else {
- fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
+ // this should never happen
+ WSP_GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
  __func__, size, max_avail);
- WSP_GGML_ASSERT(!"not enough space in the buffer");
- return;
+ WSP_GGML_ABORT("not enough space in the buffer");
  }
  }
+
  struct free_block * block = &alloc->free_blocks[best_fit_block];
- void * addr = block->addr;
- block->addr = (char*)block->addr + size;
+ size_t offset = block->offset;
+ block->offset = offset + size;
  block->size -= size;
  if (block->size == 0) {
  // remove block if empty
@@ -186,52 +190,63 @@ void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tenso
  }
  }
 
- tensor->data = addr;
+ AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset);
 
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- add_allocated_tensor(alloc, tensor);
- size_t cur_max = (char*)addr - (char*)alloc->data + size;
+ add_allocated_tensor(alloc, offset, tensor);
+ size_t cur_max = offset + size;
  if (cur_max > alloc->max_size) {
- printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
+ // sort allocated_tensors by offset
+ for (int i = 0; i < 1024; i++) {
+ for (int j = i + 1; j < 1024; j++) {
+ if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) {
+ const struct wsp_ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
+ size_t tmp_offset = alloc->allocated_tensors[i].offset;
+ alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
+ alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset;
+ alloc->allocated_tensors[j].tensor = tmp_tensor;
+ alloc->allocated_tensors[j].offset = tmp_offset;
+ }
+ }
+ }
+ WSP_GGML_LOG_DEBUG("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
  for (int i = 0; i < 1024; i++) {
- if (alloc->allocated_tensors[i]) {
- printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, wsp_ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
+ if (alloc->allocated_tensors[i].tensor) {
+ WSP_GGML_LOG_DEBUG("%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
+ alloc->allocated_tensors[i].offset,
+ alloc->allocated_tensors[i].offset + wsp_ggml_nbytes(alloc->allocated_tensors[i].tensor),
+ wsp_ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
  }
  }
- printf("\n");
+ WSP_GGML_LOG_DEBUG("\n");
  }
  #endif
 
- alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
- }
+ alloc->max_size = MAX(alloc->max_size, offset + size);
 
- // this is a very naive implementation, but for our case the number of free blocks should be very small
- static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
- void * ptr = tensor->data;
+ return offset;
 
- if (wsp_ggml_allocr_is_own(alloc, tensor) == false) {
- // the tensor was not allocated in this buffer
- // this can happen because the graph allocator will try to free weights and other tensors from different buffers
- // the easiest way to deal with this is just to ignore it
- return;
- }
+ WSP_GGML_UNUSED(tensor);
+ }
 
- size_t size = wsp_ggml_allocr_get_alloc_size(alloc, tensor);
+ // this is a very naive implementation, but for our case the number of free blocks should be very small
+ static void wsp_ggml_dyn_tallocr_free_tensor(struct wsp_ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct wsp_ggml_tensor * tensor) {
  size = aligned_offset(NULL, size, alloc->alignment);
- AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);
+
+ AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks);
 
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- remove_allocated_tensor(alloc, tensor);
+ remove_allocated_tensor(alloc, offset, tensor);
  #endif
 
  // see if we can merge with an existing block
  for (int i = 0; i < alloc->n_free_blocks; i++) {
  struct free_block * block = &alloc->free_blocks[i];
  // check if ptr is at the end of the block
- if ((char*)block->addr + block->size == ptr) {
+ if (block->offset + block->size == offset) {
  block->size += size;
  // check if we can merge with the next block
- if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
+ if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) {
  block->size += alloc->free_blocks[i+1].size;
  alloc->n_free_blocks--;
  for (int j = i+1; j < alloc->n_free_blocks; j++) {
@@ -241,11 +256,11 @@ static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct w
  return;
  }
  // check if ptr is at the beginning of the block
- if ((char*)ptr + size == block->addr) {
- block->addr = ptr;
+ if (offset + size == block->offset) {
+ block->offset = offset;
  block->size += size;
  // check if we can merge with the previous block
- if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
+ if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) {
  alloc->free_blocks[i-1].size += block->size;
  alloc->n_free_blocks--;
  for (int j = i; j < alloc->n_free_blocks; j++) {
@@ -259,7 +274,7 @@ static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct w
  WSP_GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
  // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
  int insert_pos = 0;
- while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
+ while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) {
  insert_pos++;
  }
  // shift all blocks from insert_pos onward to make room for the new block
@@ -267,367 +282,757 @@ static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct w
  alloc->free_blocks[i] = alloc->free_blocks[i-1];
  }
  // insert the new block
- alloc->free_blocks[insert_pos].addr = ptr;
+ alloc->free_blocks[insert_pos].offset = offset;
  alloc->free_blocks[insert_pos].size = size;
  alloc->n_free_blocks++;
- }
 
- void wsp_ggml_allocr_set_parse_seq(struct wsp_ggml_allocr * alloc, const int * list, int n) {
- for (int i = 0; i < n; i++) {
- alloc->parse_seq[i] = list[i];
- }
- alloc->parse_seq_len = n;
+ WSP_GGML_UNUSED(tensor);
  }
 
- void wsp_ggml_allocr_reset(struct wsp_ggml_allocr * alloc) {
+ static void wsp_ggml_dyn_tallocr_reset(struct wsp_ggml_dyn_tallocr * alloc) {
  alloc->n_free_blocks = 1;
- size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
- alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
- alloc->free_blocks[0].size = alloc->size - align_offset;
+ alloc->free_blocks[0].offset = 0;
+ alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
+ alloc->max_size = 0;
+
+ #ifdef WSP_GGML_ALLOCATOR_DEBUG
+ for (int i = 0; i < 1024; i++) {
+ alloc->allocated_tensors[i].tensor = NULL;
+ }
+ #endif
  }
 
- struct wsp_ggml_allocr * wsp_ggml_allocr_new(void * data, size_t size, size_t alignment) {
- struct wsp_ggml_allocr * alloc = (struct wsp_ggml_allocr *)malloc(sizeof(struct wsp_ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+ static struct wsp_ggml_dyn_tallocr * wsp_ggml_dyn_tallocr_new(size_t alignment) {
+ struct wsp_ggml_dyn_tallocr * alloc = (struct wsp_ggml_dyn_tallocr *)malloc(sizeof(struct wsp_ggml_dyn_tallocr));
 
- *alloc = (struct wsp_ggml_allocr){
- /*.data = */ data,
- /*.size = */ size,
+ *alloc = (struct wsp_ggml_dyn_tallocr) {
  /*.alignment = */ alignment,
  /*.n_free_blocks = */ 0,
  /*.free_blocks = */ {{0}},
- /*.hash_table = */ {{0}},
  /*.max_size = */ 0,
- /*.measure = */ false,
- /*.parse_seq = */ {0},
- /*.parse_seq_len = */ 0,
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- /*.allocated_tensors = */ {0},
+ /*.allocated_tensors = */ {{0}},
  #endif
  };
 
- wsp_ggml_allocr_reset(alloc);
+ wsp_ggml_dyn_tallocr_reset(alloc);
 
  return alloc;
  }
 
- // OS specific functions to allocate and free uncommitted virtual memory
- static void * alloc_vmem(size_t size) {
- #if defined(_WIN32)
- return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- #elif defined(_POSIX_MAPPED_FILES)
- void * ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (ptr == MAP_FAILED) {
- return NULL;
- }
- return ptr;
- #else
- // use a fixed address for other platforms
- uintptr_t base_addr = (uintptr_t)-size - 0x100;
- return (void *)base_addr;
- #endif
+ static void wsp_ggml_dyn_tallocr_free(struct wsp_ggml_dyn_tallocr * alloc) {
+ free(alloc);
  }
 
- static void free_vmem(void * base_addr, size_t size) {
- #if defined(_WIN32)
- VirtualFree(base_addr, 0, MEM_RELEASE);
- UNUSED(size);
- #elif defined(_POSIX_MAPPED_FILES)
- munmap(base_addr, size);
- #else
- // nothing to do
- UNUSED(base_addr);
- UNUSED(size);
- #endif
+ static size_t wsp_ggml_dyn_tallocr_max_size(struct wsp_ggml_dyn_tallocr * alloc) {
+ return alloc->max_size;
  }
 
- // allocate uncommitted virtual memory to measure the size of the graph
- static void alloc_measure_vmem(void ** base_addr, size_t * size) {
- // 128GB for 64-bit, 1GB for 32-bit
- *size = sizeof(void *) == 4 ? 1ULL<<30 : 1ULL<<37;
- do {
- *base_addr = alloc_vmem(*size);
- if (*base_addr != NULL) {
- AT_PRINTF("allocated %.2f GB of virtual memory for measure buffer at %p\n", *size / 1024.0 / 1024.0 / 1024.0, *base_addr);
- return;
- }
- // try again with half the size
- *size /= 2;
- } while (*size > 0);
-
- WSP_GGML_ASSERT(!"failed to allocate virtual memory for measure buffer");
- }
+ /////////////////////////////////////
 
- static void free_measure_vmem(void * base_addr, size_t size) {
- free_vmem(base_addr, size);
- }
+ // graph allocator
 
- struct wsp_ggml_allocr * wsp_ggml_allocr_new_measure(size_t alignment) {
- struct wsp_ggml_allocr * alloc = (struct wsp_ggml_allocr *)malloc(sizeof(struct wsp_ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+ struct hash_node {
+ int n_children;
+ int n_views;
+ int buffer_id;
+ size_t offset; // offset within the buffer
+ bool allocated;
+ };
 
- void * base_addr;
- size_t size;
+ struct tensor_alloc {
+ int buffer_id;
+ size_t offset;
+ size_t size_max; // 0 = pre-allocated, unused, or view
+ };
 
- alloc_measure_vmem(&base_addr, &size);
+ struct leaf_alloc {
+ struct tensor_alloc leaf;
+ };
 
- *alloc = (struct wsp_ggml_allocr){
- /*.data = */ base_addr,
- /*.size = */ size,
- /*.alignment = */ alignment,
- /*.n_free_blocks = */ 0,
- /*.free_blocks = */ {{0}},
- /*.hash_table = */ {{0}},
- /*.max_size = */ 0,
- /*.measure = */ true,
- /*.parse_seq = */ {0},
- /*.parse_seq_len = */ 0,
- #ifdef WSP_GGML_ALLOCATOR_DEBUG
- /*.allocated_tensors = */ {0},
- #endif
- };
+ struct node_alloc {
+ struct tensor_alloc dst;
+ struct tensor_alloc src[WSP_GGML_MAX_SRC];
+ };
 
- wsp_ggml_allocr_reset(alloc);
+ struct wsp_ggml_gallocr {
+ wsp_ggml_backend_buffer_type_t * bufts; // [n_buffers]
+ wsp_ggml_backend_buffer_t * buffers; // [n_buffers]
+ struct wsp_ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
+ int n_buffers;
 
- return alloc;
- }
+ struct wsp_ggml_hash_set hash_set;
+ struct hash_node * hash_values; // [hash_set.size]
+
+ struct node_alloc * node_allocs; // [n_nodes]
+ int n_nodes;
+
+ struct leaf_alloc * leaf_allocs; // [n_leafs]
+ int n_leafs;
+ };
+
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new_n(wsp_ggml_backend_buffer_type_t * bufts, int n_bufs) {
+ wsp_ggml_gallocr_t galloc = (wsp_ggml_gallocr_t)calloc(1, sizeof(struct wsp_ggml_gallocr));
+ WSP_GGML_ASSERT(galloc != NULL);
+
+ galloc->bufts = calloc(n_bufs, sizeof(wsp_ggml_backend_buffer_type_t));
+ WSP_GGML_ASSERT(galloc->bufts != NULL);
+
+ galloc->buffers = calloc(n_bufs, sizeof(wsp_ggml_backend_buffer_t));
+ WSP_GGML_ASSERT(galloc->buffers != NULL);
+
+ galloc->buf_tallocs = calloc(n_bufs, sizeof(struct wsp_ggml_dyn_tallocr *));
+ WSP_GGML_ASSERT(galloc->buf_tallocs != NULL);
+
+ for (int i = 0; i < n_bufs; i++) {
+ galloc->bufts[i] = bufts[i];
+ galloc->buffers[i] = NULL;
 
- void wsp_ggml_allocr_free(struct wsp_ggml_allocr * alloc) {
- if (alloc->measure) {
- free_measure_vmem(alloc->data, alloc->size);
+ // check if the same buffer type is used multiple times and reuse the same allocator
+ for (int j = 0; j < i; j++) {
+ if (bufts[i] == bufts[j]) {
+ galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
+ break;
+ }
+ }
+
+ if (galloc->buf_tallocs[i] == NULL) {
+ size_t alignment = wsp_ggml_backend_buft_get_alignment(bufts[i]);
+ galloc->buf_tallocs[i] = wsp_ggml_dyn_tallocr_new(alignment);
+ }
  }
- free(alloc);
- }
+ galloc->n_buffers = n_bufs;
 
- bool wsp_ggml_allocr_is_measure(struct wsp_ggml_allocr * alloc) {
- return alloc->measure;
+ return galloc;
  }
 
- //////////// compute graph allocator
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new(wsp_ggml_backend_buffer_type_t buft) {
+ return wsp_ggml_gallocr_new_n(&buft, 1);
+ }
 
- static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
- if (a->type != b->type) {
- return false;
+ void wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc) {
+ if (galloc == NULL) {
+ return;
  }
- for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
- if (a->ne[i] != b->ne[i]) {
- return false;
+
+ for (int i = 0; i < galloc->n_buffers; i++) {
+ if (galloc->buffers != NULL) {
+ // skip if already freed
+ bool freed = false;
+ for (int j = 0; j < i; j++) {
+ if (galloc->buffers[j] == galloc->buffers[i]) {
+ freed = true;
+ break;
+ }
+ }
+ if (!freed) {
+ wsp_ggml_backend_buffer_free(galloc->buffers[i]);
+ }
  }
- if (a->nb[i] != b->nb[i]) {
- return false;
+ if (galloc->buf_tallocs != NULL) {
+ // skip if already freed
+ bool freed = false;
+ for (int j = 0; j < i; j++) {
+ if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+ freed = true;
+ break;
+ }
+ }
+ if (!freed) {
+ wsp_ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
+ }
  }
  }
- return true;
+
+ wsp_ggml_hash_set_free(&galloc->hash_set);
+ free(galloc->hash_values);
+ free(galloc->bufts);
+ free(galloc->buffers);
+ free(galloc->buf_tallocs);
+ free(galloc->node_allocs);
+ free(galloc->leaf_allocs);
+ free(galloc);
  }
 
- static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
- switch (op) {
- case WSP_GGML_OP_SCALE:
- case WSP_GGML_OP_DIAG_MASK_ZERO:
- case WSP_GGML_OP_DIAG_MASK_INF:
- case WSP_GGML_OP_ADD:
- case WSP_GGML_OP_ADD1:
- case WSP_GGML_OP_SUB:
- case WSP_GGML_OP_MUL:
- case WSP_GGML_OP_DIV:
- case WSP_GGML_OP_SQR:
- case WSP_GGML_OP_SQRT:
- case WSP_GGML_OP_LOG:
- case WSP_GGML_OP_UNARY:
- case WSP_GGML_OP_ROPE:
- case WSP_GGML_OP_RMS_NORM:
- case WSP_GGML_OP_SOFT_MAX:
- case WSP_GGML_OP_CONT:
- return true;
+ typedef struct wsp_ggml_gallocr * wsp_ggml_gallocr_t;
 
- default:
- return false;
- }
+ static struct hash_node * wsp_ggml_gallocr_hash_get(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+ size_t i = wsp_ggml_hash_find_or_insert(&galloc->hash_set, t);
+ return &galloc->hash_values[i];
  }
 
- static void allocate_node(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * node) {
- struct hash_node * ht = alloc->hash_table;
- if (node->data == NULL) {
- if (wsp_ggml_is_view(node)) {
- assert(node->view_src->data != NULL);
- node->data = (char *)node->view_src->data + node->view_offs;
- } else {
- // see if we can reuse a parent's buffer (inplace)
- if (wsp_ggml_op_can_inplace(node->op)) {
- for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
- struct wsp_ggml_tensor * parent = node->src[i];
- if (parent == NULL) {
- break;
- }
+ static bool wsp_ggml_gallocr_is_own(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+ return wsp_ggml_gallocr_hash_get(galloc, t)->allocated;
+ }
 
- // if the node's data is external, then we cannot re-use it
- if (wsp_ggml_allocr_is_own(alloc, parent) == false) {
- AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
- continue;
- }
+ static bool wsp_ggml_gallocr_is_allocated(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+ return t->data != NULL || wsp_ggml_gallocr_hash_get(galloc, t)->allocated;
+ }
 
- struct hash_node * p_hn = hash_get(ht, parent);
- if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && wsp_ggml_are_same_layout(node, parent)) {
- if (wsp_ggml_is_view(parent)) {
- struct wsp_ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = hash_get(ht, view_src);
- if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
- // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
- // the parent's data that it will need later (same layout requirement). the problem is that then
- // we cannot free the tensor because the original address of the allocation is lost.
- // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
- // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
- AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
- node->data = parent->data;
- return;
- }
- }
- else {
- AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
- node->data = parent->data;
+ static void wsp_ggml_gallocr_allocate_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, int buffer_id) {
+ WSP_GGML_ASSERT(buffer_id >= 0);
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+
+ if (!wsp_ggml_gallocr_is_allocated(galloc, node) && !wsp_ggml_is_view(node)) {
+ hn->allocated = true;
+ assert(hn->offset == 0);
+
+ // try to reuse a parent's buffer (inplace)
+ if (wsp_ggml_op_can_inplace(node->op)) {
+ for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
+ struct wsp_ggml_tensor * parent = node->src[i];
+ if (parent == NULL) {
+ continue;
+ }
+
+ // if the node's data is external, then we cannot re-use it
+ if (!wsp_ggml_gallocr_is_own(galloc, parent)) {
+ AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
+ continue;
+ }
+
+ // outputs cannot be reused
+ if (parent->flags & WSP_GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & WSP_GGML_TENSOR_FLAG_OUTPUT)) {
+ AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
+ continue;
+ }
+
+ if (!wsp_ggml_are_same_layout(node, parent)) {
+ AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
+ continue;
+ }
+
+ struct hash_node * p_hn = wsp_ggml_gallocr_hash_get(galloc, parent);
+ if (p_hn->n_children == 1 && p_hn->n_views == 0) {
+ if (wsp_ggml_is_view(parent)) {
+ struct wsp_ggml_tensor * view_src = parent->view_src;
+ struct hash_node * view_src_hn = wsp_ggml_gallocr_hash_get(galloc, view_src);
+ if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
+ AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
+ assert(view_src_hn->offset == p_hn->offset);
+ hn->buffer_id = p_hn->buffer_id;
+ hn->offset = p_hn->offset;
+ p_hn->allocated = false; // avoid freeing the parent
+ view_src_hn->allocated = false;
  return;
  }
+ } else {
+ AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
+ hn->buffer_id = p_hn->buffer_id;
+ hn->offset = p_hn->offset;
+ p_hn->allocated = false; // avoid freeing the parent
+ return;
  }
  }
  }
- wsp_ggml_allocr_alloc(alloc, node);
  }
+ // allocate tensor from the buffer
+ struct wsp_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+ wsp_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
+ size_t size = wsp_ggml_backend_buft_get_alloc_size(buft, node);
+ size_t offset = wsp_ggml_dyn_tallocr_alloc(alloc, size, node);
+ hn->buffer_id = buffer_id;
+ hn->offset = offset;
+ return;
+ }
+ }
+
+ static void wsp_ggml_gallocr_free_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
+ // graph outputs are never freed
+ if (node->flags & WSP_GGML_TENSOR_FLAG_OUTPUT) {
+ AT_PRINTF("not freeing output %s\n", node->name);
+ return;
  }
+
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+ size_t offset = hn->offset;
+ int buffer_id = hn->buffer_id;
+ struct wsp_ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+ wsp_ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
+ size_t size = wsp_ggml_backend_buft_get_alloc_size(buft, node);
+ wsp_ggml_dyn_tallocr_free_tensor(alloc, offset, size, node);
+ hn->allocated = false;
+ }
+
+ static int get_node_buffer_id(const int * node_buffer_ids, int i) {
+ return node_buffer_ids ? node_buffer_ids[i] : 0;
  }
 
- static size_t wsp_ggml_allocr_alloc_graph_tensors_n(
- struct wsp_ggml_allocr * alloc,
- struct wsp_ggml_cgraph ** graphs, int n_graphs,
- struct wsp_ggml_tensor *** inputs, struct wsp_ggml_tensor *** outputs) {
+ static void wsp_ggml_gallocr_alloc_graph_impl(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
+ // clear hash tables
+ wsp_ggml_hash_set_reset(&galloc->hash_set);
+ memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);
 
- // reset hash table
- struct hash_node * ht = alloc->hash_table;
- memset(ht, 0, sizeof(struct hash_node) * WSP_GGML_GRAPH_HASHTABLE_SIZE);
+ // allocate leafs
+ // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
+ for (int i = 0; i < graph->n_leafs; i++) {
+ struct wsp_ggml_tensor * leaf = graph->leafs[i];
+ wsp_ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
+ }
 
  // count number of children and views
- for (int g = 0; g < n_graphs; g++) {
- struct wsp_ggml_cgraph * gf = graphs[g];
- for (int i = 0; i < gf->n_nodes; i++) {
- struct wsp_ggml_tensor * node = gf->nodes[i];
-
- if (wsp_ggml_is_view(node)) {
- struct wsp_ggml_tensor * view_src = node->view_src;
- hash_get(ht, view_src)->n_views += 1;
+ // allocate other graph inputs and leafs first to avoid overwriting them
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+
+ // TODO: better way to add external dependencies
+ // WSP_GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
+ // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
+ // itself is never used and should not be considered a dependency
+ if (wsp_ggml_is_view(node) && node->op != WSP_GGML_OP_NONE) {
+ struct wsp_ggml_tensor * view_src = node->view_src;
+ wsp_ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
+ }
+
+ if (node->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
+ wsp_ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
+ }
+
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ continue;
  }
 
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- hash_get(ht, parent)->n_children += 1;
+ wsp_ggml_gallocr_hash_get(galloc, src)->n_children += 1;
+
+ // allocate explicit inputs
+ if (src->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
+ wsp_ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
  }
  }
  }
 
  // allocate tensors
- for (int g = 0; g < n_graphs; g++) {
- struct wsp_ggml_cgraph * gf = graphs[g];
- AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
- // graph inputs are allocated first to ensure that they are not overwritten by each other
- if (inputs != NULL && inputs[g] != NULL) {
- for (int i = 0; inputs[g][i] != NULL; i++) {
- struct wsp_ggml_tensor * input = inputs[g][i];
- AT_PRINTF("input: %s\n", input->name);
- allocate_node(alloc, input);
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+ int buffer_id = get_node_buffer_id(node_buffer_ids, i);
+
+ // allocate parents (only leafs need to be allocated at this point)
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ continue;
  }
+ wsp_ggml_gallocr_allocate_node(galloc, parent, buffer_id);
  }
- // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
- int last_barrier_pos = 0;
- int n_nodes = alloc->parse_seq_len ? alloc->parse_seq_len : gf->n_nodes;
-
- for (int ind = 0; ind < n_nodes; ind++) {
- // allocate a node if there is no parse_seq or this is not a barrier
- if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] != -1) {
- int i = alloc->parse_seq_len ? alloc->parse_seq[ind] : ind;
- struct wsp_ggml_tensor * node = gf->nodes[i];
-
- // allocate parents (leafs)
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- allocate_node(alloc, parent);
- }
 
- // allocate node
- allocate_node(alloc, node);
+ // allocate node
+ wsp_ggml_gallocr_allocate_node(galloc, node, buffer_id);
 
- AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_name(node->op), node->name);
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- AT_PRINTF("%s", parent->name);
- if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
- AT_PRINTF(", ");
- }
- }
- AT_PRINTF("\n");
+ AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_desc(node), node->name);
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ continue;
  }
+ AT_PRINTF("%s", parent->name);
+ if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
+ AT_PRINTF(", ");
+ }
+ }
+ AT_PRINTF("\n");
 
- // update parents
- // update immediately if there is no parse_seq
- // update only at barriers if there is parse_seq
- if ((alloc->parse_seq_len == 0) || alloc->parse_seq[ind] == -1) {
- int update_start = alloc->parse_seq_len ? last_barrier_pos : ind;
- int update_end = alloc->parse_seq_len ? ind : ind + 1;
- for (int i = update_start; i < update_end; i++) {
- int node_i = alloc->parse_seq_len ? alloc->parse_seq[i] : i;
- struct wsp_ggml_tensor * node = gf->nodes[node_i];
-
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- struct hash_node * p_hn = hash_get(ht, parent);
- p_hn->n_children -= 1;
-
- //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
-
- if (p_hn->n_children == 0 && p_hn->n_views == 0) {
- if (wsp_ggml_is_view(parent)) {
- struct wsp_ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = hash_get(ht, view_src);
- view_src_hn->n_views -= 1;
- AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
- if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
- wsp_ggml_allocr_free_tensor(alloc, view_src);
- }
- }
- else {
- if (parent->data != node->data) {
- wsp_ggml_allocr_free_tensor(alloc, parent);
- }
- }
- }
+ // update parents
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ continue;
+ }
+ struct hash_node * p_hn = wsp_ggml_gallocr_hash_get(galloc, parent);
+ p_hn->n_children -= 1;
+
+ AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
+ parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);
+
+ if (p_hn->n_children == 0 && p_hn->n_views == 0) {
+ if (wsp_ggml_is_view(parent)) {
+ struct wsp_ggml_tensor * view_src = parent->view_src;
+ struct hash_node * view_src_hn = wsp_ggml_gallocr_hash_get(galloc, view_src);
+ view_src_hn->n_views -= 1;
+ AT_PRINTF("view_src %s: %d children, %d views\n",
+ view_src->name, view_src_hn->n_children, view_src_hn->n_views);
+ if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
+ wsp_ggml_gallocr_free_node(galloc, view_src);
  }
  }
- AT_PRINTF("\n");
- if (alloc->parse_seq_len) {
- last_barrier_pos = ind + 1;
+ else if (p_hn->allocated) {
+ wsp_ggml_gallocr_free_node(galloc, parent);
  }
  }
+ AT_PRINTF("\n");
  }
- // free graph outputs here that wouldn't be freed otherwise because they have no children
- if (outputs != NULL && outputs[g] != NULL) {
- for (int i = 0; outputs[g][i] != NULL; i++) {
- struct wsp_ggml_tensor * output = outputs[g][i];
- AT_PRINTF("output: %s\n", output->name);
- wsp_ggml_allocr_free_tensor(alloc, output);
+ }
+ }
+
+ bool wsp_ggml_gallocr_reserve_n(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
+ size_t min_hash_size = graph->n_nodes + graph->n_leafs;
+ // add 25% margin to avoid hash collisions
+ min_hash_size += min_hash_size / 4;
+
+ // initialize hash table
+ if (galloc->hash_set.size < min_hash_size) {
+ wsp_ggml_hash_set_free(&galloc->hash_set);
+ galloc->hash_set = wsp_ggml_hash_set_new(min_hash_size);
+ WSP_GGML_ASSERT(galloc->hash_set.keys != NULL);
+
+ free(galloc->hash_values);
+ galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
+ WSP_GGML_ASSERT(galloc->hash_values != NULL);
+ }
+
+ // reset allocators
+ for (int i = 0; i < galloc->n_buffers; i++) {
+ wsp_ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
+ }
+
+ // allocate in hash table
+ wsp_ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);
+
+ // set the node_allocs from the hash table
+ if (galloc->n_nodes < graph->n_nodes) {
+ free(galloc->node_allocs);
+ galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
+ WSP_GGML_ASSERT(galloc->node_allocs != NULL);
+ }
+ galloc->n_nodes = graph->n_nodes;
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+ struct node_alloc * node_alloc = &galloc->node_allocs[i];
+ if (node->view_src || node->data) {
+ node_alloc->dst.buffer_id = -1;
+ node_alloc->dst.offset = SIZE_MAX;
+ node_alloc->dst.size_max = 0;
+ } else {
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, node);
+ node_alloc->dst.buffer_id = hn->buffer_id;
+ node_alloc->dst.offset = hn->offset;
+ node_alloc->dst.size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
+ }
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * src = node->src[j];
+ if (!src || src->view_src || src->data) {
+ node_alloc->src[j].buffer_id = -1;
+ node_alloc->src[j].offset = SIZE_MAX;
+ node_alloc->src[j].size_max = 0;
+ } else {
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, src);
+ node_alloc->src[j].buffer_id = hn->buffer_id;
+ node_alloc->src[j].offset = hn->offset;
+ node_alloc->src[j].size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
  }
  }
  }
+ if (galloc->n_leafs < graph->n_leafs) {
+ free(galloc->leaf_allocs);
+ galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
+ WSP_GGML_ASSERT(galloc->leaf_allocs != NULL);
+ }
+ galloc->n_leafs = graph->n_leafs;
+ for (int i = 0; i < graph->n_leafs; i++) {
+ struct wsp_ggml_tensor * leaf = graph->leafs[i];
+ struct hash_node * hn = wsp_ggml_gallocr_hash_get(galloc, leaf);
+ if (leaf->view_src || leaf->data) {
+ galloc->leaf_allocs[i].leaf.buffer_id = -1;
+ galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
+ galloc->leaf_allocs[i].leaf.size_max = 0;
+ } else {
+ galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
+ galloc->leaf_allocs[i].leaf.offset = hn->offset;
+ galloc->leaf_allocs[i].leaf.size_max = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
+ }
+ }
 
- return alloc->max_size;
+ // reallocate buffers if needed
+ for (int i = 0; i < galloc->n_buffers; i++) {
+ // if the buffer type is used multiple times, we reuse the same buffer
+ for (int j = 0; j < i; j++) {
+ if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+ galloc->buffers[i] = galloc->buffers[j];
+ break;
+ }
+ }
+
+ size_t cur_size = galloc->buffers[i] ? wsp_ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
+ size_t new_size = wsp_ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);
+
+ // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
+ if (new_size > cur_size || galloc->buffers[i] == NULL) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, wsp_ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
+ #endif
+
+ wsp_ggml_backend_buffer_free(galloc->buffers[i]);
+ galloc->buffers[i] = wsp_ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
+ if (galloc->buffers[i] == NULL) {
+ WSP_GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, wsp_ggml_backend_buft_name(galloc->bufts[i]), new_size);
+ return false;
+ }
+ wsp_ggml_backend_buffer_set_usage(galloc->buffers[i], WSP_GGML_BACKEND_BUFFER_USAGE_COMPUTE);
+ }
+ }
+
+ return true;
+ }
+
+ bool wsp_ggml_gallocr_reserve(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph *graph) {
+ return wsp_ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
+ }
+
+ static void wsp_ggml_gallocr_init_tensor(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
+ int buffer_id = tensor_alloc->buffer_id;
+ assert(tensor->data || tensor->view_src || wsp_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
+
+ if (tensor->view_src != NULL) {
+ if (tensor->buffer == NULL) {
+ assert(tensor_alloc->offset == SIZE_MAX);
+ if (tensor->view_src->buffer == NULL) {
+ // this tensor was allocated without ggml-backend
+ return;
+ }
+ wsp_ggml_backend_view_init(tensor);
+ }
+ } else {
+ if (tensor->data == NULL) {
+ assert(tensor_alloc->offset != SIZE_MAX);
+ assert(wsp_ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
+ void * base = wsp_ggml_backend_buffer_get_base(galloc->buffers[buffer_id]);
+ void * addr = (char *)base + tensor_alloc->offset;
+ wsp_ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr);
+ } else {
+ if (tensor->buffer == NULL) {
+ // this tensor was allocated without ggml-backend
+ return;
+ }
+ }
+ }
+ }
+
+ static bool wsp_ggml_gallocr_node_needs_realloc(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node, struct tensor_alloc * talloc) {
+ size_t node_size = 0;
+ if (!node->data && !node->view_src) {
+ WSP_GGML_ASSERT(talloc->buffer_id >= 0); // prevent segfault when misusing the API
+ node_size = wsp_ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
+ }
+ return talloc->size_max >= node_size;
+ }
+
+ static bool wsp_ggml_gallocr_needs_realloc(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph) {
+ if (galloc->n_nodes != graph->n_nodes) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
+ #endif
+ return true;
+ }
+
+ if (galloc->n_leafs != graph->n_leafs) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
+ #endif
+ return true;
+ }
+
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+ struct node_alloc * node_alloc = &galloc->node_allocs[i];
+
+ if (!wsp_ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
+ #endif
+ return true;
+ }
+
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ continue;
+ }
+ if (!wsp_ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
+ #endif
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ bool wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph) {
+ if (wsp_ggml_gallocr_needs_realloc(galloc, graph)) {
+ if (galloc->n_buffers == 1) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
+ #endif
+ if (!wsp_ggml_gallocr_reserve(galloc, graph)) {
+ return false;
+ }
+ } else {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
+ #endif
+ return false;
+ }
+ }
+
+ // reset buffers
+ for (int i = 0; i < galloc->n_buffers; i++) {
+ if (galloc->buffers[i] != NULL) {
+ wsp_ggml_backend_buffer_reset(galloc->buffers[i]);
+ }
+ }
+
+ // allocate the graph tensors from the previous assignments
+ // leafs
+ for (int i = 0; i < graph->n_leafs; i++) {
+ struct wsp_ggml_tensor * leaf = graph->leafs[i];
+ struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
+ wsp_ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
+ }
+ // nodes
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = graph->nodes[i];
+ struct node_alloc * node_alloc = &galloc->node_allocs[i];
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ continue;
+ }
+ wsp_ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
+ }
+ wsp_ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
+ }
+
+ return true;
+ }
+
+ size_t wsp_ggml_gallocr_get_buffer_size(wsp_ggml_gallocr_t galloc, int buffer_id) {
+ WSP_GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);
+
+ if (galloc->buffers[buffer_id] == NULL) {
+ return 0;
+ }
+
+ for (int i = 0; i < buffer_id; i++) {
+ if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
+ // this buffer is the same as a previous one due to the same buffer type being used multiple times
+ // only return the buffer size the first time it appears to avoid double counting
+ return 0;
+ }
+ }
+
+ return wsp_ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
+ }
+
+ // utils
+
+ static bool alloc_tensor_range(struct wsp_ggml_context * ctx,
+ struct wsp_ggml_tensor * first, struct wsp_ggml_tensor * last,
+ wsp_ggml_backend_buffer_type_t buft, size_t size,
+ wsp_ggml_backend_buffer_t ** buffers, size_t * n_buffers) {
+ wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_buft_alloc_buffer(buft, size);
+ if (buffer == NULL) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: failed to allocate %s buffer of size %zu\n", __func__, wsp_ggml_backend_buft_name(buft), size);
+ #endif
+ for (size_t i = 0; i < *n_buffers; i++) {
+ wsp_ggml_backend_buffer_free((*buffers)[i]);
+ }
+ free(*buffers);
+ return false;
+ }
+
+ struct wsp_ggml_tallocr tallocr = wsp_ggml_tallocr_new(buffer);
+
+ for (struct wsp_ggml_tensor * t = first; t != last; t = wsp_ggml_get_next_tensor(ctx, t)) {
+ if (t->data == NULL) {
+ if (t->view_src == NULL) {
+ wsp_ggml_tallocr_alloc(&tallocr, t);
+ } else if (t->buffer == NULL) {
+ wsp_ggml_backend_view_init(t);
+ }
+ } else {
+ if (t->view_src != NULL && t->buffer == NULL) {
+ // view of a pre-allocated tensor
+ wsp_ggml_backend_view_init(t);
+ }
+ }
+ }
+
+ *buffers = realloc(*buffers, sizeof(wsp_ggml_backend_buffer_t) * (*n_buffers + 1));
+ (*buffers)[(*n_buffers)++] = buffer;
+
+ return true;
+ }
+
+ wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, wsp_ggml_backend_buffer_type_t buft) {
+ WSP_GGML_ASSERT(wsp_ggml_get_no_alloc(ctx) == true);
+
+ size_t alignment = wsp_ggml_backend_buft_get_alignment(buft);
+ size_t max_size = wsp_ggml_backend_buft_get_max_size(buft);
+
+ wsp_ggml_backend_buffer_t * buffers = NULL;
+ size_t n_buffers = 0;
+
+ size_t cur_buf_size = 0;
+ struct wsp_ggml_tensor * first = wsp_ggml_get_first_tensor(ctx);
+ for (struct wsp_ggml_tensor * t = first; t != NULL; t = wsp_ggml_get_next_tensor(ctx, t)) {
+ size_t this_size = 0;
+ if (t->data == NULL && t->view_src == NULL) {
+ this_size = WSP_GGML_PAD(wsp_ggml_backend_buft_get_alloc_size(buft, t), alignment);
+ }
+
+ if (this_size > max_size) {
+ WSP_GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
+ __func__, t->name,
+ wsp_ggml_backend_buft_name(buft),
+ this_size, max_size);
+ for (size_t i = 0; i < n_buffers; i++) {
+ wsp_ggml_backend_buffer_free(buffers[i]);
+ }
+ free(buffers);
+ return NULL;
+ }
+
+ if ((cur_buf_size + this_size) > max_size) {
+ // allocate tensors in the current buffer
+ if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
+ return NULL;
+ }
+ first = t;
+ cur_buf_size = this_size;
+ } else {
+ cur_buf_size += this_size;
+ }
+ }
+
+ // allocate remaining tensors
+ if (cur_buf_size > 0) {
+ if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
+ return NULL;
+ }
+ }
+
+ if (n_buffers == 0) {
+ #ifndef NDEBUG
+ WSP_GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
+ #endif
+ return NULL;
+ }
+
+ wsp_ggml_backend_buffer_t buffer;
+ if (n_buffers == 1) {
+ buffer = buffers[0];
+ } else {
+ buffer = wsp_ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
+ }
+ free(buffers);
+ return buffer;
  }
 
- size_t wsp_ggml_allocr_alloc_graph(struct wsp_ggml_allocr * alloc, struct wsp_ggml_cgraph * graph) {
- return wsp_ggml_allocr_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
+ wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_ctx_tensors(struct wsp_ggml_context * ctx, wsp_ggml_backend_t backend) {
+ return wsp_ggml_backend_alloc_ctx_tensors_from_buft(ctx, wsp_ggml_backend_get_default_buffer_type(backend));
  }
  }