whisper.rn 0.4.0-rc.2 → 0.4.0-rc.4

Files changed (40)
  1. package/android/src/main/CMakeLists.txt +2 -0
  2. package/android/src/main/java/com/rnwhisper/RNWhisper.java +6 -1
  3. package/android/src/main/java/com/rnwhisper/WhisperContext.java +29 -15
  4. package/android/src/main/jni.cpp +6 -2
  5. package/cpp/ggml-alloc.c +413 -280
  6. package/cpp/ggml-alloc.h +67 -8
  7. package/cpp/ggml-backend-impl.h +87 -0
  8. package/cpp/ggml-backend.c +950 -0
  9. package/cpp/ggml-backend.h +136 -0
  10. package/cpp/ggml-impl.h +243 -0
  11. package/cpp/{ggml-metal.metal → ggml-metal-whisper.metal} +591 -121
  12. package/cpp/ggml-metal.h +21 -0
  13. package/cpp/ggml-metal.m +623 -234
  14. package/cpp/ggml-quants.c +7377 -0
  15. package/cpp/ggml-quants.h +224 -0
  16. package/cpp/ggml.c +3773 -4455
  17. package/cpp/ggml.h +279 -146
  18. package/cpp/whisper.cpp +182 -103
  19. package/cpp/whisper.h +48 -11
  20. package/ios/RNWhisper.mm +8 -2
  21. package/ios/RNWhisperContext.h +6 -2
  22. package/ios/RNWhisperContext.mm +97 -26
  23. package/jest/mock.js +1 -1
  24. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  25. package/lib/commonjs/index.js +28 -9
  26. package/lib/commonjs/index.js.map +1 -1
  27. package/lib/commonjs/version.json +1 -1
  28. package/lib/module/NativeRNWhisper.js.map +1 -1
  29. package/lib/module/index.js +28 -9
  30. package/lib/module/index.js.map +1 -1
  31. package/lib/module/version.json +1 -1
  32. package/lib/typescript/NativeRNWhisper.d.ts +7 -1
  33. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  34. package/lib/typescript/index.d.ts +8 -3
  35. package/lib/typescript/index.d.ts.map +1 -1
  36. package/package.json +1 -1
  37. package/src/NativeRNWhisper.ts +8 -1
  38. package/src/index.ts +30 -18
  39. package/src/version.json +1 -1
  40. package/whisper-rn.podspec +1 -2
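
The bulk of this diff is a vendored ggml sync: a backend abstraction layer (ggml-backend-impl.h, ggml-backend.c, ggml-backend.h, ggml-impl.h), quantization code split out into ggml-quants.c/.h, the Metal shader renamed from ggml-metal.metal to ggml-metal-whisper.metal, and a rewritten allocator in ggml-alloc.c, shown below. The allocator is split into a tensor allocator (wsp_ggml_tallocr) and a graph allocator (wsp_ggml_gallocr), with struct wsp_ggml_allocr kept as a thin legacy wrapper so existing call sites continue to compile. A minimal sketch of the legacy measure-then-allocate flow that keeps working, using only functions visible in this diff (the graph and the alignment value are assumptions):

    // measure pass: no memory is committed, only the peak size is computed
    wsp_ggml_allocr_t measure = wsp_ggml_allocr_new_measure(/*alignment*/ 16);
    size_t needed = wsp_ggml_allocr_alloc_graph(measure, graph); // graph: a built wsp_ggml_cgraph *
    wsp_ggml_allocr_free(measure);

    // real pass: place the graph's tensors into a buffer of the measured size
    void * buf = malloc(needed);
    wsp_ggml_allocr_t alloc = wsp_ggml_allocr_new(buf, needed, /*alignment*/ 16);
    wsp_ggml_allocr_alloc_graph(alloc, graph);
    wsp_ggml_allocr_free(alloc);
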
package/cpp/ggml-alloc.c CHANGED
@@ -1,69 +1,21 @@
  #include "ggml-alloc.h"
+ #include "ggml-backend-impl.h"
  #include "ggml.h"
+ #include "ggml-impl.h"
  #include <assert.h>
+ #include <limits.h>
  #include <stdarg.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

- #ifdef __has_include
- #if __has_include(<unistd.h>)
- #include <unistd.h>
- #if defined(_POSIX_MAPPED_FILES)
- #include <sys/types.h>
- #include <sys/mman.h>
- #endif
- #endif
- #endif
-
- #if defined(_WIN32)
- #define WIN32_LEAN_AND_MEAN
- #ifndef NOMINMAX
- #define NOMINMAX
- #endif
- #include <windows.h>
- #include <memoryapi.h>
- #endif
-
-
- #define UNUSED(x) (void)(x)
  #define MAX(a, b) ((a) > (b) ? (a) : (b))
- #define WSP_GGML_MAX_CONCUR (2*WSP_GGML_MAX_NODES)
+ #define MAX_FREE_BLOCKS 256

  //#define WSP_GGML_ALLOCATOR_DEBUG

- //#define AT_PRINTF printf
- #define AT_PRINTF(...) ((void)0)
-
- struct hash_node {
- struct wsp_ggml_tensor * t;
- int n_children;
- int n_views;
- };
-
- static size_t hash(void * p) {
- return (size_t)p % WSP_GGML_GRAPH_HASHTABLE_SIZE;
- }
-
- static struct hash_node * hash_get(struct hash_node hash_table[], struct wsp_ggml_tensor * t) {
- size_t h = hash(t);
-
- // linear probing
- size_t i = h;
- while (hash_table[i].t != NULL) {
- if (hash_table[i].t == t) {
- return &hash_table[i];
- }
- i = (i + 1) % WSP_GGML_GRAPH_HASHTABLE_SIZE;
- if (i == h) {
- // hash table is full
- WSP_GGML_ASSERT(false);
- }
- }
-
- hash_table[i].t = t;
- return &hash_table[i];
- }
+ //#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__)
+ #define AT_PRINTF(...)

  // TODO: WSP_GGML_PAD ?
  static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
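
Note on the hunk above: the AT_PRINTF debug hook is now an empty variadic macro; switching to the commented fprintf definition re-enables allocation tracing. aligned_offset (unchanged, outside this hunk) rounds offsets up to the allocator's alignment. For a power-of-two alignment the round-up is equivalent to the following hedged reconstruction (the actual body is not part of this diff):

    // round offset up to the next multiple of a power-of-two alignment
    // e.g. round_up(100, 32) == 128, round_up(128, 32) == 128
    static size_t round_up(size_t offset, size_t alignment) {
        return (offset + alignment - 1) & ~(alignment - 1);
    }
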
@@ -77,19 +29,18 @@ struct free_block {
  size_t size;
  };

- #define MAX_FREE_BLOCKS 128
-
- struct wsp_ggml_allocr {
- void * data;
- size_t size;
+ struct wsp_ggml_tallocr {
+ struct wsp_ggml_backend_buffer * buffer;
+ bool buffer_owned;
+ void * base;
  size_t alignment;
+
  int n_free_blocks;
  struct free_block free_blocks[MAX_FREE_BLOCKS];
- struct hash_node hash_table[WSP_GGML_GRAPH_HASHTABLE_SIZE];
+
  size_t max_size;
+
  bool measure;
- int parse_seq[WSP_GGML_MAX_CONCUR];
- int parse_seq_len;

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
  struct wsp_ggml_tensor * allocated_tensors[1024];
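
The struct now references a wsp_ggml_backend_buffer (plus an ownership flag and cached base pointer) instead of a raw data/size pair, the per-graph hash table and parse_seq move out to the graph allocator, and MAX_FREE_BLOCKS doubles to 256. What remains is a small free-block list; a simplified sketch of the kind of carving it supports (illustrative only — the real selection and coalescing logic lives in the unchanged lines between these hunks):

    struct block { void * addr; size_t size; };

    // carve `size` bytes out of the first block that fits (illustrative policy)
    static void * carve(struct block * blocks, int * n_blocks, size_t size) {
        for (int i = 0; i < *n_blocks; i++) {
            if (blocks[i].size >= size) {
                void * addr = blocks[i].addr;
                blocks[i].addr = (char *)addr + size;
                blocks[i].size -= size;
                if (blocks[i].size == 0) {
                    // drop the exhausted block, keeping the array dense
                    for (int j = i; j < *n_blocks - 1; j++) {
                        blocks[j] = blocks[j + 1];
                    }
                    (*n_blocks)--;
                }
                return addr;
            }
        }
        return NULL; // the real allocator asserts instead
    }
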
@@ -97,7 +48,7 @@ struct wsp_ggml_allocr {
  };

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
- static void add_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+ static void add_allocated_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
  for (int i = 0; i < 1024; i++) {
  if (alloc->allocated_tensors[i] == NULL) {
  alloc->allocated_tensors[i] = tensor;
@@ -106,7 +57,7 @@ static void add_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml
  }
  WSP_GGML_ASSERT(!"out of allocated_tensors");
  }
- static void remove_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+ static void remove_allocated_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
  for (int i = 0; i < 1024; i++) {
  if (alloc->allocated_tensors[i] == tensor ||
  (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
@@ -119,28 +70,20 @@ static void remove_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_g
  }
  #endif

- static size_t wsp_ggml_allocr_get_alloc_size(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
- return wsp_ggml_nbytes(tensor);
-
- UNUSED(alloc);
- }
-
  // check if a tensor is allocated by this buffer
- static bool wsp_ggml_allocr_is_own(struct wsp_ggml_allocr * alloc, const struct wsp_ggml_tensor * tensor) {
- void * ptr = tensor->data;
- return ptr >= alloc->data && (char *)ptr < (char *)alloc->data + alloc->max_size;
+ static bool wsp_ggml_tallocr_is_own(wsp_ggml_tallocr_t alloc, const struct wsp_ggml_tensor * tensor) {
+ return tensor->buffer == alloc->buffer;
  }

  static bool wsp_ggml_is_view(struct wsp_ggml_tensor * t) {
  return t->view_src != NULL;
  }

- void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
- #ifdef WSP_GGML_ALLOCATOR_DEBUG
+ void wsp_ggml_tallocr_alloc(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
  WSP_GGML_ASSERT(!wsp_ggml_is_view(tensor)); // views generally get data pointer from one of their sources
  WSP_GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
- #endif
- size_t size = wsp_ggml_allocr_get_alloc_size(alloc, tensor);
+
+ size_t size = wsp_ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
  size = aligned_offset(NULL, size, alloc->alignment);

  AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
@@ -187,6 +130,10 @@ void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tenso
  }

  tensor->data = addr;
+ tensor->buffer = alloc->buffer;
+ if (!alloc->measure) {
+ wsp_ggml_backend_buffer_init_tensor(alloc->buffer, tensor);
+ }

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
  add_allocated_tensor(alloc, tensor);
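
After this hunk, every placed tensor records its owning buffer — which is exactly what the new wsp_ggml_tallocr_is_own() identity check reads — and real (non-measure) allocators give the backend a chance to set up per-tensor state through wsp_ggml_backend_buffer_init_tensor. A short sketch of the observable effect (the context ctx and an existing talloc are assumptions):

    struct wsp_ggml_tensor * t = wsp_ggml_new_tensor_1d(ctx, WSP_GGML_TYPE_F32, 1024);
    wsp_ggml_tallocr_alloc(talloc, t);
    // t->data now points into the buffer, and t->buffer identifies its owner
    assert(t->buffer == wsp_ggml_tallocr_get_buffer(talloc));
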
@@ -202,23 +149,28 @@ void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tenso
  }
  #endif

- alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
+ alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->base + size);
  }

  // this is a very naive implementation, but for our case the number of free blocks should be very small
- static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
- void * ptr = tensor->data;
-
- if (wsp_ggml_allocr_is_own(alloc, tensor) == false) {
+ static void wsp_ggml_tallocr_free_tensor(wsp_ggml_tallocr_t alloc, struct wsp_ggml_tensor * tensor) {
+ if (wsp_ggml_tallocr_is_own(alloc, tensor) == false) {
  // the tensor was not allocated in this buffer
  // this can happen because the graph allocator will try to free weights and other tensors from different buffers
  // the easiest way to deal with this is just to ignore it
+ // AT_PRINTF("ignoring %s (their buffer: %p, our buffer: %p)\n", tensor->name, (void *)tensor->buffer, (void *)alloc->buffer);
  return;
  }

- size_t size = wsp_ggml_allocr_get_alloc_size(alloc, tensor);
+ void * ptr = tensor->data;
+
+ size_t size = wsp_ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
  size = aligned_offset(NULL, size, alloc->alignment);
- AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);
+ AT_PRINTF("%s: freeing %s at %p (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, ptr, size, alloc->n_free_blocks);
+
+ if (!alloc->measure) {
+ wsp_ggml_backend_buffer_free_tensor(alloc->buffer, tensor);
+ }

  #ifdef WSP_GGML_ALLOCATOR_DEBUG
  remove_allocated_tensor(alloc, tensor);
@@ -272,136 +224,180 @@ static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct w
  alloc->n_free_blocks++;
  }

- void wsp_ggml_allocr_set_parse_seq(struct wsp_ggml_allocr * alloc, const int * list, int n) {
- for (int i = 0; i < n; i++) {
- alloc->parse_seq[i] = list[i];
+ void wsp_ggml_tallocr_reset(wsp_ggml_tallocr_t alloc) {
+ alloc->n_free_blocks = 1;
+ size_t align_offset = aligned_offset(alloc->base, 0, alloc->alignment);
+ alloc->free_blocks[0].addr = (char *)alloc->base + align_offset;
+
+ if (alloc->measure) {
+ alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
+ } else {
+ alloc->free_blocks[0].size = wsp_ggml_backend_buffer_get_size(alloc->buffer) - align_offset;
  }
- alloc->parse_seq_len = n;
  }

- void wsp_ggml_allocr_reset(struct wsp_ggml_allocr * alloc) {
- alloc->n_free_blocks = 1;
- size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
- alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
- alloc->free_blocks[0].size = alloc->size - align_offset;
- }
+ wsp_ggml_tallocr_t wsp_ggml_tallocr_new(void * data, size_t size, size_t alignment) {
+ struct wsp_ggml_backend_buffer * buffer = wsp_ggml_backend_cpu_buffer_from_ptr(NULL, data, size);

- struct wsp_ggml_allocr * wsp_ggml_allocr_new(void * data, size_t size, size_t alignment) {
- struct wsp_ggml_allocr * alloc = (struct wsp_ggml_allocr *)malloc(sizeof(struct wsp_ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+ wsp_ggml_tallocr_t alloc = (wsp_ggml_tallocr_t)malloc(sizeof(struct wsp_ggml_tallocr));

- *alloc = (struct wsp_ggml_allocr){
- /*.data = */ data,
- /*.size = */ size,
+ *alloc = (struct wsp_ggml_tallocr) {
+ /*.buffer = */ buffer,
+ /*.buffer_owned = */ true,
+ /*.base = */ wsp_ggml_backend_buffer_get_base(buffer),
  /*.alignment = */ alignment,
  /*.n_free_blocks = */ 0,
  /*.free_blocks = */ {{0}},
- /*.hash_table = */ {{0}},
  /*.max_size = */ 0,
  /*.measure = */ false,
- /*.parse_seq = */ {0},
- /*.parse_seq_len = */ 0,
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
  /*.allocated_tensors = */ {0},
  #endif
  };

- wsp_ggml_allocr_reset(alloc);
+ wsp_ggml_tallocr_reset(alloc);

  return alloc;
  }

- // OS specific functions to allocate and free uncommitted virtual memory
- static void * alloc_vmem(size_t size) {
- #if defined(_WIN32)
- return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- #elif defined(_POSIX_MAPPED_FILES)
- void * ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (ptr == MAP_FAILED) {
- return NULL;
- }
- return ptr;
- #else
- // use a fixed address for other platforms
- uintptr_t base_addr = (uintptr_t)-size - 0x100;
- return (void *)base_addr;
- #endif
- }
+ wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure(size_t alignment) {
+ wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new((void *)0x1000, SIZE_MAX/2, alignment);
+ alloc->measure = true;

- static void free_vmem(void * base_addr, size_t size) {
- #if defined(_WIN32)
- VirtualFree(base_addr, 0, MEM_RELEASE);
- UNUSED(size);
- #elif defined(_POSIX_MAPPED_FILES)
- munmap(base_addr, size);
- #else
- // nothing to do
- UNUSED(base_addr);
- UNUSED(size);
- #endif
+ return alloc;
  }

- // allocate uncommitted virtual memory to measure the size of the graph
- static void alloc_measure_vmem(void ** base_addr, size_t * size) {
- // 128GB for 64-bit, 1GB for 32-bit
- *size = sizeof(void *) == 4 ? 1ULL<<30 : 1ULL<<37;
- do {
- *base_addr = alloc_vmem(*size);
- if (*base_addr != NULL) {
- AT_PRINTF("allocated %.2f GB of virtual memory for measure buffer at %p\n", *size / 1024.0 / 1024.0 / 1024.0, *base_addr);
- return;
- }
- // try again with half the size
- *size /= 2;
- } while (*size > 0);
+ wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure_from_backend(struct wsp_ggml_backend * backend) {
+ // create a backend buffer to get the correct tensor allocation sizes
+ wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_alloc_buffer(backend, 1);

- WSP_GGML_ASSERT(!"failed to allocate virtual memory for measure buffer");
+ // TODO: move alloc initialization to a common wsp_ggml_tallocr_new_impl function
+ wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new_from_buffer(buffer);
+ alloc->buffer_owned = true;
+ alloc->measure = true;
+ wsp_ggml_tallocr_reset(alloc);
+ return alloc;
  }

- static void free_measure_vmem(void * base_addr, size_t size) {
- free_vmem(base_addr, size);
+ wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size) {
+ wsp_ggml_backend_buffer_t buffer = wsp_ggml_backend_alloc_buffer(backend, size);
+ wsp_ggml_tallocr_t alloc = wsp_ggml_tallocr_new_from_buffer(buffer);
+ alloc->buffer_owned = true;
+ return alloc;
  }

- struct wsp_ggml_allocr * wsp_ggml_allocr_new_measure(size_t alignment) {
- struct wsp_ggml_allocr * alloc = (struct wsp_ggml_allocr *)malloc(sizeof(struct wsp_ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+ wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer) {
+ wsp_ggml_tallocr_t alloc = (wsp_ggml_tallocr_t)malloc(sizeof(struct wsp_ggml_tallocr));

- void * base_addr;
- size_t size;
-
- alloc_measure_vmem(&base_addr, &size);
-
- *alloc = (struct wsp_ggml_allocr){
- /*.data = */ base_addr,
- /*.size = */ size,
- /*.alignment = */ alignment,
+ *alloc = (struct wsp_ggml_tallocr) {
+ /*.buffer = */ buffer,
+ /*.buffer_owned = */ false,
+ /*.base = */ wsp_ggml_backend_buffer_get_base(buffer),
+ /*.alignment = */ wsp_ggml_backend_buffer_get_alignment(buffer),
  /*.n_free_blocks = */ 0,
  /*.free_blocks = */ {{0}},
- /*.hash_table = */ {{0}},
  /*.max_size = */ 0,
- /*.measure = */ true,
- /*.parse_seq = */ {0},
- /*.parse_seq_len = */ 0,
+ /*.measure = */ false,
  #ifdef WSP_GGML_ALLOCATOR_DEBUG
  /*.allocated_tensors = */ {0},
  #endif
  };

- wsp_ggml_allocr_reset(alloc);
+ wsp_ggml_tallocr_reset(alloc);

  return alloc;
  }

- void wsp_ggml_allocr_free(struct wsp_ggml_allocr * alloc) {
- if (alloc->measure) {
- free_measure_vmem(alloc->data, alloc->size);
+ struct wsp_ggml_backend_buffer * wsp_ggml_tallocr_get_buffer(wsp_ggml_tallocr_t alloc) {
+ return alloc->buffer;
+ }
+
+ void wsp_ggml_tallocr_free(wsp_ggml_tallocr_t alloc) {
+ if (alloc == NULL) {
+ return;
+ }
+
+ if (alloc->buffer_owned) {
+ wsp_ggml_backend_buffer_free(alloc->buffer);
  }
  free(alloc);
  }

- bool wsp_ggml_allocr_is_measure(struct wsp_ggml_allocr * alloc) {
+ bool wsp_ggml_tallocr_is_measure(wsp_ggml_tallocr_t alloc) {
  return alloc->measure;
  }

- //////////// compute graph allocator
+ size_t wsp_ggml_tallocr_max_size(wsp_ggml_tallocr_t alloc) {
+ return alloc->max_size;
+ }
+
+ // graph allocator
+
+ struct hash_node {
+ int n_children;
+ int n_views;
+ };
+
+ struct wsp_ggml_gallocr {
+ wsp_ggml_tallocr_t talloc;
+ struct wsp_ggml_hash_set hash_set;
+ struct hash_node * hash_values;
+ size_t hash_values_size;
+ wsp_ggml_tallocr_t * hash_allocs;
+ int * parse_seq;
+ int parse_seq_len;
+ };
+
+ wsp_ggml_gallocr_t wsp_ggml_gallocr_new(void) {
+ wsp_ggml_gallocr_t galloc = (wsp_ggml_gallocr_t)malloc(sizeof(struct wsp_ggml_gallocr));
+
+ *galloc = (struct wsp_ggml_gallocr) {
+ /*.talloc = */ NULL,
+ /*.hash_set = */ {0},
+ /*.hash_values = */ NULL,
+ /*.hash_values_size = */ 0,
+ /*.hash_allocs = */ NULL,
+ /*.parse_seq = */ NULL,
+ /*.parse_seq_len = */ 0,
+ };
+
+ return galloc;
+ }
+
+ void wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc) {
+ if (galloc == NULL) {
+ return;
+ }
+
+ if (galloc->hash_set.keys != NULL) {
+ free(galloc->hash_set.keys);
+ }
+ if (galloc->hash_values != NULL) {
+ free(galloc->hash_values);
+ }
+ if (galloc->hash_allocs != NULL) {
+ free(galloc->hash_allocs);
+ }
+ if (galloc->parse_seq != NULL) {
+ free(galloc->parse_seq);
+ }
+ free(galloc);
+ }
+
+ void wsp_ggml_gallocr_set_parse_seq(wsp_ggml_gallocr_t galloc, const int * list, int n) {
+ free(galloc->parse_seq);
+ galloc->parse_seq = malloc(sizeof(int) * n);
+
+ for (int i = 0; i < n; i++) {
+ galloc->parse_seq[i] = list[i];
+ }
+ galloc->parse_seq_len = n;
+ }
+
+ static struct hash_node * hash_get(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * t) {
+ size_t i = wsp_ggml_hash_find_or_insert(galloc->hash_set, t);
+ return &galloc->hash_values[i];
+ }

  static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
  if (a->type != b->type) {
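
The hunk above completes the split: wsp_ggml_tallocr_* handles placement inside a single backend buffer (with the measure variant capped at SIZE_MAX/2 instead of reserving virtual memory), while wsp_ggml_gallocr_* owns the graph-walking state (hash table, refcounts, parse_seq). A minimal sketch of driving the two directly, without the legacy wrapper (the backend, buffer size, and graph are assumptions):

    wsp_ggml_tallocr_t talloc = wsp_ggml_tallocr_new_from_backend(backend, buf_size);
    wsp_ggml_gallocr_t galloc = wsp_ggml_gallocr_new();

    size_t max_size = wsp_ggml_gallocr_alloc_graph(galloc, talloc, graph);

    wsp_ggml_gallocr_free(galloc);
    wsp_ggml_tallocr_free(talloc); // buffer_owned is true here, so the backend buffer is freed too
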
@@ -435,7 +431,6 @@ static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
  case WSP_GGML_OP_ROPE:
  case WSP_GGML_OP_RMS_NORM:
  case WSP_GGML_OP_SOFT_MAX:
- case WSP_GGML_OP_CONT:
  return true;

  default:
@@ -443,12 +438,38 @@ static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
  }
  }

- static void allocate_node(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * node) {
- struct hash_node * ht = alloc->hash_table;
+ static wsp_ggml_tallocr_t node_tallocr(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
+ if (galloc->talloc != NULL) {
+ return galloc->talloc;
+ }
+
+ return galloc->hash_allocs[wsp_ggml_hash_find_or_insert(galloc->hash_set, node)];
+ }
+
+ static void init_view(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * view) {
+ wsp_ggml_tallocr_t alloc = node_tallocr(galloc, view);
+
+ //printf("init_view: %s from src %s\n", view->name, view->view_src->name);
+ WSP_GGML_ASSERT(view->view_src != NULL && view->view_src->data != NULL);
+ view->backend = view->view_src->backend;
+ view->buffer = view->view_src->buffer;
+ view->data = (char *)view->view_src->data + view->view_offs;
+
+ // FIXME: the view should be initialized by the owning buffer, but currently this breaks the CUDA backend
+ // due to the wsp_ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras
+ assert(wsp_ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->backend == alloc->buffer->backend);
+
+ if (!alloc->measure) {
+ wsp_ggml_backend_buffer_init_tensor(alloc->buffer, view);
+ }
+ }
+
+ static void allocate_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
+ wsp_ggml_tallocr_t alloc = node_tallocr(galloc, node);
+
  if (node->data == NULL) {
  if (wsp_ggml_is_view(node)) {
- assert(node->view_src->data != NULL);
- node->data = (char *)node->view_src->data + node->view_offs;
+ init_view(galloc, node);
  } else {
  // see if we can reuse a parent's buffer (inplace)
  if (wsp_ggml_op_can_inplace(node->op)) {
@@ -459,16 +480,16 @@ static void allocate_node(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor
  }

  // if the node's data is external, then we cannot re-use it
- if (wsp_ggml_allocr_is_own(alloc, parent) == false) {
+ if (wsp_ggml_tallocr_is_own(alloc, parent) == false) {
  AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
  continue;
  }

- struct hash_node * p_hn = hash_get(ht, parent);
+ struct hash_node * p_hn = hash_get(galloc, parent);
  if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && wsp_ggml_are_same_layout(node, parent)) {
  if (wsp_ggml_is_view(parent)) {
  struct wsp_ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = hash_get(ht, view_src);
+ struct hash_node * view_src_hn = hash_get(galloc, view_src);
  if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
  // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
  // the parent's data that it will need later (same layout requirement). the problem is that then
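
The reuse test above (n_children == 1 && n_views == 0) comes from the counting pass in wsp_ggml_tallocr_alloc_graph_impl: a parent read by exactly one node and no views is dead once that node executes, so the node may take over its storage. A hypothetical two-op fragment to make the rule concrete (the tensor names and ops are illustrative, not from this diff):

    struct wsp_ggml_tensor * t1 = wsp_ggml_mul_mat(ctx, w, x); // read only by t2
    struct wsp_ggml_tensor * t2 = wsp_ggml_add(ctx, t1, b);    // ADD permits inplace

    // while allocating t2: t1 has n_children == 1, n_views == 0 and a matching
    // layout, so t2 becomes a view of t1 (t2->view_src = t1) and reuses its
    // memory instead of taking a fresh block; with a second reader of t1
    // (n_children == 2) the reuse would be skipped.
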
@@ -476,158 +497,270 @@ static void allocate_node(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor
  // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
  // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
  AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
- node->data = parent->data;
+ node->view_src = view_src;
+ view_src_hn->n_views += 1;
+ init_view(galloc, node);
  return;
  }
  }
  else {
  AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
- node->data = parent->data;
+ node->view_src = parent;
+ p_hn->n_views += 1;
+ init_view(galloc, node);
  return;
  }
  }
  }
  }
- wsp_ggml_allocr_alloc(alloc, node);
+ wsp_ggml_tallocr_alloc(alloc, node);
  }
  }
  }

- static size_t wsp_ggml_allocr_alloc_graph_tensors_n(
- struct wsp_ggml_allocr * alloc,
- struct wsp_ggml_cgraph ** graphs, int n_graphs,
- struct wsp_ggml_tensor *** inputs, struct wsp_ggml_tensor *** outputs) {
+ static void free_node(wsp_ggml_gallocr_t galloc, struct wsp_ggml_tensor * node) {
+ wsp_ggml_tallocr_t alloc = node_tallocr(galloc, node);

- // reset hash table
- struct hash_node * ht = alloc->hash_table;
- memset(ht, 0, sizeof(struct hash_node) * WSP_GGML_GRAPH_HASHTABLE_SIZE);
+ wsp_ggml_tallocr_free_tensor(alloc, node);
+ }
+
+ static void wsp_ggml_tallocr_alloc_graph_impl(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * gf) {
+ const int * parse_seq = galloc->parse_seq;
+ int parse_seq_len = galloc->parse_seq_len;

  // count number of children and views
- for (int g = 0; g < n_graphs; g++) {
- struct wsp_ggml_cgraph * gf = graphs[g];
- for (int i = 0; i < gf->n_nodes; i++) {
- struct wsp_ggml_tensor * node = gf->nodes[i];
+ for (int i = 0; i < gf->n_nodes; i++) {
+ struct wsp_ggml_tensor * node = gf->nodes[i];
+
+ if (wsp_ggml_is_view(node)) {
+ struct wsp_ggml_tensor * view_src = node->view_src;
+ hash_get(galloc, view_src)->n_views += 1;
+ if (node->buffer == NULL && node->data != NULL) {
+ // view of a pre-allocated tensor, didn't call init_view() yet
+ init_view(galloc, node);
+ }
+ }

- if (wsp_ggml_is_view(node)) {
- struct wsp_ggml_tensor * view_src = node->view_src;
- hash_get(ht, view_src)->n_views += 1;
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ break;
  }
+ hash_get(galloc, parent)->n_children += 1;
+ if (wsp_ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) {
+ init_view(galloc, parent);
+ }
+ }
+ }

+ // allocate tensors
+ // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
+ int last_barrier_pos = 0;
+ int n_nodes = parse_seq_len ? parse_seq_len : gf->n_nodes;
+
+ for (int ind = 0; ind < n_nodes; ind++) {
+ // allocate a node if there is no parse_seq or this is not a barrier
+ if (parse_seq_len == 0 || parse_seq[ind] != -1) {
+ int i = parse_seq_len ? parse_seq[ind] : ind;
+ struct wsp_ggml_tensor * node = gf->nodes[i];
+
+ // allocate parents (leafs)
  for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
  struct wsp_ggml_tensor * parent = node->src[j];
  if (parent == NULL) {
  break;
  }
- hash_get(ht, parent)->n_children += 1;
+ allocate_node(galloc, parent);
  }
- }
- }

- // allocate tensors
- for (int g = 0; g < n_graphs; g++) {
- struct wsp_ggml_cgraph * gf = graphs[g];
- AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
- // graph inputs are allocated first to ensure that they are not overwritten by each other
- if (inputs != NULL && inputs[g] != NULL) {
- for (int i = 0; inputs[g][i] != NULL; i++) {
- struct wsp_ggml_tensor * input = inputs[g][i];
- AT_PRINTF("input: %s\n", input->name);
- allocate_node(alloc, input);
+ // allocate node
+ allocate_node(galloc, node);
+
+ AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_name(node->op), node->name);
+ for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+ struct wsp_ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ break;
+ }
+ AT_PRINTF("%s", parent->name);
+ if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
+ AT_PRINTF(", ");
+ }
  }
+ AT_PRINTF("\n");
  }
- // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
- int last_barrier_pos = 0;
- int n_nodes = alloc->parse_seq_len ? alloc->parse_seq_len : gf->n_nodes;

- for (int ind = 0; ind < n_nodes; ind++) {
- // allocate a node if there is no parse_seq or this is not a barrier
- if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] != -1) {
- int i = alloc->parse_seq_len ? alloc->parse_seq[ind] : ind;
- struct wsp_ggml_tensor * node = gf->nodes[i];
+ // update parents
+ // update immediately if there is no parse_seq
+ // update only at barriers if there is parse_seq
+ if ((parse_seq_len == 0) || parse_seq[ind] == -1) {
+ int update_start = parse_seq_len ? last_barrier_pos : ind;
+ int update_end = parse_seq_len ? ind : ind + 1;
+ for (int i = update_start; i < update_end; i++) {
+ int node_i = parse_seq_len ? parse_seq[i] : i;
+ struct wsp_ggml_tensor * node = gf->nodes[node_i];

- // allocate parents (leafs)
  for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
  struct wsp_ggml_tensor * parent = node->src[j];
  if (parent == NULL) {
  break;
  }
- allocate_node(alloc, parent);
- }
+ struct hash_node * p_hn = hash_get(galloc, parent);
+ p_hn->n_children -= 1;

- // allocate node
- allocate_node(alloc, node);
+ //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);

- AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_name(node->op), node->name);
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- AT_PRINTF("%s", parent->name);
- if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
- AT_PRINTF(", ");
- }
- }
- AT_PRINTF("\n");
- }
-
- // update parents
- // update immediately if there is no parse_seq
- // update only at barriers if there is parse_seq
- if ((alloc->parse_seq_len == 0) || alloc->parse_seq[ind] == -1) {
- int update_start = alloc->parse_seq_len ? last_barrier_pos : ind;
- int update_end = alloc->parse_seq_len ? ind : ind + 1;
- for (int i = update_start; i < update_end; i++) {
- int node_i = alloc->parse_seq_len ? alloc->parse_seq[i] : i;
- struct wsp_ggml_tensor * node = gf->nodes[node_i];
-
- for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
- struct wsp_ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- struct hash_node * p_hn = hash_get(ht, parent);
- p_hn->n_children -= 1;
-
- //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
-
- if (p_hn->n_children == 0 && p_hn->n_views == 0) {
- if (wsp_ggml_is_view(parent)) {
- struct wsp_ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = hash_get(ht, view_src);
- view_src_hn->n_views -= 1;
- AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
- if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
- wsp_ggml_allocr_free_tensor(alloc, view_src);
- }
- }
- else {
- if (parent->data != node->data) {
- wsp_ggml_allocr_free_tensor(alloc, parent);
- }
+ if (p_hn->n_children == 0 && p_hn->n_views == 0) {
+ if (wsp_ggml_is_view(parent)) {
+ struct wsp_ggml_tensor * view_src = parent->view_src;
+ struct hash_node * view_src_hn = hash_get(galloc, view_src);
+ view_src_hn->n_views -= 1;
+ AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
+ if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0) {
+ free_node(galloc, view_src);
  }
  }
+ else {
+ free_node(galloc, parent);
+ }
  }
  }
- AT_PRINTF("\n");
- if (alloc->parse_seq_len) {
- last_barrier_pos = ind + 1;
- }
  }
- }
- // free graph outputs here that wouldn't be freed otherwise because they have no children
- if (outputs != NULL && outputs[g] != NULL) {
- for (int i = 0; outputs[g][i] != NULL; i++) {
- struct wsp_ggml_tensor * output = outputs[g][i];
- AT_PRINTF("output: %s\n", output->name);
- wsp_ggml_allocr_free_tensor(alloc, output);
+ AT_PRINTF("\n");
+ if (parse_seq_len) {
+ last_barrier_pos = ind + 1;
  }
  }
  }
+ }

- return alloc->max_size;
+ size_t wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, wsp_ggml_tallocr_t talloc, struct wsp_ggml_cgraph * graph) {
+ size_t hash_size = graph->visited_hash_table.size;
+
+ // check if the hash table is initialized and large enough
+ if (galloc->hash_set.size < hash_size) {
+ if (galloc->hash_set.keys != NULL) {
+ free(galloc->hash_set.keys);
+ }
+ if (galloc->hash_values != NULL) {
+ free(galloc->hash_values);
+ }
+ galloc->hash_set.keys = malloc(sizeof(struct wsp_ggml_tensor *) * hash_size);
+ galloc->hash_set.size = hash_size;
+ galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size);
+ }
+
+ // reset hash table
+ memset(galloc->hash_set.keys, 0, sizeof(struct wsp_ggml_tensor *) * hash_size);
+ memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);
+
+ galloc->talloc = talloc;
+ wsp_ggml_tallocr_alloc_graph_impl(galloc, graph);
+ galloc->talloc = NULL;
+
+ size_t max_size = wsp_ggml_tallocr_max_size(talloc);
+
+ return max_size;
+ }
+
+ void wsp_ggml_gallocr_alloc_graph_n(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph, struct wsp_ggml_hash_set hash_set, wsp_ggml_tallocr_t * hash_node_alloct) {
+ const size_t hash_size = hash_set.size;
+
+ WSP_GGML_ASSERT(hash_size >= (size_t)(graph->n_nodes + graph->n_leafs));
+
+ galloc->talloc = NULL;
+
+ // alloc hash_values if needed
+ if (galloc->hash_values == NULL || galloc->hash_values_size < hash_size) {
+ free(galloc->hash_values);
+ galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size);
+ galloc->hash_values_size = hash_size;
+ }
+
+ // free hash_set.keys if needed
+ if (galloc->hash_set.keys != NULL) {
+ free(galloc->hash_set.keys);
+ }
+ galloc->hash_set = hash_set;
+
+ // reset hash values
+ memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);
+
+ galloc->hash_allocs = hash_node_alloct;
+
+ wsp_ggml_tallocr_alloc_graph_impl(galloc, graph);
+
+ // remove unowned resources
+ galloc->hash_set.keys = NULL;
+ galloc->hash_allocs = NULL;
+ }
+
+ // legacy API wrapper
+
+ struct wsp_ggml_allocr {
+ wsp_ggml_tallocr_t talloc;
+ wsp_ggml_gallocr_t galloc;
+ };
+
+ static wsp_ggml_allocr_t wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_t talloc) {
+ wsp_ggml_allocr_t alloc = (wsp_ggml_allocr_t)malloc(sizeof(struct wsp_ggml_allocr));
+ *alloc = (struct wsp_ggml_allocr) {
+ /*.talloc = */ talloc,
+ /*.galloc = */ wsp_ggml_gallocr_new(),
+ };
+ return alloc;
+ }
+
+ wsp_ggml_allocr_t wsp_ggml_allocr_new(void * data, size_t size, size_t alignment) {
+ return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new(data, size, alignment));
+ }
+
+ wsp_ggml_allocr_t wsp_ggml_allocr_new_measure(size_t alignment) {
+ return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_measure(alignment));
+ }
+
+ wsp_ggml_allocr_t wsp_ggml_allocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer) {
+ return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_from_buffer(buffer));
+ }
+
+ wsp_ggml_allocr_t wsp_ggml_allocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size) {
+ return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_from_backend(backend, size));
+ }
+
+ wsp_ggml_allocr_t wsp_ggml_allocr_new_measure_from_backend(struct wsp_ggml_backend * backend) {
+ return wsp_ggml_allocr_new_impl(wsp_ggml_tallocr_new_measure_from_backend(backend));
+ }
+
+ struct wsp_ggml_backend_buffer * wsp_ggml_allocr_get_buffer(wsp_ggml_allocr_t alloc) {
+ return wsp_ggml_tallocr_get_buffer(alloc->talloc);
+ }
+
+ void wsp_ggml_allocr_set_parse_seq(wsp_ggml_allocr_t alloc, const int * list, int n) {
+ wsp_ggml_gallocr_set_parse_seq(alloc->galloc, list, n);
+ }
+
+ void wsp_ggml_allocr_free(wsp_ggml_allocr_t alloc) {
+ wsp_ggml_gallocr_free(alloc->galloc);
+ wsp_ggml_tallocr_free(alloc->talloc);
+ free(alloc);
+ }
+
+ bool wsp_ggml_allocr_is_measure(wsp_ggml_allocr_t alloc) {
+ return wsp_ggml_tallocr_is_measure(alloc->talloc);
+ }
+
+ void wsp_ggml_allocr_reset(wsp_ggml_allocr_t alloc) {
+ wsp_ggml_tallocr_reset(alloc->talloc);
+ }
+
+ void wsp_ggml_allocr_alloc(wsp_ggml_allocr_t alloc, struct wsp_ggml_tensor * tensor) {
+ wsp_ggml_tallocr_alloc(alloc->talloc, tensor);
+ }
+
+ size_t wsp_ggml_allocr_max_size(wsp_ggml_allocr_t alloc) {
+ return wsp_ggml_tallocr_max_size(alloc->talloc);
  }

- size_t wsp_ggml_allocr_alloc_graph(struct wsp_ggml_allocr * alloc, struct wsp_ggml_cgraph * graph) {
- return wsp_ggml_allocr_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
+ size_t wsp_ggml_allocr_alloc_graph(wsp_ggml_allocr_t alloc, struct wsp_ggml_cgraph * graph) {
+ return wsp_ggml_gallocr_alloc_graph(alloc->galloc, alloc->talloc, graph);
  }
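
Because the legacy wrapper simply forwards to a talloc/galloc pair, whisper.cpp call sites compile unchanged while gaining backend awareness. A hedged sketch of the backend-aware flow the new *_from_backend entry points enable (backend and graph construction assumed):

    // measure against the real backend so per-tensor sizes match it
    wsp_ggml_allocr_t measure = wsp_ggml_allocr_new_measure_from_backend(backend);
    size_t size = wsp_ggml_allocr_alloc_graph(measure, graph);
    wsp_ggml_allocr_free(measure);

    // then allocate for real on the same backend
    wsp_ggml_allocr_t alloc = wsp_ggml_allocr_new_from_backend(backend, size);
    wsp_ggml_allocr_alloc_graph(alloc, graph);
    wsp_ggml_allocr_free(alloc);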