whisper.rn 0.3.9 → 0.4.0-rc.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/android/src/main/CMakeLists.txt +2 -1
  2. package/android/src/main/jni.cpp +7 -1
  3. package/cpp/coreml/whisper-encoder.mm +7 -1
  4. package/cpp/ggml-alloc.c +633 -0
  5. package/cpp/ggml-alloc.h +26 -0
  6. package/cpp/ggml-metal.h +85 -0
  7. package/cpp/ggml-metal.m +1283 -0
  8. package/cpp/ggml-metal.metal +2353 -0
  9. package/cpp/ggml.c +5024 -2924
  10. package/cpp/ggml.h +569 -95
  11. package/cpp/whisper.cpp +1014 -667
  12. package/cpp/whisper.h +13 -0
  13. package/ios/RNWhisper.mm +2 -0
  14. package/ios/RNWhisperContext.h +1 -1
  15. package/ios/RNWhisperContext.mm +18 -4
  16. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  17. package/lib/commonjs/index.js +3 -1
  18. package/lib/commonjs/index.js.map +1 -1
  19. package/lib/module/NativeRNWhisper.js.map +1 -1
  20. package/lib/module/index.js +3 -1
  21. package/lib/module/index.js.map +1 -1
  22. package/lib/typescript/NativeRNWhisper.d.ts +1 -0
  23. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  24. package/lib/typescript/index.d.ts +3 -1
  25. package/lib/typescript/index.d.ts.map +1 -1
  26. package/package.json +1 -1
  27. package/src/NativeRNWhisper.ts +1 -0
  28. package/src/index.ts +4 -0
  29. package/whisper-rn.podspec +8 -2
  30. package/ios/RNWhisper.xcodeproj/project.xcworkspace/contents.xcworkspacedata +0 -4
  31. package/ios/RNWhisper.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +0 -8
  32. package/ios/RNWhisper.xcodeproj/project.xcworkspace/xcuserdata/jhen.xcuserdatad/UserInterfaceState.xcuserstate +0 -0
  33. package/ios/RNWhisper.xcodeproj/xcuserdata/jhen.xcuserdatad/xcschemes/xcschememanagement.plist +0 -19
package/android/src/main/CMakeLists.txt
@@ -8,6 +8,7 @@ set(RNWHISPER_LIB_DIR ${CMAKE_SOURCE_DIR}/../../../cpp)
 set(
     SOURCE_FILES
     ${RNWHISPER_LIB_DIR}/ggml.c
+    ${RNWHISPER_LIB_DIR}/ggml-alloc.c
     ${RNWHISPER_LIB_DIR}/whisper.cpp
     ${RNWHISPER_LIB_DIR}/rn-whisper.cpp
     ${CMAKE_SOURCE_DIR}/jni.cpp
@@ -21,7 +22,7 @@ function(build_library target_name)
         SHARED
         ${SOURCE_FILES}
     )
-
+
     target_link_libraries(${target_name} ${LOG_LIB} android)
 
     if (${target_name} STREQUAL "whisper_v8fp16_va")
package/android/src/main/jni.cpp
@@ -85,7 +85,7 @@ static void input_stream_close(void *ctx) {
     JNIEnv *env = context->env;
     jobject input_stream = context->input_stream;
     jclass input_stream_class = env->GetObjectClass(input_stream);
-
+
     env->CallVoidMethod(
         input_stream,
         env->GetMethodID(input_stream_class, "close", "()V")
@@ -296,11 +296,17 @@ Java_com_rnwhisper_WhisperContext_fullTranscribe(
         params.initial_prompt = env->GetStringUTFChars(prompt, nullptr);
     }
 
+    // abort handlers
     params.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
         bool is_aborted = *(bool*)user_data;
         return !is_aborted;
     };
     params.encoder_begin_callback_user_data = rn_whisper_assign_abort_map(job_id);
+    params.abort_callback = [](void * user_data) {
+        bool is_aborted = *(bool*)user_data;
+        return is_aborted;
+    };
+    params.abort_callback_user_data = rn_whisper_assign_abort_map(job_id);
 
     if (callback_instance != nullptr) {
         callback_context *cb_ctx = new callback_context;
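The second jni.cpp hunk wires up the abort_callback field that this whisper.cpp update adds to whisper_full_params. It polls the same per-job abort flag from rn_whisper_assign_abort_map(job_id) that the existing encoder_begin_callback already checks, so stopping a job now also interrupts decoding rather than only preventing the encoder from starting. Note the opposite return conventions: encoder_begin_callback returns false to stop, abort_callback returns true to stop. Below is a minimal C sketch of the same wiring against plain whisper.h, with a single local flag standing in for the job map; configure_abort is a hypothetical helper, and none of this is part of the package.

/* Minimal sketch (not from the package): both cancellation hooks read one flag. */
#include <stdbool.h>
#include "whisper.h"

static bool encoder_begin_cb(struct whisper_context * ctx, struct whisper_state * state, void * user_data) {
    (void) ctx; (void) state;
    return !*(bool *) user_data;   // return false to skip the encoder run
}

static bool abort_cb(void * user_data) {
    return *(bool *) user_data;    // return true to abort mid-transcription
}

// hypothetical helper: point both callbacks at the same abort flag
void configure_abort(struct whisper_full_params * params, bool * aborted_flag) {
    params->encoder_begin_callback           = encoder_begin_cb;
    params->encoder_begin_callback_user_data = aborted_flag;
    params->abort_callback                   = abort_cb;
    params->abort_callback_user_data         = aborted_flag;
}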
package/cpp/coreml/whisper-encoder.mm
@@ -22,7 +22,13 @@ struct whisper_coreml_context * whisper_coreml_init(const char * path_model) {
 
     NSURL * url_model = [NSURL fileURLWithPath: path_model_str];
 
-    const void * data = CFBridgingRetain([[whisper_encoder_impl alloc] initWithContentsOfURL:url_model error:nil]);
+    // select which device to run the Core ML model on
+    MLModelConfiguration *config = [[MLModelConfiguration alloc] init];
+    //config.computeUnits = MLComputeUnitsCPUAndGPU;
+    //config.computeUnits = MLComputeUnitsCPUAndNeuralEngine;
+    config.computeUnits = MLComputeUnitsAll;
+
+    const void * data = CFBridgingRetain([[whisper_encoder_impl alloc] initWithContentsOfURL:url_model configuration:config error:nil]);
 
     if (data == NULL) {
         return NULL;
package/cpp/ggml-alloc.c (new file)
@@ -0,0 +1,633 @@
+#include "ggml-alloc.h"
+#include "ggml.h"
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef __has_include
+#if __has_include(<unistd.h>)
+#include <unistd.h>
+#if defined(_POSIX_MAPPED_FILES)
+#include <sys/types.h>
+#include <sys/mman.h>
+#endif
+#endif
+#endif
+
+#if defined(_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <windows.h>
+#include <memoryapi.h>
+#endif
+
+
+#define UNUSED(x) (void)(x)
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#define WSP_GGML_MAX_CONCUR (2*WSP_GGML_MAX_NODES)
+
+//#define WSP_GGML_ALLOCATOR_DEBUG
+
+//#define AT_PRINTF printf
+#define AT_PRINTF(...) ((void)0)
+
+struct hash_node {
+    struct wsp_ggml_tensor * t;
+    int n_children;
+    int n_views;
+};
+
+static size_t hash(void * p) {
+    return (size_t)p % WSP_GGML_GRAPH_HASHTABLE_SIZE;
+}
+
+static struct hash_node * hash_get(struct hash_node hash_table[], struct wsp_ggml_tensor * t) {
+    size_t h = hash(t);
+
+    // linear probing
+    size_t i = h;
+    while (hash_table[i].t != NULL) {
+        if (hash_table[i].t == t) {
+            return &hash_table[i];
+        }
+        i = (i + 1) % WSP_GGML_GRAPH_HASHTABLE_SIZE;
+        if (i == h) {
+            // hash table is full
+            WSP_GGML_ASSERT(false);
+        }
+    }
+
+    hash_table[i].t = t;
+    return &hash_table[i];
+}
+
+// TODO: WSP_GGML_PAD ?
+static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
+    assert(alignment && !(alignment & (alignment - 1))); // power of 2
+    size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
+    return offset + align;
+}
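aligned_offset rounds offset up so that buffer + offset lands on an alignment boundary (alignment must be a power of two). The allocator below uses it both to pad tensor sizes and, in wsp_ggml_allocr_reset, to align the start of the backing buffer. A small standalone check of the arithmetic (illustrative only, not part of the package):

// With buffer == NULL, alignment == 16 and offset == 100:
//   align = (16 - (100 % 16)) % 16 = 12, so the padded offset is 112.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void) {
    const size_t alignment = 16;
    const size_t offset    = 100;
    size_t align = (alignment - (((uintptr_t) NULL + offset) % alignment)) % alignment;
    assert(offset + align == 112);                 // rounded up to the next multiple of 16
    assert((offset + align) % alignment == 0);
    return 0;
}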
+
+struct free_block {
+    void * addr;
+    size_t size;
+};
+
+#define MAX_FREE_BLOCKS 128
+
+struct wsp_ggml_allocr {
+    void * data;
+    size_t size;
+    size_t alignment;
+    int n_free_blocks;
+    struct free_block free_blocks[MAX_FREE_BLOCKS];
+    struct hash_node hash_table[WSP_GGML_GRAPH_HASHTABLE_SIZE];
+    size_t max_size;
+    bool measure;
+    int parse_seq[WSP_GGML_MAX_CONCUR];
+    int parse_seq_len;
+
+#ifdef WSP_GGML_ALLOCATOR_DEBUG
+    struct wsp_ggml_tensor * allocated_tensors[1024];
+#endif
+};
+
+#ifdef WSP_GGML_ALLOCATOR_DEBUG
+static void add_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+    for (int i = 0; i < 1024; i++) {
+        if (alloc->allocated_tensors[i] == NULL) {
+            alloc->allocated_tensors[i] = tensor;
+            return;
+        }
+    }
+    WSP_GGML_ASSERT(!"out of allocated_tensors");
+}
+static void remove_allocated_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+    for (int i = 0; i < 1024; i++) {
+        if (alloc->allocated_tensors[i] == tensor ||
+            (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
+            alloc->allocated_tensors[i] = NULL;
+            return;
+        }
+    }
+    printf("tried to free tensor %s not found\n", tensor->name);
+    WSP_GGML_ASSERT(!"tensor not found");
+}
+#endif
+
+static size_t wsp_ggml_allocr_get_alloc_size(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+    return wsp_ggml_nbytes(tensor);
+
+    UNUSED(alloc);
+}
+
+// check if a tensor is allocated by this buffer
+static bool wsp_ggml_allocr_is_own(struct wsp_ggml_allocr * alloc, const struct wsp_ggml_tensor * tensor) {
+    void * ptr = tensor->data;
+    return ptr >= alloc->data && (char *)ptr < (char *)alloc->data + alloc->max_size;
+}
+
+static bool wsp_ggml_is_view(struct wsp_ggml_tensor * t) {
+    return t->view_src != NULL;
+}
+
+void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+#ifdef WSP_GGML_ALLOCATOR_DEBUG
+    WSP_GGML_ASSERT(!wsp_ggml_is_view(tensor)); // views generally get data pointer from one of their sources
+    WSP_GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
+#endif
+    size_t size = wsp_ggml_allocr_get_alloc_size(alloc, tensor);
+    size = aligned_offset(NULL, size, alloc->alignment);
+
+    AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
+
+    size_t max_avail = 0;
+
+    // find the best fitting free block besides the last block
+    int best_fit_block = -1;
+    size_t best_fit_size = SIZE_MAX;
+    for (int i = 0; i < alloc->n_free_blocks - 1; i++) {
+        struct free_block * block = &alloc->free_blocks[i];
+        max_avail = MAX(max_avail, block->size);
+        if (block->size >= size && block->size <= best_fit_size) {
+            best_fit_block = i;
+            best_fit_size = block->size;
+        }
+    }
+
+    AT_PRINTF("block %d\n", best_fit_block);
+
+    if (best_fit_block == -1) {
+        // the last block is our last resort
+        struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
+        max_avail = MAX(max_avail, block->size);
+        if (block->size >= size) {
+            best_fit_block = alloc->n_free_blocks - 1;
+        } else {
+            fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
+                    __func__, size, max_avail);
+            WSP_GGML_ASSERT(!"not enough space in the buffer");
+            return;
+        }
+    }
+    struct free_block * block = &alloc->free_blocks[best_fit_block];
+    void * addr = block->addr;
+    block->addr = (char*)block->addr + size;
+    block->size -= size;
+    if (block->size == 0) {
+        // remove block if empty
+        alloc->n_free_blocks--;
+        for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
+            alloc->free_blocks[j] = alloc->free_blocks[j+1];
+        }
+    }
+
+    tensor->data = addr;
+
+#ifdef WSP_GGML_ALLOCATOR_DEBUG
+    add_allocated_tensor(alloc, tensor);
+    size_t cur_max = (char*)addr - (char*)alloc->data + size;
+    if (cur_max > alloc->max_size) {
+        printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
+        for (int i = 0; i < 1024; i++) {
+            if (alloc->allocated_tensors[i]) {
+                printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, wsp_ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
+            }
+        }
+        printf("\n");
+    }
+#endif
+
+    alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
+}
+
+// this is a very naive implementation, but for our case the number of free blocks should be very small
+static void wsp_ggml_allocr_free_tensor(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor) {
+    void * ptr = tensor->data;
+
+    if (wsp_ggml_allocr_is_own(alloc, tensor) == false) {
+        // the tensor was not allocated in this buffer
+        // this can happen because the graph allocator will try to free weights and other tensors from different buffers
+        // the easiest way to deal with this is just to ignore it
+        return;
+    }
+
+    size_t size = wsp_ggml_allocr_get_alloc_size(alloc, tensor);
+    size = aligned_offset(NULL, size, alloc->alignment);
+    AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);
+
+#ifdef WSP_GGML_ALLOCATOR_DEBUG
+    remove_allocated_tensor(alloc, tensor);
+#endif
+
+    // see if we can merge with an existing block
+    for (int i = 0; i < alloc->n_free_blocks; i++) {
+        struct free_block * block = &alloc->free_blocks[i];
+        // check if ptr is at the end of the block
+        if ((char*)block->addr + block->size == ptr) {
+            block->size += size;
+            // check if we can merge with the next block
+            if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
+                block->size += alloc->free_blocks[i+1].size;
+                alloc->n_free_blocks--;
+                for (int j = i+1; j < alloc->n_free_blocks; j++) {
+                    alloc->free_blocks[j] = alloc->free_blocks[j+1];
+                }
+            }
+            return;
+        }
+        // check if ptr is at the beginning of the block
+        if ((char*)ptr + size == block->addr) {
+            block->addr = ptr;
+            block->size += size;
+            // check if we can merge with the previous block
+            if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
+                alloc->free_blocks[i-1].size += block->size;
+                alloc->n_free_blocks--;
+                for (int j = i; j < alloc->n_free_blocks; j++) {
+                    alloc->free_blocks[j] = alloc->free_blocks[j+1];
+                }
+            }
+            return;
+        }
+    }
+    // otherwise, add a new block
+    WSP_GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
+    // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
+    int insert_pos = 0;
+    while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
+        insert_pos++;
+    }
+    // shift all blocks from insert_pos onward to make room for the new block
+    for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
+        alloc->free_blocks[i] = alloc->free_blocks[i-1];
+    }
+    // insert the new block
+    alloc->free_blocks[insert_pos].addr = ptr;
+    alloc->free_blocks[insert_pos].size = size;
+    alloc->n_free_blocks++;
+}
+
+void wsp_ggml_allocr_set_parse_seq(struct wsp_ggml_allocr * alloc, const int * list, int n) {
+    for (int i = 0; i < n; i++) {
+        alloc->parse_seq[i] = list[i];
+    }
+    alloc->parse_seq_len = n;
+}
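wsp_ggml_allocr_set_parse_seq lets a caller supply a custom node-evaluation order for the graph pass further down (the WSP_GGML_MAX_CONCUR-sized list is presumably meant for backends that reorder nodes, such as a concurrency-aware Metal scheduler). Entries of -1 act as barriers: nodes are allocated in list order, but parents are only freed when a barrier is reached, so every tensor within a group stays live for the whole group. A hypothetical sequence (not from the package; it assumes an allocator created earlier with wsp_ggml_allocr_new) would look like this:

// Nodes 0..5 evaluated as two groups of three; -1 marks a barrier after each group.
int order[] = { 0, 2, 1, -1, 3, 5, 4, -1 };
wsp_ggml_allocr_set_parse_seq(alloc, order, (int)(sizeof(order) / sizeof(order[0])));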
+
+void wsp_ggml_allocr_reset(struct wsp_ggml_allocr * alloc) {
+    alloc->n_free_blocks = 1;
+    size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
+    alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
+    alloc->free_blocks[0].size = alloc->size - align_offset;
+}
+
+struct wsp_ggml_allocr * wsp_ggml_allocr_new(void * data, size_t size, size_t alignment) {
+    struct wsp_ggml_allocr * alloc = (struct wsp_ggml_allocr *)malloc(sizeof(struct wsp_ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+
+    *alloc = (struct wsp_ggml_allocr){
+        /*.data          = */ data,
+        /*.size          = */ size,
+        /*.alignment     = */ alignment,
+        /*.n_free_blocks = */ 0,
+        /*.free_blocks   = */ {{0}},
+        /*.hash_table    = */ {{0}},
+        /*.max_size      = */ 0,
+        /*.measure       = */ false,
+        /*.parse_seq     = */ {0},
+        /*.parse_seq_len = */ 0,
+#ifdef WSP_GGML_ALLOCATOR_DEBUG
+        /*.allocated_tensors = */ {0},
+#endif
+    };
+
+    wsp_ggml_allocr_reset(alloc);
+
+    return alloc;
+}
+
+// OS specific functions to allocate and free uncommitted virtual memory
+static void * alloc_vmem(size_t size) {
+#if defined(_WIN32)
+    return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+#elif defined(_POSIX_MAPPED_FILES)
+    void * ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
+    if (ptr == MAP_FAILED) {
+        return NULL;
+    }
+    return ptr;
+#else
+    // use a fixed address for other platforms
+    uintptr_t base_addr = (uintptr_t)-size - 0x100;
+    return (void *)base_addr;
+#endif
+}
+
+static void free_vmem(void * base_addr, size_t size) {
+#if defined(_WIN32)
+    VirtualFree(base_addr, 0, MEM_RELEASE);
+    UNUSED(size);
+#elif defined(_POSIX_MAPPED_FILES)
+    munmap(base_addr, size);
+#else
+    // nothing to do
+    UNUSED(base_addr);
+    UNUSED(size);
+#endif
+}
+
+// allocate uncommitted virtual memory to measure the size of the graph
+static void alloc_measure_vmem(void ** base_addr, size_t * size) {
+    // 128GB for 64-bit, 1GB for 32-bit
+    *size = sizeof(void *) == 4 ? 1ULL<<30 : 1ULL<<37;
+    do {
+        *base_addr = alloc_vmem(*size);
+        if (*base_addr != NULL) {
+            AT_PRINTF("allocated %.2f GB of virtual memory for measure buffer at %p\n", *size / 1024.0 / 1024.0 / 1024.0, *base_addr);
+            return;
+        }
+        // try again with half the size
+        *size /= 2;
+    } while (*size > 0);
+
+    WSP_GGML_ASSERT(!"failed to allocate virtual memory for measure buffer");
+}
+
+static void free_measure_vmem(void * base_addr, size_t size) {
+    free_vmem(base_addr, size);
+}
+
+struct wsp_ggml_allocr * wsp_ggml_allocr_new_measure(size_t alignment) {
+    struct wsp_ggml_allocr * alloc = (struct wsp_ggml_allocr *)malloc(sizeof(struct wsp_ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+
+    void * base_addr;
+    size_t size;
+
+    alloc_measure_vmem(&base_addr, &size);
+
+    *alloc = (struct wsp_ggml_allocr){
+        /*.data          = */ base_addr,
+        /*.size          = */ size,
+        /*.alignment     = */ alignment,
+        /*.n_free_blocks = */ 0,
+        /*.free_blocks   = */ {{0}},
+        /*.hash_table    = */ {{0}},
+        /*.max_size      = */ 0,
+        /*.measure       = */ true,
+        /*.parse_seq     = */ {0},
+        /*.parse_seq_len = */ 0,
+#ifdef WSP_GGML_ALLOCATOR_DEBUG
+        /*.allocated_tensors = */ {0},
+#endif
+    };
+
+    wsp_ggml_allocr_reset(alloc);
+
+    return alloc;
+}
+
+void wsp_ggml_allocr_free(struct wsp_ggml_allocr * alloc) {
+    if (alloc->measure) {
+        free_measure_vmem(alloc->data, alloc->size);
+    }
+    free(alloc);
+}
+
+bool wsp_ggml_allocr_is_measure(struct wsp_ggml_allocr * alloc) {
+    return alloc->measure;
+}
+
+//////////// compute graph allocator
+
+static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
+    if (a->type != b->type) {
+        return false;
+    }
+    for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
+        if (a->ne[i] != b->ne[i]) {
+            return false;
+        }
+        if (a->nb[i] != b->nb[i]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+static bool wsp_ggml_op_can_inplace(enum wsp_ggml_op op) {
+    switch (op) {
+        case WSP_GGML_OP_SCALE:
+        case WSP_GGML_OP_DIAG_MASK_ZERO:
+        case WSP_GGML_OP_DIAG_MASK_INF:
+        case WSP_GGML_OP_ADD:
+        case WSP_GGML_OP_ADD1:
+        case WSP_GGML_OP_SUB:
+        case WSP_GGML_OP_MUL:
+        case WSP_GGML_OP_DIV:
+        case WSP_GGML_OP_SQR:
+        case WSP_GGML_OP_SQRT:
+        case WSP_GGML_OP_LOG:
+        case WSP_GGML_OP_UNARY:
+        case WSP_GGML_OP_ROPE:
+        case WSP_GGML_OP_RMS_NORM:
+        case WSP_GGML_OP_SOFT_MAX:
+        case WSP_GGML_OP_CONT:
+            return true;
+
+        default:
+            return false;
+    }
+}
+
+static void allocate_node(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * node) {
+    struct hash_node * ht = alloc->hash_table;
+    if (node->data == NULL) {
+        if (wsp_ggml_is_view(node)) {
+            assert(node->view_src->data != NULL);
+            node->data = (char *)node->view_src->data + node->view_offs;
+        } else {
+            // see if we can reuse a parent's buffer (inplace)
+            if (wsp_ggml_op_can_inplace(node->op)) {
+                for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
+                    struct wsp_ggml_tensor * parent = node->src[i];
+                    if (parent == NULL) {
+                        break;
+                    }
+
+                    // if the node's data is external, then we cannot re-use it
+                    if (wsp_ggml_allocr_is_own(alloc, parent) == false) {
+                        AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
+                        continue;
+                    }
+
+                    struct hash_node * p_hn = hash_get(ht, parent);
+                    if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && wsp_ggml_are_same_layout(node, parent)) {
+                        if (wsp_ggml_is_view(parent)) {
+                            struct wsp_ggml_tensor * view_src = parent->view_src;
+                            struct hash_node * view_src_hn = hash_get(ht, view_src);
+                            if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
+                                // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
+                                // the parent's data that it will need later (same layout requirement). the problem is that then
+                                // we cannot free the tensor because the original address of the allocation is lost.
+                                // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
+                                // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
+                                AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
+                                node->data = parent->data;
+                                return;
+                            }
+                        }
+                        else {
+                            AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
+                            node->data = parent->data;
+                            return;
+                        }
+                    }
+                }
+            }
+            wsp_ggml_allocr_alloc(alloc, node);
+        }
+    }
+}
+
+static size_t wsp_ggml_allocr_alloc_graph_tensors_n(
+    struct wsp_ggml_allocr * alloc,
+    struct wsp_ggml_cgraph ** graphs, int n_graphs,
+    struct wsp_ggml_tensor *** inputs, struct wsp_ggml_tensor *** outputs) {
+
+    // reset hash table
+    struct hash_node * ht = alloc->hash_table;
+    memset(ht, 0, sizeof(struct hash_node) * WSP_GGML_GRAPH_HASHTABLE_SIZE);
+
+    // count number of children and views
+    for (int g = 0; g < n_graphs; g++) {
+        struct wsp_ggml_cgraph * gf = graphs[g];
+        for (int i = 0; i < gf->n_nodes; i++) {
+            struct wsp_ggml_tensor * node = gf->nodes[i];
+
+            if (wsp_ggml_is_view(node)) {
+                struct wsp_ggml_tensor * view_src = node->view_src;
+                hash_get(ht, view_src)->n_views += 1;
+            }
+
+            for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+                struct wsp_ggml_tensor * parent = node->src[j];
+                if (parent == NULL) {
+                    break;
+                }
+                hash_get(ht, parent)->n_children += 1;
+            }
+        }
+    }
+
+    // allocate tensors
+    for (int g = 0; g < n_graphs; g++) {
+        struct wsp_ggml_cgraph * gf = graphs[g];
+        AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
+        // graph inputs are allocated first to ensure that they are not overwritten by each other
+        if (inputs != NULL && inputs[g] != NULL) {
+            for (int i = 0; inputs[g][i] != NULL; i++) {
+                struct wsp_ggml_tensor * input = inputs[g][i];
+                AT_PRINTF("input: %s\n", input->name);
+                allocate_node(alloc, input);
+            }
+        }
+        // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
+        int last_barrier_pos = 0;
+        int n_nodes = alloc->parse_seq_len ? alloc->parse_seq_len : gf->n_nodes;
+
+        for (int ind = 0; ind < n_nodes; ind++) {
+            // allocate a node if there is no parse_seq or this is not a barrier
+            if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] != -1) {
+                int i = alloc->parse_seq_len ? alloc->parse_seq[ind] : ind;
+                struct wsp_ggml_tensor * node = gf->nodes[i];
+
+                // allocate parents (leafs)
+                for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+                    struct wsp_ggml_tensor * parent = node->src[j];
+                    if (parent == NULL) {
+                        break;
+                    }
+                    allocate_node(alloc, parent);
+                }
+
+                // allocate node
+                allocate_node(alloc, node);
+
+                AT_PRINTF("exec: %s (%s) <= ", wsp_ggml_op_name(node->op), node->name);
+                for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+                    struct wsp_ggml_tensor * parent = node->src[j];
+                    if (parent == NULL) {
+                        break;
+                    }
+                    AT_PRINTF("%s", parent->name);
+                    if (j < WSP_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
+                        AT_PRINTF(", ");
+                    }
+                }
+                AT_PRINTF("\n");
+            }
+
+            // update parents
+            // update immediately if there is no parse_seq
+            // update only at barriers if there is parse_seq
+            if ((alloc->parse_seq_len == 0) || alloc->parse_seq[ind] == -1) {
+                int update_start = alloc->parse_seq_len ? last_barrier_pos : ind;
+                int update_end = alloc->parse_seq_len ? ind : ind + 1;
+                for (int i = update_start; i < update_end; i++) {
+                    int node_i = alloc->parse_seq_len ? alloc->parse_seq[i] : i;
+                    struct wsp_ggml_tensor * node = gf->nodes[node_i];
+
+                    for (int j = 0; j < WSP_GGML_MAX_SRC; j++) {
+                        struct wsp_ggml_tensor * parent = node->src[j];
+                        if (parent == NULL) {
+                            break;
+                        }
+                        struct hash_node * p_hn = hash_get(ht, parent);
+                        p_hn->n_children -= 1;
+
+                        //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
+
+                        if (p_hn->n_children == 0 && p_hn->n_views == 0) {
+                            if (wsp_ggml_is_view(parent)) {
+                                struct wsp_ggml_tensor * view_src = parent->view_src;
+                                struct hash_node * view_src_hn = hash_get(ht, view_src);
+                                view_src_hn->n_views -= 1;
+                                AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
+                                if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
+                                    wsp_ggml_allocr_free_tensor(alloc, view_src);
+                                }
+                            }
+                            else {
+                                if (parent->data != node->data) {
+                                    wsp_ggml_allocr_free_tensor(alloc, parent);
+                                }
+                            }
+                        }
+                    }
+                }
+                AT_PRINTF("\n");
+                if (alloc->parse_seq_len) {
+                    last_barrier_pos = ind + 1;
+                }
+            }
+        }
+        // free graph outputs here that wouldn't be freed otherwise because they have no children
+        if (outputs != NULL && outputs[g] != NULL) {
+            for (int i = 0; outputs[g][i] != NULL; i++) {
+                struct wsp_ggml_tensor * output = outputs[g][i];
+                AT_PRINTF("output: %s\n", output->name);
+                wsp_ggml_allocr_free_tensor(alloc, output);
+            }
+        }
+    }
+
+    return alloc->max_size;
+}
+
+size_t wsp_ggml_allocr_alloc_graph(struct wsp_ggml_allocr * alloc, struct wsp_ggml_cgraph * graph) {
+    return wsp_ggml_allocr_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
+}
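Taken together, the new file supports the usual two-pass pattern: first run the graph through a "measure" allocator, whose backing store is only reserved (never committed) virtual memory, to learn the peak size reported by wsp_ggml_allocr_alloc_graph; then create a real allocator over a buffer of that size and plan the tensors into it, calling wsp_ggml_allocr_reset to reuse the buffer on later runs. A condensed, illustrative sketch of that flow follows; build_graph() is a placeholder for whatever constructs the encoder/decoder graph, and the 32-byte alignment is an assumption, not taken from the package.

#include <stdlib.h>
#include "ggml.h"
#include "ggml-alloc.h"

struct wsp_ggml_cgraph * build_graph(void);   // placeholder: caller-provided graph construction

void plan_compute_buffer(void) {
    const size_t tensor_alignment = 32;       // assumed alignment

    // pass 1: measure the worst-case buffer size without committing memory
    struct wsp_ggml_allocr * measure = wsp_ggml_allocr_new_measure(tensor_alignment);
    size_t buf_size = wsp_ggml_allocr_alloc_graph(measure, build_graph());
    wsp_ggml_allocr_free(measure);

    // pass 2: allocate a real buffer of that size and assign tensor addresses in it
    void * buf = malloc(buf_size);
    struct wsp_ggml_allocr * alloc = wsp_ggml_allocr_new(buf, buf_size, tensor_alignment);
    wsp_ggml_allocr_alloc_graph(alloc, build_graph());

    // ... evaluate the graph, then reuse the same buffer for the next call ...
    wsp_ggml_allocr_reset(alloc);

    wsp_ggml_allocr_free(alloc);              // frees only the allocator, not the caller's buffer
    free(buf);
}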