whisper.rn 0.4.0-rc.7 → 0.4.0-rc.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/android/src/main/CMakeLists.txt +2 -1
  2. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -12
  3. package/android/src/main/java/com/rnwhisper/RNWhisper.java +75 -34
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +20 -3
  5. package/android/src/main/jni.cpp +29 -1
  6. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  7. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  8. package/cpp/coreml/whisper-encoder.mm +1 -1
  9. package/cpp/ggml-aarch64.c +3209 -0
  10. package/cpp/ggml-aarch64.h +39 -0
  11. package/cpp/ggml-alloc.c +732 -494
  12. package/cpp/ggml-alloc.h +47 -63
  13. package/cpp/ggml-backend-impl.h +162 -47
  14. package/cpp/ggml-backend.cpp +2635 -0
  15. package/cpp/ggml-backend.h +216 -71
  16. package/cpp/ggml-common.h +1853 -0
  17. package/cpp/ggml-cpu-impl.h +614 -0
  18. package/cpp/ggml-impl.h +144 -178
  19. package/cpp/ggml-metal.h +14 -60
  20. package/cpp/ggml-metal.m +3437 -2097
  21. package/cpp/ggml-quants.c +12559 -4189
  22. package/cpp/ggml-quants.h +135 -212
  23. package/cpp/ggml-whisper.metallib +0 -0
  24. package/cpp/ggml.c +9029 -5219
  25. package/cpp/ggml.h +673 -338
  26. package/cpp/rn-whisper.cpp +91 -0
  27. package/cpp/rn-whisper.h +2 -0
  28. package/cpp/whisper.cpp +1476 -675
  29. package/cpp/whisper.h +84 -28
  30. package/ios/RNWhisper.mm +124 -37
  31. package/ios/RNWhisperAudioUtils.h +1 -0
  32. package/ios/RNWhisperAudioUtils.m +20 -13
  33. package/ios/RNWhisperContext.h +3 -2
  34. package/ios/RNWhisperContext.mm +41 -8
  35. package/jest/mock.js +9 -1
  36. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  37. package/lib/commonjs/index.js +48 -19
  38. package/lib/commonjs/index.js.map +1 -1
  39. package/lib/commonjs/version.json +1 -1
  40. package/lib/module/NativeRNWhisper.js.map +1 -1
  41. package/lib/module/index.js +48 -19
  42. package/lib/module/index.js.map +1 -1
  43. package/lib/module/version.json +1 -1
  44. package/lib/typescript/NativeRNWhisper.d.ts +6 -3
  45. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  46. package/lib/typescript/index.d.ts +25 -3
  47. package/lib/typescript/index.d.ts.map +1 -1
  48. package/package.json +6 -5
  49. package/src/NativeRNWhisper.ts +12 -3
  50. package/src/index.ts +63 -24
  51. package/src/version.json +1 -1
  52. package/whisper-rn.podspec +9 -2
  53. package/cpp/ggml-backend.c +0 -1357
  54. package/cpp/ggml-metal-whisper.metal +0 -4908
package/cpp/ggml-alloc.h CHANGED
@@ -6,86 +6,70 @@
  extern "C" {
  #endif

- struct wsp_ggml_backend;
- struct wsp_ggml_backend_buffer;
- struct wsp_ggml_backend_buffer_type;
+ typedef struct wsp_ggml_backend_buffer_type * wsp_ggml_backend_buffer_type_t;
+ typedef struct wsp_ggml_backend_buffer * wsp_ggml_backend_buffer_t;
+ typedef struct wsp_ggml_backend * wsp_ggml_backend_t;

- //
- // Legacy API
- //
-
- typedef struct wsp_ggml_allocr * wsp_ggml_allocr_t;
-
- // initialize allocator for use with CPU backend only
- WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new(void * data, size_t size, size_t alignment);
- WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_measure(size_t alignment);
-
- // initialize allocator for use with ggml-backend
- WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer);
- WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size); // allocates an owned buffer
- WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_measure_from_backend(struct wsp_ggml_backend * backend);
-
- WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_allocr_get_buffer(wsp_ggml_allocr_t alloc);
-
- // tell the allocator to parse nodes following the order described in the list
- // you should call this if your graph are optimized to execute out-of-order
- WSP_GGML_API void wsp_ggml_allocr_set_parse_seq(wsp_ggml_allocr_t alloc, const int * list, int n);
-
- WSP_GGML_API void   wsp_ggml_allocr_free       (wsp_ggml_allocr_t alloc);
- WSP_GGML_API bool   wsp_ggml_allocr_is_measure (wsp_ggml_allocr_t alloc);
- WSP_GGML_API void   wsp_ggml_allocr_reset      (wsp_ggml_allocr_t alloc);
- WSP_GGML_API void   wsp_ggml_allocr_alloc      (wsp_ggml_allocr_t alloc, struct wsp_ggml_tensor * tensor);
- WSP_GGML_API size_t wsp_ggml_allocr_max_size   (wsp_ggml_allocr_t alloc);
+ // Tensor allocator
+ struct wsp_ggml_tallocr {
+     wsp_ggml_backend_buffer_t buffer;
+     void * base;
+     size_t alignment;
+     size_t offset;
+ };

- WSP_GGML_API size_t wsp_ggml_allocr_alloc_graph(wsp_ggml_allocr_t alloc, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API struct wsp_ggml_tallocr wsp_ggml_tallocr_new(wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void                    wsp_ggml_tallocr_alloc(struct wsp_ggml_tallocr * talloc, struct wsp_ggml_tensor * tensor);

- //
- // ggml-backend v2 API
- //
+ // Graph allocator
+ /*
+   Example usage:
+     wsp_ggml_gallocr_t galloc = wsp_ggml_gallocr_new(wsp_ggml_backend_cpu_buffer_type());

- // Separate tensor and graph allocator objects
- // This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
- // The original API is kept as a wrapper around the new API
+     // optional: create a worst-case graph and reserve the buffers to avoid reallocations
+     wsp_ggml_gallocr_reserve(galloc, build_graph(max_batch));

- // Tensor allocator
- typedef struct wsp_ggml_tallocr * wsp_ggml_tallocr_t;
+     // allocate the graph
+     struct wsp_ggml_cgraph * graph = build_graph(batch);
+     wsp_ggml_gallocr_alloc_graph(galloc, graph);

- WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new(void * data, size_t size, size_t alignment);
- WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure(size_t alignment);
- WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer);
- WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size); // allocates an owned buffer
- WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure_from_backend(struct wsp_ggml_backend * backend);
+     printf("compute buffer size: %zu bytes\n", wsp_ggml_gallocr_get_buffer_size(galloc, 0));

- WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_tallocr_get_buffer(wsp_ggml_tallocr_t talloc);
+     // evaluate the graph
+     wsp_ggml_backend_graph_compute(backend, graph);
+ */

- WSP_GGML_API void   wsp_ggml_tallocr_free       (wsp_ggml_tallocr_t talloc);
- WSP_GGML_API bool   wsp_ggml_tallocr_is_measure (wsp_ggml_tallocr_t talloc);
- WSP_GGML_API void   wsp_ggml_tallocr_reset      (wsp_ggml_tallocr_t talloc);
- WSP_GGML_API void   wsp_ggml_tallocr_alloc      (wsp_ggml_tallocr_t talloc, struct wsp_ggml_tensor * tensor);
- WSP_GGML_API size_t wsp_ggml_tallocr_max_size   (wsp_ggml_tallocr_t talloc);
+ // special tensor flags for use with the graph allocator:
+ //   wsp_ggml_set_input(): all input tensors are allocated at the beginning of the graph in non-overlapping addresses
+ //   wsp_ggml_set_output(): output tensors are never freed and never overwritten

-
- // Graph allocator
  typedef struct wsp_ggml_gallocr * wsp_ggml_gallocr_t;

- WSP_GGML_API wsp_ggml_gallocr_t wsp_ggml_gallocr_new(void);
- WSP_GGML_API void wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc);
+ WSP_GGML_API wsp_ggml_gallocr_t wsp_ggml_gallocr_new(wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API wsp_ggml_gallocr_t wsp_ggml_gallocr_new_n(wsp_ggml_backend_buffer_type_t * bufts, int n_bufs);
+ WSP_GGML_API void               wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc);

- WSP_GGML_API void   wsp_ggml_gallocr_set_parse_seq(wsp_ggml_gallocr_t galloc, const int * list, int n);
- WSP_GGML_API size_t wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, wsp_ggml_tallocr_t talloc, struct wsp_ggml_cgraph * graph);
+ // pre-allocate buffers from a measure graph - does not allocate or modify the graph
+ // call with a worst-case graph to avoid buffer reallocations
+ // not strictly required for single buffer usage: wsp_ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed
+ // returns false if the buffer allocation failed
+ WSP_GGML_API bool wsp_ggml_gallocr_reserve(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API bool wsp_ggml_gallocr_reserve_n(
+     wsp_ggml_gallocr_t galloc,
+     struct wsp_ggml_cgraph * graph,
+     const int * node_buffer_ids,
+     const int * leaf_buffer_ids);

- // Allocate tensors from the allocators given by the hash table
- WSP_GGML_API void wsp_ggml_gallocr_alloc_graph_n(
-     wsp_ggml_gallocr_t galloc,
-     struct wsp_ggml_cgraph * graph,
-     struct wsp_ggml_hash_set hash_set,
-     wsp_ggml_tallocr_t * hash_node_talloc);
+ // automatic reallocation if the topology changes when using a single buffer
+ // returns false if using multiple buffers and a re-allocation is needed (call wsp_ggml_gallocr_reserve_n first to set the node buffers)
+ WSP_GGML_API bool wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph);

+ WSP_GGML_API size_t wsp_ggml_gallocr_get_buffer_size(wsp_ggml_gallocr_t galloc, int buffer_id);

  // Utils
  // Create a buffer and allocate all the tensors in a wsp_ggml_context
- WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, struct wsp_ggml_backend_buffer_type * buft);
- WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors(struct wsp_ggml_context * ctx, struct wsp_ggml_backend * backend);
+ WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors(struct wsp_ggml_context * ctx, wsp_ggml_backend_t backend);

  #ifdef __cplusplus
  }
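
Taken together, the rc.9 header replaces the legacy wsp_ggml_allocr_* API with two smaller ones: wsp_ggml_tallocr for placing tensors into an existing backend buffer, and wsp_ggml_gallocr for allocating whole compute graphs. Below is a minimal sketch of the new flow; it is not taken from the package, and it assumes the usual wsp_-prefixed ggml helpers bundled in package/cpp (wsp_ggml_init, wsp_ggml_add, wsp_ggml_new_graph, wsp_ggml_backend_cpu_init, and the tensor set/get helpers from ggml-backend.h).

    #include <stdio.h>
    #include "ggml.h"
    #include "ggml-alloc.h"
    #include "ggml-backend.h"

    int main(void) {
        wsp_ggml_backend_t backend = wsp_ggml_backend_cpu_init();

        // no_alloc: the context only holds tensor/graph metadata; data lives in backend buffers
        struct wsp_ggml_init_params ip = {
            /*.mem_size   =*/ wsp_ggml_tensor_overhead() * 8 + wsp_ggml_graph_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        struct wsp_ggml_context * ctx = wsp_ggml_init(ip);

        struct wsp_ggml_tensor * a = wsp_ggml_new_tensor_1d(ctx, WSP_GGML_TYPE_F32, 16);
        struct wsp_ggml_tensor * b = wsp_ggml_new_tensor_1d(ctx, WSP_GGML_TYPE_F32, 16);

        // Utils API from this header: one backend buffer for the tensors created so far
        wsp_ggml_backend_buffer_t weights = wsp_ggml_backend_alloc_ctx_tensors(ctx, backend);

        float ones[16];
        for (int i = 0; i < 16; i++) ones[i] = 1.0f;
        wsp_ggml_backend_tensor_set(a, ones, 0, sizeof(ones));
        wsp_ggml_backend_tensor_set(b, ones, 0, sizeof(ones));

        // build the compute graph; its remaining tensors are placed by the graph allocator
        struct wsp_ggml_tensor * c = wsp_ggml_add(ctx, a, b);
        struct wsp_ggml_cgraph * graph = wsp_ggml_new_graph(ctx);
        wsp_ggml_build_forward_expand(graph, c);

        wsp_ggml_gallocr_t galloc = wsp_ggml_gallocr_new(wsp_ggml_backend_get_default_buffer_type(backend));
        wsp_ggml_gallocr_alloc_graph(galloc, graph); // reallocates automatically for a single buffer type
        printf("compute buffer size: %zu bytes\n", wsp_ggml_gallocr_get_buffer_size(galloc, 0));

        wsp_ggml_backend_graph_compute(backend, graph);

        float out[16];
        wsp_ggml_backend_tensor_get(c, out, 0, sizeof(out));
        printf("c[0] = %.1f\n", out[0]); // expected: 2.0

        wsp_ggml_gallocr_free(galloc);
        wsp_ggml_backend_buffer_free(weights);
        wsp_ggml_free(ctx);
        wsp_ggml_backend_free(backend);
        return 0;
    }

Because only a single buffer type is used here, wsp_ggml_gallocr_reserve is optional: per the header comments, the allocator grows its buffer on the first wsp_ggml_gallocr_alloc_graph call and only multi-buffer setups need an explicit reserve.
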
package/cpp/ggml-backend-impl.h CHANGED
@@ -9,103 +9,218 @@ extern "C" {
  #endif

  //
- // Backend buffer
+ // Backend buffer type
  //

- // buffer type
- typedef void * wsp_ggml_backend_buffer_type_context_t;
-
  struct wsp_ggml_backend_buffer_type_i {
-     wsp_ggml_backend_buffer_t (*alloc_buffer)    (wsp_ggml_backend_buffer_type_t buft, size_t size);
-     size_t                    (*get_alignment)   (wsp_ggml_backend_buffer_type_t buft); // tensor alignment
-     size_t                    (*get_alloc_size)  (wsp_ggml_backend_buffer_type_t buft, struct wsp_ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
-     bool                      (*supports_backend)(wsp_ggml_backend_buffer_type_t buft, wsp_ggml_backend_t backend); // check if the buffer type is usable by the backend
+     const char * (*get_name)(wsp_ggml_backend_buffer_type_t buft);
+     // allocate a buffer of this type
+     wsp_ggml_backend_buffer_t (*alloc_buffer)(wsp_ggml_backend_buffer_type_t buft, size_t size);
+     // tensor alignment
+     size_t (*get_alignment)(wsp_ggml_backend_buffer_type_t buft);
+     // (optional) max buffer size that can be allocated (defaults to SIZE_MAX)
+     size_t (*get_max_size)(wsp_ggml_backend_buffer_type_t buft);
+     // (optional) data size needed to allocate the tensor, including padding (defaults to wsp_ggml_nbytes)
+     size_t (*get_alloc_size)(wsp_ggml_backend_buffer_type_t buft, const struct wsp_ggml_tensor * tensor);
+     // (optional) check if tensor data is in host memory (defaults to false)
+     bool (*is_host)(wsp_ggml_backend_buffer_type_t buft);
  };

  struct wsp_ggml_backend_buffer_type {
      struct wsp_ggml_backend_buffer_type_i iface;
-     wsp_ggml_backend_buffer_type_context_t context;
+     wsp_ggml_backend_dev_t device;
+     void * context;
  };

- // buffer
- typedef void * wsp_ggml_backend_buffer_context_t;
+ //
+ // Backend buffer
+ //

  struct wsp_ggml_backend_buffer_i {
-     void   (*free_buffer)(wsp_ggml_backend_buffer_t buffer);
-     //void (*reset)      (wsp_ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
-     void * (*get_base)   (wsp_ggml_backend_buffer_t buffer);
-     void   (*init_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
-     void   (*set_tensor) (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-     void   (*get_tensor) (wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
-     // (optional) copy tensor between different buffer-type, allow for single-copy tranfers
-     void (*cpy_tensor_from)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
-     void (*cpy_tensor_to)  (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+     const char * (*get_name)(wsp_ggml_backend_buffer_t buffer);
+     // (optional) free the buffer
+     void (*free_buffer)(wsp_ggml_backend_buffer_t buffer);
+     // base address of the buffer
+     void * (*get_base)(wsp_ggml_backend_buffer_t buffer);
+     // (optional) initialize a tensor in the buffer (eg. add tensor extras)
+     void (*init_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+     // tensor data access
+     void (*memset_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+     void (*set_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+     void (*get_tensor)(wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+     // (optional) tensor copy: dst is in the buffer, src may be in any buffer, including buffers from a different backend (return false if not supported)
+     bool (*cpy_tensor)(wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+     // clear the entire buffer
+     void (*clear)(wsp_ggml_backend_buffer_t buffer, uint8_t value);
+     // (optional) reset any internal state due to tensor initialization, such as tensor extras
+     void (*reset)(wsp_ggml_backend_buffer_t buffer);
  };

  struct wsp_ggml_backend_buffer {
      struct wsp_ggml_backend_buffer_i iface;
      wsp_ggml_backend_buffer_type_t buft;
-     wsp_ggml_backend_buffer_context_t context;
+     void * context;
      size_t size;
+     enum wsp_ggml_backend_buffer_usage usage;
  };

  wsp_ggml_backend_buffer_t wsp_ggml_backend_buffer_init(
-     wsp_ggml_backend_buffer_type_t buft,
-     struct wsp_ggml_backend_buffer_i iface,
-     wsp_ggml_backend_buffer_context_t context,
-     size_t size);
+     wsp_ggml_backend_buffer_type_t buft,
+     struct wsp_ggml_backend_buffer_i iface,
+     void * context,
+     size_t size);

+ // do not use directly, use wsp_ggml_backend_tensor_copy instead
+ bool wsp_ggml_backend_buffer_copy_tensor(const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+
+ // multi-buffer
+ // buffer that contains a collection of buffers
+ wsp_ggml_backend_buffer_t wsp_ggml_backend_multi_buffer_alloc_buffer(wsp_ggml_backend_buffer_t * buffers, size_t n_buffers);
+ bool wsp_ggml_backend_buffer_is_multi_buffer(wsp_ggml_backend_buffer_t buffer);
+ void wsp_ggml_backend_multi_buffer_set_usage(wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage);

  //
- // Backend
+ // Backend (stream)
  //

- typedef void * wsp_ggml_backend_context_t;
-
  struct wsp_ggml_backend_i {
      const char * (*get_name)(wsp_ggml_backend_t backend);

      void (*free)(wsp_ggml_backend_t backend);

+     // Will be moved to the device interface
      // buffer allocation
      wsp_ggml_backend_buffer_type_t (*get_default_buffer_type)(wsp_ggml_backend_t backend);

-     // (optional) asynchroneous tensor data access
+     // (optional) asynchronous tensor data access
      void (*set_tensor_async)(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
      void (*get_tensor_async)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+     bool (*cpy_tensor_async)(wsp_ggml_backend_t backend_src, wsp_ggml_backend_t backend_dst, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);

-     // (optional) asynchroneous tensor copy
-     void (*cpy_tensor_from_async)(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
-     void (*cpy_tensor_to_async)  (wsp_ggml_backend_t backend, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+     // (optional) complete all pending operations
+     void (*synchronize)(wsp_ggml_backend_t backend);

-     void (*synchronize)(wsp_ggml_backend_t backend);
-
-     // compute graph with a plan
-     wsp_ggml_backend_graph_plan_t (*graph_plan_create)(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+     // (optional) compute graph with a plan (not used currently)
+     wsp_ggml_backend_graph_plan_t (*graph_plan_create)(wsp_ggml_backend_t backend, const struct wsp_ggml_cgraph * cgraph);
      void (*graph_plan_free)(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
-     void (*graph_plan_compute)(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
-
-     // compute graph without a plan
-     void (*graph_compute)(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
-
-     // check if the backend supports an operation
-     bool (*supports_op)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+     // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
+     void (*graph_plan_update)(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan, const struct wsp_ggml_cgraph * cgraph);
+     // compute the graph with the plan
+     enum wsp_ggml_status (*graph_plan_compute)(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+
+     // compute graph (always async if supported by the backend)
+     enum wsp_ggml_status (*graph_compute)(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+
+     // IMPORTANT: these functions have been moved to the device interface and will be removed from the backend interface
+     // new backends should implement the device interface instead
+     // These functions are being moved to the device interface
+     bool (*supports_op)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+     bool (*supports_buft)(wsp_ggml_backend_t backend, wsp_ggml_backend_buffer_type_t buft);
+     bool (*offload_op)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+
+     // (optional) event synchronization
+     // record an event on this stream
+     void (*event_record)(wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
+     // wait for an event on on a different stream
+     void (*event_wait)(wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
  };

  struct wsp_ggml_backend {
+     wsp_ggml_guid_t guid;
      struct wsp_ggml_backend_i iface;
+     wsp_ggml_backend_dev_t device;
+     void * context;
+ };

-     wsp_ggml_backend_context_t context;
+ struct wsp_ggml_backend_event {
+     struct wsp_ggml_backend_device * device;
+     void * context;
  };

+ //
+ // Backend device
+ //
+
+ // Note: if additional properties are needed, we should add a struct with all of them
+ // the current functions to obtain the properties can remain, since they are more convenient for often used properties
+ struct wsp_ggml_backend_device_i {
+     // device name: short identifier for this device, such as "CPU" or "CUDA0"
+     const char * (*get_name)(wsp_ggml_backend_dev_t dev);
+
+     // device description: short informative description of the device, could be the model name
+     const char * (*get_description)(wsp_ggml_backend_dev_t dev);
+
+     // device memory in bytes
+     void (*get_memory)(wsp_ggml_backend_dev_t dev, size_t * free, size_t * total);
+
+     // device type
+     enum wsp_ggml_backend_dev_type (*get_type)(wsp_ggml_backend_dev_t dev);
+
+     // device properties
+     void (*get_props)(wsp_ggml_backend_dev_t dev, struct wsp_ggml_backend_dev_props * props);
+
+     // backend (stream) initialization
+     wsp_ggml_backend_t (*init_backend)(wsp_ggml_backend_dev_t dev, const char * params);
+
+     // preferred buffer type
+     wsp_ggml_backend_buffer_type_t (*get_buffer_type)(wsp_ggml_backend_dev_t dev);
+
+     // (optional) host buffer type (in system memory, typically this is a pinned memory buffer for faster transfers between host and device)
+     wsp_ggml_backend_buffer_type_t (*get_host_buffer_type)(wsp_ggml_backend_dev_t dev);
+
+     // (optional) buffer from pointer: create a buffer from a host pointer (useful for memory mapped models and importing data from other libraries)
+     wsp_ggml_backend_buffer_t (*buffer_from_host_ptr)(wsp_ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size);
+
+     // check if the backend can compute an operation
+     bool (*supports_op)(wsp_ggml_backend_dev_t dev, const struct wsp_ggml_tensor * op);
+
+     // check if the backend can use tensors allocated in a buffer type
+     bool (*supports_buft)(wsp_ggml_backend_dev_t dev, wsp_ggml_backend_buffer_type_t buft);
+
+     // (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer
+     // these should be expensive operations that may benefit from running on this backend instead of the CPU backend
+     bool (*offload_op)(wsp_ggml_backend_dev_t dev, const struct wsp_ggml_tensor * op);
+
+     // (optional) event synchronization
+     wsp_ggml_backend_event_t (*event_new)(wsp_ggml_backend_dev_t dev);
+     void (*event_free)(wsp_ggml_backend_dev_t dev, wsp_ggml_backend_event_t event);
+     void (*event_synchronize)(wsp_ggml_backend_dev_t dev, wsp_ggml_backend_event_t event);
+ };
+
+ struct wsp_ggml_backend_device {
+     struct wsp_ggml_backend_device_i iface;
+     wsp_ggml_backend_reg_t reg;
+     void * context;
+ };

  //
- // Backend registry
+ // Backend (reg)
  //

- typedef wsp_ggml_backend_t (*wsp_ggml_backend_init_fn)(const char * params, void * user_data);
+ struct wsp_ggml_backend_reg_i {
+     const char * (*get_name)(wsp_ggml_backend_reg_t reg);
+
+     // enumerate available devices
+     size_t (*get_device_count)(wsp_ggml_backend_reg_t reg);
+     wsp_ggml_backend_dev_t (*get_device)(wsp_ggml_backend_reg_t reg, size_t index);
+
+     // (optional) get a pointer to a function in the backend
+     // backends can add custom functions that are not part of the standard ggml-backend interface
+     void * (*get_proc_address)(wsp_ggml_backend_reg_t reg, const char * name);
+ };
+
+ struct wsp_ggml_backend_reg {
+     // int api_version; // TODO: for dynamic loading
+     struct wsp_ggml_backend_reg_i iface;
+     void * context;
+ };
+

- void wsp_ggml_backend_register(const char * name, wsp_ggml_backend_init_fn init_fn, wsp_ggml_backend_buffer_type_t default_buffer_type, void * user_data);
+ // Internal backend registry API
+ void wsp_ggml_backend_register(wsp_ggml_backend_reg_t reg);
+ void wsp_ggml_backend_device_register(wsp_ggml_backend_dev_t device);
+ // TODO: backends can be loaded as a dynamic library, in which case it needs to export this function
+ // typedef wsp_ggml_backend_register_t * (*wsp_ggml_backend_init)(void);

  #ifdef __cplusplus
  }
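
For backend implementers, the interface change is mostly mechanical: the typed *_context_t aliases become plain void * fields, every object gains a get_name, buffers pick up memset_tensor/clear/reset, and buffer types now carry a device pointer for the new device/registry layer. Below is a hypothetical sketch of a malloc-backed host buffer type written against these tables; the my_host_* names are invented for illustration, and a real backend would also set the device field and register itself through wsp_ggml_backend_device_register.

    #include <stdlib.h>
    #include <string.h>
    #include "ggml-backend-impl.h"

    static const char * my_host_buffer_get_name(wsp_ggml_backend_buffer_t buffer) {
        (void) buffer; return "MyHost";
    }
    static void my_host_buffer_free(wsp_ggml_backend_buffer_t buffer) {
        free(buffer->context); // context is the raw allocation (a plain void * now)
    }
    static void * my_host_buffer_get_base(wsp_ggml_backend_buffer_t buffer) {
        return buffer->context;
    }
    static void my_host_buffer_memset_tensor(wsp_ggml_backend_buffer_t buffer,
            struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
        (void) buffer; memset((char *) tensor->data + offset, value, size);
    }
    static void my_host_buffer_set_tensor(wsp_ggml_backend_buffer_t buffer,
            struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
        (void) buffer; memcpy((char *) tensor->data + offset, data, size);
    }
    static void my_host_buffer_get_tensor(wsp_ggml_backend_buffer_t buffer,
            const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
        (void) buffer; memcpy(data, (const char *) tensor->data + offset, size);
    }
    static void my_host_buffer_clear(wsp_ggml_backend_buffer_t buffer, uint8_t value) {
        memset(buffer->context, value, buffer->size);
    }

    static const struct wsp_ggml_backend_buffer_i my_host_buffer_iface = {
        /*.get_name      =*/ my_host_buffer_get_name,
        /*.free_buffer   =*/ my_host_buffer_free,
        /*.get_base      =*/ my_host_buffer_get_base,
        /*.init_tensor   =*/ NULL, // optional
        /*.memset_tensor =*/ my_host_buffer_memset_tensor,
        /*.set_tensor    =*/ my_host_buffer_set_tensor,
        /*.get_tensor    =*/ my_host_buffer_get_tensor,
        /*.cpy_tensor    =*/ NULL, // optional: return false if not supported
        /*.clear         =*/ my_host_buffer_clear,
        /*.reset         =*/ NULL, // optional
    };

    static const char * my_host_buft_get_name(wsp_ggml_backend_buffer_type_t buft) {
        (void) buft; return "MyHost";
    }
    static wsp_ggml_backend_buffer_t my_host_buft_alloc_buffer(
            wsp_ggml_backend_buffer_type_t buft, size_t size) {
        // error handling (NULL from malloc) omitted in this sketch
        return wsp_ggml_backend_buffer_init(buft, my_host_buffer_iface, malloc(size), size);
    }
    static size_t my_host_buft_get_alignment(wsp_ggml_backend_buffer_type_t buft) {
        (void) buft; return 32; // illustrative; match your backend's real requirement
    }
    static bool my_host_buft_is_host(wsp_ggml_backend_buffer_type_t buft) {
        (void) buft; return true;
    }

    static struct wsp_ggml_backend_buffer_type my_host_buffer_type = {
        /*.iface =*/ {
            /*.get_name       =*/ my_host_buft_get_name,
            /*.alloc_buffer   =*/ my_host_buft_alloc_buffer,
            /*.get_alignment  =*/ my_host_buft_get_alignment,
            /*.get_max_size   =*/ NULL, // optional, defaults to SIZE_MAX
            /*.get_alloc_size =*/ NULL, // optional, defaults to wsp_ggml_nbytes
            /*.is_host        =*/ my_host_buft_is_host,
        },
        /*.device  =*/ NULL, // a real backend points this at its wsp_ggml_backend_device
        /*.context =*/ NULL,
    };

Per the header comments, the optional callbacks left NULL fall back to documented defaults (get_max_size to SIZE_MAX, get_alloc_size to wsp_ggml_nbytes), so a minimal buffer type stays small; such a type could then be handed to the graph allocator, e.g. wsp_ggml_gallocr_new(&my_host_buffer_type).
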