whisper.rn 0.4.0-rc.8 → 0.4.0-rc.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/android/src/main/CMakeLists.txt +2 -1
  2. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -12
  3. package/android/src/main/java/com/rnwhisper/RNWhisper.java +75 -34
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +20 -3
  5. package/android/src/main/jni.cpp +29 -1
  6. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  7. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  8. package/cpp/ggml-aarch64.c +3209 -0
  9. package/cpp/ggml-aarch64.h +39 -0
  10. package/cpp/ggml-alloc.c +725 -517
  11. package/cpp/ggml-alloc.h +47 -65
  12. package/cpp/ggml-backend-impl.h +166 -55
  13. package/cpp/ggml-backend.cpp +2635 -0
  14. package/cpp/ggml-backend.h +202 -85
  15. package/cpp/ggml-common.h +1853 -0
  16. package/cpp/ggml-cpu-impl.h +614 -0
  17. package/cpp/ggml-impl.h +143 -180
  18. package/cpp/ggml-metal.h +13 -11
  19. package/cpp/ggml-metal.m +2955 -1632
  20. package/cpp/ggml-quants.c +9824 -3263
  21. package/cpp/ggml-quants.h +133 -248
  22. package/cpp/ggml-whisper.metallib +0 -0
  23. package/cpp/ggml.c +8482 -5142
  24. package/cpp/ggml.h +633 -349
  25. package/cpp/rn-whisper.cpp +91 -0
  26. package/cpp/rn-whisper.h +2 -0
  27. package/cpp/whisper.cpp +1427 -658
  28. package/cpp/whisper.h +84 -28
  29. package/ios/RNWhisper.mm +124 -37
  30. package/ios/RNWhisperAudioUtils.h +1 -0
  31. package/ios/RNWhisperAudioUtils.m +20 -13
  32. package/ios/RNWhisperContext.h +3 -2
  33. package/ios/RNWhisperContext.mm +39 -7
  34. package/jest/mock.js +9 -1
  35. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  36. package/lib/commonjs/index.js +48 -19
  37. package/lib/commonjs/index.js.map +1 -1
  38. package/lib/commonjs/version.json +1 -1
  39. package/lib/module/NativeRNWhisper.js.map +1 -1
  40. package/lib/module/index.js +48 -19
  41. package/lib/module/index.js.map +1 -1
  42. package/lib/module/version.json +1 -1
  43. package/lib/typescript/NativeRNWhisper.d.ts +6 -3
  44. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  45. package/lib/typescript/index.d.ts +25 -3
  46. package/lib/typescript/index.d.ts.map +1 -1
  47. package/package.json +6 -5
  48. package/src/NativeRNWhisper.ts +12 -3
  49. package/src/index.ts +63 -24
  50. package/src/version.json +1 -1
  51. package/whisper-rn.podspec +9 -2
  52. package/cpp/ggml-backend.c +0 -1718
  53. package/cpp/ggml-metal-whisper.metal +0 -5820
package/cpp/ggml-alloc.h CHANGED
@@ -6,88 +6,70 @@
 extern "C" {
 #endif
 
-struct wsp_ggml_backend;
-struct wsp_ggml_backend_buffer;
-struct wsp_ggml_backend_buffer_type;
+typedef struct wsp_ggml_backend_buffer_type * wsp_ggml_backend_buffer_type_t;
+typedef struct wsp_ggml_backend_buffer * wsp_ggml_backend_buffer_t;
+typedef struct wsp_ggml_backend * wsp_ggml_backend_t;
 
-//
-// Legacy API
-//
-
-typedef struct wsp_ggml_allocr * wsp_ggml_allocr_t;
-
-// initialize allocator for use with CPU backend only
-WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new(void * data, size_t size, size_t alignment);
-WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_measure(size_t alignment);
-
-// initialize allocator for use with ggml-backend
-WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer);
-WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size); // allocates an owned buffer
-WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_measure_from_backend(struct wsp_ggml_backend * backend);
-
-WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_allocr_get_buffer(wsp_ggml_allocr_t alloc);
-
-// tell the allocator to parse nodes following the order described in the list
-// you should call this if your graph are optimized to execute out-of-order
-WSP_GGML_API void wsp_ggml_allocr_set_parse_seq(wsp_ggml_allocr_t alloc, const int * list, int n);
-
-WSP_GGML_API void wsp_ggml_allocr_free (wsp_ggml_allocr_t alloc);
-WSP_GGML_API bool wsp_ggml_allocr_is_measure (wsp_ggml_allocr_t alloc);
-WSP_GGML_API void wsp_ggml_allocr_reset (wsp_ggml_allocr_t alloc);
-WSP_GGML_API void wsp_ggml_allocr_alloc (wsp_ggml_allocr_t alloc, struct wsp_ggml_tensor * tensor);
-WSP_GGML_API size_t wsp_ggml_allocr_max_size (wsp_ggml_allocr_t alloc);
+// Tensor allocator
+struct wsp_ggml_tallocr {
+    wsp_ggml_backend_buffer_t buffer;
+    void * base;
+    size_t alignment;
+    size_t offset;
+};
 
-WSP_GGML_API size_t wsp_ggml_allocr_alloc_graph(wsp_ggml_allocr_t alloc, struct wsp_ggml_cgraph * graph);
+WSP_GGML_API struct wsp_ggml_tallocr wsp_ggml_tallocr_new(wsp_ggml_backend_buffer_t buffer);
+WSP_GGML_API void wsp_ggml_tallocr_alloc(struct wsp_ggml_tallocr * talloc, struct wsp_ggml_tensor * tensor);
 
-//
-// ggml-backend v2 API
-//
+// Graph allocator
+/*
+  Example usage:
+    wsp_ggml_gallocr_t galloc = wsp_ggml_gallocr_new(wsp_ggml_backend_cpu_buffer_type());
 
-// Separate tensor and graph allocator objects
-// This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
-// The original API is kept as a wrapper around the new API
+    // optional: create a worst-case graph and reserve the buffers to avoid reallocations
+    wsp_ggml_gallocr_reserve(galloc, build_graph(max_batch));
 
-// Tensor allocator
-typedef struct wsp_ggml_tallocr * wsp_ggml_tallocr_t;
+    // allocate the graph
+    struct wsp_ggml_cgraph * graph = build_graph(batch);
+    wsp_ggml_gallocr_alloc_graph(galloc, graph);
 
-WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new(void * data, size_t size, size_t alignment);
-WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure(size_t alignment);
-WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_buft(struct wsp_ggml_backend_buffer_type * buft, size_t size);
-WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size); // allocates an owned buffer
-WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer);
-WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure_from_buft(struct wsp_ggml_backend_buffer_type * buft);
-WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure_from_backend(struct wsp_ggml_backend * backend);
+    printf("compute buffer size: %zu bytes\n", wsp_ggml_gallocr_get_buffer_size(galloc, 0));
 
-WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_tallocr_get_buffer(wsp_ggml_tallocr_t talloc);
+    // evaluate the graph
+    wsp_ggml_backend_graph_compute(backend, graph);
+*/
 
-WSP_GGML_API void wsp_ggml_tallocr_free (wsp_ggml_tallocr_t talloc);
-WSP_GGML_API bool wsp_ggml_tallocr_is_measure (wsp_ggml_tallocr_t talloc);
-WSP_GGML_API void wsp_ggml_tallocr_reset (wsp_ggml_tallocr_t talloc);
-WSP_GGML_API void wsp_ggml_tallocr_alloc (wsp_ggml_tallocr_t talloc, struct wsp_ggml_tensor * tensor);
-WSP_GGML_API size_t wsp_ggml_tallocr_max_size (wsp_ggml_tallocr_t talloc);
+// special tensor flags for use with the graph allocator:
+//   wsp_ggml_set_input(): all input tensors are allocated at the beginning of the graph in non-overlapping addresses
+//   wsp_ggml_set_output(): output tensors are never freed and never overwritten
 
-
-// Graph allocator
 typedef struct wsp_ggml_gallocr * wsp_ggml_gallocr_t;
 
-WSP_GGML_API wsp_ggml_gallocr_t wsp_ggml_gallocr_new(void);
-WSP_GGML_API void wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc);
+WSP_GGML_API wsp_ggml_gallocr_t wsp_ggml_gallocr_new(wsp_ggml_backend_buffer_type_t buft);
+WSP_GGML_API wsp_ggml_gallocr_t wsp_ggml_gallocr_new_n(wsp_ggml_backend_buffer_type_t * bufts, int n_bufs);
+WSP_GGML_API void wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc);
 
-WSP_GGML_API void wsp_ggml_gallocr_set_parse_seq(wsp_ggml_gallocr_t galloc, const int * list, int n);
-WSP_GGML_API size_t wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, wsp_ggml_tallocr_t talloc, struct wsp_ggml_cgraph * graph);
+// pre-allocate buffers from a measure graph - does not allocate or modify the graph
+// call with a worst-case graph to avoid buffer reallocations
+// not strictly required for single buffer usage: wsp_ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed
+// returns false if the buffer allocation failed
+WSP_GGML_API bool wsp_ggml_gallocr_reserve(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph);
+WSP_GGML_API bool wsp_ggml_gallocr_reserve_n(
+    wsp_ggml_gallocr_t galloc,
+    struct wsp_ggml_cgraph * graph,
+    const int * node_buffer_ids,
+    const int * leaf_buffer_ids);
 
-// Allocate tensors from the allocators given by the hash table
-WSP_GGML_API void wsp_ggml_gallocr_alloc_graph_n(
-    wsp_ggml_gallocr_t galloc,
-    struct wsp_ggml_cgraph * graph,
-    struct wsp_ggml_hash_set hash_set,
-    wsp_ggml_tallocr_t * hash_node_talloc);
+// automatic reallocation if the topology changes when using a single buffer
+// returns false if using multiple buffers and a re-allocation is needed (call wsp_ggml_gallocr_reserve_n first to set the node buffers)
+WSP_GGML_API bool wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, struct wsp_ggml_cgraph * graph);
 
+WSP_GGML_API size_t wsp_ggml_gallocr_get_buffer_size(wsp_ggml_gallocr_t galloc, int buffer_id);
 
 // Utils
 // Create a buffer and allocate all the tensors in a wsp_ggml_context
-WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, struct wsp_ggml_backend_buffer_type * buft);
-WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors(struct wsp_ggml_context * ctx, struct wsp_ggml_backend * backend);
+WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, wsp_ggml_backend_buffer_type_t buft);
+WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors(struct wsp_ggml_context * ctx, wsp_ggml_backend_t backend);
 
 #ifdef __cplusplus
 }
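The hunk above removes the legacy wsp_ggml_allocr API and replaces it with the new tensor (wsp_ggml_tallocr) and graph (wsp_ggml_gallocr) allocators. A minimal sketch of the intended call sequence, adapted from the example comment in the new header; the trivial build_graph() helper, the batch sizes, and the CPU backend choice are illustrative assumptions, and error handling is omitted:

#include <stdio.h>

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

// builds a trivial graph (out = a * b) for a given batch size; the surrounding
// context is created with no_alloc = true, so tensor data is left for the
// graph allocator to place
static struct wsp_ggml_cgraph * build_graph(struct wsp_ggml_context * ctx, int batch) {
    struct wsp_ggml_tensor * a   = wsp_ggml_new_tensor_1d(ctx, WSP_GGML_TYPE_F32, batch);
    struct wsp_ggml_tensor * b   = wsp_ggml_new_tensor_1d(ctx, WSP_GGML_TYPE_F32, batch);
    struct wsp_ggml_tensor * out = wsp_ggml_mul(ctx, a, b);

    // flag inputs/outputs so the graph allocator places them as described above
    wsp_ggml_set_input(a);
    wsp_ggml_set_input(b);
    wsp_ggml_set_output(out);

    struct wsp_ggml_cgraph * gf = wsp_ggml_new_graph(ctx);
    wsp_ggml_build_forward_expand(gf, out);
    return gf;
}

int main(void) {
    // metadata-only context: tensors are described here, data lives in backend buffers
    struct wsp_ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct wsp_ggml_context * ctx = wsp_ggml_init(params);

    wsp_ggml_backend_t backend = wsp_ggml_backend_cpu_init();

    // one graph allocator backed by the CPU buffer type
    wsp_ggml_gallocr_t galloc = wsp_ggml_gallocr_new(wsp_ggml_backend_cpu_buffer_type());

    // optional: reserve with a worst-case graph so later calls do not reallocate
    wsp_ggml_gallocr_reserve(galloc, build_graph(ctx, /*max_batch =*/ 512));

    // allocate the tensors of the actual graph and evaluate it
    // (input data is left unset here for brevity)
    struct wsp_ggml_cgraph * graph = build_graph(ctx, /*batch =*/ 128);
    wsp_ggml_gallocr_alloc_graph(galloc, graph);

    printf("compute buffer size: %zu bytes\n", wsp_ggml_gallocr_get_buffer_size(galloc, 0));

    wsp_ggml_backend_graph_compute(backend, graph);

    wsp_ggml_gallocr_free(galloc);
    wsp_ggml_backend_free(backend);
    wsp_ggml_free(ctx);
    return 0;
}

Reserving with a worst-case graph is optional when a single buffer is used, since wsp_ggml_gallocr_alloc_graph reallocates automatically if the topology changes.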
package/cpp/ggml-backend-impl.h CHANGED
@@ -9,107 +9,218 @@ extern "C" {
 #endif
 
 //
-// Backend buffer
+// Backend buffer type
 //
 
-// buffer type
-typedef void * wsp_ggml_backend_buffer_type_context_t;
-
 struct wsp_ggml_backend_buffer_type_i {
-    const char * (*WSP_GGML_CALL get_name) (wsp_ggml_backend_buffer_type_t buft);
-    wsp_ggml_backend_buffer_t (*WSP_GGML_CALL alloc_buffer) (wsp_ggml_backend_buffer_type_t buft, size_t size);
-    size_t (*WSP_GGML_CALL get_alignment) (wsp_ggml_backend_buffer_type_t buft); // tensor alignment
-    size_t (*WSP_GGML_CALL get_alloc_size) (wsp_ggml_backend_buffer_type_t buft, const struct wsp_ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
-    bool (*WSP_GGML_CALL supports_backend)(wsp_ggml_backend_buffer_type_t buft, wsp_ggml_backend_t backend); // check if the buffer type is usable by the backend
-    // check if tensor data is in host memory
-    // should be equivalent to supports_backend(buft, wsp_ggml_backend_cpu_init())
-    bool (*WSP_GGML_CALL is_host) (wsp_ggml_backend_buffer_type_t buft);
+    const char * (*get_name) (wsp_ggml_backend_buffer_type_t buft);
+    // allocate a buffer of this type
+    wsp_ggml_backend_buffer_t (*alloc_buffer) (wsp_ggml_backend_buffer_type_t buft, size_t size);
+    // tensor alignment
+    size_t (*get_alignment) (wsp_ggml_backend_buffer_type_t buft);
+    // (optional) max buffer size that can be allocated (defaults to SIZE_MAX)
+    size_t (*get_max_size) (wsp_ggml_backend_buffer_type_t buft);
+    // (optional) data size needed to allocate the tensor, including padding (defaults to wsp_ggml_nbytes)
+    size_t (*get_alloc_size)(wsp_ggml_backend_buffer_type_t buft, const struct wsp_ggml_tensor * tensor);
+    // (optional) check if tensor data is in host memory (defaults to false)
+    bool (*is_host) (wsp_ggml_backend_buffer_type_t buft);
 };
 
 struct wsp_ggml_backend_buffer_type {
     struct wsp_ggml_backend_buffer_type_i iface;
-    wsp_ggml_backend_buffer_type_context_t context;
+    wsp_ggml_backend_dev_t device;
+    void * context;
 };
 
-// buffer
-typedef void * wsp_ggml_backend_buffer_context_t;
+//
+// Backend buffer
+//
 
 struct wsp_ggml_backend_buffer_i {
-    const char * (*WSP_GGML_CALL get_name) (wsp_ggml_backend_buffer_t buffer);
-    void (*WSP_GGML_CALL free_buffer)(wsp_ggml_backend_buffer_t buffer);
-    void * (*WSP_GGML_CALL get_base) (wsp_ggml_backend_buffer_t buffer);
-    void (*WSP_GGML_CALL init_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
-    void (*WSP_GGML_CALL set_tensor) (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-    void (*WSP_GGML_CALL get_tensor) (wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
-    bool (*WSP_GGML_CALL cpy_tensor) (wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
-    void (*WSP_GGML_CALL clear) (wsp_ggml_backend_buffer_t buffer, uint8_t value);
-    void (*WSP_GGML_CALL reset) (wsp_ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
+    const char * (*get_name) (wsp_ggml_backend_buffer_t buffer);
+    // (optional) free the buffer
+    void (*free_buffer) (wsp_ggml_backend_buffer_t buffer);
+    // base address of the buffer
+    void * (*get_base) (wsp_ggml_backend_buffer_t buffer);
+    // (optional) initialize a tensor in the buffer (eg. add tensor extras)
+    void (*init_tensor) (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+    // tensor data access
+    void (*memset_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+    void (*set_tensor) (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    void (*get_tensor) (wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+    // (optional) tensor copy: dst is in the buffer, src may be in any buffer, including buffers from a different backend (return false if not supported)
+    bool (*cpy_tensor) (wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+    // clear the entire buffer
+    void (*clear) (wsp_ggml_backend_buffer_t buffer, uint8_t value);
+    // (optional) reset any internal state due to tensor initialization, such as tensor extras
+    void (*reset) (wsp_ggml_backend_buffer_t buffer);
 };
 
 struct wsp_ggml_backend_buffer {
     struct wsp_ggml_backend_buffer_i iface;
     wsp_ggml_backend_buffer_type_t buft;
-    wsp_ggml_backend_buffer_context_t context;
+    void * context;
     size_t size;
     enum wsp_ggml_backend_buffer_usage usage;
 };
 
-WSP_GGML_CALL wsp_ggml_backend_buffer_t wsp_ggml_backend_buffer_init(
-    wsp_ggml_backend_buffer_type_t buft,
-    struct wsp_ggml_backend_buffer_i iface,
-    wsp_ggml_backend_buffer_context_t context,
-    size_t size);
+wsp_ggml_backend_buffer_t wsp_ggml_backend_buffer_init(
+    wsp_ggml_backend_buffer_type_t buft,
+    struct wsp_ggml_backend_buffer_i iface,
+    void * context,
+    size_t size);
 
 // do not use directly, use wsp_ggml_backend_tensor_copy instead
 bool wsp_ggml_backend_buffer_copy_tensor(const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
 
+// multi-buffer
+// buffer that contains a collection of buffers
+wsp_ggml_backend_buffer_t wsp_ggml_backend_multi_buffer_alloc_buffer(wsp_ggml_backend_buffer_t * buffers, size_t n_buffers);
+bool wsp_ggml_backend_buffer_is_multi_buffer(wsp_ggml_backend_buffer_t buffer);
+void wsp_ggml_backend_multi_buffer_set_usage(wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage);
+
 //
-// Backend
+// Backend (stream)
 //
 
-typedef void * wsp_ggml_backend_context_t;
-
 struct wsp_ggml_backend_i {
-    const char * (*WSP_GGML_CALL get_name)(wsp_ggml_backend_t backend);
+    const char * (*get_name)(wsp_ggml_backend_t backend);
 
-    void (*WSP_GGML_CALL free)(wsp_ggml_backend_t backend);
+    void (*free)(wsp_ggml_backend_t backend);
 
+    // Will be moved to the device interface
     // buffer allocation
-    wsp_ggml_backend_buffer_type_t (*WSP_GGML_CALL get_default_buffer_type)(wsp_ggml_backend_t backend);
+    wsp_ggml_backend_buffer_type_t (*get_default_buffer_type)(wsp_ggml_backend_t backend);
 
     // (optional) asynchronous tensor data access
-    void (*WSP_GGML_CALL set_tensor_async)(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-    void (*WSP_GGML_CALL get_tensor_async)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
-    bool (*WSP_GGML_CALL cpy_tensor_async)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+    void (*set_tensor_async)(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    void (*get_tensor_async)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+    bool (*cpy_tensor_async)(wsp_ggml_backend_t backend_src, wsp_ggml_backend_t backend_dst, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
 
     // (optional) complete all pending operations
-    void (*WSP_GGML_CALL synchronize)(wsp_ggml_backend_t backend);
-
-    // compute graph with a plan
-    wsp_ggml_backend_graph_plan_t (*WSP_GGML_CALL graph_plan_create) (wsp_ggml_backend_t backend, const struct wsp_ggml_cgraph * cgraph);
-    void (*WSP_GGML_CALL graph_plan_free) (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
-    void (*WSP_GGML_CALL graph_plan_compute)(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
-
-    // compute graph without a plan (async)
-    bool (*WSP_GGML_CALL graph_compute)(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
-
-    // check if the backend supports an operation
-    bool (*WSP_GGML_CALL supports_op)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+    void (*synchronize)(wsp_ggml_backend_t backend);
+
+    // (optional) compute graph with a plan (not used currently)
+    wsp_ggml_backend_graph_plan_t (*graph_plan_create) (wsp_ggml_backend_t backend, const struct wsp_ggml_cgraph * cgraph);
+    void (*graph_plan_free) (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+    // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
+    void (*graph_plan_update) (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan, const struct wsp_ggml_cgraph * cgraph);
+    // compute the graph with the plan
+    enum wsp_ggml_status (*graph_plan_compute)(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+
+    // compute graph (always async if supported by the backend)
+    enum wsp_ggml_status (*graph_compute) (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+
+    // IMPORTANT: these functions have been moved to the device interface and will be removed from the backend interface
+    // new backends should implement the device interface instead
+    // These functions are being moved to the device interface
+    bool (*supports_op) (wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+    bool (*supports_buft)(wsp_ggml_backend_t backend, wsp_ggml_backend_buffer_type_t buft);
+    bool (*offload_op) (wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+
+    // (optional) event synchronization
+    // record an event on this stream
+    void (*event_record)(wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
+    // wait for an event on on a different stream
+    void (*event_wait) (wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
 };
 
 struct wsp_ggml_backend {
+    wsp_ggml_guid_t guid;
     struct wsp_ggml_backend_i iface;
+    wsp_ggml_backend_dev_t device;
+    void * context;
+};
+
+struct wsp_ggml_backend_event {
+    struct wsp_ggml_backend_device * device;
+    void * context;
+};
+
+//
+// Backend device
+//
+
+// Note: if additional properties are needed, we should add a struct with all of them
+// the current functions to obtain the properties can remain, since they are more convenient for often used properties
+struct wsp_ggml_backend_device_i {
+    // device name: short identifier for this device, such as "CPU" or "CUDA0"
+    const char * (*get_name)(wsp_ggml_backend_dev_t dev);
+
+    // device description: short informative description of the device, could be the model name
+    const char * (*get_description)(wsp_ggml_backend_dev_t dev);
 
-    wsp_ggml_backend_context_t context;
+    // device memory in bytes
+    void (*get_memory)(wsp_ggml_backend_dev_t dev, size_t * free, size_t * total);
+
+    // device type
+    enum wsp_ggml_backend_dev_type (*get_type)(wsp_ggml_backend_dev_t dev);
+
+    // device properties
+    void (*get_props)(wsp_ggml_backend_dev_t dev, struct wsp_ggml_backend_dev_props * props);
+
+    // backend (stream) initialization
+    wsp_ggml_backend_t (*init_backend)(wsp_ggml_backend_dev_t dev, const char * params);
+
+    // preferred buffer type
+    wsp_ggml_backend_buffer_type_t (*get_buffer_type)(wsp_ggml_backend_dev_t dev);
+
+    // (optional) host buffer type (in system memory, typically this is a pinned memory buffer for faster transfers between host and device)
+    wsp_ggml_backend_buffer_type_t (*get_host_buffer_type)(wsp_ggml_backend_dev_t dev);
+
+    // (optional) buffer from pointer: create a buffer from a host pointer (useful for memory mapped models and importing data from other libraries)
+    wsp_ggml_backend_buffer_t (*buffer_from_host_ptr)(wsp_ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size);
+
+    // check if the backend can compute an operation
+    bool (*supports_op)(wsp_ggml_backend_dev_t dev, const struct wsp_ggml_tensor * op);
+
+    // check if the backend can use tensors allocated in a buffer type
+    bool (*supports_buft)(wsp_ggml_backend_dev_t dev, wsp_ggml_backend_buffer_type_t buft);
+
+    // (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer
+    // these should be expensive operations that may benefit from running on this backend instead of the CPU backend
+    bool (*offload_op)(wsp_ggml_backend_dev_t dev, const struct wsp_ggml_tensor * op);
+
+    // (optional) event synchronization
+    wsp_ggml_backend_event_t (*event_new) (wsp_ggml_backend_dev_t dev);
+    void (*event_free) (wsp_ggml_backend_dev_t dev, wsp_ggml_backend_event_t event);
+    void (*event_synchronize) (wsp_ggml_backend_dev_t dev, wsp_ggml_backend_event_t event);
+};
+
+struct wsp_ggml_backend_device {
+    struct wsp_ggml_backend_device_i iface;
+    wsp_ggml_backend_reg_t reg;
+    void * context;
 };
 
 //
-// Backend registry
+// Backend (reg)
 //
 
-typedef wsp_ggml_backend_t (*WSP_GGML_CALL wsp_ggml_backend_init_fn)(const char * params, void * user_data);
+struct wsp_ggml_backend_reg_i {
+    const char * (*get_name)(wsp_ggml_backend_reg_t reg);
+
+    // enumerate available devices
+    size_t (*get_device_count)(wsp_ggml_backend_reg_t reg);
+    wsp_ggml_backend_dev_t (*get_device)(wsp_ggml_backend_reg_t reg, size_t index);
+
+    // (optional) get a pointer to a function in the backend
+    // backends can add custom functions that are not part of the standard ggml-backend interface
+    void * (*get_proc_address)(wsp_ggml_backend_reg_t reg, const char * name);
+};
+
+struct wsp_ggml_backend_reg {
+    // int api_version; // TODO: for dynamic loading
+    struct wsp_ggml_backend_reg_i iface;
+    void * context;
+};
+
 
-WSP_GGML_CALL void wsp_ggml_backend_register(const char * name, wsp_ggml_backend_init_fn init_fn, wsp_ggml_backend_buffer_type_t default_buffer_type, void * user_data);
+// Internal backend registry API
+void wsp_ggml_backend_register(wsp_ggml_backend_reg_t reg);
+void wsp_ggml_backend_device_register(wsp_ggml_backend_dev_t device);
+// TODO: backends can be loaded as a dynamic library, in which case it needs to export this function
+// typedef wsp_ggml_backend_register_t * (*wsp_ggml_backend_init)(void);
 
 #ifdef __cplusplus
 }
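This hunk replaces the old function-pointer registry (wsp_ggml_backend_init_fn) with explicit device and registry interfaces, each expressed as an iface struct plus a context pointer. A minimal sketch of how these internal structs fit together, assuming a hypothetical wsp_ggml_backend_reg_t obtained from a backend's own registration code; normal callers would use the public wrappers in ggml-backend.h rather than reading iface members directly:

#include <stdio.h>

#include "ggml-backend-impl.h"

// prints the devices exposed by a registry object; `reg` is assumed to be the
// object a backend would pass to wsp_ggml_backend_register()
void example_list_devices(wsp_ggml_backend_reg_t reg) {
    size_t n_devices = reg->iface.get_device_count(reg);

    for (size_t i = 0; i < n_devices; i++) {
        wsp_ggml_backend_dev_t dev = reg->iface.get_device(reg, i);

        // per-device properties come from the device interface
        const char * name = dev->iface.get_name(dev);
        const char * desc = dev->iface.get_description(dev);

        size_t free_mem = 0, total_mem = 0;
        dev->iface.get_memory(dev, &free_mem, &total_mem);

        printf("%s (%s): %zu / %zu bytes free\n", name, desc, free_mem, total_mem);

        // a backend (stream) for this device would then be created with:
        //   wsp_ggml_backend_t backend = dev->iface.init_backend(dev, /*params*/ NULL);
    }
}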