whisper.rn 0.4.0-rc.7 → 0.4.0-rc.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/cpp/ggml-metal.h CHANGED
@@ -36,70 +36,22 @@ struct wsp_ggml_cgraph;
  extern "C" {
  #endif
 
- //
- // internal API
- // temporary exposed to user-code
- //
-
- struct wsp_ggml_metal_context;
-
- void wsp_ggml_metal_log_set_callback(wsp_ggml_log_callback log_callback, void * user_data);
-
- // number of command buffers to use
- struct wsp_ggml_metal_context * wsp_ggml_metal_init(int n_cb);
- void wsp_ggml_metal_free(struct wsp_ggml_metal_context * ctx);
-
- void * wsp_ggml_metal_host_malloc(size_t n);
- void   wsp_ggml_metal_host_free (void * data);
-
- // set the number of command buffers to use
- void wsp_ggml_metal_set_n_cb(struct wsp_ggml_metal_context * ctx, int n_cb);
-
- // creates a mapping between a host memory buffer and a device memory buffer
- // - make sure to map all buffers used in the graph before calling wsp_ggml_metal_graph_compute
- // - the mapping is used during computation to determine the arguments of the compute kernels
- // - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
- // - max_size specifies the maximum size of a tensor and is used to create shared views such
- //   that it is guaranteed that the tensor will fit in at least one of the views
- //
- bool wsp_ggml_metal_add_buffer(
-         struct wsp_ggml_metal_context * ctx,
-                        const char * name,
-                              void * data,
-                             size_t   size,
-                             size_t   max_size);
-
- // set data from host memory into the device
- void wsp_ggml_metal_set_tensor(struct wsp_ggml_metal_context * ctx, struct wsp_ggml_tensor * t);
-
- // get data from the device into host memory
- void wsp_ggml_metal_get_tensor(struct wsp_ggml_metal_context * ctx, struct wsp_ggml_tensor * t);
-
- // try to find operations that can be run concurrently in the graph
- // you should run it again if the topology of your graph changes
- void wsp_ggml_metal_graph_find_concurrency(struct wsp_ggml_metal_context * ctx, struct wsp_ggml_cgraph * gf, bool check_mem);
-
- // if the graph has been optimized for concurrently dispatch, return length of the concur_list if optimized
- int wsp_ggml_metal_if_optimized(struct wsp_ggml_metal_context * ctx);
-
- // output the concur_list for wsp_ggml_alloc
- int * wsp_ggml_metal_get_concur_list(struct wsp_ggml_metal_context * ctx);
-
- // same as wsp_ggml_graph_compute but uses Metal
- // creates gf->n_threads command buffers in parallel
- void wsp_ggml_metal_graph_compute(struct wsp_ggml_metal_context * ctx, struct wsp_ggml_cgraph * gf);
-
  //
  // backend API
  // user-code should use only these functions
  //
 
+ WSP_GGML_API void wsp_ggml_backend_metal_log_set_callback(wsp_ggml_log_callback log_callback, void * user_data);
+
  WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_metal_init(void);
 
  WSP_GGML_API bool wsp_ggml_backend_is_metal(wsp_ggml_backend_t backend);
 
+ WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_t wsp_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size);
+
  WSP_GGML_API void wsp_ggml_backend_metal_set_n_cb(wsp_ggml_backend_t backend, int n_cb);
- WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_metal_buffer_type(void);
+
+ WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_type_t wsp_ggml_backend_metal_buffer_type(void);
 
  // helper to check if the device supports a specific family
  // ideally, the user code should be doing these checks
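Taken together, the hunk drops the temporarily exposed wsp_ggml_metal_context API and leaves only the backend API: logging is configured per backend, and wsp_ggml_backend_metal_buffer_from_ptr takes over the data/size/max_size role of the removed wsp_ggml_metal_add_buffer. Below is a minimal sketch of how calling code might use the surviving declarations; it assumes the wsp_ggml_log_callback signature (level, text, user_data) from ggml.h and wsp_ggml_backend_free from ggml-backend.h, neither of which appears in this diff.

// Sketch only: wsp_ggml_backend_free and the log-callback signature are
// assumptions taken from the other ggml headers, not from this diff.
#include "ggml-metal.h"
#include "ggml-backend.h"

#include <stdio.h>

static void log_to_stderr(enum wsp_ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
}

int main(void) {
    // Logging is now set per backend instead of on a wsp_ggml_metal_context.
    wsp_ggml_backend_metal_log_set_callback(log_to_stderr, NULL);

    wsp_ggml_backend_t backend = wsp_ggml_backend_metal_init();
    if (backend == NULL || !wsp_ggml_backend_is_metal(backend)) {
        return 1;
    }

    // Replaces the removed wsp_ggml_metal_set_n_cb(ctx, n_cb).
    wsp_ggml_backend_metal_set_n_cb(backend, 4);

    // Device buffers come from the backend buffer type, or wrap an existing host
    // allocation via wsp_ggml_backend_metal_buffer_from_ptr(data, size, max_size).
    wsp_ggml_backend_buffer_type_t buft = wsp_ggml_backend_metal_buffer_type();
    (void) buft;

    wsp_ggml_backend_free(backend); // assumed to be declared in ggml-backend.h
    return 0;
}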