llama_cpp 0.13.0 → 0.14.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,6 +10,7 @@ extern "C" {
 #define GGML_VK_NAME "Vulkan"
 #define GGML_VK_MAX_DEVICES 16
 
+GGML_API void ggml_vk_instance_init(void);
 GGML_API void ggml_vk_init_cpu_assist(void);
 
 GGML_API void ggml_vk_preallocate_buffers_graph_cpu_assist(struct ggml_tensor * node);