llama_cpp 0.13.0 → 0.14.1

Sign up to get free protection for your applications and to get access to all the features.
@@ -10,6 +10,7 @@ extern "C" {
   #define GGML_VK_NAME "Vulkan"
   #define GGML_VK_MAX_DEVICES 16
  
+ GGML_API void ggml_vk_instance_init(void);
  GGML_API void ggml_vk_init_cpu_assist(void);
  
  GGML_API void ggml_vk_preallocate_buffers_graph_cpu_assist(struct ggml_tensor * node);