llama_cpp 0.12.7 → 0.14.0

@@ -24,6 +24,11 @@ GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);
  GGML_API void ggml_backend_sycl_print_sycl_devices(void);
  GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len);
  GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
+ GGML_API GGML_CALL int ggml_backend_sycl_get_device_count();
+ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);
+ GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
+ GGML_API GGML_CALL int ggml_backend_sycl_get_device_index(int device_id);
+
  #ifdef __cplusplus
  }
  #endif
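
The hunk above adds four device-query entry points to the SYCL backend header (this appears to be ggml-sycl.h; the file path is not shown in the extract). A minimal sketch of how they might be used together, assuming a SYCL-enabled ggml build where the header is available as "ggml-sycl.h":

/* Enumerate SYCL devices and report their memory.
 * Sketch only: uses just the functions declared in the hunk above. */
#include <stdio.h>
#include "ggml-sycl.h"   /* assumed header name; not shown in this diff */

int main(void) {
    int n = ggml_backend_sycl_get_device_count();
    printf("SYCL devices: %d\n", n);

    for (int i = 0; i < n; ++i) {
        size_t free_mem = 0, total_mem = 0;
        ggml_backend_sycl_get_device_memory(i, &free_mem, &total_mem);
        printf("  device %d: %zu of %zu bytes free\n", i, free_mem, total_mem);
    }
    return 0;
}

ggml_backend_sycl_split_buffer_type looks analogous to the CUDA split-buffer type, where tensor_split would point to per-device proportions used to shard weights across GPUs; that reading is inferred from the CUDA counterpart, not confirmed by this diff.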