llama_cpp 0.13.0 → 0.14.1

@@ -24,6 +24,11 @@ GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);
 GGML_API void ggml_backend_sycl_print_sycl_devices(void);
 GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len);
 GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
+GGML_API GGML_CALL int ggml_backend_sycl_get_device_count();
+GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);
+GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
+GGML_API GGML_CALL int ggml_backend_sycl_get_device_index(int device_id);
+
 #ifdef __cplusplus
 }
 #endif
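
The four added declarations bring the SYCL backend's device-management surface in line with what the CUDA backend already exposes: device enumeration, per-device memory queries, and a split buffer type for sharding tensors across multiple GPUs. A minimal sketch of how a caller might use them follows; it is an illustration, not code from the package. The header name, buffer sizes, and the tensor_split semantics are assumptions inferred from the CUDA analogues, while the function signatures themselves are the ones declared in the diff above.

/* sycl_device_probe.c: a minimal sketch of the device-query API added
 * in 0.14.x. Assumes GGML was built with SYCL support and that these
 * declarations come from the vendored ggml-sycl header. */
#include <stdio.h>

#include "ggml-sycl.h"

int main(void) {
    /* New in 0.14.x: enumerate the SYCL devices the backend can see. */
    const int n_devices = ggml_backend_sycl_get_device_count();
    printf("SYCL devices: %d\n", n_devices);

    for (int i = 0; i < n_devices; i++) {
        char desc[128];
        size_t free_mem  = 0;
        size_t total_mem = 0;

        /* get_device_description is pre-existing; get_device_memory is new. */
        ggml_sycl_get_device_description(i, desc, sizeof(desc));
        ggml_backend_sycl_get_device_memory(i, &free_mem, &total_mem);

        printf("  [%d] %s: %zu of %zu MiB free\n",
               i, desc,
               free_mem  / (1024 * 1024),
               total_mem / (1024 * 1024));
    }

    /* The split buffer type takes a per-device array of proportions for
     * sharding weights, mirroring ggml_backend_cuda_split_buffer_type().
     * Passing all zeros to request the backend's default split is an
     * assumption carried over from the CUDA API's behaviour. */
    float tensor_split[16] = {0};
    ggml_backend_buffer_type_t buft = ggml_backend_sycl_split_buffer_type(tensor_split);
    (void) buft;

    return 0;
}

The sketch does not call ggml_backend_sycl_get_device_index(); by analogy with the CUDA backend it appears to map a raw device id (as reported by ggml_sycl_get_gpu_list()) to the backend's internal index, but that reading is inferred rather than documented in this diff.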