llama_cpp 0.16.1 → 0.16.2

@@ -8,14 +8,12 @@
 
 #include "ggml.h"
 #include "ggml-backend.h"
+#include "ggml-sycl/presets.hpp"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#define GGML_SYCL_MAX_DEVICES 48
-#define GGML_SYCL_NAME "SYCL"
-
 // backend API
 GGML_API ggml_backend_t ggml_backend_sycl_init(int device);
 
@@ -33,13 +31,6 @@ GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len);
 GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
 GGML_API GGML_CALL int ggml_backend_sycl_get_device_count();
 GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
-GGML_API GGML_CALL int ggml_backend_sycl_get_device_index(int device_id);
-
-// TODO: these are temporary
-// ref: https://github.com/ggerganov/llama.cpp/pull/6022#issuecomment-1992615670
-GGML_API GGML_CALL int ggml_backend_sycl_get_device_id(int device_index);
-GGML_API GGML_CALL void ggml_backend_sycl_set_single_device_mode(int main_gpu_id);
-GGML_API GGML_CALL void ggml_backend_sycl_set_mul_device_mode();
 
 // SYCL doesn't support registering host memory, keep here for reference
 // GGML_API GGML_CALL bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
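
With the device-index indirection removed (ggml_backend_sycl_get_device_index, ggml_backend_sycl_get_device_id, and the single/mul device mode setters), callers address SYCL devices by plain index through the functions that remain in this header. A minimal sketch of that usage follows; it assumes the header is included as "ggml-sycl.h" and that ggml_backend_free from ggml-backend.h is available, so treat it as illustration rather than the gem's own code.

#include <stdio.h>
#include "ggml-backend.h"
#include "ggml-sycl.h"   // assumed include name for the header diffed above

int main(void) {
    // enumerate SYCL devices and report their description and memory
    int n_devices = ggml_backend_sycl_get_device_count();
    for (int i = 0; i < n_devices; ++i) {
        char desc[128];
        size_t free_mem = 0, total_mem = 0;
        ggml_sycl_get_device_description(i, desc, sizeof(desc));
        ggml_backend_sycl_get_device_memory(i, &free_mem, &total_mem);
        printf("SYCL device %d: %s, %zu/%zu bytes free\n", i, desc, free_mem, total_mem);
    }

    // initialize the backend directly on a device index (here: device 0)
    ggml_backend_t backend = ggml_backend_sycl_init(0);
    if (backend == NULL) {
        fprintf(stderr, "ggml_backend_sycl_init failed\n");
        return 1;
    }
    ggml_backend_free(backend);
    return 0;
}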