llama_cpp 0.13.0 → 0.14.1

@@ -2231,7 +2231,7 @@ static ggml_backend_buffer_type_t ggml_backend_opencl_get_default_buffer_type(gg
     GGML_UNUSED(backend);
 }
 
-static bool ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgraph * graph) {
+static ggml_status ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgraph * graph) {
     for (int i = 0; i < graph->n_nodes; ++i) {
         ggml_tensor * node = graph->nodes[i];
         switch (node->op) {
@@ -2246,7 +2246,7 @@ static bool ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgrap
         }
     }
 
-    return true;
+    return GGML_STATUS_SUCCESS;
 
     GGML_UNUSED(backend);
 }
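
For context: this hunk adapts the OpenCL backend to an upstream ggml interface change where graph compute returns an enum ggml_status rather than a bool, so callers can distinguish failure modes. A minimal sketch of the status values and a caller, assuming the upstream ggml.h definition around this version (illustrative, not part of this diff):

    // Sketch of enum ggml_status as defined in upstream ggml.h at this time
    // (values shown for illustration; check the header shipped with the gem).
    enum ggml_status {
        GGML_STATUS_ALLOC_FAILED = -2,  // buffer allocation failed
        GGML_STATUS_FAILED       = -1,  // generic compute failure
        GGML_STATUS_SUCCESS      =  0,  // graph computed successfully
        GGML_STATUS_ABORTED      =  1,  // compute aborted (e.g. by a callback)
    };

    // Hypothetical caller: instead of a true/false check, compare against
    // GGML_STATUS_SUCCESS and report the specific status on failure.
    enum ggml_status status = ggml_backend_graph_compute(backend, graph);
    if (status != GGML_STATUS_SUCCESS) {
        fprintf(stderr, "graph compute failed with status %d\n", (int) status);
    }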