llama_cpp 0.1.1 → 0.1.3

Sign up to get free protection for your applications and to get access to all the features.
@@ -1,23 +1,21 @@
 #pragma once
 
+#include "ggml.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 void ggml_cl_init(void);
 
-enum ggml_blas_order {
-    GGML_BLAS_ORDER_ROW_MAJOR = 101,
-    GGML_BLAS_ORDER_COLUMN_MAJOR = 102,
-};
+bool   ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+void   ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
 
-enum ggml_blas_op {
-    GGML_BLAS_OP_N = 111,
-    GGML_BLAS_OP_T = 112,
-    GGML_BLAS_OP_C = 113,
-};
+void * ggml_cl_host_malloc(size_t size);
+void   ggml_cl_host_free(void * ptr);
 
-void ggml_cl_sgemm_wrapper(const enum ggml_blas_order order, const enum ggml_blas_op trans_a, const enum ggml_blas_op trans_b, const int m, const int n, const int k, const float alpha, const void *host_a, const int lda, const float *host_b, const int ldb, const float beta, float *host_c, const int ldc, const int btype);
+void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
 
 #ifdef __cplusplus
 }