cui-llama.rn 1.2.3 → 1.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/cpp/ggml-cpu.h ADDED
@@ -0,0 +1,150 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ // Scheduling priorities
+ enum lm_ggml_sched_priority {
+     LM_GGML_SCHED_PRIO_NORMAL,
+     LM_GGML_SCHED_PRIO_MEDIUM,
+     LM_GGML_SCHED_PRIO_HIGH,
+     LM_GGML_SCHED_PRIO_REALTIME
+ };
+
+ // Threadpool params
+ // Use lm_ggml_threadpool_params_default() or lm_ggml_threadpool_params_init() to populate the defaults
+ struct lm_ggml_threadpool_params {
+     bool cpumask[LM_GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings)
+     int n_threads;                       // number of threads
+     enum lm_ggml_sched_priority prio;    // thread priority
+     uint32_t poll;                       // polling level (0 - no polling, 100 - aggressive polling)
+     bool strict_cpu;                     // strict cpu placement
+     bool paused;                         // start in paused state
+ };
+
+ struct lm_ggml_threadpool; // forward declaration, see ggml.c
+
+ typedef struct lm_ggml_threadpool * lm_ggml_threadpool_t;
+
+ // the compute plan that needs to be prepared for lm_ggml_graph_compute()
+ // since https://github.com/ggerganov/ggml/issues/287
+ struct lm_ggml_cplan {
+     size_t work_size;    // size of work buffer, calculated by `lm_ggml_graph_plan()`
+     uint8_t * work_data; // work buffer, to be allocated by caller before calling to `lm_ggml_graph_compute()`
+
+     int n_threads;
+     struct lm_ggml_threadpool * threadpool;
+
+     // abort lm_ggml_graph_compute when true
+     lm_ggml_abort_callback abort_callback;
+     void * abort_callback_data;
+ };
+
+ // numa strategies
+ enum lm_ggml_numa_strategy {
+     LM_GGML_NUMA_STRATEGY_DISABLED   = 0,
+     LM_GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
+     LM_GGML_NUMA_STRATEGY_ISOLATE    = 2,
+     LM_GGML_NUMA_STRATEGY_NUMACTL    = 3,
+     LM_GGML_NUMA_STRATEGY_MIRROR     = 4,
+     LM_GGML_NUMA_STRATEGY_COUNT
+ };
+
+ LM_GGML_API void lm_ggml_numa_init(enum lm_ggml_numa_strategy numa); // call once for better performance on NUMA systems
+ LM_GGML_API bool lm_ggml_is_numa(void); // true if init detected that system has >1 NUMA node
+
+ LM_GGML_API struct lm_ggml_tensor * lm_ggml_new_i32(struct lm_ggml_context * ctx, int32_t value);
+ LM_GGML_API struct lm_ggml_tensor * lm_ggml_new_f32(struct lm_ggml_context * ctx, float value);
+
+ LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_i32 (struct lm_ggml_tensor * tensor, int32_t value);
+ LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_f32 (struct lm_ggml_tensor * tensor, float value);
+
+ LM_GGML_API int32_t lm_ggml_get_i32_1d(const struct lm_ggml_tensor * tensor, int i);
+ LM_GGML_API void    lm_ggml_set_i32_1d(const struct lm_ggml_tensor * tensor, int i, int32_t value);
+
+ LM_GGML_API int32_t lm_ggml_get_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+ LM_GGML_API void    lm_ggml_set_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+
+ LM_GGML_API float lm_ggml_get_f32_1d(const struct lm_ggml_tensor * tensor, int i);
+ LM_GGML_API void  lm_ggml_set_f32_1d(const struct lm_ggml_tensor * tensor, int i, float value);
+
+ LM_GGML_API float lm_ggml_get_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+ LM_GGML_API void  lm_ggml_set_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+
+ LM_GGML_API struct lm_ggml_threadpool_params lm_ggml_threadpool_params_default(int n_threads);
+ LM_GGML_API void lm_ggml_threadpool_params_init (struct lm_ggml_threadpool_params * p, int n_threads);
+ LM_GGML_API bool lm_ggml_threadpool_params_match (const struct lm_ggml_threadpool_params * p0, const struct lm_ggml_threadpool_params * p1);
+ LM_GGML_API struct lm_ggml_threadpool * lm_ggml_threadpool_new (struct lm_ggml_threadpool_params * params);
+ LM_GGML_API void lm_ggml_threadpool_free (struct lm_ggml_threadpool * threadpool);
+ LM_GGML_API int lm_ggml_threadpool_get_n_threads(struct lm_ggml_threadpool * threadpool);
+ LM_GGML_API void lm_ggml_threadpool_pause (struct lm_ggml_threadpool * threadpool);
+ LM_GGML_API void lm_ggml_threadpool_resume (struct lm_ggml_threadpool * threadpool);
+
+ // lm_ggml_graph_plan() has to be called before lm_ggml_graph_compute()
+ // when plan.work_size > 0, caller must allocate memory for plan.work_data
+ LM_GGML_API struct lm_ggml_cplan lm_ggml_graph_plan(
+     const struct lm_ggml_cgraph * cgraph,
+     int n_threads, /* = LM_GGML_DEFAULT_N_THREADS */
+     struct lm_ggml_threadpool * threadpool /* = NULL */ );
+ LM_GGML_API enum lm_ggml_status lm_ggml_graph_compute(struct lm_ggml_cgraph * cgraph, struct lm_ggml_cplan * cplan);
+
+ // same as lm_ggml_graph_compute() but the work data is allocated as a part of the context
+ // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
+ LM_GGML_API enum lm_ggml_status lm_ggml_graph_compute_with_ctx(struct lm_ggml_context * ctx, struct lm_ggml_cgraph * cgraph, int n_threads);
+
+ // TODO: move to backend interface
+ LM_GGML_API int lm_ggml_cpu_has_neon (void);
+ LM_GGML_API int lm_ggml_cpu_has_sve (void);
+ LM_GGML_API int lm_ggml_cpu_has_matmul_int8(void);
+ // get the sve vector length in bytes
+ LM_GGML_API int lm_ggml_cpu_get_sve_cnt(void);
+
+ // Internal types and functions exposed for tests and benchmarks
+
+ typedef void (*lm_ggml_from_float_to_mat_t)
+     (const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t nr, int64_t k, int64_t bs);
+ typedef void (*lm_ggml_vec_dot_t) (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x, size_t bx,
+     const void * LM_GGML_RESTRICT y, size_t by, int nrc);
+ typedef void (*lm_ggml_gemv_t) (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x,
+     const void * LM_GGML_RESTRICT y, int nr, int nc);
+ typedef void (*lm_ggml_gemm_t) (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x,
+     const void * LM_GGML_RESTRICT y, int nr, int nc);
+
+ struct lm_ggml_type_traits_cpu {
+     lm_ggml_from_float_to_mat_t from_float_to_mat;
+     lm_ggml_vec_dot_t vec_dot;
+     enum lm_ggml_type vec_dot_type;
+     int64_t nrows; // number of rows to process simultaneously
+     int64_t ncols; // number of columns to process simultaneously
+     lm_ggml_gemv_t gemv;
+     lm_ggml_gemm_t gemm;
+ };
+
+ LM_GGML_API const struct lm_ggml_type_traits_cpu * lm_ggml_get_type_traits_cpu(enum lm_ggml_type type);
+
+ LM_GGML_API void lm_ggml_cpu_init(void);
+
+ //
+ // CPU backend
+ //
+
+ LM_GGML_API lm_ggml_backend_t lm_ggml_backend_cpu_init(void);
+
+ LM_GGML_API bool lm_ggml_backend_is_cpu (lm_ggml_backend_t backend);
+ LM_GGML_API void lm_ggml_backend_cpu_set_n_threads (lm_ggml_backend_t backend_cpu, int n_threads);
+ LM_GGML_API void lm_ggml_backend_cpu_set_threadpool (lm_ggml_backend_t backend_cpu, lm_ggml_threadpool_t threadpool);
+ LM_GGML_API void lm_ggml_backend_cpu_set_abort_callback(lm_ggml_backend_t backend_cpu, lm_ggml_abort_callback abort_callback, void * abort_callback_data);
+
+ LM_GGML_API lm_ggml_backend_reg_t lm_ggml_backend_cpu_reg(void);
+
+ #ifdef LM_GGML_USE_CPU_HBM
+ LM_GGML_API lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_hbm_buffer_type(void);
+ #endif
+
+ #ifdef __cplusplus
+ }
+ #endif
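
For orientation, a minimal sketch of how a caller might drive the plan/compute API declared in this new header: fill threadpool params with lm_ggml_threadpool_params_default(), create the pool, ask lm_ggml_graph_plan() for the required work-buffer size, allocate plan.work_data yourself (as the header comment requires), then run lm_ggml_graph_compute(). The already-built cgraph pointer and the plain malloc/free are placeholders for illustration, not part of this header; lm_ggml_graph_compute_with_ctx() is the declared variant that skips the manual work buffer by reusing the context's memory.

#include <stdint.h>
#include <stdlib.h>
#include "ggml.h"
#include "ggml-cpu.h"

// Hypothetical helper: run an already-built graph on a fresh threadpool.
static enum lm_ggml_status compute_on_pool(struct lm_ggml_cgraph * gf, int n_threads) {
    // default params: no cpumask, normal priority, not paused
    struct lm_ggml_threadpool_params tpp = lm_ggml_threadpool_params_default(n_threads);
    struct lm_ggml_threadpool * tp = lm_ggml_threadpool_new(&tpp);

    // plan first: fills in work_size for this graph and thread count
    struct lm_ggml_cplan plan = lm_ggml_graph_plan(gf, n_threads, tp);

    uint8_t * work = NULL;
    if (plan.work_size > 0) {
        work = malloc(plan.work_size); // caller-owned work buffer
        plan.work_data = work;
    }

    enum lm_ggml_status status = lm_ggml_graph_compute(gf, &plan);

    free(work);
    lm_ggml_threadpool_free(tp);
    return status;
}

A pool created this way can also be reused across graphs via the declared lm_ggml_threadpool_pause()/lm_ggml_threadpool_resume() calls, or handed to the CPU backend with lm_ggml_backend_cpu_set_threadpool().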
package/cpp/ggml-impl.h CHANGED
@@ -8,6 +8,7 @@
  #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
  #include <stdbool.h>
  #include <stdint.h>
+ #include <string.h>
 
  #ifdef __cplusplus
  extern "C" {
@@ -19,6 +20,9 @@ extern "C" {
  #define MIN(a, b) ((a) < (b) ? (a) : (b))
  #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
+ // required for mmap as gguf only guarantees 32-byte alignment
+ #define TENSOR_ALIGNMENT 32
+
  // static_assert should be a #define, but if it's not,
  // fall back to the _Static_assert C11 keyword.
  // if C99 - static_assert is noop
@@ -33,6 +37,20 @@ extern "C" {
  #endif
  #endif
 
+ static inline int lm_ggml_up32(int n) {
+     return (n + 31) & ~31;
+ }
+
+ //static inline int lm_ggml_up64(int n) {
+ //    return (n + 63) & ~63;
+ //}
+
+ static inline int lm_ggml_up(int n, int m) {
+     // assert m is a power of 2
+     LM_GGML_ASSERT((m & (m - 1)) == 0);
+     return (n + m - 1) & ~(m - 1);
+ }
+
  //
  // logging
  //
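
A quick check of the round-up arithmetic added above: for a power-of-two m, (n + m - 1) & ~(m - 1) adds m - 1 and then clears the low bits, which rounds n up to the next multiple of m and leaves exact multiples unchanged. A standalone sketch of the same bit trick, with illustrative values (the helper name here is hypothetical):

#include <assert.h>

// same idea as lm_ggml_up(), reproduced standalone for illustration
static inline int round_up_pow2(int n, int m) {
    return (n + m - 1) & ~(m - 1); // m must be a power of two
}

int main(void) {
    assert(round_up_pow2(100, 32) == 128); // 100 -> next multiple of 32
    assert(round_up_pow2(128, 32) == 128); // exact multiples unchanged
    assert(round_up_pow2(  1, 32) ==  32);
    return 0;
}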
@@ -48,6 +66,74 @@ void lm_ggml_log_callback_default(enum lm_ggml_log_level level, const char * tex
  #define LM_GGML_LOG_DEBUG(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
  #define LM_GGML_LOG_CONT(...)  lm_ggml_log_internal(LM_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
 
+ #define LM_GGML_DEBUG 0
+
+ #if (LM_GGML_DEBUG >= 1)
+ #define LM_GGML_PRINT_DEBUG(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
+ #else
+ #define LM_GGML_PRINT_DEBUG(...)
+ #endif
+
+ #if (LM_GGML_DEBUG >= 5)
+ #define LM_GGML_PRINT_DEBUG_5(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
+ #else
+ #define LM_GGML_PRINT_DEBUG_5(...)
+ #endif
+
+ #if (LM_GGML_DEBUG >= 10)
+ #define LM_GGML_PRINT_DEBUG_10(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
+ #else
+ #define LM_GGML_PRINT_DEBUG_10(...)
+ #endif
+
+ // tensor params
+
+ static void lm_ggml_set_op_params(struct lm_ggml_tensor * tensor, const void * params, size_t params_size) {
+     LM_GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
+     assert(params_size <= LM_GGML_MAX_OP_PARAMS);
+     memcpy(tensor->op_params, params, params_size);
+ }
+
+ static int32_t lm_ggml_get_op_params_i32(const struct lm_ggml_tensor * tensor, uint32_t i) {
+     assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(int32_t));
+     return ((const int32_t *)(tensor->op_params))[i];
+ }
+
+ static float lm_ggml_get_op_params_f32(const struct lm_ggml_tensor * tensor, uint32_t i) {
+     assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(float));
+     return ((const float *)(tensor->op_params))[i];
+ }
+
+ static void lm_ggml_set_op_params_i32(struct lm_ggml_tensor * tensor, uint32_t i, int32_t value) {
+     assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(int32_t));
+     ((int32_t *)(tensor->op_params))[i] = value;
+ }
+
+ static void lm_ggml_set_op_params_f32(struct lm_ggml_tensor * tensor, uint32_t i, float value) {
+     assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(float));
+     ((float *)(tensor->op_params))[i] = value;
+ }
+
+ struct lm_ggml_map_custom1_op_params {
+     lm_ggml_custom1_op_t fun;
+     int n_tasks;
+     void * userdata;
+ };
+
+
+ struct lm_ggml_map_custom2_op_params {
+     lm_ggml_custom2_op_t fun;
+     int n_tasks;
+     void * userdata;
+ };
+
+
+ struct lm_ggml_map_custom3_op_params {
+     lm_ggml_custom3_op_t fun;
+     int n_tasks;
+     void * userdata;
+ };
+
  // bitset
 
  typedef uint32_t lm_ggml_bitset_t;
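
The op-params helpers above treat the fixed-size tensor->op_params byte array as a small scratch area of int32/float slots: lm_ggml_set_op_params() memcpy's a parameter blob in, and the typed getters/setters index it slot by slot. A standalone sketch of that packing, using a plain local buffer instead of a real tensor (the buffer size and names here are illustrative only; the real limit is LM_GGML_MAX_OP_PARAMS):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define OP_PARAMS_SIZE 64 // stand-in for LM_GGML_MAX_OP_PARAMS

int main(void) {
    uint8_t op_params[OP_PARAMS_SIZE] = {0};

    // pack two int32 parameters, the way lm_ggml_set_op_params() would via memcpy
    int32_t packed[2] = { 3, 7 };
    memcpy(op_params, packed, sizeof(packed));

    // read slot 1 back, mirroring lm_ggml_get_op_params_i32(tensor, 1)
    int32_t slot1 = ((const int32_t *)op_params)[1];
    assert(slot1 == 7);
    return 0;
}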
@@ -196,6 +282,15 @@ struct lm_ggml_cgraph {
 
  struct lm_ggml_cgraph lm_ggml_graph_view(struct lm_ggml_cgraph * cgraph, int i0, int i1);
 
+ // Memory allocation
+
+ void * lm_ggml_aligned_malloc(size_t size);
+ void lm_ggml_aligned_free(void * ptr, size_t size);
+
+ // TODO: move to threading file
+ void lm_ggml_critical_section_start(void);
+ void lm_ggml_critical_section_end(void);
+
  #ifdef __cplusplus
  }
  #endif