cui-llama.rn 1.2.6 → 1.3.3
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +3 -2
- package/android/src/main/CMakeLists.txt +26 -6
- package/android/src/main/java/com/rnllama/LlamaContext.java +115 -27
- package/android/src/main/java/com/rnllama/RNLlama.java +40 -7
- package/android/src/main/jni.cpp +228 -40
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +9 -4
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +9 -4
- package/cpp/amx/amx.cpp +196 -0
- package/cpp/amx/amx.h +20 -0
- package/cpp/amx/common.h +101 -0
- package/cpp/amx/mmq.cpp +2524 -0
- package/cpp/amx/mmq.h +16 -0
- package/cpp/common.cpp +118 -251
- package/cpp/common.h +53 -30
- package/cpp/ggml-aarch64.c +46 -3395
- package/cpp/ggml-aarch64.h +0 -20
- package/cpp/ggml-alloc.c +6 -8
- package/cpp/ggml-backend-impl.h +33 -11
- package/cpp/ggml-backend-reg.cpp +423 -0
- package/cpp/ggml-backend.cpp +14 -676
- package/cpp/ggml-backend.h +46 -9
- package/cpp/ggml-common.h +6 -0
- package/cpp/ggml-cpu-aarch64.c +3823 -0
- package/cpp/ggml-cpu-aarch64.h +32 -0
- package/cpp/ggml-cpu-impl.h +14 -242
- package/cpp/ggml-cpu-quants.c +10835 -0
- package/cpp/ggml-cpu-quants.h +63 -0
- package/cpp/ggml-cpu.c +13971 -13720
- package/cpp/ggml-cpu.cpp +715 -0
- package/cpp/ggml-cpu.h +65 -63
- package/cpp/ggml-impl.h +285 -25
- package/cpp/ggml-metal.h +8 -8
- package/cpp/ggml-metal.m +1221 -728
- package/cpp/ggml-quants.c +189 -10681
- package/cpp/ggml-quants.h +78 -125
- package/cpp/ggml-threading.cpp +12 -0
- package/cpp/ggml-threading.h +12 -0
- package/cpp/ggml.c +688 -1460
- package/cpp/ggml.h +58 -244
- package/cpp/json-schema-to-grammar.cpp +1045 -1045
- package/cpp/json.hpp +24766 -24766
- package/cpp/llama-sampling.cpp +5 -2
- package/cpp/llama.cpp +409 -123
- package/cpp/llama.h +8 -4
- package/cpp/rn-llama.hpp +89 -25
- package/cpp/sampling.cpp +42 -3
- package/cpp/sampling.h +22 -1
- package/cpp/sgemm.cpp +608 -0
- package/cpp/speculative.cpp +270 -0
- package/cpp/speculative.h +28 -0
- package/cpp/unicode.cpp +11 -0
- package/ios/RNLlama.mm +43 -20
- package/ios/RNLlamaContext.h +9 -3
- package/ios/RNLlamaContext.mm +146 -33
- package/jest/mock.js +0 -1
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/grammar.js +4 -2
- package/lib/commonjs/grammar.js.map +1 -1
- package/lib/commonjs/index.js +52 -15
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/grammar.js +2 -1
- package/lib/module/grammar.js.map +1 -1
- package/lib/module/index.js +51 -15
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +122 -8
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/grammar.d.ts +5 -6
- package/lib/typescript/grammar.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +15 -6
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +2 -1
- package/src/NativeRNLlama.ts +135 -13
- package/src/grammar.ts +10 -8
- package/src/index.ts +104 -28
package/cpp/ggml.h
CHANGED
@@ -176,15 +176,15 @@
 #ifdef LM_GGML_SHARED
 #    if defined(_WIN32) && !defined(__MINGW32__)
 #        ifdef LM_GGML_BUILD
-#            define LM_GGML_API __declspec(dllexport)
+#            define LM_GGML_API __declspec(dllexport) extern
 #        else
-#            define LM_GGML_API __declspec(dllimport)
+#            define LM_GGML_API __declspec(dllimport) extern
 #        endif
 #    else
-#        define LM_GGML_API __attribute__ ((visibility ("default")))
+#        define LM_GGML_API __attribute__ ((visibility ("default"))) extern
 #    endif
 #else
-#    define LM_GGML_API
+#    define LM_GGML_API extern
 #endif
 
 // TODO: support for clang
@@ -390,6 +390,9 @@ extern "C" {
         LM_GGML_TYPE_Q4_0_8_8 = 33,
         LM_GGML_TYPE_TQ1_0    = 34,
         LM_GGML_TYPE_TQ2_0    = 35,
+        LM_GGML_TYPE_IQ4_NL_4_4 = 36,
+        // LM_GGML_TYPE_IQ4_NL_4_8 = 37,
+        // LM_GGML_TYPE_IQ4_NL_8_8 = 38,
         LM_GGML_TYPE_COUNT,
     };
 
@@ -510,7 +513,7 @@ extern "C" {
         LM_GGML_OP_WIN_UNPART,
         LM_GGML_OP_GET_REL_POS,
         LM_GGML_OP_ADD_REL_POS,
-        LM_GGML_OP_RWKV_WKV,
+        LM_GGML_OP_RWKV_WKV6,
 
         LM_GGML_OP_UNARY,
 
@@ -603,7 +606,6 @@ extern "C" {
 
         int32_t flags;
 
-        struct lm_ggml_tensor * grad;
         struct lm_ggml_tensor * src[LM_GGML_MAX_SRC];
 
         // source tensor and offset for views
@@ -616,7 +618,7 @@ extern "C" {
 
         void * extra; // extra things e.g. for ggml-cuda.cu
 
-        // char padding[4];
+        char padding[8];
     };
 
     static const size_t LM_GGML_TENSOR_SIZE = sizeof(struct lm_ggml_tensor);
@@ -1491,7 +1493,7 @@ extern "C" {
             "use lm_ggml_rope_ext_inplace instead");
 
     // compute correction dims for YaRN RoPE scaling
-    void lm_ggml_rope_yarn_corr_dims(
+    LM_GGML_API void lm_ggml_rope_yarn_corr_dims(
         int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2]);
 
     // rotary position embedding backward, i.e compute dx from dy
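The only change to `lm_ggml_rope_yarn_corr_dims` is that it now carries the `LM_GGML_API` export qualifier; the signature itself is unchanged. A minimal call sketch, with illustrative values only:

```c
#include "ggml.h"

// Illustrative values only; the two correction dims land in corr_dims.
static void yarn_corr_dims_example(void) {
    float corr_dims[2];
    lm_ggml_rope_yarn_corr_dims(/*n_dims*/ 128, /*n_ctx_orig*/ 4096, /*freq_base*/ 10000.0f,
                                /*beta_fast*/ 32.0f, /*beta_slow*/ 1.0f, corr_dims);
    (void) corr_dims;
}
```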
@@ -1747,6 +1749,9 @@ extern "C" {
             struct lm_ggml_tensor * a,
             enum lm_ggml_prec       prec);
 
+    LM_GGML_API enum lm_ggml_prec lm_ggml_flash_attn_ext_get_prec(
+            const struct lm_ggml_tensor * a);
+
     // TODO: needs to be adapted to lm_ggml_flash_attn_ext
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_flash_attn_back(
             struct lm_ggml_context * ctx,
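The new getter pairs with the existing precision setter (only the setter's parameter list is visible in the hunk above; its name `lm_ggml_flash_attn_ext_set_prec` is assumed from the corresponding upstream ggml API). A minimal sketch, with q/k/v/mask built elsewhere:

```c
#include "ggml.h"

// Sketch only: request f32 accumulation for a flash-attention node, then read it back.
static enum lm_ggml_prec fa_prec_roundtrip(struct lm_ggml_context * ctx,
                                           struct lm_ggml_tensor * q, struct lm_ggml_tensor * k,
                                           struct lm_ggml_tensor * v, struct lm_ggml_tensor * mask) {
    struct lm_ggml_tensor * att = lm_ggml_flash_attn_ext(ctx, q, k, v, mask,
                                                         /*scale*/ 1.0f, /*max_bias*/ 0.0f, /*logit_softcap*/ 0.0f);
    lm_ggml_flash_attn_ext_set_prec(att, LM_GGML_PREC_F32); // pre-existing setter (name assumed)
    return lm_ggml_flash_attn_ext_get_prec(att);            // getter added in this release
}
```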
@@ -1820,7 +1825,7 @@ extern "C" {
             struct lm_ggml_tensor * pw,
             struct lm_ggml_tensor * ph);
 
-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_rwkv_wkv(
+    LM_GGML_API struct lm_ggml_tensor * lm_ggml_rwkv_wkv6(
             struct lm_ggml_context * ctx,
             struct lm_ggml_tensor  * k,
             struct lm_ggml_tensor  * v,
@@ -1983,28 +1988,20 @@ extern "C" {
             struct lm_ggml_context * ctx,
             struct lm_ggml_tensor  * a,
             struct lm_ggml_tensor  * grad,
-            float                    alpha,
-            float                    beta1,
-            float                    beta2,
-            float                    eps,
-            float                    wd); // weight decay
+            struct lm_ggml_tensor  * m,
+            struct lm_ggml_tensor  * v,
+            struct lm_ggml_tensor  * adamw_params); // parameters such a the learning rate
 
     //
     // automatic differentiation
     //
 
-    LM_GGML_API void lm_ggml_build_forward_expand (struct lm_ggml_cgraph * cgraph, struct lm_ggml_tensor * tensor);
-    LM_GGML_API void lm_ggml_build_backward_expand(struct lm_ggml_context * ctx, struct lm_ggml_cgraph * gf, struct lm_ggml_cgraph * gb, bool accumulate);
-
-    LM_GGML_API void lm_ggml_build_opt_adamw(
-            struct lm_ggml_context * ctx,
-            struct lm_ggml_cgraph  * gf,
-            struct lm_ggml_cgraph  * gb,
-            float                    alpha,
-            float                    beta1,
-            float                    beta2,
-            float                    eps,
-            float                    wd); // weight decay
+    LM_GGML_API void lm_ggml_build_forward_expand(struct lm_ggml_cgraph * cgraph, struct lm_ggml_tensor * tensor);
+    LM_GGML_API void lm_ggml_build_backward_expand(
+        struct lm_ggml_context * ctx_static, // context for static gradients (loss + gradient accumulation)
+        struct lm_ggml_context * ctx_compute, // context for gradient computation
+        struct lm_ggml_cgraph  * cgraph,
+        bool accumulate); // whether or not gradients should be accumulated, requires static allocation of tensors in ctx_static
 
     // graph allocation in a context
     LM_GGML_API struct lm_ggml_cgraph * lm_ggml_new_graph (struct lm_ggml_context * ctx); // size = LM_GGML_DEFAULT_GRAPH_SIZE, grads = false
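`lm_ggml_opt_step_adamw` now takes its optimizer state (first and second moments) and its hyperparameters as tensors rather than as scalar arguments. A hedged sketch of what a caller might allocate for the step; the 1-D layout and element count of `adamw_params` are assumptions, not taken from this package:

```c
#include "ggml.h"

// Hypothetical sketch: ctx, a (the parameter tensor) and grad are built elsewhere.
static struct lm_ggml_tensor * adamw_step_sketch(struct lm_ggml_context * ctx,
                                                 struct lm_ggml_tensor * a,
                                                 struct lm_ggml_tensor * grad) {
    struct lm_ggml_tensor * m = lm_ggml_dup_tensor(ctx, grad); // first moment, same shape as grad
    struct lm_ggml_tensor * v = lm_ggml_dup_tensor(ctx, grad); // second moment
    // Small f32 tensor holding alpha, beta1, beta2, eps, wd, ... (exact layout assumed).
    struct lm_ggml_tensor * adamw_params = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_F32, 7);
    return lm_ggml_opt_step_adamw(ctx, a, grad, m, v, adamw_params);
}
```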
@@ -2024,7 +2021,9 @@ extern "C" {
     LM_GGML_API size_t lm_ggml_graph_overhead(void);
     LM_GGML_API size_t lm_ggml_graph_overhead_custom(size_t size, bool grads);
 
-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_graph_get_tensor(struct lm_ggml_cgraph * cgraph, const char * name);
+    LM_GGML_API struct lm_ggml_tensor * lm_ggml_graph_get_tensor  (const struct lm_ggml_cgraph * cgraph, const char * name);
+    LM_GGML_API struct lm_ggml_tensor * lm_ggml_graph_get_grad    (const struct lm_ggml_cgraph * cgraph, const struct lm_ggml_tensor * node);
+    LM_GGML_API struct lm_ggml_tensor * lm_ggml_graph_get_grad_acc(const struct lm_ggml_cgraph * cgraph, const struct lm_ggml_tensor * node);
 
     LM_GGML_API void lm_ggml_graph_export(const struct lm_ggml_cgraph * cgraph, const char * fname);
     LM_GGML_API struct lm_ggml_cgraph * lm_ggml_graph_import(const char * fname, struct lm_ggml_context ** ctx_data, struct lm_ggml_context ** ctx_eval);
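With `tensor->grad` removed from `struct lm_ggml_tensor` (see the `@@ -603,7 +606,6 @@` hunk above), gradients are built into and queried from the graph itself. A minimal sketch under the assumption that the usual ggml context/graph helpers behave as in upstream ggml; `lm_ggml_set_loss` in particular is assumed and not shown in this diff:

```c
#include "ggml.h"

// Sketch only: build a tiny loss, expand its backward graph, then look up d(loss)/dx.
static void backward_graph_sketch(void) {
    struct lm_ggml_init_params ip = { /*mem_size*/ 16u*1024*1024, /*mem_buffer*/ NULL, /*no_alloc*/ false };
    struct lm_ggml_context * ctx = lm_ggml_init(ip);

    struct lm_ggml_tensor * x = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_F32, 8);
    lm_ggml_set_param(ctx, x);                                   // mark x as trainable
    struct lm_ggml_tensor * loss = lm_ggml_sum(ctx, lm_ggml_sqr(ctx, x));
    lm_ggml_set_loss(loss);                                      // assumed requirement, as in upstream ggml

    struct lm_ggml_cgraph * gf = lm_ggml_new_graph_custom(ctx, LM_GGML_DEFAULT_GRAPH_SIZE, /*grads*/ true);
    lm_ggml_build_forward_expand(gf, loss);
    lm_ggml_build_backward_expand(/*ctx_static*/ ctx, /*ctx_compute*/ ctx, gf, /*accumulate*/ false);

    struct lm_ggml_tensor * dx = lm_ggml_graph_get_grad(gf, x);  // replaces the old x->grad
    (void) dx;
    lm_ggml_free(ctx);
}
```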
@@ -2035,198 +2034,15 @@ extern "C" {
     // dump the graph into a file using the dot format
     LM_GGML_API void lm_ggml_graph_dump_dot(const struct lm_ggml_cgraph * gb, const struct lm_ggml_cgraph * gf, const char * filename);
 
-    //
-    // gb_tmp will contain original backward graph with rewritten backward process nodes,
-    // but without the second forward pass nodes.
-    LM_GGML_API void lm_ggml_build_backward_gradient_checkpointing(
-            struct lm_ggml_context   * ctx,
-            struct lm_ggml_cgraph    * gf,
-            struct lm_ggml_cgraph    * gb,
-            struct lm_ggml_cgraph    * gb_tmp,
-            struct lm_ggml_tensor  * * checkpoints,
-            int                        n_checkpoints);
-    //
-    // optimization
-    //
-
-    // optimization methods
-    enum lm_ggml_opt_type {
-        LM_GGML_OPT_TYPE_ADAM,
-        LM_GGML_OPT_TYPE_LBFGS,
-    };
-
-    // linesearch methods
-    enum lm_ggml_linesearch {
-        LM_GGML_LINESEARCH_DEFAULT = 1,
-
-        LM_GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
-        LM_GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
-        LM_GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
-    };
-
-    // optimization return values
-    enum lm_ggml_opt_result {
-        LM_GGML_OPT_RESULT_OK = 0,
-        LM_GGML_OPT_RESULT_DID_NOT_CONVERGE,
-        LM_GGML_OPT_RESULT_NO_CONTEXT,
-        LM_GGML_OPT_RESULT_INVALID_WOLFE,
-        LM_GGML_OPT_RESULT_FAIL,
-        LM_GGML_OPT_RESULT_CANCEL,
-
-        LM_GGML_LINESEARCH_FAIL = -128,
-        LM_GGML_LINESEARCH_MINIMUM_STEP,
-        LM_GGML_LINESEARCH_MAXIMUM_STEP,
-        LM_GGML_LINESEARCH_MAXIMUM_ITERATIONS,
-        LM_GGML_LINESEARCH_INVALID_PARAMETERS,
-    };
-
-    typedef void (*lm_ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel);
+    // TODO these functions were sandwiched in the old optimization interface, is there a better place for them?
     typedef void (*lm_ggml_log_callback)(enum lm_ggml_log_level level, const char * text, void * user_data);
 
     // Set callback for all future logging events.
     // If this is not called, or NULL is supplied, everything is output on stderr.
     LM_GGML_API void lm_ggml_log_set(lm_ggml_log_callback log_callback, void * user_data);
 
-    // optimization parameters
-    //
-    // see ggml.c (lm_ggml_opt_default_params) for default values
-    //
-    struct lm_ggml_opt_params {
-        enum lm_ggml_opt_type type;
-
-        size_t graph_size;
-
-        int n_threads;
-
-        // delta-based convergence test
-        //
-        // if past == 0 - disabled
-        // if past > 0:
-        //   stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
-        //
-        int   past;
-        float delta;
-
-        // maximum number of iterations without improvement
-        //
-        // if 0 - disabled
-        // if > 0:
-        //   assume convergence if no cost improvement in this number of iterations
-        //
-        int max_no_improvement;
-
-        bool print_forward_graph;
-        bool print_backward_graph;
-
-        int n_gradient_accumulation;
-
-        // ADAM parameters
-        struct {
-            int n_iter;
-
-            float sched; // schedule multiplier (fixed, decay or warmup)
-            float decay; // weight decay for AdamW, use 0.0f to disable
-            int   decay_min_ndim; // minimum number of tensor dimension to apply weight decay
-            float alpha; // learning rate
-            float beta1;
-            float beta2;
-            float eps;   // epsilon for numerical stability
-            float eps_f; // epsilon for convergence test
-            float eps_g; // epsilon for convergence test
-            float gclip; // gradient clipping
-        } adam;
-
-        // LBFGS parameters
-        struct {
-            int m; // number of corrections to approximate the inv. Hessian
-            int n_iter;
-            int max_linesearch;
-
-            float eps;      // convergence tolerance
-            float ftol;     // line search tolerance
-            float wolfe;
-            float min_step;
-            float max_step;
-
-            enum lm_ggml_linesearch linesearch;
-        } lbfgs;
-    };
-
-    struct lm_ggml_opt_context {
-        struct lm_ggml_context * ctx;
-        struct lm_ggml_opt_params params;
-
-        int iter;
-        int64_t nx; // number of parameter elements
-
-        bool just_initialized;
-
-        float loss_before;
-        float loss_after;
-
-        struct {
-            struct lm_ggml_tensor * g;  // current gradient
-            struct lm_ggml_tensor * m;  // first moment
-            struct lm_ggml_tensor * v;  // second moment
-            struct lm_ggml_tensor * pf; // past function values
-            float fx_best;
-            float fx_prev;
-            int n_no_improvement;
-        } adam;
-
-        struct {
-            struct lm_ggml_tensor * x;    // current parameters
-            struct lm_ggml_tensor * xp;   // previous parameters
-            struct lm_ggml_tensor * g;    // current gradient
-            struct lm_ggml_tensor * gp;   // previous gradient
-            struct lm_ggml_tensor * d;    // search direction
-            struct lm_ggml_tensor * pf;   // past function values
-            struct lm_ggml_tensor * lmal; // the L-BFGS memory alpha
-            struct lm_ggml_tensor * lmys; // the L-BFGS memory ys
-            struct lm_ggml_tensor * lms;  // the L-BFGS memory s
-            struct lm_ggml_tensor * lmy;  // the L-BFGS memory y
-            float fx_best;
-            float step;
-            int j;
-            int k;
-            int end;
-            int n_no_improvement;
-        } lbfgs;
-    };
-
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_zero(struct lm_ggml_tensor * tensor);
 
-    LM_GGML_API struct lm_ggml_opt_params lm_ggml_opt_default_params(enum lm_ggml_opt_type type);
-
-    // optimize the function defined by the tensor f
-    LM_GGML_API enum lm_ggml_opt_result lm_ggml_opt(
-            struct lm_ggml_context * ctx,
-            struct lm_ggml_opt_params params,
-            struct lm_ggml_tensor * f);
-
-    // initialize optimizer context
-    LM_GGML_API void lm_ggml_opt_init(
-            struct lm_ggml_context * ctx,
-            struct lm_ggml_opt_context * opt,
-            struct lm_ggml_opt_params params,
-            int64_t nx);
-
-    // continue optimizing the function defined by the tensor f
-    LM_GGML_API enum lm_ggml_opt_result lm_ggml_opt_resume(
-            struct lm_ggml_context * ctx,
-            struct lm_ggml_opt_context * opt,
-            struct lm_ggml_tensor * f);
-
-    // continue optimizing the function defined by the tensor f
-    LM_GGML_API enum lm_ggml_opt_result lm_ggml_opt_resume_g(
-            struct lm_ggml_context * ctx,
-            struct lm_ggml_opt_context * opt,
-            struct lm_ggml_tensor * f,
-            struct lm_ggml_cgraph * gf,
-            struct lm_ggml_cgraph * gb,
-            lm_ggml_opt_callback callback,
-            void * callback_data);
-
     //
     // quantization
     //
@@ -2382,38 +2198,6 @@ extern "C" {
     LM_GGML_API size_t lm_gguf_get_meta_size(const struct lm_gguf_context * ctx);
     LM_GGML_API void lm_gguf_get_meta_data(const struct lm_gguf_context * ctx, void * data);
 
-    //
-    // system info
-    //
-
-    LM_GGML_API int lm_ggml_cpu_has_avx        (void);
-    LM_GGML_API int lm_ggml_cpu_has_avx_vnni   (void);
-    LM_GGML_API int lm_ggml_cpu_has_avx2       (void);
-    LM_GGML_API int lm_ggml_cpu_has_avx512     (void);
-    LM_GGML_API int lm_ggml_cpu_has_avx512_vbmi(void);
-    LM_GGML_API int lm_ggml_cpu_has_avx512_vnni(void);
-    LM_GGML_API int lm_ggml_cpu_has_avx512_bf16(void);
-    LM_GGML_API int lm_ggml_cpu_has_amx_int8   (void);
-    LM_GGML_API int lm_ggml_cpu_has_fma        (void);
-    LM_GGML_API int lm_ggml_cpu_has_arm_fma    (void);
-    LM_GGML_API int lm_ggml_cpu_has_metal      (void);
-    LM_GGML_API int lm_ggml_cpu_has_f16c       (void);
-    LM_GGML_API int lm_ggml_cpu_has_fp16_va    (void);
-    LM_GGML_API int lm_ggml_cpu_has_wasm_simd  (void);
-    LM_GGML_API int lm_ggml_cpu_has_blas       (void);
-    LM_GGML_API int lm_ggml_cpu_has_cuda       (void);
-    LM_GGML_API int lm_ggml_cpu_has_vulkan     (void);
-    LM_GGML_API int lm_ggml_cpu_has_kompute    (void);
-    LM_GGML_API int lm_ggml_cpu_has_gpublas    (void);
-    LM_GGML_API int lm_ggml_cpu_has_sse3       (void);
-    LM_GGML_API int lm_ggml_cpu_has_ssse3      (void);
-    LM_GGML_API int lm_ggml_cpu_has_riscv_v    (void);
-    LM_GGML_API int lm_ggml_cpu_has_sycl       (void);
-    LM_GGML_API int lm_ggml_cpu_has_rpc        (void);
-    LM_GGML_API int lm_ggml_cpu_has_vsx        (void);
-    LM_GGML_API int lm_ggml_cpu_has_cann       (void);
-    LM_GGML_API int lm_ggml_cpu_has_llamafile  (void);
-
 #ifdef __cplusplus
 // restrict not standard in C++
 #define LM_GGML_RESTRICT
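The `lm_ggml_cpu_has_*` probes are only dropped from `ggml.h`, not necessarily from the package: the file list above shows `ggml-cpu.h` changing in the same release, and upstream ggml moved these probes into the CPU backend header. A hedged sketch, assuming the probes keep their names in `ggml-cpu.h`:

```c
#include "ggml-cpu.h"   // assumed new home of the feature probes

// Sketch only: query two of the probes that were removed from ggml.h above.
static int count_simd_features(void) {
    int n = 0;
    if (lm_ggml_cpu_has_avx2())    { n++; }
    if (lm_ggml_cpu_has_arm_fma()) { n++; }
    return n;
}
```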
@@ -2430,12 +2214,42 @@ extern "C" {
         size_t                  type_size;
         bool                    is_quantized;
         lm_ggml_to_float_t      to_float;
-        lm_ggml_from_float_t    from_float;
         lm_ggml_from_float_t    from_float_ref;
     };
 
     LM_GGML_API const struct lm_ggml_type_traits * lm_ggml_get_type_traits(enum lm_ggml_type type);
 
+    // ggml threadpool
+    // TODO: currently, only a few functions are in the base ggml API, while the rest are in the CPU backend
+    // the goal should be to create an API that other backends can use move everything to the ggml base
+
+    // scheduling priorities
+    enum lm_ggml_sched_priority {
+        LM_GGML_SCHED_PRIO_NORMAL,
+        LM_GGML_SCHED_PRIO_MEDIUM,
+        LM_GGML_SCHED_PRIO_HIGH,
+        LM_GGML_SCHED_PRIO_REALTIME
+    };
+
+    // threadpool params
+    // Use lm_ggml_threadpool_params_default() or lm_ggml_threadpool_params_init() to populate the defaults
+    struct lm_ggml_threadpool_params {
+        bool                cpumask[LM_GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings)
+        int                 n_threads;                      // number of threads
+        enum lm_ggml_sched_priority prio;                   // thread priority
+        uint32_t            poll;                           // polling level (0 - no polling, 100 - aggressive polling)
+        bool                strict_cpu;                     // strict cpu placement
+        bool                paused;                         // start in paused state
+    };
+
+    struct lm_ggml_threadpool;     // forward declaration, see ggml.c
+
+    typedef struct lm_ggml_threadpool * lm_ggml_threadpool_t;
+
+    LM_GGML_API struct lm_ggml_threadpool_params lm_ggml_threadpool_params_default(int n_threads);
+    LM_GGML_API void                             lm_ggml_threadpool_params_init   (struct lm_ggml_threadpool_params * p, int n_threads);
+    LM_GGML_API bool                             lm_ggml_threadpool_params_match  (const struct lm_ggml_threadpool_params * p0, const struct lm_ggml_threadpool_params * p1);
+
 #ifdef __cplusplus
 }
 #endif
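Since the struct layout and the helpers are fully visible above, a small usage sketch for the new threadpool parameters; attaching the resulting pool to a compute call is backend-specific and not shown here:

```c
#include "ggml.h"

// Populate defaults, then tweak the fields declared in the new struct above.
static struct lm_ggml_threadpool_params make_threadpool_params(void) {
    struct lm_ggml_threadpool_params p = lm_ggml_threadpool_params_default(/*n_threads*/ 4);
    p.prio       = LM_GGML_SCHED_PRIO_HIGH; // scheduling priority from the new enum
    p.poll       = 50;                      // medium polling aggressiveness
    p.strict_cpu = false;                   // no strict core pinning
    return p;
}
```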