cui-llama.rn 1.2.3 → 1.2.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -2
- package/android/src/main/CMakeLists.txt +1 -0
- package/android/src/main/java/com/rnllama/LlamaContext.java +0 -3
- package/android/src/main/jni.cpp +9 -11
- package/cpp/common.cpp +85 -75
- package/cpp/common.h +127 -91
- package/cpp/ggml-aarch64.c +269 -0
- package/cpp/ggml-alloc.c +17 -19
- package/cpp/ggml-backend-impl.h +4 -15
- package/cpp/ggml-backend.cpp +1697 -1626
- package/cpp/ggml-backend.h +13 -25
- package/cpp/ggml-cpp.h +38 -0
- package/cpp/ggml-cpu.c +13720 -0
- package/cpp/ggml-cpu.h +150 -0
- package/cpp/ggml-impl.h +95 -0
- package/cpp/ggml-metal.m +185 -71
- package/cpp/ggml-quants.c +38 -51
- package/cpp/ggml.c +4468 -19500
- package/cpp/ggml.h +26 -146
- package/cpp/json-schema-to-grammar.cpp +1 -1
- package/cpp/llama-sampling.cpp +742 -249
- package/cpp/llama-sampling.h +21 -2
- package/cpp/llama-vocab.cpp +49 -9
- package/cpp/llama-vocab.h +35 -11
- package/cpp/llama.cpp +2468 -2307
- package/cpp/llama.h +65 -32
- package/cpp/log.cpp +50 -50
- package/cpp/log.h +18 -18
- package/cpp/rn-llama.hpp +23 -22
- package/cpp/sampling.cpp +117 -118
- package/cpp/sampling.h +20 -20
- package/cpp/sgemm.cpp +57 -0
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +0 -1
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +0 -1
package/cpp/ggml.h
CHANGED
```diff
@@ -218,7 +218,6 @@
 
 #define LM_GGML_MAX_DIMS 4
 #define LM_GGML_MAX_PARAMS 2048
-#define LM_GGML_MAX_CONTEXTS 64
 #define LM_GGML_MAX_SRC 10
 #define LM_GGML_MAX_N_THREADS 512
 #define LM_GGML_MAX_OP_PARAMS 64
```
```diff
@@ -560,10 +559,10 @@ extern "C" {
 
     enum lm_ggml_log_level {
        LM_GGML_LOG_LEVEL_NONE = 0,
-
-
-
-
+       LM_GGML_LOG_LEVEL_DEBUG = 1,
+       LM_GGML_LOG_LEVEL_INFO = 2,
+       LM_GGML_LOG_LEVEL_WARN = 3,
+       LM_GGML_LOG_LEVEL_ERROR = 4,
        LM_GGML_LOG_LEVEL_CONT = 5, // continue previous log
     };
 
```
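Note on the hunk above: the log-level enumerators are renumbered so that severity increases monotonically (DEBUG = 1 through ERROR = 4, with CONT = 5 for continuation lines); the removed enumerator lines are not rendered in this diff view. Below is a minimal sketch of a verbosity filter written against the new values. The callback shape follows the lm_ggml_log_callback typedef in ggml.h, and the llama_log_set() registration mentioned in the comment is the usual entry point in this package's llama.h; neither appears in this hunk, so treat both as assumptions.

```c
// Sketch: verbosity filter over the renumbered log levels.
// Assumes the lm_ggml_log_callback signature from ggml.h; the INFO threshold
// is an arbitrary example.
#include <stdio.h>
#include "ggml.h"

static void example_log_cb(enum lm_ggml_log_level level, const char * text, void * user_data) {
    (void) user_data;
    // With the new ordering, DEBUG (1) < INFO (2) < WARN (3) < ERROR (4),
    // so a plain numeric comparison works as a verbosity filter.
    // CONT (5) always passes, which keeps continuation lines attached.
    if (level >= LM_GGML_LOG_LEVEL_INFO) {
        fputs(text, stderr);
    }
}

// Typical registration (llama_log_set() is declared in this package's llama.h):
//   llama_log_set(example_log_cb, NULL);
```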
```diff
@@ -575,6 +574,13 @@ extern "C" {
        LM_GGML_TENSOR_FLAG_LOSS = 8, // ...defines loss for numerical optimization (multiple loss tensors add up)
     };
 
+    struct lm_ggml_init_params {
+        // memory pool
+        size_t mem_size; // bytes
+        void * mem_buffer; // if NULL, memory will be allocated internally
+        bool no_alloc; // don't allocate memory for the tensor data
+    };
+
     // n-dimensional tensor
     struct lm_ggml_tensor {
         enum lm_ggml_type type;
```
```diff
@@ -620,66 +626,6 @@ extern "C" {
     // If it returns true, the computation is aborted
     typedef bool (*lm_ggml_abort_callback)(void * data);
 
-    // Scheduling priorities
-    enum lm_ggml_sched_priority {
-        LM_GGML_SCHED_PRIO_NORMAL,
-        LM_GGML_SCHED_PRIO_MEDIUM,
-        LM_GGML_SCHED_PRIO_HIGH,
-        LM_GGML_SCHED_PRIO_REALTIME
-    };
-
-    // Threadpool params
-    // Use lm_ggml_threadpool_params_default() or lm_ggml_threadpool_params_init() to populate the defaults
-    struct lm_ggml_threadpool_params {
-        bool cpumask[LM_GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings)
-        int n_threads; // number of threads
-        enum lm_ggml_sched_priority prio; // thread priority
-        uint32_t poll; // polling level (0 - no polling, 100 - aggressive polling)
-        bool strict_cpu; // strict cpu placement
-        bool paused; // start in paused state
-    };
-
-    struct lm_ggml_threadpool; // forward declaration, see ggml.c
-
-    typedef struct lm_ggml_threadpool * lm_ggml_threadpool_t;
-
-    // the compute plan that needs to be prepared for lm_ggml_graph_compute()
-    // since https://github.com/ggerganov/ggml/issues/287
-    struct lm_ggml_cplan {
-        size_t work_size; // size of work buffer, calculated by `lm_ggml_graph_plan()`
-        uint8_t * work_data; // work buffer, to be allocated by caller before calling to `lm_ggml_graph_compute()`
-
-        int n_threads;
-        struct lm_ggml_threadpool * threadpool;
-
-        // abort lm_ggml_graph_compute when true
-        lm_ggml_abort_callback abort_callback;
-        void * abort_callback_data;
-    };
-
-    // scratch buffer
-    struct lm_ggml_scratch {
-        size_t offs;
-        size_t size;
-        void * data;
-    };
-
-    struct lm_ggml_init_params {
-        // memory pool
-        size_t mem_size; // bytes
-        void * mem_buffer; // if NULL, memory will be allocated internally
-        bool no_alloc; // don't allocate memory for the tensor data
-    };
-
-    // numa strategies
-    enum lm_ggml_numa_strategy {
-        LM_GGML_NUMA_STRATEGY_DISABLED = 0,
-        LM_GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
-        LM_GGML_NUMA_STRATEGY_ISOLATE = 2,
-        LM_GGML_NUMA_STRATEGY_NUMACTL = 3,
-        LM_GGML_NUMA_STRATEGY_MIRROR = 4,
-        LM_GGML_NUMA_STRATEGY_COUNT
-    };
 
     //
     // GUID
```
```diff
@@ -702,9 +648,6 @@ extern "C" {
     // accepts a UTF-8 path, even on Windows
     LM_GGML_API FILE * lm_ggml_fopen(const char * fname, const char * mode);
 
-    LM_GGML_API void lm_ggml_numa_init(enum lm_ggml_numa_strategy numa); // call once for better performance on NUMA systems
-    LM_GGML_API bool lm_ggml_is_numa(void); // true if init detected that system has >1 NUMA node
-
     LM_GGML_API void lm_ggml_print_object (const struct lm_ggml_object * obj);
     LM_GGML_API void lm_ggml_print_objects(const struct lm_ggml_context * ctx);
 
```
```diff
@@ -761,12 +704,12 @@ extern "C" {
 
     // main
 
-    LM_GGML_API struct lm_ggml_context * lm_ggml_init(struct lm_ggml_init_params params);
-    LM_GGML_API void
+    LM_GGML_API struct lm_ggml_context * lm_ggml_init (struct lm_ggml_init_params params);
+    LM_GGML_API void lm_ggml_reset(struct lm_ggml_context * ctx);
+    LM_GGML_API void lm_ggml_free (struct lm_ggml_context * ctx);
 
     LM_GGML_API size_t lm_ggml_used_mem(const struct lm_ggml_context * ctx);
 
-    LM_GGML_API size_t lm_ggml_set_scratch (struct lm_ggml_context * ctx, struct lm_ggml_scratch scratch);
     LM_GGML_API bool lm_ggml_get_no_alloc(struct lm_ggml_context * ctx);
     LM_GGML_API void lm_ggml_set_no_alloc(struct lm_ggml_context * ctx, bool no_alloc);
 
```
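The hunks above relocate struct lm_ggml_init_params next to the tensor definitions, expose lm_ggml_reset() alongside lm_ggml_init()/lm_ggml_free(), and drop the scratch-buffer API (struct lm_ggml_scratch and lm_ggml_set_scratch). Below is a minimal lifecycle sketch using only declarations visible in this diff; the 16 MiB pool size is an arbitrary example, and the comment on lm_ggml_reset() reflects its apparent intent (clear the context without releasing its pool) rather than documented behaviour.

```c
// Sketch: context lifecycle with the declarations shown above.
// The 16 MiB pool size is an arbitrary example value.
#include <stdbool.h>
#include <stddef.h>
#include "ggml.h"

static void context_lifecycle_example(void) {
    struct lm_ggml_init_params params = {
        /* .mem_size   = */ 16u * 1024 * 1024, // bytes
        /* .mem_buffer = */ NULL,              // NULL: let ggml allocate the pool internally
        /* .no_alloc   = */ false,             // also allocate tensor data inside the pool
    };

    struct lm_ggml_context * ctx = lm_ggml_init(params);
    if (ctx == NULL) {
        return;
    }

    // ... create tensors / build graphs against ctx ...

    lm_ggml_reset(ctx); // newly exposed: clear the context, keep its pool for reuse
    lm_ggml_free(ctx);  // release the context and its internally allocated pool
}
```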
```diff
@@ -806,8 +749,7 @@ extern "C" {
             int64_t ne2,
             int64_t ne3);
 
-    LM_GGML_API
-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_new_f32(struct lm_ggml_context * ctx, float value);
+    LM_GGML_API void * lm_ggml_new_buffer(struct lm_ggml_context * ctx, size_t nbytes);
 
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_dup_tensor (struct lm_ggml_context * ctx, const struct lm_ggml_tensor * src);
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_view_tensor(struct lm_ggml_context * ctx, struct lm_ggml_tensor * src);
```
```diff
@@ -817,35 +759,25 @@ extern "C" {
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_get_next_tensor (const struct lm_ggml_context * ctx, struct lm_ggml_tensor * tensor);
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_get_tensor(struct lm_ggml_context * ctx, const char * name);
 
-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_zero(struct lm_ggml_tensor * tensor);
-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_i32 (struct lm_ggml_tensor * tensor, int32_t value);
-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_f32 (struct lm_ggml_tensor * tensor, float value);
-
     // Converts a flat index into coordinates
-    LM_GGML_API void
-
-    LM_GGML_API int32_t lm_ggml_get_i32_1d(const struct lm_ggml_tensor * tensor, int i);
-    LM_GGML_API void lm_ggml_set_i32_1d(const struct lm_ggml_tensor * tensor, int i, int32_t value);
+    LM_GGML_API void lm_ggml_unravel_index(const struct lm_ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);
 
-    LM_GGML_API
-    LM_GGML_API void lm_ggml_set_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
-
-    LM_GGML_API float lm_ggml_get_f32_1d(const struct lm_ggml_tensor * tensor, int i);
-    LM_GGML_API void lm_ggml_set_f32_1d(const struct lm_ggml_tensor * tensor, int i, float value);
-
-    LM_GGML_API float lm_ggml_get_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
-    LM_GGML_API void lm_ggml_set_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+    LM_GGML_API enum lm_ggml_unary_op lm_ggml_get_unary_op(const struct lm_ggml_tensor * tensor);
 
     LM_GGML_API void * lm_ggml_get_data (const struct lm_ggml_tensor * tensor);
     LM_GGML_API float * lm_ggml_get_data_f32(const struct lm_ggml_tensor * tensor);
 
-    LM_GGML_API enum lm_ggml_unary_op lm_ggml_get_unary_op(const struct lm_ggml_tensor * tensor);
-
     LM_GGML_API const char * lm_ggml_get_name (const struct lm_ggml_tensor * tensor);
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_name ( struct lm_ggml_tensor * tensor, const char * name);
     LM_GGML_ATTRIBUTE_FORMAT(2, 3)
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_format_name( struct lm_ggml_tensor * tensor, const char * fmt, ...);
 
+    // Tensor flags
+    LM_GGML_API void lm_ggml_set_input(struct lm_ggml_tensor * tensor);
+    LM_GGML_API void lm_ggml_set_output(struct lm_ggml_tensor * tensor);
+    LM_GGML_API void lm_ggml_set_param(struct lm_ggml_context * ctx, struct lm_ggml_tensor * tensor);
+    LM_GGML_API void lm_ggml_set_loss(struct lm_ggml_tensor * tensor);
+
     //
     // operations on tensors with backpropagation
     //
```
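In the hunk above, lm_ggml_set_input/lm_ggml_set_output/lm_ggml_set_param/lm_ggml_set_loss are grouped under a "Tensor flags" heading next to the naming helpers, and the per-element get/set accessors give way to lm_ggml_unravel_index(). A short sketch exercising those declarations follows; the 2-D shapes and the lm_ggml_new_tensor_2d/lm_ggml_mul_mat calls come from elsewhere in ggml.h, not from this hunk, and are illustrative only.

```c
// Sketch: relocated tensor-flag setters plus lm_ggml_unravel_index().
// The shapes and the mul_mat call are illustrative assumptions.
#include <stdint.h>
#include "ggml.h"

static void tensor_flags_example(struct lm_ggml_context * ctx) {
    struct lm_ggml_tensor * a = lm_ggml_new_tensor_2d(ctx, LM_GGML_TYPE_F32, 4, 3); // activations
    struct lm_ggml_tensor * w = lm_ggml_new_tensor_2d(ctx, LM_GGML_TYPE_F32, 4, 2); // weights

    lm_ggml_set_input(a);      // mark as a graph input
    lm_ggml_set_param(ctx, w); // mark as a trainable parameter

    struct lm_ggml_tensor * out = lm_ggml_mul_mat(ctx, w, a);
    lm_ggml_set_output(out);   // mark as a graph output

    // Convert a flat element index of 'a' into 4-D coordinates.
    int64_t i0, i1, i2, i3;
    lm_ggml_unravel_index(a, /* flat index */ 7, &i0, &i1, &i2, &i3);
}
```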
```diff
@@ -2061,9 +1993,6 @@ extern "C" {
     // automatic differentiation
     //
 
-    LM_GGML_API void lm_ggml_set_param(struct lm_ggml_context * ctx, struct lm_ggml_tensor * tensor);
-    LM_GGML_API void lm_ggml_set_loss(struct lm_ggml_tensor * tensor);
-
     LM_GGML_API void lm_ggml_build_forward_expand (struct lm_ggml_cgraph * cgraph, struct lm_ggml_tensor * tensor);
     LM_GGML_API void lm_ggml_build_backward_expand(struct lm_ggml_context * ctx, struct lm_ggml_cgraph * gf, struct lm_ggml_cgraph * gb, bool accumulate);
 
```
```diff
@@ -2095,27 +2024,6 @@ extern "C" {
     LM_GGML_API size_t lm_ggml_graph_overhead(void);
     LM_GGML_API size_t lm_ggml_graph_overhead_custom(size_t size, bool grads);
 
-    LM_GGML_API struct lm_ggml_threadpool_params lm_ggml_threadpool_params_default(int n_threads);
-    LM_GGML_API void lm_ggml_threadpool_params_init (struct lm_ggml_threadpool_params * p, int n_threads);
-    LM_GGML_API bool lm_ggml_threadpool_params_match (const struct lm_ggml_threadpool_params * p0, const struct lm_ggml_threadpool_params * p1);
-    LM_GGML_API struct lm_ggml_threadpool * lm_ggml_threadpool_new (struct lm_ggml_threadpool_params * params);
-    LM_GGML_API void lm_ggml_threadpool_free (struct lm_ggml_threadpool * threadpool);
-    LM_GGML_API int lm_ggml_threadpool_get_n_threads(struct lm_ggml_threadpool * threadpool);
-    LM_GGML_API void lm_ggml_threadpool_pause (struct lm_ggml_threadpool * threadpool);
-    LM_GGML_API void lm_ggml_threadpool_resume (struct lm_ggml_threadpool * threadpool);
-
-    // lm_ggml_graph_plan() has to be called before lm_ggml_graph_compute()
-    // when plan.work_size > 0, caller must allocate memory for plan.work_data
-    LM_GGML_API struct lm_ggml_cplan lm_ggml_graph_plan(
-                const struct lm_ggml_cgraph * cgraph,
-                int n_threads, /* = LM_GGML_DEFAULT_N_THREADS */
-                struct lm_ggml_threadpool * threadpool /* = NULL */ );
-    LM_GGML_API enum lm_ggml_status lm_ggml_graph_compute(struct lm_ggml_cgraph * cgraph, struct lm_ggml_cplan * cplan);
-
-    // same as lm_ggml_graph_compute() but the work data is allocated as a part of the context
-    // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
-    LM_GGML_API enum lm_ggml_status lm_ggml_graph_compute_with_ctx(struct lm_ggml_context * ctx, struct lm_ggml_cgraph * cgraph, int n_threads);
-
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_graph_get_tensor(struct lm_ggml_cgraph * cgraph, const char * name);
 
     LM_GGML_API void lm_ggml_graph_export(const struct lm_ggml_cgraph * cgraph, const char * fname);
```
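The threadpool API, lm_ggml_graph_plan(), lm_ggml_graph_compute() and lm_ggml_graph_compute_with_ctx() disappear from ggml.h in this hunk. Given the new ggml-cpu.h/ggml-cpu.c files in this release, they presumably move to the CPU backend header; the sketch below is written under that assumption (the "ggml-cpu.h" include and its contents are not confirmed by this diff), and everything else it uses is declared in ggml.h.

```c
// Sketch: building and running a small graph after the CPU-backend split.
// Assumption: the plan/compute entry points removed above now live in the new
// ggml-cpu.h rather than ggml.h.
#include "ggml.h"
#include "ggml-cpu.h" // assumed new home of lm_ggml_graph_compute_with_ctx()

static void run_graph_example(struct lm_ggml_context * ctx,
                              struct lm_ggml_tensor  * a,
                              struct lm_ggml_tensor  * b) {
    struct lm_ggml_tensor * c  = lm_ggml_add(ctx, a, b);
    struct lm_ggml_cgraph * gf = lm_ggml_new_graph(ctx);

    lm_ggml_build_forward_expand(gf, c);

    // The work buffer is carved out of ctx, so ctx needs enough headroom for it.
    lm_ggml_graph_compute_with_ctx(ctx, gf, /* n_threads */ 4);
}
```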
```diff
@@ -2286,6 +2194,8 @@ extern "C" {
         } lbfgs;
     };
 
+    LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_zero(struct lm_ggml_tensor * tensor);
+
     LM_GGML_API struct lm_ggml_opt_params lm_ggml_opt_default_params(enum lm_ggml_opt_type type);
 
     // optimize the function defined by the tensor f
```
```diff
@@ -2317,12 +2227,6 @@ extern "C" {
             lm_ggml_opt_callback callback,
             void * callback_data);
 
-    //
-    // tensor flags
-    //
-    LM_GGML_API void lm_ggml_set_input(struct lm_ggml_tensor * tensor);
-    LM_GGML_API void lm_ggml_set_output(struct lm_ggml_tensor * tensor);
-
     //
     // quantization
     //
```
```diff
@@ -2489,9 +2393,8 @@ extern "C" {
     LM_GGML_API int lm_ggml_cpu_has_avx512_vbmi(void);
     LM_GGML_API int lm_ggml_cpu_has_avx512_vnni(void);
     LM_GGML_API int lm_ggml_cpu_has_avx512_bf16(void);
+    LM_GGML_API int lm_ggml_cpu_has_amx_int8 (void);
     LM_GGML_API int lm_ggml_cpu_has_fma (void);
-    LM_GGML_API int lm_ggml_cpu_has_neon (void);
-    LM_GGML_API int lm_ggml_cpu_has_sve (void);
     LM_GGML_API int lm_ggml_cpu_has_arm_fma (void);
     LM_GGML_API int lm_ggml_cpu_has_metal (void);
     LM_GGML_API int lm_ggml_cpu_has_f16c (void);
```
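lm_ggml_cpu_has_amx_int8() is new in this block, while the NEON and SVE probes drop out of ggml.h here. A trivial sketch that calls only probes still declared in this hunk; the output formatting is arbitrary.

```c
// Sketch: querying the CPU feature probes that remain declared above,
// including the newly added AMX int8 probe.
#include <stdio.h>
#include "ggml.h"

static void print_cpu_features(void) {
    fprintf(stderr, "AVX512-BF16: %d\n", lm_ggml_cpu_has_avx512_bf16());
    fprintf(stderr, "AMX-INT8:    %d\n", lm_ggml_cpu_has_amx_int8()); // new in this release
    fprintf(stderr, "FMA:         %d\n", lm_ggml_cpu_has_fma());
    fprintf(stderr, "Metal:       %d\n", lm_ggml_cpu_has_metal());
}
```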
```diff
@@ -2508,17 +2411,9 @@ extern "C" {
     LM_GGML_API int lm_ggml_cpu_has_sycl (void);
     LM_GGML_API int lm_ggml_cpu_has_rpc (void);
     LM_GGML_API int lm_ggml_cpu_has_vsx (void);
-    LM_GGML_API int lm_ggml_cpu_has_matmul_int8(void);
     LM_GGML_API int lm_ggml_cpu_has_cann (void);
     LM_GGML_API int lm_ggml_cpu_has_llamafile (void);
 
-    // get the sve vector length in bytes
-    LM_GGML_API int lm_ggml_cpu_get_sve_cnt(void);
-
-    //
-    // Internal types and functions exposed for tests and benchmarks
-    //
-
 #ifdef __cplusplus
 // restrict not standard in C++
 #define LM_GGML_RESTRICT
```
```diff
@@ -2527,14 +2422,6 @@ extern "C" {
 #endif
     typedef void (*lm_ggml_to_float_t) (const void * LM_GGML_RESTRICT x, float * LM_GGML_RESTRICT y, int64_t k);
     typedef void (*lm_ggml_from_float_t)(const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t k);
-    typedef void (*lm_ggml_from_float_to_mat_t)
-                  (const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t nr, int64_t k, int64_t bs);
-    typedef void (*lm_ggml_vec_dot_t) (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x, size_t bx,
-                  const void * LM_GGML_RESTRICT y, size_t by, int nrc);
-    typedef void (*lm_ggml_gemv_t) (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x,
-                  const void * LM_GGML_RESTRICT y, int nr, int nc);
-    typedef void (*lm_ggml_gemm_t) (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x,
-                  const void * LM_GGML_RESTRICT y, int nr, int nc);
 
     struct lm_ggml_type_traits {
         const char * type_name;
```
```diff
@@ -2545,13 +2432,6 @@ extern "C" {
         lm_ggml_to_float_t to_float;
         lm_ggml_from_float_t from_float;
         lm_ggml_from_float_t from_float_ref;
-        lm_ggml_from_float_to_mat_t from_float_to_mat;
-        lm_ggml_vec_dot_t vec_dot;
-        enum lm_ggml_type vec_dot_type;
-        int64_t nrows; // number of rows to process simultaneously
-        int64_t ncols; // number of columns to process simultaneously
-        lm_ggml_gemv_t gemv;
-        lm_ggml_gemm_t gemm;
     };
 
     LM_GGML_API const struct lm_ggml_type_traits * lm_ggml_get_type_traits(enum lm_ggml_type type);
```
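The final hunks strip the dot-product and GEMM function pointers (from_float_to_mat, vec_dot, gemv, gemm) out of struct lm_ggml_type_traits, leaving the type name and conversion helpers. A small sketch reading the remaining fields through lm_ggml_get_type_traits(); only fields visible above are touched, and the printed wording is arbitrary.

```c
// Sketch: reading the slimmed-down type traits via lm_ggml_get_type_traits().
#include <stdio.h>
#include "ggml.h"

static void inspect_type(enum lm_ggml_type type) {
    const struct lm_ggml_type_traits * traits = lm_ggml_get_type_traits(type);

    fprintf(stderr, "type %s: %s dequantize to float\n",
            traits->type_name,
            traits->to_float ? "can" : "cannot");
}

// e.g. inspect_type(LM_GGML_TYPE_Q4_0);
```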