cui-llama.rn 1.3.0 → 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/android/src/main/CMakeLists.txt +6 -1
  2. package/android/src/main/jni.cpp +6 -6
  3. package/cpp/amx/amx.cpp +196 -0
  4. package/cpp/amx/amx.h +20 -0
  5. package/cpp/amx/common.h +101 -0
  6. package/cpp/amx/mmq.cpp +2524 -0
  7. package/cpp/amx/mmq.h +16 -0
  8. package/cpp/common.cpp +1981 -1682
  9. package/cpp/common.h +636 -600
  10. package/cpp/ggml-aarch64.c +129 -129
  11. package/cpp/ggml-aarch64.h +19 -19
  12. package/cpp/ggml-alloc.c +1038 -1040
  13. package/cpp/ggml-alloc.h +76 -76
  14. package/cpp/ggml-backend-impl.h +238 -216
  15. package/cpp/ggml-backend-reg.cpp +423 -195
  16. package/cpp/ggml-backend.cpp +1999 -1997
  17. package/cpp/ggml-backend.h +351 -328
  18. package/cpp/ggml-common.h +1859 -1853
  19. package/cpp/ggml-cpp.h +38 -38
  20. package/cpp/ggml-cpu-aarch64.c +3823 -3560
  21. package/cpp/ggml-cpu-aarch64.h +32 -30
  22. package/cpp/ggml-cpu-impl.h +386 -371
  23. package/cpp/ggml-cpu-quants.c +10835 -10822
  24. package/cpp/ggml-cpu-quants.h +63 -63
  25. package/cpp/ggml-cpu.c +99 -103
  26. package/cpp/ggml-cpu.cpp +69 -17
  27. package/cpp/ggml-cpu.h +152 -177
  28. package/cpp/ggml-impl.h +556 -550
  29. package/cpp/ggml-metal.h +66 -66
  30. package/cpp/ggml-metal.m +4426 -4294
  31. package/cpp/ggml-quants.c +5247 -5247
  32. package/cpp/ggml-quants.h +100 -100
  33. package/cpp/ggml-threading.cpp +12 -12
  34. package/cpp/ggml-threading.h +12 -12
  35. package/cpp/ggml.c +7618 -8180
  36. package/cpp/ggml.h +2255 -2411
  37. package/cpp/json-schema-to-grammar.cpp +1045 -0
  38. package/cpp/json-schema-to-grammar.h +8 -0
  39. package/cpp/json.hpp +24766 -0
  40. package/cpp/llama-grammar.cpp +1138 -1138
  41. package/cpp/llama-grammar.h +144 -144
  42. package/cpp/llama-impl.h +181 -181
  43. package/cpp/llama-sampling.cpp +2348 -2348
  44. package/cpp/llama-sampling.h +48 -48
  45. package/cpp/llama-vocab.cpp +1984 -1984
  46. package/cpp/llama-vocab.h +170 -170
  47. package/cpp/llama.cpp +22332 -22132
  48. package/cpp/llama.h +1259 -1253
  49. package/cpp/log.cpp +401 -401
  50. package/cpp/log.h +121 -121
  51. package/cpp/rn-llama.hpp +6 -6
  52. package/cpp/sampling.cpp +505 -466
  53. package/cpp/sampling.h +22 -1
  54. package/cpp/sgemm.cpp +1884 -1884
  55. package/cpp/speculative.cpp +270 -0
  56. package/cpp/speculative.h +28 -0
  57. package/cpp/unicode.cpp +11 -0
  58. package/ios/RNLlamaContext.mm +13 -0
  59. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  60. package/lib/commonjs/grammar.js +4 -2
  61. package/lib/commonjs/grammar.js.map +1 -1
  62. package/lib/commonjs/index.js.map +1 -1
  63. package/lib/module/NativeRNLlama.js.map +1 -1
  64. package/lib/module/grammar.js +2 -1
  65. package/lib/module/grammar.js.map +1 -1
  66. package/lib/module/index.js.map +1 -1
  67. package/lib/typescript/NativeRNLlama.d.ts +94 -4
  68. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  69. package/lib/typescript/grammar.d.ts +5 -6
  70. package/lib/typescript/grammar.d.ts.map +1 -1
  71. package/lib/typescript/index.d.ts +4 -2
  72. package/lib/typescript/index.d.ts.map +1 -1
  73. package/package.json +2 -1
  74. package/src/NativeRNLlama.ts +97 -10
  75. package/src/grammar.ts +10 -8
  76. package/src/index.ts +22 -1
package/cpp/ggml-cpu.cpp CHANGED
@@ -3,6 +3,7 @@
 #include "ggml-cpu.h"
 #include "ggml-cpu-aarch64.h"
 #include "ggml-impl.h"
+#include "amx/amx.h"
 #include <cctype>
 #include <string>
 #include <vector>
@@ -134,12 +135,16 @@ static lm_ggml_backend_buffer_type_t * lm_ggml_backend_cpu_get_extra_bufts(lm_gg
     static std::vector<lm_ggml_backend_buffer_type_t> bufts = []() {
         std::vector<lm_ggml_backend_buffer_type_t> bufts;
 
-#ifdef LM_GGML_USE_CPU_HBM
-        bufts.push_back(lm_ggml_backend_cpu_hbm_buffer_type());
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+        if (lm_ggml_backend_amx_buffer_type()) {
+            bufts.push_back(lm_ggml_backend_amx_buffer_type());
+        }
 #endif
 
 #ifdef LM_GGML_USE_CPU_AARCH64
-        bufts.push_back(lm_ggml_backend_cpu_aarch64_buffer_type());
+        if (lm_ggml_backend_cpu_aarch64_buffer_type()) {
+            bufts.push_back(lm_ggml_backend_cpu_aarch64_buffer_type());
+        }
 #endif
 
         bufts.push_back(NULL);
@@ -456,12 +461,27 @@ static bool lm_ggml_backend_cpu_device_supports_op(lm_ggml_backend_dev_t dev, co
     const struct lm_ggml_tensor * src0 = op->src[0];
     const struct lm_ggml_tensor * src1 = op->src[1];
 
+    if (op->op == LM_GGML_OP_NONE || op->op == LM_GGML_OP_RESHAPE || op->op == LM_GGML_OP_VIEW || op->op == LM_GGML_OP_PERMUTE || op->op == LM_GGML_OP_TRANSPOSE) {
+        return true;
+    }
+
     if (src0 && src0->buffer && lm_ggml_backend_cpu_buft_is_aarch64(src0->buffer->buft)) {
-        if (op->op != LM_GGML_OP_MUL_MAT || src0->type != LM_GGML_TYPE_Q4_0 || lm_ggml_aarch64_get_optimal_repack_type(src0) == LM_GGML_TYPE_Q4_0) {
+        if (op->op != LM_GGML_OP_MUL_MAT || src0->type == lm_ggml_aarch64_get_optimal_repack_type(src0)) {
             return false;
         }
     }
 
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+    if (src0 && src0->buffer && lm_ggml_backend_amx_buft_is_amx(src0->buffer->buft)) {
+        return lm_ggml_backend_amx_device_supports_op(op);
+    }
+    for (int i = 1; i < LM_GGML_MAX_SRC; i++) {
+        if (op->src[i] && op->src[i]->buffer && lm_ggml_backend_amx_buft_is_amx(op->src[i]->buffer->buft)) {
+            return false;
+        }
+    }
+#endif
+
     for (int i = 1; i < LM_GGML_MAX_SRC; i++) {
         if (op->src[i] && op->src[i]->buffer && lm_ggml_backend_cpu_buft_is_aarch64(op->src[i]->buffer->buft)) {
             return false;
@@ -491,7 +511,13 @@ static bool lm_ggml_backend_cpu_device_supports_op(lm_ggml_backend_dev_t dev, co
 }
 
 static bool lm_ggml_backend_cpu_device_supports_buft(lm_ggml_backend_dev_t dev, lm_ggml_backend_buffer_type_t buft) {
-    return lm_ggml_backend_buft_is_host(buft) || lm_ggml_backend_cpu_buft_is_aarch64(buft);
+    bool supported = lm_ggml_backend_buft_is_host(buft) || lm_ggml_backend_cpu_buft_is_aarch64(buft);
+
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+    supported = supported || lm_ggml_backend_amx_buft_is_amx(buft);
+#endif
+
+    return supported;
 
     LM_GGML_UNUSED(dev);
 }
@@ -541,16 +567,12 @@ static lm_ggml_backend_dev_t lm_ggml_backend_cpu_reg_get_device(lm_ggml_backend_
     return &lm_ggml_backend_cpu_device;
 }
 
-struct lm_ggml_backend_feature {
-    const char * name;
-    const char * value;
-};
-
-// Not used yet
 // This is intended to replace the the lm_ggml_cpu_has_* functions when loading the CPU backend dynamically,
-// and additionally to allow other backends to expose their own list of features that applications can query using the same API.
+// and additionally to allow other backends to expose their own list of features that applications can query using the same API
 static lm_ggml_backend_feature * lm_ggml_backend_cpu_get_features(lm_ggml_backend_reg_t reg) {
     static std::vector<lm_ggml_backend_feature> features = []() {
+        lm_ggml_cpu_init();
+
         std::vector<lm_ggml_backend_feature> features;
         if (lm_ggml_cpu_has_sse3()) {
             features.push_back({ "SSE3", "1" });
@@ -561,6 +583,9 @@ static lm_ggml_backend_feature * lm_ggml_backend_cpu_get_features(lm_ggml_backen
         if (lm_ggml_cpu_has_avx()) {
             features.push_back({ "AVX", "1" });
         }
+        if (lm_ggml_cpu_has_avx_vnni()) {
+            features.push_back({ "AVX_VNNI", "1" });
+        }
         if (lm_ggml_cpu_has_avx2()) {
             features.push_back({ "AVX2", "1" });
         }
@@ -570,9 +595,6 @@ static lm_ggml_backend_feature * lm_ggml_backend_cpu_get_features(lm_ggml_backen
         if (lm_ggml_cpu_has_fma()) {
             features.push_back({ "FMA", "1" });
         }
-        if (lm_ggml_cpu_has_avx_vnni()) {
-            features.push_back({ "AVX_VNNI", "1" });
-        }
         if (lm_ggml_cpu_has_avx512()) {
             features.push_back({ "AVX512", "1" });
         }
@@ -619,6 +641,10 @@ static lm_ggml_backend_feature * lm_ggml_backend_cpu_get_features(lm_ggml_backen
         if (lm_ggml_cpu_has_llamafile()) {
             features.push_back({ "LLAMAFILE", "1" });
         }
+        // TODO: rename this
+    #ifdef LM_GGML_USE_CPU_AARCH64
+        features.push_back({ "AARCH64_REPACK", "1" });
+    #endif
 
         features.push_back({ nullptr, nullptr });
 
@@ -637,6 +663,29 @@ static void * lm_ggml_backend_cpu_get_proc_address(lm_ggml_backend_reg_t reg, co
     if (strcmp(name, "lm_ggml_backend_dev_get_extra_bufts") == 0) {
        return (void *)lm_ggml_backend_cpu_get_extra_bufts;
    }
+    if (strcmp(name, "lm_ggml_backend_get_features") == 0) {
+        return (void *)lm_ggml_backend_cpu_get_features;
+    }
+    if (strcmp(name, "lm_ggml_backend_set_abort_callback") == 0) {
+        return (void *)lm_ggml_backend_cpu_set_abort_callback;
+    }
+    if (strcmp(name, "lm_ggml_backend_cpu_numa_init") == 0) {
+        return (void *)lm_ggml_numa_init;
+    }
+    if (strcmp(name, "lm_ggml_backend_cpu_is_numa") == 0) {
+        return (void *)lm_ggml_is_numa;
+    }
+
+    // threadpool - TODO: move to ggml-base
+    if (strcmp(name, "lm_ggml_threadpool_new") == 0) {
+        return (void *)lm_ggml_threadpool_new;
+    }
+    if (strcmp(name, "lm_ggml_threadpool_free") == 0) {
+        return (void *)lm_ggml_threadpool_free;
+    }
+    if (strcmp(name, "lm_ggml_backend_cpu_set_threadpool") == 0) {
+        return (void *)lm_ggml_backend_cpu_set_threadpool;
+    }
 
     return NULL;
 
@@ -655,9 +704,12 @@ lm_ggml_backend_reg_t lm_ggml_backend_cpu_reg(void) {
     lm_ggml_cpu_init();
 
     static struct lm_ggml_backend_reg lm_ggml_backend_cpu_reg = {
-        /* .iface   = */ lm_ggml_backend_cpu_reg_i,
-        /* .context = */ NULL,
+        /* .api_version = */ LM_GGML_BACKEND_API_VERSION,
+        /* .iface       = */ lm_ggml_backend_cpu_reg_i,
+        /* .context     = */ NULL,
     };
 
     return &lm_ggml_backend_cpu_reg;
 }
+
+LM_GGML_BACKEND_DL_IMPL(lm_ggml_backend_cpu_reg)
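
Note: with this change the CPU backend exports its entry points by name through lm_ggml_backend_cpu_get_proc_address(), and LM_GGML_BACKEND_DL_IMPL(...) makes the registration loadable as a dynamic module. A minimal sketch of how a caller might query the newly exported feature list, assuming struct lm_ggml_backend_feature and the lm_ggml_backend_reg_get_proc_address() accessor are declared in ggml-backend.h (both live outside this file, so treat them as assumptions; print_cpu_features is a hypothetical helper):

// Sketch only - not part of the package. Resolves the feature-list entry
// point by name and walks the { nullptr, nullptr }-terminated array.
#include <cstdio>
#include "ggml-backend.h"
#include "ggml-cpu.h"

typedef lm_ggml_backend_feature * (*lm_get_features_t)(lm_ggml_backend_reg_t);

static void print_cpu_features(void) { // hypothetical helper
    lm_ggml_backend_reg_t reg = lm_ggml_backend_cpu_reg();
    lm_get_features_t get_features = (lm_get_features_t)
        lm_ggml_backend_reg_get_proc_address(reg, "lm_ggml_backend_get_features"); // assumed accessor
    if (get_features == NULL) {
        return; // this build does not export the entry point
    }
    for (lm_ggml_backend_feature * f = get_features(reg); f->name != nullptr; f++) {
        printf("%s = %s\n", f->name, f->value);
    }
}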
package/cpp/ggml-cpu.h CHANGED
@@ -1,177 +1,152 @@
-#pragma once
-
-#include "ggml.h"
-#include "ggml-backend.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Scheduling priorities
-enum lm_ggml_sched_priority {
-    LM_GGML_SCHED_PRIO_NORMAL,
-    LM_GGML_SCHED_PRIO_MEDIUM,
-    LM_GGML_SCHED_PRIO_HIGH,
-    LM_GGML_SCHED_PRIO_REALTIME
-};
-
-// Threadpool params
-// Use lm_ggml_threadpool_params_default() or lm_ggml_threadpool_params_init() to populate the defaults
-struct lm_ggml_threadpool_params {
-    bool cpumask[LM_GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings)
-    int n_threads;                       // number of threads
-    enum lm_ggml_sched_priority prio;    // thread priority
-    uint32_t poll;                       // polling level (0 - no polling, 100 - aggressive polling)
-    bool strict_cpu;                     // strict cpu placement
-    bool paused;                         // start in paused state
-};
-
-struct lm_ggml_threadpool; // forward declaration, see ggml.c
-
-typedef struct lm_ggml_threadpool * lm_ggml_threadpool_t;
-
-// the compute plan that needs to be prepared for lm_ggml_graph_compute()
-// since https://github.com/ggerganov/ggml/issues/287
-struct lm_ggml_cplan {
-    size_t work_size;    // size of work buffer, calculated by `lm_ggml_graph_plan()`
-    uint8_t * work_data; // work buffer, to be allocated by caller before calling to `lm_ggml_graph_compute()`
-
-    int n_threads;
-    struct lm_ggml_threadpool * threadpool;
-
-    // abort lm_ggml_graph_compute when true
-    lm_ggml_abort_callback abort_callback;
-    void * abort_callback_data;
-};
-
-// numa strategies
-enum lm_ggml_numa_strategy {
-    LM_GGML_NUMA_STRATEGY_DISABLED   = 0,
-    LM_GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
-    LM_GGML_NUMA_STRATEGY_ISOLATE    = 2,
-    LM_GGML_NUMA_STRATEGY_NUMACTL    = 3,
-    LM_GGML_NUMA_STRATEGY_MIRROR     = 4,
-    LM_GGML_NUMA_STRATEGY_COUNT
-};
-
-LM_GGML_BACKEND_API void lm_ggml_numa_init(enum lm_ggml_numa_strategy numa); // call once for better performance on NUMA systems
-LM_GGML_BACKEND_API bool lm_ggml_is_numa(void); // true if init detected that system has >1 NUMA node
-
-LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_new_i32(struct lm_ggml_context * ctx, int32_t value);
-LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_new_f32(struct lm_ggml_context * ctx, float value);
-
-LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_set_i32 (struct lm_ggml_tensor * tensor, int32_t value);
-LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_set_f32 (struct lm_ggml_tensor * tensor, float value);
-
-LM_GGML_BACKEND_API int32_t lm_ggml_get_i32_1d(const struct lm_ggml_tensor * tensor, int i);
-LM_GGML_BACKEND_API void    lm_ggml_set_i32_1d(const struct lm_ggml_tensor * tensor, int i, int32_t value);
-
-LM_GGML_BACKEND_API int32_t lm_ggml_get_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
-LM_GGML_BACKEND_API void    lm_ggml_set_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
-
-LM_GGML_BACKEND_API float lm_ggml_get_f32_1d(const struct lm_ggml_tensor * tensor, int i);
-LM_GGML_BACKEND_API void  lm_ggml_set_f32_1d(const struct lm_ggml_tensor * tensor, int i, float value);
-
-LM_GGML_BACKEND_API float lm_ggml_get_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
-LM_GGML_BACKEND_API void  lm_ggml_set_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
-
-LM_GGML_BACKEND_API struct lm_ggml_threadpool_params lm_ggml_threadpool_params_default(int n_threads);
-LM_GGML_BACKEND_API void lm_ggml_threadpool_params_init (struct lm_ggml_threadpool_params * p, int n_threads);
-LM_GGML_BACKEND_API bool lm_ggml_threadpool_params_match (const struct lm_ggml_threadpool_params * p0, const struct lm_ggml_threadpool_params * p1);
-LM_GGML_BACKEND_API struct lm_ggml_threadpool * lm_ggml_threadpool_new (struct lm_ggml_threadpool_params * params);
-LM_GGML_BACKEND_API void lm_ggml_threadpool_free (struct lm_ggml_threadpool * threadpool);
-LM_GGML_BACKEND_API int  lm_ggml_threadpool_get_n_threads(struct lm_ggml_threadpool * threadpool);
-LM_GGML_BACKEND_API void lm_ggml_threadpool_pause (struct lm_ggml_threadpool * threadpool);
-LM_GGML_BACKEND_API void lm_ggml_threadpool_resume (struct lm_ggml_threadpool * threadpool);
-
-// lm_ggml_graph_plan() has to be called before lm_ggml_graph_compute()
-// when plan.work_size > 0, caller must allocate memory for plan.work_data
-LM_GGML_BACKEND_API struct lm_ggml_cplan lm_ggml_graph_plan(
-    const struct lm_ggml_cgraph * cgraph,
-    int n_threads, /* = LM_GGML_DEFAULT_N_THREADS */
-    struct lm_ggml_threadpool * threadpool /* = NULL */ );
-LM_GGML_BACKEND_API enum lm_ggml_status lm_ggml_graph_compute(struct lm_ggml_cgraph * cgraph, struct lm_ggml_cplan * cplan);
-
-// same as lm_ggml_graph_compute() but the work data is allocated as a part of the context
-// note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
-LM_GGML_BACKEND_API enum lm_ggml_status lm_ggml_graph_compute_with_ctx(struct lm_ggml_context * ctx, struct lm_ggml_cgraph * cgraph, int n_threads);
-
-//
-// system info
-//
-
-// x86
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_sse3       (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_ssse3      (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx        (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx2       (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_f16c       (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_fma        (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx_vnni   (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512     (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_vbmi(void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_vnni(void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_bf16(void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_amx_int8   (void);
-// ARM
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_neon       (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_arm_fma    (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_fp16_va    (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_matmul_int8(void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_sve        (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_get_sve_cnt    (void); // sve vector length in bytes
-// other
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_riscv_v    (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_vsx        (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_wasm_simd  (void);
-LM_GGML_BACKEND_API int lm_ggml_cpu_has_llamafile  (void);
-
-// Internal types and functions exposed for tests and benchmarks
-
-typedef void (*lm_ggml_from_float_to_mat_t)
-                                   (const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t nr, int64_t k, int64_t bs);
-typedef void (*lm_ggml_vec_dot_t)  (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x, size_t bx,
-                                    const void * LM_GGML_RESTRICT y, size_t by, int nrc);
-typedef void (*lm_ggml_gemv_t)     (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x,
-                                    const void * LM_GGML_RESTRICT y, int nr, int nc);
-typedef void (*lm_ggml_gemm_t)     (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x,
-                                    const void * LM_GGML_RESTRICT y, int nr, int nc);
-
-struct lm_ggml_type_traits_cpu {
-    lm_ggml_from_float_t        from_float;
-    lm_ggml_from_float_to_mat_t from_float_to_mat;
-    lm_ggml_vec_dot_t           vec_dot;
-    enum lm_ggml_type           vec_dot_type;
-    int64_t                     nrows; // number of rows to process simultaneously
-    int64_t                     ncols; // number of columns to process simultaneously
-    lm_ggml_gemv_t              gemv;
-    lm_ggml_gemm_t              gemm;
-};
-
-LM_GGML_BACKEND_API const struct lm_ggml_type_traits_cpu * lm_ggml_get_type_traits_cpu(enum lm_ggml_type type);
-
-LM_GGML_BACKEND_API void lm_ggml_cpu_init(void);
-
-//
-// CPU backend
-//
-
-LM_GGML_BACKEND_API lm_ggml_backend_t lm_ggml_backend_cpu_init(void);
-
-LM_GGML_BACKEND_API bool lm_ggml_backend_is_cpu                (lm_ggml_backend_t backend);
-LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_n_threads     (lm_ggml_backend_t backend_cpu, int n_threads);
-LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_threadpool    (lm_ggml_backend_t backend_cpu, lm_ggml_threadpool_t threadpool);
-LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_abort_callback(lm_ggml_backend_t backend_cpu, lm_ggml_abort_callback abort_callback, void * abort_callback_data);
-
-LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_cpu_reg(void);
-
-#ifdef LM_GGML_USE_CPU_HBM
-LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_hbm_buffer_type(void);
-#endif
-
-LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_aarch64_buffer_type(void);
-LM_GGML_BACKEND_API bool lm_ggml_backend_cpu_buft_is_aarch64(lm_ggml_backend_buffer_type_t buft);
-
-#ifdef __cplusplus
-}
-#endif
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// the compute plan that needs to be prepared for lm_ggml_graph_compute()
+// since https://github.com/ggerganov/ggml/issues/287
+struct lm_ggml_cplan {
+    size_t work_size;    // size of work buffer, calculated by `lm_ggml_graph_plan()`
+    uint8_t * work_data; // work buffer, to be allocated by caller before calling to `lm_ggml_graph_compute()`
+
+    int n_threads;
+    struct lm_ggml_threadpool * threadpool;
+
+    // abort lm_ggml_graph_compute when true
+    lm_ggml_abort_callback abort_callback;
+    void * abort_callback_data;
+};
+
+// numa strategies
+enum lm_ggml_numa_strategy {
+    LM_GGML_NUMA_STRATEGY_DISABLED   = 0,
+    LM_GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
+    LM_GGML_NUMA_STRATEGY_ISOLATE    = 2,
+    LM_GGML_NUMA_STRATEGY_NUMACTL    = 3,
+    LM_GGML_NUMA_STRATEGY_MIRROR     = 4,
+    LM_GGML_NUMA_STRATEGY_COUNT
+};
+
+LM_GGML_BACKEND_API void lm_ggml_numa_init(enum lm_ggml_numa_strategy numa); // call once for better performance on NUMA systems
+LM_GGML_BACKEND_API bool lm_ggml_is_numa(void); // true if init detected that system has >1 NUMA node
+
+LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_new_i32(struct lm_ggml_context * ctx, int32_t value);
+LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_new_f32(struct lm_ggml_context * ctx, float value);
+
+LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_set_i32 (struct lm_ggml_tensor * tensor, int32_t value);
+LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_set_f32 (struct lm_ggml_tensor * tensor, float value);
+
+LM_GGML_BACKEND_API int32_t lm_ggml_get_i32_1d(const struct lm_ggml_tensor * tensor, int i);
+LM_GGML_BACKEND_API void    lm_ggml_set_i32_1d(const struct lm_ggml_tensor * tensor, int i, int32_t value);
+
+LM_GGML_BACKEND_API int32_t lm_ggml_get_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+LM_GGML_BACKEND_API void    lm_ggml_set_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+
+LM_GGML_BACKEND_API float lm_ggml_get_f32_1d(const struct lm_ggml_tensor * tensor, int i);
+LM_GGML_BACKEND_API void  lm_ggml_set_f32_1d(const struct lm_ggml_tensor * tensor, int i, float value);
+
+LM_GGML_BACKEND_API float lm_ggml_get_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+LM_GGML_BACKEND_API void  lm_ggml_set_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+
+LM_GGML_BACKEND_API struct lm_ggml_threadpool * lm_ggml_threadpool_new          (struct lm_ggml_threadpool_params * params);
+LM_GGML_BACKEND_API void                        lm_ggml_threadpool_free         (struct lm_ggml_threadpool * threadpool);
+LM_GGML_BACKEND_API int                         lm_ggml_threadpool_get_n_threads(struct lm_ggml_threadpool * threadpool);
+LM_GGML_BACKEND_API void                        lm_ggml_threadpool_pause        (struct lm_ggml_threadpool * threadpool);
+LM_GGML_BACKEND_API void                        lm_ggml_threadpool_resume       (struct lm_ggml_threadpool * threadpool);
+
+// lm_ggml_graph_plan() has to be called before lm_ggml_graph_compute()
+// when plan.work_size > 0, caller must allocate memory for plan.work_data
+LM_GGML_BACKEND_API struct lm_ggml_cplan lm_ggml_graph_plan(
+    const struct lm_ggml_cgraph * cgraph,
+    int n_threads, /* = LM_GGML_DEFAULT_N_THREADS */
+    struct lm_ggml_threadpool * threadpool /* = NULL */ );
+LM_GGML_BACKEND_API enum lm_ggml_status lm_ggml_graph_compute(struct lm_ggml_cgraph * cgraph, struct lm_ggml_cplan * cplan);
+
+// same as lm_ggml_graph_compute() but the work data is allocated as a part of the context
+// note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
+LM_GGML_BACKEND_API enum lm_ggml_status lm_ggml_graph_compute_with_ctx(struct lm_ggml_context * ctx, struct lm_ggml_cgraph * cgraph, int n_threads);
+
+//
+// system info
+//
+
+// x86
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_sse3       (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_ssse3      (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx        (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx_vnni   (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx2       (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_f16c       (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_fma        (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512     (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_vbmi(void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_vnni(void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_bf16(void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_amx_int8   (void);
+// ARM
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_neon       (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_arm_fma    (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_fp16_va    (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_dotprod    (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_matmul_int8(void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_sve        (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_get_sve_cnt    (void); // sve vector length in bytes
+// other
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_riscv_v    (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_vsx        (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_wasm_simd  (void);
+LM_GGML_BACKEND_API int lm_ggml_cpu_has_llamafile  (void);
+
+// Internal types and functions exposed for tests and benchmarks
+
+typedef void (*lm_ggml_from_float_to_mat_t)
+                                   (const float * LM_GGML_RESTRICT x, void * LM_GGML_RESTRICT y, int64_t nr, int64_t k, int64_t bs);
+typedef void (*lm_ggml_vec_dot_t)  (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x, size_t bx,
+                                    const void * LM_GGML_RESTRICT y, size_t by, int nrc);
+typedef void (*lm_ggml_gemv_t)     (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x,
+                                    const void * LM_GGML_RESTRICT y, int nr, int nc);
+typedef void (*lm_ggml_gemm_t)     (int n, float * LM_GGML_RESTRICT s, size_t bs, const void * LM_GGML_RESTRICT x,
+                                    const void * LM_GGML_RESTRICT y, int nr, int nc);
+
+struct lm_ggml_type_traits_cpu {
+    lm_ggml_from_float_t        from_float;
+    lm_ggml_from_float_to_mat_t from_float_to_mat;
+    lm_ggml_vec_dot_t           vec_dot;
+    enum lm_ggml_type           vec_dot_type;
+    int64_t                     nrows; // number of rows to process simultaneously
+    int64_t                     ncols; // number of columns to process simultaneously
+    lm_ggml_gemv_t              gemv;
+    lm_ggml_gemm_t              gemm;
+};
+
+LM_GGML_BACKEND_API const struct lm_ggml_type_traits_cpu * lm_ggml_get_type_traits_cpu(enum lm_ggml_type type);
+
+LM_GGML_BACKEND_API void lm_ggml_cpu_init(void);
+
+//
+// CPU backend
+//
+
+LM_GGML_BACKEND_API lm_ggml_backend_t lm_ggml_backend_cpu_init(void);
+
+LM_GGML_BACKEND_API bool lm_ggml_backend_is_cpu                (lm_ggml_backend_t backend);
+LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_n_threads     (lm_ggml_backend_t backend_cpu, int n_threads);
+LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_threadpool    (lm_ggml_backend_t backend_cpu, lm_ggml_threadpool_t threadpool);
+LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_abort_callback(lm_ggml_backend_t backend_cpu, lm_ggml_abort_callback abort_callback, void * abort_callback_data);
+
+LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_cpu_reg(void);
+
+#ifdef LM_GGML_USE_CPU_HBM
+LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_hbm_buffer_type(void);
+#endif
+
+LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_aarch64_buffer_type(void);
+LM_GGML_BACKEND_API bool lm_ggml_backend_cpu_buft_is_aarch64(lm_ggml_backend_buffer_type_t buft);
+
+#ifdef __cplusplus
+}
+#endif
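
Note: the scheduling-priority enum, struct lm_ggml_threadpool_params, and the lm_ggml_threadpool_params_* helpers are gone from this header in 1.3.3; judging by the paired changes to package/cpp/ggml.h in the file list above they most likely moved there, but that is an inference, not something this hunk shows. A minimal sketch of driving graph computation with an explicit threadpool under that assumption (run_graph is a hypothetical helper; graph construction is elided):

// Sketch only - assumes lm_ggml_threadpool_params_default() and
// struct lm_ggml_threadpool_params are now declared in ggml.h.
#include <cstdint>
#include <vector>
#include "ggml.h"
#include "ggml-cpu.h"

static enum lm_ggml_status run_graph(struct lm_ggml_cgraph * graph) { // hypothetical helper
    struct lm_ggml_threadpool_params tpp = lm_ggml_threadpool_params_default(4);
    struct lm_ggml_threadpool * tp = lm_ggml_threadpool_new(&tpp);

    // lm_ggml_graph_plan() has to be called before lm_ggml_graph_compute();
    // when plan.work_size > 0 the caller must allocate plan.work_data
    struct lm_ggml_cplan plan = lm_ggml_graph_plan(graph, lm_ggml_threadpool_get_n_threads(tp), tp);
    std::vector<uint8_t> work(plan.work_size);
    plan.work_data = work.data();

    enum lm_ggml_status status = lm_ggml_graph_compute(graph, &plan);

    lm_ggml_threadpool_free(tp);
    return status;
}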