cui-llama.rn 1.2.6 → 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/README.md +3 -2
  2. package/android/src/main/CMakeLists.txt +26 -6
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +115 -27
  4. package/android/src/main/java/com/rnllama/RNLlama.java +40 -7
  5. package/android/src/main/jni.cpp +228 -40
  6. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +9 -4
  7. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +9 -4
  8. package/cpp/amx/amx.cpp +196 -0
  9. package/cpp/amx/amx.h +20 -0
  10. package/cpp/amx/common.h +101 -0
  11. package/cpp/amx/mmq.cpp +2524 -0
  12. package/cpp/amx/mmq.h +16 -0
  13. package/cpp/common.cpp +118 -251
  14. package/cpp/common.h +53 -30
  15. package/cpp/ggml-aarch64.c +46 -3395
  16. package/cpp/ggml-aarch64.h +0 -20
  17. package/cpp/ggml-alloc.c +6 -8
  18. package/cpp/ggml-backend-impl.h +33 -11
  19. package/cpp/ggml-backend-reg.cpp +423 -0
  20. package/cpp/ggml-backend.cpp +14 -676
  21. package/cpp/ggml-backend.h +46 -9
  22. package/cpp/ggml-common.h +6 -0
  23. package/cpp/ggml-cpu-aarch64.c +3823 -0
  24. package/cpp/ggml-cpu-aarch64.h +32 -0
  25. package/cpp/ggml-cpu-impl.h +14 -242
  26. package/cpp/ggml-cpu-quants.c +10835 -0
  27. package/cpp/ggml-cpu-quants.h +63 -0
  28. package/cpp/ggml-cpu.c +13971 -13720
  29. package/cpp/ggml-cpu.cpp +715 -0
  30. package/cpp/ggml-cpu.h +65 -63
  31. package/cpp/ggml-impl.h +285 -25
  32. package/cpp/ggml-metal.h +8 -8
  33. package/cpp/ggml-metal.m +1221 -728
  34. package/cpp/ggml-quants.c +189 -10681
  35. package/cpp/ggml-quants.h +78 -125
  36. package/cpp/ggml-threading.cpp +12 -0
  37. package/cpp/ggml-threading.h +12 -0
  38. package/cpp/ggml.c +688 -1460
  39. package/cpp/ggml.h +58 -244
  40. package/cpp/json-schema-to-grammar.cpp +1045 -1045
  41. package/cpp/json.hpp +24766 -24766
  42. package/cpp/llama-sampling.cpp +5 -2
  43. package/cpp/llama.cpp +409 -123
  44. package/cpp/llama.h +8 -4
  45. package/cpp/rn-llama.hpp +89 -25
  46. package/cpp/sampling.cpp +42 -3
  47. package/cpp/sampling.h +22 -1
  48. package/cpp/sgemm.cpp +608 -0
  49. package/cpp/speculative.cpp +270 -0
  50. package/cpp/speculative.h +28 -0
  51. package/cpp/unicode.cpp +11 -0
  52. package/ios/RNLlama.mm +43 -20
  53. package/ios/RNLlamaContext.h +9 -3
  54. package/ios/RNLlamaContext.mm +146 -33
  55. package/jest/mock.js +0 -1
  56. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  57. package/lib/commonjs/grammar.js +4 -2
  58. package/lib/commonjs/grammar.js.map +1 -1
  59. package/lib/commonjs/index.js +52 -15
  60. package/lib/commonjs/index.js.map +1 -1
  61. package/lib/module/NativeRNLlama.js.map +1 -1
  62. package/lib/module/grammar.js +2 -1
  63. package/lib/module/grammar.js.map +1 -1
  64. package/lib/module/index.js +51 -15
  65. package/lib/module/index.js.map +1 -1
  66. package/lib/typescript/NativeRNLlama.d.ts +122 -8
  67. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  68. package/lib/typescript/grammar.d.ts +5 -6
  69. package/lib/typescript/grammar.d.ts.map +1 -1
  70. package/lib/typescript/index.d.ts +15 -6
  71. package/lib/typescript/index.d.ts.map +1 -1
  72. package/package.json +2 -1
  73. package/src/NativeRNLlama.ts +135 -13
  74. package/src/grammar.ts +10 -8
  75. package/src/index.ts +104 -28
package/cpp/ggml-cpu.h CHANGED
@@ -7,29 +7,6 @@
 extern "C" {
 #endif

-    // Scheduling priorities
-    enum lm_ggml_sched_priority {
-        LM_GGML_SCHED_PRIO_NORMAL,
-        LM_GGML_SCHED_PRIO_MEDIUM,
-        LM_GGML_SCHED_PRIO_HIGH,
-        LM_GGML_SCHED_PRIO_REALTIME
-    };
-
-    // Threadpool params
-    // Use lm_ggml_threadpool_params_default() or lm_ggml_threadpool_params_init() to populate the defaults
-    struct lm_ggml_threadpool_params {
-        bool cpumask[LM_GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings)
-        int n_threads;                       // number of threads
-        enum lm_ggml_sched_priority prio;    // thread priority
-        uint32_t poll;                       // polling level (0 - no polling, 100 - aggressive polling)
-        bool strict_cpu;                     // strict cpu placement
-        bool paused;                         // start in paused state
-    };
-
-    struct lm_ggml_threadpool; // forward declaration, see ggml.c
-
-    typedef struct lm_ggml_threadpool * lm_ggml_threadpool_t;
-
     // the compute plan that needs to be prepared for lm_ggml_graph_compute()
     // since https://github.com/ggerganov/ggml/issues/287
     struct lm_ggml_cplan {
@@ -54,54 +31,75 @@ extern "C" {
         LM_GGML_NUMA_STRATEGY_COUNT
     };

-    LM_GGML_API void lm_ggml_numa_init(enum lm_ggml_numa_strategy numa); // call once for better performance on NUMA systems
-    LM_GGML_API bool lm_ggml_is_numa(void); // true if init detected that system has >1 NUMA node
+    LM_GGML_BACKEND_API void lm_ggml_numa_init(enum lm_ggml_numa_strategy numa); // call once for better performance on NUMA systems
+    LM_GGML_BACKEND_API bool lm_ggml_is_numa(void); // true if init detected that system has >1 NUMA node

-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_new_i32(struct lm_ggml_context * ctx, int32_t value);
-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_new_f32(struct lm_ggml_context * ctx, float value);
+    LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_new_i32(struct lm_ggml_context * ctx, int32_t value);
+    LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_new_f32(struct lm_ggml_context * ctx, float value);

-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_i32 (struct lm_ggml_tensor * tensor, int32_t value);
-    LM_GGML_API struct lm_ggml_tensor * lm_ggml_set_f32 (struct lm_ggml_tensor * tensor, float value);
+    LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_set_i32 (struct lm_ggml_tensor * tensor, int32_t value);
+    LM_GGML_BACKEND_API struct lm_ggml_tensor * lm_ggml_set_f32 (struct lm_ggml_tensor * tensor, float value);

-    LM_GGML_API int32_t lm_ggml_get_i32_1d(const struct lm_ggml_tensor * tensor, int i);
-    LM_GGML_API void    lm_ggml_set_i32_1d(const struct lm_ggml_tensor * tensor, int i, int32_t value);
+    LM_GGML_BACKEND_API int32_t lm_ggml_get_i32_1d(const struct lm_ggml_tensor * tensor, int i);
+    LM_GGML_BACKEND_API void    lm_ggml_set_i32_1d(const struct lm_ggml_tensor * tensor, int i, int32_t value);

-    LM_GGML_API int32_t lm_ggml_get_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
-    LM_GGML_API void    lm_ggml_set_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+    LM_GGML_BACKEND_API int32_t lm_ggml_get_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+    LM_GGML_BACKEND_API void    lm_ggml_set_i32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);

-    LM_GGML_API float lm_ggml_get_f32_1d(const struct lm_ggml_tensor * tensor, int i);
-    LM_GGML_API void  lm_ggml_set_f32_1d(const struct lm_ggml_tensor * tensor, int i, float value);
+    LM_GGML_BACKEND_API float lm_ggml_get_f32_1d(const struct lm_ggml_tensor * tensor, int i);
+    LM_GGML_BACKEND_API void  lm_ggml_set_f32_1d(const struct lm_ggml_tensor * tensor, int i, float value);

-    LM_GGML_API float lm_ggml_get_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
-    LM_GGML_API void  lm_ggml_set_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+    LM_GGML_BACKEND_API float lm_ggml_get_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+    LM_GGML_BACKEND_API void  lm_ggml_set_f32_nd(const struct lm_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);

-    LM_GGML_API struct lm_ggml_threadpool_params lm_ggml_threadpool_params_default(int n_threads);
-    LM_GGML_API void lm_ggml_threadpool_params_init  (struct lm_ggml_threadpool_params * p, int n_threads);
-    LM_GGML_API bool lm_ggml_threadpool_params_match (const struct lm_ggml_threadpool_params * p0, const struct lm_ggml_threadpool_params * p1);
-    LM_GGML_API struct lm_ggml_threadpool * lm_ggml_threadpool_new (struct lm_ggml_threadpool_params * params);
-    LM_GGML_API void lm_ggml_threadpool_free         (struct lm_ggml_threadpool * threadpool);
-    LM_GGML_API int  lm_ggml_threadpool_get_n_threads(struct lm_ggml_threadpool * threadpool);
-    LM_GGML_API void lm_ggml_threadpool_pause        (struct lm_ggml_threadpool * threadpool);
-    LM_GGML_API void lm_ggml_threadpool_resume       (struct lm_ggml_threadpool * threadpool);
+    LM_GGML_BACKEND_API struct lm_ggml_threadpool * lm_ggml_threadpool_new (struct lm_ggml_threadpool_params * params);
+    LM_GGML_BACKEND_API void lm_ggml_threadpool_free          (struct lm_ggml_threadpool * threadpool);
+    LM_GGML_BACKEND_API int  lm_ggml_threadpool_get_n_threads (struct lm_ggml_threadpool * threadpool);
+    LM_GGML_BACKEND_API void lm_ggml_threadpool_pause         (struct lm_ggml_threadpool * threadpool);
+    LM_GGML_BACKEND_API void lm_ggml_threadpool_resume        (struct lm_ggml_threadpool * threadpool);

     // lm_ggml_graph_plan() has to be called before lm_ggml_graph_compute()
     // when plan.work_size > 0, caller must allocate memory for plan.work_data
-    LM_GGML_API struct lm_ggml_cplan lm_ggml_graph_plan(
+    LM_GGML_BACKEND_API struct lm_ggml_cplan lm_ggml_graph_plan(
         const struct lm_ggml_cgraph * cgraph,
         int n_threads, /* = LM_GGML_DEFAULT_N_THREADS */
         struct lm_ggml_threadpool * threadpool /* = NULL */ );
-    LM_GGML_API enum lm_ggml_status lm_ggml_graph_compute(struct lm_ggml_cgraph * cgraph, struct lm_ggml_cplan * cplan);
+    LM_GGML_BACKEND_API enum lm_ggml_status lm_ggml_graph_compute(struct lm_ggml_cgraph * cgraph, struct lm_ggml_cplan * cplan);

     // same as lm_ggml_graph_compute() but the work data is allocated as a part of the context
     // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
-    LM_GGML_API enum lm_ggml_status lm_ggml_graph_compute_with_ctx(struct lm_ggml_context * ctx, struct lm_ggml_cgraph * cgraph, int n_threads);
+    LM_GGML_BACKEND_API enum lm_ggml_status lm_ggml_graph_compute_with_ctx(struct lm_ggml_context * ctx, struct lm_ggml_cgraph * cgraph, int n_threads);

-    // TODO: move to backend interface
-    LM_GGML_API int lm_ggml_cpu_has_neon       (void);
-    LM_GGML_API int lm_ggml_cpu_has_sve        (void);
-    LM_GGML_API int lm_ggml_cpu_has_matmul_int8(void);
-    // get the sve vector length in bytes
-    LM_GGML_API int lm_ggml_cpu_get_sve_cnt(void);
+    //
+    // system info
+    //
+
+    // x86
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_sse3       (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_ssse3      (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx        (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx_vnni   (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx2       (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_f16c       (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_fma        (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512     (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_vbmi(void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_vnni(void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_avx512_bf16(void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_amx_int8   (void);
+    // ARM
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_neon       (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_arm_fma    (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_fp16_va    (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_dotprod    (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_matmul_int8(void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_sve        (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_get_sve_cnt    (void); // sve vector length in bytes
+    // other
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_riscv_v    (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_vsx        (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_wasm_simd  (void);
+    LM_GGML_BACKEND_API int lm_ggml_cpu_has_llamafile  (void);

     // Internal types and functions exposed for tests and benchmarks

@@ -115,6 +113,7 @@ extern "C" {
         const void * LM_GGML_RESTRICT y, int nr, int nc);

     struct lm_ggml_type_traits_cpu {
+        lm_ggml_from_float_t        from_float;
         lm_ggml_from_float_to_mat_t from_float_to_mat;
         lm_ggml_vec_dot_t           vec_dot;
         enum lm_ggml_type           vec_dot_type;
@@ -124,27 +123,30 @@ extern "C" {
         lm_ggml_gemm_t              gemm;
     };

-    LM_GGML_API const struct lm_ggml_type_traits_cpu * lm_ggml_get_type_traits_cpu(enum lm_ggml_type type);
+    LM_GGML_BACKEND_API const struct lm_ggml_type_traits_cpu * lm_ggml_get_type_traits_cpu(enum lm_ggml_type type);

-    LM_GGML_API void lm_ggml_cpu_init(void);
+    LM_GGML_BACKEND_API void lm_ggml_cpu_init(void);

     //
     // CPU backend
     //

-    LM_GGML_API lm_ggml_backend_t lm_ggml_backend_cpu_init(void);
+    LM_GGML_BACKEND_API lm_ggml_backend_t lm_ggml_backend_cpu_init(void);

-    LM_GGML_API bool lm_ggml_backend_is_cpu                (lm_ggml_backend_t backend);
-    LM_GGML_API void lm_ggml_backend_cpu_set_n_threads     (lm_ggml_backend_t backend_cpu, int n_threads);
-    LM_GGML_API void lm_ggml_backend_cpu_set_threadpool    (lm_ggml_backend_t backend_cpu, lm_ggml_threadpool_t threadpool);
-    LM_GGML_API void lm_ggml_backend_cpu_set_abort_callback(lm_ggml_backend_t backend_cpu, lm_ggml_abort_callback abort_callback, void * abort_callback_data);
+    LM_GGML_BACKEND_API bool lm_ggml_backend_is_cpu                (lm_ggml_backend_t backend);
+    LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_n_threads     (lm_ggml_backend_t backend_cpu, int n_threads);
+    LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_threadpool    (lm_ggml_backend_t backend_cpu, lm_ggml_threadpool_t threadpool);
+    LM_GGML_BACKEND_API void lm_ggml_backend_cpu_set_abort_callback(lm_ggml_backend_t backend_cpu, lm_ggml_abort_callback abort_callback, void * abort_callback_data);

-    LM_GGML_API lm_ggml_backend_reg_t lm_ggml_backend_cpu_reg(void);
+    LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_cpu_reg(void);

 #ifdef LM_GGML_USE_CPU_HBM
-    LM_GGML_API lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_hbm_buffer_type(void);
+    LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_hbm_buffer_type(void);
 #endif

+    LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_cpu_aarch64_buffer_type(void);
+    LM_GGML_BACKEND_API bool lm_ggml_backend_cpu_buft_is_aarch64(lm_ggml_backend_buffer_type_t buft);
+
 #ifdef __cplusplus
 }
 #endif
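
Editor's note: the `LM_GGML_API` → `LM_GGML_BACKEND_API` rename above moves these symbols behind the new backend-export macro, and the scheduling/threadpool types are removed from this header (presumably relocated to `ggml.h`, since `lm_ggml_threadpool_new` still takes the params struct). The cplan contract itself is unchanged and is worth restating with code. The following is a minimal consumer-side sketch, not shipped in this package; it uses only functions declared in this header, and assumes the `LM_GGML_STATUS_*` values come from `ggml.h` as in upstream ggml.

    // Sketch: plan first, allocate work_data when requested, then compute.
    #include <stdlib.h>
    #include "ggml-cpu.h"

    static enum lm_ggml_status compute_on_cpu(struct lm_ggml_cgraph * graph, int n_threads) {
        // 1) plan: reports how much scratch memory the compute pass needs
        struct lm_ggml_cplan plan = lm_ggml_graph_plan(graph, n_threads, /*threadpool=*/NULL);

        // 2) when plan.work_size > 0, the caller must provide plan.work_data
        if (plan.work_size > 0) {
            plan.work_data = malloc(plan.work_size);
            if (plan.work_data == NULL) {
                return LM_GGML_STATUS_ALLOC_FAILED; // assumed status code from ggml.h
            }
        }

        // 3) compute, then release the scratch buffer the caller owns
        enum lm_ggml_status status = lm_ggml_graph_compute(graph, &plan);
        free(plan.work_data);
        return status;
    }

The reorganized system-info block is queried the same way as the old ad-hoc functions, e.g. `lm_ggml_cpu_has_neon() && lm_ggml_cpu_has_dotprod()` on ARM; the change is that the full x86/ARM/other matrix is now exported from the CPU backend rather than scattered behind a TODO.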
package/cpp/ggml-impl.h CHANGED
@@ -3,22 +3,40 @@
 // GGML internal header

 #include "ggml.h"
-
 #include <assert.h>
+#include <math.h>
 #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
 #include <stdbool.h>
 #include <stdint.h>
 #include <string.h>

+#ifdef __ARM_FEATURE_SVE
+#include <arm_sve.h>
+#endif // __ARM_FEATURE_SVE
+
+#if defined(__ARM_NEON) && !defined(__CUDACC__)
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+#endif
+
+#if defined(__F16C__)
+#include <immintrin.h>
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif

-#undef MIN
-#undef MAX
+#ifndef MIN
+#    define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif

-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#ifndef MAX
+#    define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif

 // required for mmap as gguf only guarantees 32-byte alignment
 #define TENSOR_ALIGNMENT 32
@@ -28,13 +46,13 @@ extern "C" {
 // if C99 - static_assert is noop
 // ref: https://stackoverflow.com/a/53923785/4039976
 #ifndef __cplusplus
-#ifndef static_assert
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
-#define static_assert(cond, msg) _Static_assert(cond, msg)
-#else
-#define static_assert(cond, msg) struct global_scope_noop_trick
-#endif
-#endif
+    #ifndef static_assert
+        #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
+            #define static_assert(cond, msg) _Static_assert(cond, msg)
+        #else
+            #define static_assert(cond, msg) struct global_scope_noop_trick
+        #endif
+    #endif
 #endif

 static inline int lm_ggml_up32(int n) {
@@ -120,14 +138,12 @@ struct lm_ggml_map_custom1_op_params {
     void * userdata;
 };

-
 struct lm_ggml_map_custom2_op_params {
     lm_ggml_custom2_op_t fun;
     int n_tasks;
     void * userdata;
 };

-
 struct lm_ggml_map_custom3_op_params {
     lm_ggml_custom3_op_t fun;
     int n_tasks;
@@ -182,7 +198,7 @@ void lm_ggml_hash_set_reset(struct lm_ggml_hash_set * hash_set);
 static bool lm_ggml_hash_contains(const struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);

 // returns LM_GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
-static size_t lm_ggml_hash_find(const struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);
+static size_t lm_ggml_hash_find(const struct lm_ggml_hash_set * hash_set, const struct lm_ggml_tensor * key);

 // returns LM_GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
 static size_t lm_ggml_hash_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);
@@ -196,7 +212,7 @@ static inline size_t lm_ggml_hash(const struct lm_ggml_tensor * p) {
     return (size_t)(uintptr_t)p >> 4;
 }

-static size_t lm_ggml_hash_find(const struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key) {
+static size_t lm_ggml_hash_find(const struct lm_ggml_hash_set * hash_set, const struct lm_ggml_tensor * key) {
     size_t h = lm_ggml_hash(key) % hash_set->size;

     // linear probing
@@ -267,19 +283,23 @@ enum lm_ggml_cgraph_eval_order {
 };

 struct lm_ggml_cgraph {
-    int size;
-    int n_nodes;
-    int n_leafs;
+    int size;    // maximum number of nodes/leafs/grads/grad_accs
+    int n_nodes; // number of nodes currently in use
+    int n_leafs; // number of leafs currently in use

-    struct lm_ggml_tensor ** nodes;
-    struct lm_ggml_tensor ** grads;
-    struct lm_ggml_tensor ** leafs;
+    struct lm_ggml_tensor ** nodes;     // tensors with data that can change if the graph is evaluated
+    struct lm_ggml_tensor ** grads;     // the outputs of these tensors are the gradients of the nodes
+    struct lm_ggml_tensor ** grad_accs; // accumulators for node gradients
+    struct lm_ggml_tensor ** leafs;     // tensors with constant data

     struct lm_ggml_hash_set visited_hash_set;

     enum lm_ggml_cgraph_eval_order order;
 };

+// returns a slice of cgraph with nodes [i0, i1)
+// the slice does not have leafs or gradients
+// if you need the gradients, get them from the original graph
 struct lm_ggml_cgraph lm_ggml_graph_view(struct lm_ggml_cgraph * cgraph, int i0, int i1);

 // Memory allocation
@@ -287,9 +307,249 @@ struct lm_ggml_cgraph lm_ggml_graph_view(struct lm_ggml_cgraph * cgraph, int i0,
 void * lm_ggml_aligned_malloc(size_t size);
 void lm_ggml_aligned_free(void * ptr, size_t size);

-// TODO: move to threading file
-void lm_ggml_critical_section_start(void);
-void lm_ggml_critical_section_end(void);
+// FP16 to FP32 conversion
+
+#if defined(__ARM_NEON)
+    #ifdef _MSC_VER
+        typedef uint16_t lm_ggml_fp16_internal_t;
+    #else
+        typedef __fp16 lm_ggml_fp16_internal_t;
+    #endif
+#endif
+
+#if defined(__ARM_NEON) && !defined(_MSC_VER)
+    #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
+    #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
+
+    #define LM_GGML_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
+
+    static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
+        lm_ggml_fp16_internal_t tmp;
+        memcpy(&tmp, &h, sizeof(lm_ggml_fp16_t));
+        return (float)tmp;
+    }
+
+    static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
+        lm_ggml_fp16_t res;
+        lm_ggml_fp16_internal_t tmp = f;
+        memcpy(&res, &tmp, sizeof(lm_ggml_fp16_t));
+        return res;
+    }
+
+#elif defined(__F16C__)
+
+    #ifdef _MSC_VER
+        #define LM_GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
+        #define LM_GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
+    #else
+        #define LM_GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
+        #define LM_GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
+    #endif
+
+#elif defined(__POWER9_VECTOR__)
+
+    #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
+    #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
+    /* the inline asm below is about 12% faster than the lookup method */
+    #define LM_GGML_FP16_TO_FP32(x) LM_GGML_COMPUTE_FP16_TO_FP32(x)
+    #define LM_GGML_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)
+
+    static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
+        register float f;
+        register double d;
+        __asm__(
+            "mtfprd %0,%2\n"
+            "xscvhpdp %0,%0\n"
+            "frsp %1,%0\n" :
+            /* temp */ "=d"(d),
+            /* out */  "=f"(f):
+            /* in */   "r"(h));
+        return f;
+    }
+
+    static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
+        register double d;
+        register lm_ggml_fp16_t r;
+        __asm__( /* xscvdphp can work on double or single precision */
+            "xscvdphp %0,%2\n"
+            "mffprd %1,%0\n" :
+            /* temp */ "=d"(d),
+            /* out */  "=r"(r):
+            /* in */   "f"(f));
+        return r;
+    }
+
+#else
+
+    // FP16 <-> FP32
+    // ref: https://github.com/Maratyszcza/FP16
+
+    static inline float fp32_from_bits(uint32_t w) {
+        union {
+            uint32_t as_bits;
+            float as_value;
+        } fp32;
+        fp32.as_bits = w;
+        return fp32.as_value;
+    }
+
+    static inline uint32_t fp32_to_bits(float f) {
+        union {
+            float as_value;
+            uint32_t as_bits;
+        } fp32;
+        fp32.as_value = f;
+        return fp32.as_bits;
+    }
+
+    static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
+        const uint32_t w = (uint32_t) h << 16;
+        const uint32_t sign = w & UINT32_C(0x80000000);
+        const uint32_t two_w = w + w;
+
+        const uint32_t exp_offset = UINT32_C(0xE0) << 23;
+#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
+        const float exp_scale = 0x1.0p-112f;
+#else
+        const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
+#endif
+        const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
+
+        const uint32_t magic_mask = UINT32_C(126) << 23;
+        const float magic_bias = 0.5f;
+        const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
+
+        const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
+        const uint32_t result = sign |
+            (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
+        return fp32_from_bits(result);
+    }
+
+    static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
+#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
+        const float scale_to_inf = 0x1.0p+112f;
+        const float scale_to_zero = 0x1.0p-110f;
+#else
+        const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
+        const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
+#endif
+        float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
+
+        const uint32_t w = fp32_to_bits(f);
+        const uint32_t shl1_w = w + w;
+        const uint32_t sign = w & UINT32_C(0x80000000);
+        uint32_t bias = shl1_w & UINT32_C(0xFF000000);
+        if (bias < UINT32_C(0x71000000)) {
+            bias = UINT32_C(0x71000000);
+        }
+
+        base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
+        const uint32_t bits = fp32_to_bits(base);
+        const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
+        const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
+        const uint32_t nonsign = exp_bits + mantissa_bits;
+        return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
+    }
+
+    #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
+    #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
+
+#endif // defined(__ARM_NEON) && (!defined(__MSC_VER)
+
+// precomputed f32 table for f16 (256 KB)
+// defined in ggml.c, initialized in lm_ggml_init()
+LM_GGML_API float lm_ggml_table_f32_f16[1 << 16];
+
+// On ARM NEON, it's quicker to directly convert x -> x instead of calling into lm_ggml_lookup_fp16_to_fp32,
+// so we define LM_GGML_FP16_TO_FP32 and LM_GGML_FP32_TO_FP16 elsewhere for NEON.
+// This is also true for POWER9.
+#if !defined(LM_GGML_FP16_TO_FP32)
+inline static float lm_ggml_lookup_fp16_to_fp32(lm_ggml_fp16_t f) {
+    uint16_t s;
+    memcpy(&s, &f, sizeof(uint16_t));
+    return lm_ggml_table_f32_f16[s];
+}
+
+#define LM_GGML_FP16_TO_FP32(x) lm_ggml_lookup_fp16_to_fp32(x)
+#endif
+
+#if !defined(LM_GGML_FP32_TO_FP16)
+#define LM_GGML_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)
+#endif
+
+/**
+ * Converts brain16 to float32.
+ *
+ * The bfloat16 floating point format has the following structure:
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───┐
+ *     0b0000000000000000 brain16
+ *
+ * Since bf16 has the same number of exponent bits as a 32bit float,
+ * encoding and decoding numbers becomes relatively straightforward.
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───────────────────┐
+ *     0b00000000000000000000000000000000 IEEE binary32
+ *
+ * For comparison, the standard fp16 format has fewer exponent bits.
+ *
+ *       ┌sign
+ *       │
+ *       │  ┌exponent
+ *       │  │
+ *       │  │    ┌mantissa
+ *       │  │    │
+ *       │┌─┴─┐┌─┴──────┐
+ *     0b0000000000000000 IEEE binary16
+ *
+ * @see IEEE 754-2008
+ */
+static inline float lm_ggml_compute_bf16_to_fp32(lm_ggml_bf16_t h) {
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.i = (uint32_t)h.bits << 16;
+    return u.f;
+}
+
+/**
+ * Converts float32 to brain16.
+ *
+ * This is binary identical with Google Brain float conversion.
+ * Floats shall round to nearest even, and NANs shall be quiet.
+ * Subnormals aren't flushed to zero, except perhaps when used.
+ * This code should vectorize nicely if using modern compilers.
+ */
+static inline lm_ggml_bf16_t lm_ggml_compute_fp32_to_bf16(float s) {
+    lm_ggml_bf16_t h;
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.f = s;
+    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
+        h.bits = (u.i >> 16) | 64; /* force to quiet */
+        return h;
+    }
+    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
+    return h;
+}
+
+#define LM_GGML_FP32_TO_BF16(x) lm_ggml_compute_fp32_to_bf16(x)
+#define LM_GGML_BF16_TO_FP32(x) lm_ggml_compute_bf16_to_fp32(x)

 #ifdef __cplusplus
 }
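
Editor's note: the fp32 → bf16 conversion added above rounds to nearest even by adding a bias of `0x7fff` plus the LSB of the high half before truncating to 16 bits. An illustrative check (not part of the package), using only the macros and types introduced in this diff: 1.0f is `0x3F800000`; its high half is `0x3F80` with LSB 0, so the bias is `0x7FFF` and `(0x3F800000 + 0x7FFF) >> 16 == 0x3F80`, which is exactly bf16 1.0.

    // Round-trip sketch for the bf16 helpers above (assumes lm_ggml_bf16_t
    // comes from ggml.h, as the functions in this header imply).
    #include <assert.h>
    #include "ggml-impl.h"

    static void bf16_round_trip(void) {
        lm_ggml_bf16_t b = LM_GGML_FP32_TO_BF16(1.0f);
        assert(b.bits == 0x3F80);                 // truncation keeps the top 16 bits
        assert(LM_GGML_BF16_TO_FP32(b) == 1.0f);  // 1.0 is exactly representable in bf16
    }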
package/cpp/ggml-metal.h CHANGED
@@ -39,27 +39,27 @@ extern "C" {
 // user-code should use only these functions
 //

-LM_GGML_API lm_ggml_backend_t lm_ggml_backend_metal_init(void);
+LM_GGML_BACKEND_API lm_ggml_backend_t lm_ggml_backend_metal_init(void);

-LM_GGML_API bool lm_ggml_backend_is_metal(lm_ggml_backend_t backend);
+LM_GGML_BACKEND_API bool lm_ggml_backend_is_metal(lm_ggml_backend_t backend);

 LM_GGML_DEPRECATED(
-        LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
+        LM_GGML_BACKEND_API lm_ggml_backend_buffer_t lm_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
         "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");

-LM_GGML_API void lm_ggml_backend_metal_set_abort_callback(lm_ggml_backend_t backend, lm_ggml_abort_callback abort_callback, void * user_data);
+LM_GGML_BACKEND_API void lm_ggml_backend_metal_set_abort_callback(lm_ggml_backend_t backend, lm_ggml_abort_callback abort_callback, void * user_data);

-LM_GGML_API lm_ggml_backend_buffer_type_t lm_ggml_backend_metal_buffer_type(void);
+LM_GGML_BACKEND_API lm_ggml_backend_buffer_type_t lm_ggml_backend_metal_buffer_type(void);

 // helper to check if the device supports a specific family
 // ideally, the user code should be doing these checks
 // ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
-LM_GGML_API bool lm_ggml_backend_metal_supports_family(lm_ggml_backend_t backend, int family);
+LM_GGML_BACKEND_API bool lm_ggml_backend_metal_supports_family(lm_ggml_backend_t backend, int family);

 // capture all command buffers committed the next time `lm_ggml_backend_graph_compute` is called
-LM_GGML_API void lm_ggml_backend_metal_capture_next_compute(lm_ggml_backend_t backend);
+LM_GGML_BACKEND_API void lm_ggml_backend_metal_capture_next_compute(lm_ggml_backend_t backend);

-LM_GGML_API lm_ggml_backend_reg_t lm_ggml_backend_metal_reg(void);
+LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_metal_reg(void);

 #ifdef __cplusplus
 }
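
Editor's note: the Metal header now exports through `LM_GGML_BACKEND_API` as well, consistent with the backend-registry split (`ggml-backend-reg.cpp`) added in this release. A hedged usage sketch, not from the package, using only functions declared in these headers; the family number passed to `lm_ggml_backend_metal_supports_family` is assumed to follow Apple's `MTLGPUFamilyApple` numbering per the feature-set tables linked above, and which family an application should require depends on its kernels.

    // Sketch: prefer the Metal backend, fall back to CPU.
    #include <stddef.h>
    #include "ggml-metal.h"
    #include "ggml-cpu.h"

    static lm_ggml_backend_t pick_backend(void) {
        lm_ggml_backend_t backend = lm_ggml_backend_metal_init();
        if (backend != NULL) {
            // e.g. probe for Apple7-class GPU features before enabling
            // kernels that need them (assumption: family 7 == Apple7)
            if (!lm_ggml_backend_metal_supports_family(backend, 7)) {
                // degrade gracefully rather than aborting
            }
            return backend;
        }
        return lm_ggml_backend_cpu_init(); // CPU fallback
    }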