llama-cpp-capacitor 0.0.5 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. package/cpp/LICENSE +21 -0
  2. package/cpp/README.md +4 -0
  3. package/cpp/anyascii.c +22223 -0
  4. package/cpp/anyascii.h +42 -0
  5. package/cpp/chat-parser.cpp +393 -0
  6. package/cpp/chat-parser.h +120 -0
  7. package/cpp/chat.cpp +2315 -0
  8. package/cpp/chat.h +221 -0
  9. package/cpp/common.cpp +1619 -0
  10. package/cpp/common.h +744 -0
  11. package/cpp/ggml-alloc.c +1028 -0
  12. package/cpp/ggml-alloc.h +76 -0
  13. package/cpp/ggml-backend-impl.h +255 -0
  14. package/cpp/ggml-backend-reg.cpp +600 -0
  15. package/cpp/ggml-backend.cpp +2118 -0
  16. package/cpp/ggml-backend.h +354 -0
  17. package/cpp/ggml-common.h +1878 -0
  18. package/cpp/ggml-cpp.h +39 -0
  19. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  20. package/cpp/ggml-cpu/amx/amx.h +8 -0
  21. package/cpp/ggml-cpu/amx/common.h +91 -0
  22. package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
  23. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  24. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  25. package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
  26. package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
  27. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  28. package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
  29. package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
  30. package/cpp/ggml-cpu/arch-fallback.h +215 -0
  31. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  32. package/cpp/ggml-cpu/binary-ops.h +16 -0
  33. package/cpp/ggml-cpu/common.h +73 -0
  34. package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
  35. package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
  36. package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
  37. package/cpp/ggml-cpu/ops.cpp +10587 -0
  38. package/cpp/ggml-cpu/ops.h +114 -0
  39. package/cpp/ggml-cpu/quants.c +1193 -0
  40. package/cpp/ggml-cpu/quants.h +97 -0
  41. package/cpp/ggml-cpu/repack.cpp +1982 -0
  42. package/cpp/ggml-cpu/repack.h +120 -0
  43. package/cpp/ggml-cpu/simd-mappings.h +1184 -0
  44. package/cpp/ggml-cpu/traits.cpp +36 -0
  45. package/cpp/ggml-cpu/traits.h +38 -0
  46. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  47. package/cpp/ggml-cpu/unary-ops.h +28 -0
  48. package/cpp/ggml-cpu/vec.cpp +348 -0
  49. package/cpp/ggml-cpu/vec.h +1121 -0
  50. package/cpp/ggml-cpu.h +145 -0
  51. package/cpp/ggml-impl.h +622 -0
  52. package/cpp/ggml-metal-impl.h +688 -0
  53. package/cpp/ggml-metal.h +66 -0
  54. package/cpp/ggml-metal.m +6833 -0
  55. package/cpp/ggml-opt.cpp +1093 -0
  56. package/cpp/ggml-opt.h +256 -0
  57. package/cpp/ggml-quants.c +5324 -0
  58. package/cpp/ggml-quants.h +106 -0
  59. package/cpp/ggml-threading.cpp +12 -0
  60. package/cpp/ggml-threading.h +14 -0
  61. package/cpp/ggml.c +7108 -0
  62. package/cpp/ggml.h +2492 -0
  63. package/cpp/gguf.cpp +1358 -0
  64. package/cpp/gguf.h +202 -0
  65. package/cpp/json-partial.cpp +256 -0
  66. package/cpp/json-partial.h +38 -0
  67. package/cpp/json-schema-to-grammar.cpp +985 -0
  68. package/cpp/json-schema-to-grammar.h +21 -0
  69. package/cpp/llama-adapter.cpp +388 -0
  70. package/cpp/llama-adapter.h +76 -0
  71. package/cpp/llama-arch.cpp +2355 -0
  72. package/cpp/llama-arch.h +499 -0
  73. package/cpp/llama-batch.cpp +875 -0
  74. package/cpp/llama-batch.h +160 -0
  75. package/cpp/llama-chat.cpp +783 -0
  76. package/cpp/llama-chat.h +65 -0
  77. package/cpp/llama-context.cpp +2748 -0
  78. package/cpp/llama-context.h +306 -0
  79. package/cpp/llama-cparams.cpp +5 -0
  80. package/cpp/llama-cparams.h +41 -0
  81. package/cpp/llama-cpp.h +30 -0
  82. package/cpp/llama-grammar.cpp +1229 -0
  83. package/cpp/llama-grammar.h +173 -0
  84. package/cpp/llama-graph.cpp +1891 -0
  85. package/cpp/llama-graph.h +810 -0
  86. package/cpp/llama-hparams.cpp +180 -0
  87. package/cpp/llama-hparams.h +233 -0
  88. package/cpp/llama-impl.cpp +167 -0
  89. package/cpp/llama-impl.h +61 -0
  90. package/cpp/llama-io.cpp +15 -0
  91. package/cpp/llama-io.h +35 -0
  92. package/cpp/llama-kv-cache-iswa.cpp +318 -0
  93. package/cpp/llama-kv-cache-iswa.h +135 -0
  94. package/cpp/llama-kv-cache.cpp +2059 -0
  95. package/cpp/llama-kv-cache.h +374 -0
  96. package/cpp/llama-kv-cells.h +491 -0
  97. package/cpp/llama-memory-hybrid.cpp +258 -0
  98. package/cpp/llama-memory-hybrid.h +137 -0
  99. package/cpp/llama-memory-recurrent.cpp +1146 -0
  100. package/cpp/llama-memory-recurrent.h +179 -0
  101. package/cpp/llama-memory.cpp +59 -0
  102. package/cpp/llama-memory.h +119 -0
  103. package/cpp/llama-mmap.cpp +600 -0
  104. package/cpp/llama-mmap.h +68 -0
  105. package/cpp/llama-model-loader.cpp +1164 -0
  106. package/cpp/llama-model-loader.h +170 -0
  107. package/cpp/llama-model-saver.cpp +282 -0
  108. package/cpp/llama-model-saver.h +37 -0
  109. package/cpp/llama-model.cpp +19042 -0
  110. package/cpp/llama-model.h +491 -0
  111. package/cpp/llama-sampling.cpp +2575 -0
  112. package/cpp/llama-sampling.h +32 -0
  113. package/cpp/llama-vocab.cpp +3792 -0
  114. package/cpp/llama-vocab.h +176 -0
  115. package/cpp/llama.cpp +358 -0
  116. package/cpp/llama.h +1373 -0
  117. package/cpp/log.cpp +427 -0
  118. package/cpp/log.h +103 -0
  119. package/cpp/minja/chat-template.hpp +550 -0
  120. package/cpp/minja/minja.hpp +3009 -0
  121. package/cpp/nlohmann/json.hpp +25526 -0
  122. package/cpp/nlohmann/json_fwd.hpp +187 -0
  123. package/cpp/regex-partial.cpp +204 -0
  124. package/cpp/regex-partial.h +56 -0
  125. package/cpp/rn-completion.cpp +681 -0
  126. package/cpp/rn-completion.h +116 -0
  127. package/cpp/rn-llama.cpp +345 -0
  128. package/cpp/rn-llama.h +149 -0
  129. package/cpp/rn-mtmd.hpp +602 -0
  130. package/cpp/rn-tts.cpp +591 -0
  131. package/cpp/rn-tts.h +59 -0
  132. package/cpp/sampling.cpp +579 -0
  133. package/cpp/sampling.h +107 -0
  134. package/cpp/tools/mtmd/clip-impl.h +473 -0
  135. package/cpp/tools/mtmd/clip.cpp +4322 -0
  136. package/cpp/tools/mtmd/clip.h +106 -0
  137. package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
  138. package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
  139. package/cpp/tools/mtmd/mtmd-audio.h +47 -0
  140. package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
  141. package/cpp/tools/mtmd/mtmd-helper.h +91 -0
  142. package/cpp/tools/mtmd/mtmd.cpp +1066 -0
  143. package/cpp/tools/mtmd/mtmd.h +298 -0
  144. package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
  145. package/cpp/unicode-data.cpp +7034 -0
  146. package/cpp/unicode-data.h +20 -0
  147. package/cpp/unicode.cpp +1061 -0
  148. package/cpp/unicode.h +68 -0
  149. package/package.json +2 -1
@@ -0,0 +1,622 @@
1
+ #pragma once
2
+
3
+ // GGML internal header
4
+
5
+ #include "ggml.h"
6
+ #include "gguf.h"
7
+
8
+ #include <assert.h>
9
+ #include <math.h>
10
+ #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
11
+ #include <stdbool.h>
12
+ #include <stdint.h>
13
+ #include <string.h>
14
+
15
+ #ifdef __ARM_FEATURE_SVE
16
+ #include <arm_sve.h>
17
+ #endif // __ARM_FEATURE_SVE
18
+
19
+ #if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__)
20
+ // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
21
+ //
22
+ // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
23
+ //
24
+ #include <arm_neon.h>
25
+ #endif
26
+
27
+ #if defined(__F16C__)
28
+ #include <immintrin.h>
29
+ #endif
30
+
31
+ #ifdef __cplusplus
32
+ extern "C" {
33
+ #endif
34
+
35
+ void lm_ggml_print_backtrace(void);
36
+
37
+ #ifndef MIN
38
+ # define MIN(a, b) ((a) < (b) ? (a) : (b))
39
+ #endif
40
+
41
+ #ifndef MAX
42
+ # define MAX(a, b) ((a) > (b) ? (a) : (b))
43
+ #endif
44
+
45
+ // required for mmap as gguf only guarantees 32-byte alignment
46
+ #define TENSOR_ALIGNMENT 32
47
+
48
+ // static_assert should be a #define, but if it's not,
49
+ // fall back to the _Static_assert C11 keyword.
50
+ // if C99 - static_assert is noop
51
+ // ref: https://stackoverflow.com/a/53923785/4039976
52
+ #ifndef __cplusplus
53
+ #ifndef static_assert
54
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
55
+ #define static_assert(cond, msg) _Static_assert(cond, msg)
56
+ #else
57
+ #define static_assert(cond, msg) struct global_scope_noop_trick
58
+ #endif
59
+ #endif
60
+ #endif
61
+
62
// round n up to the nearest multiple of 32
static inline int lm_ggml_up32(int n) {
    // add 31 then clear the low 5 bits
    const int rounded = (n + 31) & ~31;
    return rounded;
}
65
+
66
+ //static inline int lm_ggml_up64(int n) {
67
+ // return (n + 63) & ~63;
68
+ //}
69
+
70
// round n up to the nearest multiple of m; m must be a power of 2
static inline int lm_ggml_up(int n, int m) {
    // assert m is a power of 2
    LM_GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}
75
+
76
+ // TODO: move to ggml.h?
77
+ static bool lm_ggml_are_same_layout(const struct lm_ggml_tensor * a, const struct lm_ggml_tensor * b) {
78
+ if (a->type != b->type) {
79
+ return false;
80
+ }
81
+ for (int i = 0; i < LM_GGML_MAX_DIMS; i++) {
82
+ if (a->ne[i] != b->ne[i]) {
83
+ return false;
84
+ }
85
+ if (a->nb[i] != b->nb[i]) {
86
+ return false;
87
+ }
88
+ }
89
+ return true;
90
+ }
91
+
92
+ //
93
+ // logging
94
+ //
95
+
96
+ LM_GGML_ATTRIBUTE_FORMAT(2, 3)
97
+ LM_GGML_API void lm_ggml_log_internal (enum lm_ggml_log_level level, const char * format, ...);
98
+ LM_GGML_API void lm_ggml_log_callback_default(enum lm_ggml_log_level level, const char * text, void * user_data);
99
+
100
+ #define LM_GGML_LOG(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
101
+ #define LM_GGML_LOG_INFO(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
102
+ #define LM_GGML_LOG_WARN(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
103
+ #define LM_GGML_LOG_ERROR(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
104
+ #define LM_GGML_LOG_DEBUG(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
105
+ #define LM_GGML_LOG_CONT(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
106
+
107
+ #define LM_GGML_DEBUG 0
108
+
109
+ #if (LM_GGML_DEBUG >= 1)
110
+ #define LM_GGML_PRINT_DEBUG(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
111
+ #else
112
+ #define LM_GGML_PRINT_DEBUG(...)
113
+ #endif
114
+
115
+ #if (LM_GGML_DEBUG >= 5)
116
+ #define LM_GGML_PRINT_DEBUG_5(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
117
+ #else
118
+ #define LM_GGML_PRINT_DEBUG_5(...)
119
+ #endif
120
+
121
+ #if (LM_GGML_DEBUG >= 10)
122
+ #define LM_GGML_PRINT_DEBUG_10(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
123
+ #else
124
+ #define LM_GGML_PRINT_DEBUG_10(...)
125
+ #endif
126
+
127
+ // tensor params
128
+
129
// copy params_size bytes from params into tensor->op_params
static void lm_ggml_set_op_params(struct lm_ggml_tensor * tensor, const void * params, size_t params_size) {
    LM_GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
    assert(params_size <= LM_GGML_MAX_OP_PARAMS);
    memcpy(tensor->op_params, params, params_size);
}
134
+
135
// read the i-th int32 op parameter of tensor (i is an element index, not a byte offset)
static int32_t lm_ggml_get_op_params_i32(const struct lm_ggml_tensor * tensor, uint32_t i) {
    assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(int32_t));
    return ((const int32_t *)(tensor->op_params))[i];
}
139
+
140
+ static float lm_ggml_get_op_params_f32(const struct lm_ggml_tensor * tensor, uint32_t i) {
141
+ assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(float));
142
+ return ((const float *)(tensor->op_params))[i];
143
+ }
144
+
145
+ static void lm_ggml_set_op_params_i32(struct lm_ggml_tensor * tensor, uint32_t i, int32_t value) {
146
+ assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(int32_t));
147
+ ((int32_t *)(tensor->op_params))[i] = value;
148
+ }
149
+
150
// store value as the i-th float op parameter of tensor
static void lm_ggml_set_op_params_f32(struct lm_ggml_tensor * tensor, uint32_t i, float value) {
    assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(float));
    ((float *)(tensor->op_params))[i] = value;
}
154
+
155
// op params for the "map custom" operators: each bundles the user callback,
// the requested task count and an opaque user pointer

// presumably for custom ops over one source tensor -- see lm_ggml_custom1_op_t
struct lm_ggml_map_custom1_op_params {
    lm_ggml_custom1_op_t fun; // user callback
    int n_tasks;              // requested number of parallel tasks (semantics defined by the backend -- TODO confirm)
    void * userdata;          // opaque pointer forwarded to fun
};

// two source tensors
struct lm_ggml_map_custom2_op_params {
    lm_ggml_custom2_op_t fun;
    int n_tasks;
    void * userdata;
};

// three source tensors
struct lm_ggml_map_custom3_op_params {
    lm_ggml_custom3_op_t fun;
    int n_tasks;
    void * userdata;
};

// generic custom op
struct lm_ggml_custom_op_params {
    lm_ggml_custom_op_t fun;
    int n_tasks;
    void * userdata;
};
178
+
179
+ // bitset
180
+
181
// bitset: a flat array of bits packed into 32-bit words

typedef uint32_t lm_ggml_bitset_t;

static_assert(sizeof(lm_ggml_bitset_t) == 4, "bitset_t constants must be updated");
#define BITSET_SHR 5 // log2(sizeof(lm_ggml_bitset_t)*8)
#define BITSET_MASK (sizeof(lm_ggml_bitset_t)*8 - 1)

// number of words needed to hold n bits
static size_t lm_ggml_bitset_size(size_t n) {
    const size_t n_words = (n + BITSET_MASK) >> BITSET_SHR;
    return n_words;
}

// test bit i
static inline bool lm_ggml_bitset_get(const lm_ggml_bitset_t * bitset, size_t i) {
    const lm_ggml_bitset_t word = bitset[i >> BITSET_SHR];
    return (word & (1u << (i & BITSET_MASK))) != 0;
}

// set bit i
static inline void lm_ggml_bitset_set(lm_ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] |= 1u << (i & BITSET_MASK);
}

// clear bit i
static inline void lm_ggml_bitset_clear(lm_ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK));
}
202
+
203
+ // hash set
204
+
205
// sentinel return values for the hash-set functions below
#define LM_GGML_HASHSET_FULL ((size_t)-1)
#define LM_GGML_HASHSET_ALREADY_EXISTS ((size_t)-2)

// open-addressing hash set of tensor pointers (linear probing, no deletion of single keys)
struct lm_ggml_hash_set {
    size_t size;
    lm_ggml_bitset_t * used; // whether or not the keys are in use i.e. set
    struct lm_ggml_tensor ** keys; // actual tensors in the set, keys[i] is only defined if lm_ggml_bitset_get(used, i)
};
213
+
214
+ struct lm_ggml_hash_set lm_ggml_hash_set_new(size_t size);
215
+ void lm_ggml_hash_set_free(struct lm_ggml_hash_set * hash_set);
216
+
217
+ // returns the minimum size for a hash set that can hold min_sz elements
218
+ size_t lm_ggml_hash_size(size_t min_sz);
219
+
220
+ // remove all elements from the hash set
221
+ void lm_ggml_hash_set_reset(struct lm_ggml_hash_set * hash_set);
222
+
223
+ // returns true if key is in the hash set
224
+ static bool lm_ggml_hash_contains(const struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);
225
+
226
+ // returns LM_GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
227
+ static size_t lm_ggml_hash_find(const struct lm_ggml_hash_set * hash_set, const struct lm_ggml_tensor * key);
228
+
229
+ // returns LM_GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
230
+ static size_t lm_ggml_hash_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);
231
+
232
+ // return index, asserts if table is full
233
+ static size_t lm_ggml_hash_find_or_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);
234
+
235
+ // hash function for lm_ggml_tensor
236
static inline size_t lm_ggml_hash(const struct lm_ggml_tensor * p) {
    // tensor addresses are at least 16-byte aligned, so the low 4 bits are
    // always zero - drop them to avoid clustered buckets
    const size_t addr = (size_t)(uintptr_t)p;
    return addr >> 4;
}
240
+
241
// linear-probe lookup: returns the slot holding key, or the first free slot
// where key could be inserted, or LM_GGML_HASHSET_FULL if no slot is available
static size_t lm_ggml_hash_find(const struct lm_ggml_hash_set * hash_set, const struct lm_ggml_tensor * key) {
    size_t h = lm_ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    while (lm_ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) {
        i = (i + 1) % hash_set->size;
        if (i == h) {
            // visited all hash table entries -> not found
            return LM_GGML_HASHSET_FULL;
        }
    }
    return i;
}
255
+
256
+ static bool lm_ggml_hash_contains(const struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key) {
257
+ size_t i = lm_ggml_hash_find(hash_set, key);
258
+ return i != LM_GGML_HASHSET_FULL && lm_ggml_bitset_get(hash_set->used, i);
259
+ }
260
+
261
// insert key and return its slot; returns LM_GGML_HASHSET_ALREADY_EXISTS if the
// key is already present; aborts if the table is full
static size_t lm_ggml_hash_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key) {
    size_t h = lm_ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    do {
        if (!lm_ggml_bitset_get(hash_set->used, i)) {
            // free slot found - claim it for this key
            lm_ggml_bitset_set(hash_set->used, i);
            hash_set->keys[i] = key;
            return i;
        }
        if (hash_set->keys[i] == key) {
            return LM_GGML_HASHSET_ALREADY_EXISTS;
        }
        i = (i + 1) % hash_set->size;
    } while (i != h);

    // visited all hash table entries -> not found
    LM_GGML_ABORT("fatal error");
}
281
+
282
// like lm_ggml_hash_insert, but returns the existing slot when the key is
// already present; aborts if the table is full
static size_t lm_ggml_hash_find_or_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key) {
    size_t h = lm_ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    do {
        if (!lm_ggml_bitset_get(hash_set->used, i)) {
            // free slot found - claim it for this key
            lm_ggml_bitset_set(hash_set->used, i);
            hash_set->keys[i] = key;
            return i;
        }
        if (hash_set->keys[i] == key) {
            return i;
        }
        i = (i + 1) % hash_set->size;
    } while (i != h);

    // visited all hash table entries -> not found
    LM_GGML_ABORT("fatal error");
}
302
+
303
+ // computation graph
304
+
305
// order in which the nodes of a graph are visited during evaluation
enum lm_ggml_cgraph_eval_order {
    LM_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
    LM_GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
    LM_GGML_CGRAPH_EVAL_ORDER_COUNT
};

// computation graph: operation nodes plus constant leafs, with per-tensor
// bookkeeping (use counts) indexed by slot in visited_hash_set
struct lm_ggml_cgraph {
    int size;    // maximum number of nodes/leafs/grads/grad_accs
    int n_nodes; // number of nodes currently in use
    int n_leafs; // number of leafs currently in use

    struct lm_ggml_tensor ** nodes;     // tensors with data that can change if the graph is evaluated
    struct lm_ggml_tensor ** grads;     // the outputs of these tensors are the gradients of the nodes
    struct lm_ggml_tensor ** grad_accs; // accumulators for node gradients
    struct lm_ggml_tensor ** leafs;     // tensors with constant data
    int32_t * use_counts; // number of uses of each tensor, indexed by hash table slot

    struct lm_ggml_hash_set visited_hash_set;

    enum lm_ggml_cgraph_eval_order order;
};
326
+
327
+ // returns a slice of cgraph with nodes [i0, i1)
328
+ // the slice does not have leafs or gradients
329
+ // if you need the gradients, get them from the original graph
330
+ struct lm_ggml_cgraph lm_ggml_graph_view(struct lm_ggml_cgraph * cgraph, int i0, int i1);
331
+
332
+ // Memory allocation
333
+
334
+ LM_GGML_API void * lm_ggml_aligned_malloc(size_t size);
335
+ LM_GGML_API void lm_ggml_aligned_free(void * ptr, size_t size);
336
+
337
+ // FP16 <-> FP32
338
+ // ref: https://github.com/Maratyszcza/FP16
339
+
340
// reinterpret a 32-bit pattern as an IEEE-754 binary32 value
static inline float fp32_from_bits(uint32_t w) {
    float f;
    // memcpy-based type punning avoids strict-aliasing issues
    memcpy(&f, &w, sizeof(f));
    return f;
}
348
+
349
// reinterpret an IEEE-754 binary32 value as its 32-bit pattern
static inline uint32_t fp32_to_bits(float f) {
    uint32_t w;
    // memcpy-based type punning avoids strict-aliasing issues
    memcpy(&w, &f, sizeof(w));
    return w;
}
357
+
358
// scalar FP16 -> FP32 conversion using only integer/float bit tricks
// (algorithm from https://github.com/Maratyszcza/FP16)
static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;          // half bits moved to the top of a 32-bit word
    const uint32_t sign = w & UINT32_C(0x80000000); // isolated sign bit
    const uint32_t two_w = w + w;                   // shifts out the sign bit

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
    // hex float literals need C99 or C++17; otherwise build the constant from bits
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    // re-bias the exponent for normal (and infinite/NaN) inputs
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    // denormal inputs: treat the mantissa as a fixed-point value and subtract the bias
    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    // select the denormal path when the (sign-stripped) input is below the smallest normal half
    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}
380
+
381
// scalar FP32 -> FP16 conversion (round-to-nearest-even, NaN -> quiet NaN)
// (algorithm from https://github.com/Maratyszcza/FP16)
static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
    // hex float literals need C99 or C++17; otherwise build the constants from bits
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    // scale up then down so that overflow saturates and tiny values round via denormals
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;                  // input with the sign bit shifted out
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);  // exponent bits of |f|, pre-shifted
    if (bias < UINT32_C(0x71000000)) {
        // clamp so that denormal halves round with the correct shift
        bias = UINT32_C(0x71000000);
    }

    // add a rounding term aligned with the target half precision
    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    // NaN inputs (exponent all ones, nonzero mantissa) map to the quiet NaN 0x7E00
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}
406
+
407
+ #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
408
+ #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
409
+
410
+ #define LM_GGML_FP16_TO_FP32(x) LM_GGML_COMPUTE_FP16_TO_FP32(x)
411
+ #define LM_GGML_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)
412
+
413
// convert an E8M0 exponent byte to float: value = 2^(x - 127)
// (the NaN encoding 0xFF is deliberately not handled)
static inline float lm_ggml_e8m0_to_fp32(uint8_t x) {
    uint32_t bits; // raw bit pattern of the resulting float

    if (x == 0) {
        // 2^(-127) is below the smallest normal float, so use the
        // denormal pattern 0.5 * 2^(-126): sign 0, exponent 0, mantissa 0x400000
        bits = 0x00400000;
    } else {
        // normalized case: place x in the exponent field with a zero mantissa
        // (implicit leading 1) -> value = 2^(x - 127)
        bits = (uint32_t) x << 23;
    }

    // reinterpret the bit pattern as a float without type-punning issues
    float result;
    memcpy(&result, &bits, sizeof(result));
    return result;
}
448
+
449
+ // Equal to lm_ggml_e8m0_to_fp32/2
450
+ // Useful with MXFP4 quantization since the E0M2 values are doubled
451
static inline float lm_ggml_e8m0_to_fp32_half(uint8_t x) {
    uint32_t bits; // raw bit pattern of the resulting float

    if (x < 2) {
        // denormal patterns: x = 0 -> 2^(-128) (0x00200000), x = 1 -> 2^(-127) (0x00400000)
        bits = 0x00200000 << x;
    } else {
        // normalized: 0.5 * 2^(x-127) = 2^(x-128), i.e. exponent field holds x-1
        bits = (uint32_t)(x - 1) << 23;
    }
    // NaN encodings are not handled here

    float value;
    memcpy(&value, &bits, sizeof(value));
    return value;
}
470
+
471
+ #define LM_GGML_E8M0_TO_FP32(x) lm_ggml_e8m0_to_fp32(x)
472
+ #define LM_GGML_E8M0_TO_FP32_HALF(x) lm_ggml_e8m0_to_fp32_half(x)
473
+
474
+ /**
475
+ * Converts brain16 to float32.
476
+ *
477
+ * The bfloat16 floating point format has the following structure:
478
+ *
479
+ * ┌sign
480
+ * │
481
+ * │ ┌exponent
482
+ * │ │
483
+ * │ │ ┌mantissa
484
+ * │ │ │
485
+ * │┌──┴───┐┌─┴───┐
486
+ * 0b0000000000000000 brain16
487
+ *
488
+ * Since bf16 has the same number of exponent bits as a 32bit float,
489
+ * encoding and decoding numbers becomes relatively straightforward.
490
+ *
491
+ * ┌sign
492
+ * │
493
+ * │ ┌exponent
494
+ * │ │
495
+ * │ │ ┌mantissa
496
+ * │ │ │
497
+ * │┌──┴───┐┌─┴───────────────────┐
498
+ * 0b00000000000000000000000000000000 IEEE binary32
499
+ *
500
+ * For comparison, the standard fp16 format has fewer exponent bits.
501
+ *
502
+ * ┌sign
503
+ * │
504
+ * │ ┌exponent
505
+ * │ │
506
+ * │ │ ┌mantissa
507
+ * │ │ │
508
+ * │┌─┴─┐┌─┴──────┐
509
+ * 0b0000000000000000 IEEE binary16
510
+ *
511
+ * @see IEEE 754-2008
512
+ */
513
+ static inline float lm_ggml_compute_bf16_to_fp32(lm_ggml_bf16_t h) {
514
+ union {
515
+ float f;
516
+ uint32_t i;
517
+ } u;
518
+ u.i = (uint32_t)h.bits << 16;
519
+ return u.f;
520
+ }
521
+
522
+ /**
523
+ * Converts float32 to brain16.
524
+ *
525
+ * This is binary identical with Google Brain float conversion.
526
+ * Floats shall round to nearest even, and NANs shall be quiet.
527
+ * Subnormals aren't flushed to zero, except perhaps when used.
528
+ * This code should vectorize nicely if using modern compilers.
529
+ */
530
static inline lm_ggml_bf16_t lm_ggml_compute_fp32_to_bf16(float s) {
    lm_ggml_bf16_t h;
    union {
        float f;
        uint32_t i;
    } u;
    u.f = s;
    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
        h.bits = (u.i >> 16) | 64; /* force to quiet */
        return h;
    }
    // round to nearest even: add 0x7fff plus the lsb of the truncated result
    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
    return h;
}
544
+
545
+ #define LM_GGML_FP32_TO_BF16(x) lm_ggml_compute_fp32_to_bf16(x)
546
+ #define LM_GGML_BF16_TO_FP32(x) lm_ggml_compute_bf16_to_fp32(x)
547
+
548
+ // return true if the node's results are only used by N other nodes
549
+ // and can be fused into their calculations.
550
static inline bool lm_ggml_node_has_n_uses(const struct lm_ggml_cgraph * cgraph, int node_idx, int32_t n_uses) {
    const struct lm_ggml_tensor * node = cgraph->nodes[node_idx];

    // check the use count against how many we're replacing
    // NOTE(review): assumes node is present in visited_hash_set (hash_pos !=
    // LM_GGML_HASHSET_FULL) - otherwise the bitset access below would be out
    // of bounds; confirm that graph construction always inserts all nodes
    size_t hash_pos = lm_ggml_hash_find(&cgraph->visited_hash_set, node);
    if (!lm_ggml_bitset_get(cgraph->visited_hash_set.used, hash_pos) || cgraph->use_counts[hash_pos] != n_uses) {
        return false;
    }

    // if node is a view, some other node might be using the intermediate result
    // via the view source.
    if (node->view_src) {
        return false;
    }

    // If the user requested output for the node, can't fuse
    if (node->flags & LM_GGML_TENSOR_FLAG_OUTPUT) {
        return false;
    }

    return true;
}
572
+
573
+ // Returns true if nodes [i, i+ops.size()) are the sequence of lm_ggml_ops in ops[]
574
+ // and are fusable. Nodes are considered fusable according to this function if:
575
+ // - all nodes except the last have only one use and are not views/outputs (see lm_ggml_node_has_N_uses).
576
+ // - all nodes except the last are a src of the following node.
577
+ // - all nodes are the same shape.
578
+ // TODO: Consider allowing LM_GGML_OP_NONE nodes in between
579
+ static inline bool lm_ggml_can_fuse(const struct lm_ggml_cgraph * cgraph, int node_idx, const enum lm_ggml_op * ops, int num_ops) {
580
+ if (node_idx + num_ops > cgraph->n_nodes) {
581
+ return false;
582
+ }
583
+
584
+ for (int i = 0; i < num_ops; ++i) {
585
+ struct lm_ggml_tensor * node = cgraph->nodes[node_idx + i];
586
+ if (node->op != ops[i]) {
587
+ return false;
588
+ }
589
+ if (i < num_ops - 1 && !lm_ggml_node_has_n_uses(cgraph, node_idx + i, 1)) {
590
+ return false;
591
+ }
592
+ if (i > 0) {
593
+ struct lm_ggml_tensor * prev = cgraph->nodes[node_idx + i - 1];
594
+ if (node->src[0] != prev && node->src[1] != prev) {
595
+ return false;
596
+ }
597
+ if (!lm_ggml_are_same_shape(node, prev)) {
598
+ return false;
599
+ }
600
+ }
601
+ }
602
+ return true;
603
+ }
604
+
605
+ #ifdef __cplusplus
606
+ }
607
+ #endif
608
+
609
+ #ifdef __cplusplus
610
+ #include <initializer_list>
611
+ #include <vector>
612
+
613
+ // nicer C++ syntax for lm_ggml_can_fuse
614
// delegates to the C overload above, passing the initializer list as a pointer + count
inline bool lm_ggml_can_fuse(const struct lm_ggml_cgraph * cgraph, int node_idx, std::initializer_list<enum lm_ggml_op> ops) {
    return lm_ggml_can_fuse(cgraph, node_idx, ops.begin(), (int)ops.size());
}
617
+
618
+ // expose GGUF internals for test code
619
+ LM_GGML_API size_t lm_gguf_type_size(enum lm_gguf_type type);
620
+ LM_GGML_API struct lm_gguf_context * lm_gguf_init_from_file_impl(FILE * file, struct lm_gguf_init_params params);
621
+ LM_GGML_API void lm_gguf_write_to_buf(const struct lm_gguf_context * ctx, std::vector<int8_t> & buf, bool only_meta);
622
+ #endif // __cplusplus