llama-cpp-python-win 0.3.16__cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. bin/convert_hf_to_gguf.py +8751 -0
  2. bin/ggml-base.dll +0 -0
  3. bin/ggml-cpu.dll +0 -0
  4. bin/ggml.dll +0 -0
  5. bin/llama-mtmd-cli.exe +0 -0
  6. bin/llama.dll +0 -0
  7. bin/mtmd.dll +0 -0
  8. include/ggml-alloc.h +76 -0
  9. include/ggml-backend.h +354 -0
  10. include/ggml-blas.h +25 -0
  11. include/ggml-cann.h +123 -0
  12. include/ggml-cpp.h +39 -0
  13. include/ggml-cpu.h +145 -0
  14. include/ggml-cuda.h +47 -0
  15. include/ggml-metal.h +66 -0
  16. include/ggml-opt.h +256 -0
  17. include/ggml-rpc.h +33 -0
  18. include/ggml-sycl.h +49 -0
  19. include/ggml-vulkan.h +29 -0
  20. include/ggml-webgpu.h +19 -0
  21. include/ggml.h +2467 -0
  22. include/gguf.h +202 -0
  23. include/llama-cpp.h +30 -0
  24. include/llama.h +1482 -0
  25. include/mtmd-helper.h +91 -0
  26. include/mtmd.h +298 -0
  27. lib/cmake/ggml/ggml-config.cmake +328 -0
  28. lib/cmake/ggml/ggml-version.cmake +65 -0
  29. lib/cmake/llama/llama-config.cmake +54 -0
  30. lib/cmake/llama/llama-version.cmake +65 -0
  31. lib/ggml-base.lib +0 -0
  32. lib/ggml-cpu.lib +0 -0
  33. lib/ggml.lib +0 -0
  34. lib/llama.lib +0 -0
  35. lib/mtmd.lib +0 -0
  36. lib/pkgconfig/llama.pc +10 -0
  37. llama_cpp/__init__.py +4 -0
  38. llama_cpp/_ctypes_extensions.py +131 -0
  39. llama_cpp/_ggml.py +12 -0
  40. llama_cpp/_internals.py +856 -0
  41. llama_cpp/_logger.py +47 -0
  42. llama_cpp/_utils.py +78 -0
  43. llama_cpp/lib/ggml-base.dll +0 -0
  44. llama_cpp/lib/ggml-base.lib +0 -0
  45. llama_cpp/lib/ggml-cpu.dll +0 -0
  46. llama_cpp/lib/ggml-cpu.lib +0 -0
  47. llama_cpp/lib/ggml.dll +0 -0
  48. llama_cpp/lib/ggml.lib +0 -0
  49. llama_cpp/lib/llama.dll +0 -0
  50. llama_cpp/lib/llama.lib +0 -0
  51. llama_cpp/lib/mtmd.dll +0 -0
  52. llama_cpp/lib/mtmd.lib +0 -0
  53. llama_cpp/llama.py +2422 -0
  54. llama_cpp/llama_cache.py +155 -0
  55. llama_cpp/llama_chat_format.py +3962 -0
  56. llama_cpp/llama_cpp.py +4374 -0
  57. llama_cpp/llama_grammar.py +953 -0
  58. llama_cpp/llama_speculative.py +64 -0
  59. llama_cpp/llama_tokenizer.py +120 -0
  60. llama_cpp/llama_types.py +316 -0
  61. llama_cpp/llava_cpp.py +158 -0
  62. llama_cpp/mtmd_cpp.py +280 -0
  63. llama_cpp/py.typed +0 -0
  64. llama_cpp/server/__init__.py +0 -0
  65. llama_cpp/server/__main__.py +100 -0
  66. llama_cpp/server/app.py +597 -0
  67. llama_cpp/server/cli.py +97 -0
  68. llama_cpp/server/errors.py +212 -0
  69. llama_cpp/server/model.py +312 -0
  70. llama_cpp/server/settings.py +240 -0
  71. llama_cpp/server/types.py +316 -0
  72. llama_cpp_python_win-0.3.16.dist-info/METADATA +856 -0
  73. llama_cpp_python_win-0.3.16.dist-info/RECORD +75 -0
  74. llama_cpp_python_win-0.3.16.dist-info/WHEEL +5 -0
  75. llama_cpp_python_win-0.3.16.dist-info/licenses/LICENSE.md +9 -0
include/ggml.h ADDED
@@ -0,0 +1,2467 @@
+ #pragma once
+
+ //
+ // GGML Tensor Library
+ //
+ // This documentation is still a work in progress.
+ // If you would like specific topics to be covered, feel free to drop a comment:
+ //
+ //   https://github.com/ggerganov/whisper.cpp/issues/40
+ //
+ // ## Overview
+ //
+ // This library implements:
+ //
+ // - a set of tensor operations
+ // - automatic differentiation
+ // - basic optimization algorithms
+ //
+ // The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
+ // but is not limited to, the following:
+ //
+ // - linear regression
+ // - support vector machines
+ // - neural networks
+ //
+ // The library allows the user to define a certain function using the available tensor operations. This function
+ // definition is represented internally via a computation graph. Each tensor operation in the function definition
+ // corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
+ // function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
+ // using one of the available optimization algorithms.
+ //
+ // For example, here we define the function: f(x) = a*x^2 + b
+ //
+ //   {
+ //       struct ggml_init_params params = {
+ //           .mem_size   = 16*1024*1024,
+ //           .mem_buffer = NULL,
+ //       };
+ //
+ //       // memory allocation happens here
+ //       struct ggml_context * ctx = ggml_init(params);
+ //
+ //       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+ //
+ //       ggml_set_param(x); // x is an input variable
+ //
+ //       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+ //       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+ //       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
+ //       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
+ //
+ //       ...
+ //   }
+ //
+ // Notice that the function definition above does not involve any actual computation. The computation is performed only
+ // when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
+ //
+ //   {
+ //       ...
+ //
+ //       struct ggml_cgraph * gf = ggml_new_graph(ctx);
+ //       ggml_build_forward_expand(gf, f);
+ //
+ //       // set the input variable and parameter values
+ //       ggml_set_f32(x, 2.0f);
+ //       ggml_set_f32(a, 3.0f);
+ //       ggml_set_f32(b, 4.0f);
+ //
+ //       ggml_graph_compute_with_ctx(ctx, gf, n_threads);
+ //
+ //       printf("f = %f\n", ggml_get_f32_1d(f, 0));
+ //
+ //       ...
+ //   }
+ //
+ // The actual computation is performed in the ggml_graph_compute() function.
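Putting the two fragments above together gives a complete round trip. The following is a minimal sketch, not part of the header itself: it assumes the helpers referenced in the comments above (ggml_new_graph, ggml_build_forward_expand, ggml_set_f32, ggml_get_f32_1d, ggml_graph_compute_with_ctx) and the single-argument ggml_set_param() declared later in this header.

    #include <stdio.h>
    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /* .mem_size   = */ 16*1024*1024,
            /* .mem_buffer = */ NULL,       // let ggml allocate the pool
            /* .no_alloc   = */ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
        ggml_set_param(x); // mark x as an input variable

        struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
        struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
        struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
        struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);

        // build the forward graph, set the values, then compute
        struct ggml_cgraph * gf = ggml_new_graph(ctx);
        ggml_build_forward_expand(gf, f);

        ggml_set_f32(x, 2.0f);
        ggml_set_f32(a, 3.0f);
        ggml_set_f32(b, 4.0f);

        ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/1);

        printf("f = %f\n", ggml_get_f32_1d(f, 0)); // 3*2^2 + 4 = 16
        ggml_free(ctx);
        return 0;
    }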
+ //
+ // The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
+ // ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
+ // in advance how much memory you need for your computation. Alternatively, you can allocate a large enough buffer
+ // and, after defining the computation graph, call the ggml_used_mem() function to find out how much memory was
+ // actually needed.
+ //
+ // The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
+ // differentiation and optimization algorithms.
+ //
+ // The described approach allows one to define the function graph once and then compute its forward or backward graphs
+ // multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way
+ // the user can avoid the memory allocation overhead at runtime.
+ //
+ // The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
+ // citizens, but in theory the library can be extended to support FP8 and integer data types.
+ //
+ // Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
+ // and binary operations. Most of the available operations fall into one of these two categories. With time, it became
+ // clear that the library needs to support more complex operations. The way to support these operations is not clear
+ // yet, but a few examples are demonstrated in the following operations:
+ //
+ // - ggml_permute()
+ // - ggml_conv_1d_1s()
+ // - ggml_conv_1d_2s()
+ //
+ // For each tensor operator, the library implements a forward and backward computation function. The forward function
+ // computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
+ // input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
+ // calculus class, or watch the following video:
+ //
+ //   What is Automatic Differentiation?
+ //   https://www.youtube.com/watch?v=wG_nF1awSSY
+ //
+ //
+ // ## Tensor data (struct ggml_tensor)
+ //
+ // The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
+ // the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
+ // pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
+ //
+ //   {
+ //       struct ggml_tensor * c = ggml_add(ctx, a, b);
+ //
+ //       assert(c->src[0] == a);
+ //       assert(c->src[1] == b);
+ //   }
+ //
+ // The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
+ // number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This makes it
+ // possible to store tensors that are not contiguous in memory, which is useful for operations such as transposition
+ // and permutation. All tensor operations have to take the stride into account and not assume that the tensor is
+ // contiguous in memory.
+ //
+ // The data of the tensor is accessed via the "data" pointer. For example:
+ //
+ //   {
+ //       const int nx = 2;
+ //       const int ny = 3;
+ //
+ //       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, ny);
+ //
+ //       for (int y = 0; y < ny; y++) {
+ //           for (int x = 0; x < nx; x++) {
+ //               *(float *) ((char *) a->data + y*a->nb[1] + x*a->nb[0]) = x + y;
+ //           }
+ //       }
+ //
+ //       ...
+ //   }
+ //
+ // Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d(), that can be used.
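As a counterpart to the write loop above, a stride-aware read might look like this (a sketch continuing the same example; the nb[] strides, not ne[], drive the addressing, so it also works on non-contiguous views):

    for (int y = 0; y < ny; y++) {
        for (int x = 0; x < nx; x++) {
            // nb[0] is the element stride, nb[1] the row stride, both in bytes
            const float v = *(float *) ((char *) a->data + y*a->nb[1] + x*a->nb[0]);
            printf("a[%d][%d] = %f\n", y, x, v);
        }
    }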
+ //
+ // ## The matrix multiplication operator (ggml_mul_mat)
+ //
+ // TODO
+ //
+ //
+ // ## Multi-threading
+ //
+ // TODO
+ //
+ //
+ // ## Overview of ggml.c
+ //
+ // TODO
+ //
+ //
+ // ## SIMD optimizations
+ //
+ // TODO
+ //
+ //
+ // ## Debugging ggml
+ //
+ // TODO
+ //
+ //
+
+ #ifdef GGML_SHARED
+ #    if defined(_WIN32) && !defined(__MINGW32__)
+ #        ifdef GGML_BUILD
+ #            define GGML_API __declspec(dllexport) extern
+ #        else
+ #            define GGML_API __declspec(dllimport) extern
+ #        endif
+ #    else
+ #        define GGML_API __attribute__ ((visibility ("default"))) extern
+ #    endif
+ #else
+ #    define GGML_API extern
+ #endif
+
+ // TODO: support for clang
+ #ifdef __GNUC__
+ #    define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
+ #elif defined(_MSC_VER)
+ #    define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
+ #else
+ #    define GGML_DEPRECATED(func, hint) func
+ #endif
+
+ #ifndef __GNUC__
+ #    define GGML_ATTRIBUTE_FORMAT(...)
+ #elif defined(__MINGW32__) && !defined(__clang__)
+ #    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+ #else
+ #    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
+ #endif
+
+ #include <stdbool.h>
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+
+ #define GGML_FILE_MAGIC   0x67676d6c // "ggml"
+ #define GGML_FILE_VERSION 2
+
+ #define GGML_QNT_VERSION        2    // bump this on quantization format changes
+ #define GGML_QNT_VERSION_FACTOR 1000 // do not change this
+
+ #define GGML_MAX_DIMS      4
+ #define GGML_MAX_PARAMS    2048
+ #define GGML_MAX_SRC       10
+ #define GGML_MAX_N_THREADS 512
+ #define GGML_MAX_OP_PARAMS 64
+
+ #ifndef GGML_MAX_NAME
+ #    define GGML_MAX_NAME 64
+ #endif
+
+ #define GGML_DEFAULT_N_THREADS  4
+ #define GGML_DEFAULT_GRAPH_SIZE 2048
+
+ #if UINTPTR_MAX == 0xFFFFFFFF
+     #define GGML_MEM_ALIGN 4
+ #else
+     #define GGML_MEM_ALIGN 16
+ #endif
+
+ #define GGML_EXIT_SUCCESS 0
+ #define GGML_EXIT_ABORTED 1
+
+ #define GGML_ROPE_TYPE_NEOX   2
+ #define GGML_ROPE_TYPE_MROPE  8
+ #define GGML_ROPE_TYPE_VISION 24
+
+ #define GGML_MROPE_SECTIONS 4
+
+ #define GGML_UNUSED(x) (void)(x)
+
+ #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
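GGML_PAD rounds x up to the next multiple of n; the bit trick requires n to be a power of two. Two worked cases:

    // GGML_PAD(13, 16) == (13 + 15) & ~15 == 16
    // GGML_PAD(32, 16) == (32 + 15) & ~15 == 32   (already a multiple, unchanged)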
+
+ #ifndef NDEBUG
+ #    define GGML_UNREACHABLE() do { fprintf(stderr, "statement should be unreachable\n"); abort(); } while(0)
+ #elif defined(__GNUC__)
+ #    define GGML_UNREACHABLE() __builtin_unreachable()
+ #elif defined(_MSC_VER)
+ #    define GGML_UNREACHABLE() __assume(0)
+ #else
+ #    define GGML_UNREACHABLE() ((void) 0)
+ #endif
+
+ #ifdef __cplusplus
+ #    define GGML_NORETURN [[noreturn]]
+ #elif defined(_MSC_VER)
+ #    define GGML_NORETURN __declspec(noreturn)
+ #else
+ #    define GGML_NORETURN _Noreturn
+ #endif
+
+ #define GGML_ABORT(...) ggml_abort(__FILE__, __LINE__, __VA_ARGS__)
+ #define GGML_ASSERT(x) if (!(x)) GGML_ABORT("GGML_ASSERT(%s) failed", #x)
+
+ // used to copy the number of elements and stride in bytes of tensors into local variables.
+ // main purpose is to reduce code duplication and improve readability.
+ //
+ // example:
+ //
+ //   GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
+ //   GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
+ //
+ #define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
+     const type prefix##0 = (pointer)->array[0]; \
+     GGML_UNUSED(prefix##0);
+ #define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
+     GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
+     const type prefix##1 = (pointer)->array[1]; \
+     GGML_UNUSED(prefix##1);
+ #define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
+     GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
+     const type prefix##2 = (pointer)->array[2]; \
+     GGML_UNUSED(prefix##2);
+ #define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
+     GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
+     const type prefix##3 = (pointer)->array[3]; \
+     GGML_UNUSED(prefix##3);
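For reference, the example invocation from the comment above, GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne), expands (modulo whitespace) to:

    const int64_t ne10 = (src1)->ne[0]; (void)(ne10);
    const int64_t ne11 = (src1)->ne[1]; (void)(ne11);
    const int64_t ne12 = (src1)->ne[2]; (void)(ne12);
    const int64_t ne13 = (src1)->ne[3]; (void)(ne13);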
+
+ #define GGML_TENSOR_UNARY_OP_LOCALS \
+     GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
+     GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)
+
+ #define GGML_TENSOR_BINARY_OP_LOCALS \
+     GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
+     GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb) \
+     GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)
+
+ #define GGML_TENSOR_TERNARY_OP_LOCALS \
+     GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
+     GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb) \
+     GGML_TENSOR_LOCALS(int64_t, ne2, src2, ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb2, src2, nb) \
+     GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)
+
+ #define GGML_TENSOR_BINARY_OP_LOCALS01 \
+     GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
+     GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
+     GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb)
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+     // Function type used in fatal error callbacks
+     typedef void (*ggml_abort_callback_t)(const char * error_message);
+
+     // Set the abort callback (passing null will restore original abort functionality: printing a message to stdout)
+     // Returns the old callback for chaining
+     GGML_API ggml_abort_callback_t ggml_set_abort_callback(ggml_abort_callback_t callback);
+
+     GGML_NORETURN GGML_ATTRIBUTE_FORMAT(3, 4)
+     GGML_API void ggml_abort(const char * file, int line, const char * fmt, ...);
+
+     enum ggml_status {
+         GGML_STATUS_ALLOC_FAILED = -2,
+         GGML_STATUS_FAILED       = -1,
+         GGML_STATUS_SUCCESS      =  0,
+         GGML_STATUS_ABORTED      =  1,
+     };
+
+     // get ggml_status name string
+     GGML_API const char * ggml_status_to_string(enum ggml_status status);
+
+     // ieee 754-2008 half-precision float16
+     // todo: make this not an integral type
+     typedef uint16_t ggml_fp16_t;
+     GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t);
+     GGML_API ggml_fp16_t ggml_fp32_to_fp16(float);
+     GGML_API void        ggml_fp16_to_fp32_row(const ggml_fp16_t *, float *, int64_t);
+     GGML_API void        ggml_fp32_to_fp16_row(const float *, ggml_fp16_t *, int64_t);
+
+     // google brain half-precision bfloat16
+     typedef struct { uint16_t bits; } ggml_bf16_t;
+     GGML_API ggml_bf16_t ggml_fp32_to_bf16(float);
+     GGML_API float       ggml_bf16_to_fp32(ggml_bf16_t); // consider just doing << 16
+     GGML_API void        ggml_bf16_to_fp32_row(const ggml_bf16_t *, float *, int64_t);
+     GGML_API void        ggml_fp32_to_bf16_row_ref(const float *, ggml_bf16_t *, int64_t);
+     GGML_API void        ggml_fp32_to_bf16_row(const float *, ggml_bf16_t *, int64_t);
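A small usage sketch of the scalar conversions declared above (1.5f is exactly representable in both half formats, so these round trips are lossless):

    float       src  = 1.5f;
    ggml_fp16_t h    = ggml_fp32_to_fp16(src);
    float       back = ggml_fp16_to_fp32(h);  // back == 1.5f

    ggml_bf16_t bh = ggml_fp32_to_bf16(src);  // bf16 keeps fp32's exponent range
    float       bb = ggml_bf16_to_fp32(bh);   // but only 7 explicit mantissa bits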
+
+     struct ggml_object;
+     struct ggml_context;
+     struct ggml_cgraph;
+
+     // NOTE: always add types at the end of the enum to keep backward compatibility
+     enum ggml_type {
+         GGML_TYPE_F32     = 0,
+         GGML_TYPE_F16     = 1,
+         GGML_TYPE_Q4_0    = 2,
+         GGML_TYPE_Q4_1    = 3,
+         // GGML_TYPE_Q4_2 = 4, support has been removed
+         // GGML_TYPE_Q4_3 = 5, support has been removed
+         GGML_TYPE_Q5_0    = 6,
+         GGML_TYPE_Q5_1    = 7,
+         GGML_TYPE_Q8_0    = 8,
+         GGML_TYPE_Q8_1    = 9,
+         GGML_TYPE_Q2_K    = 10,
+         GGML_TYPE_Q3_K    = 11,
+         GGML_TYPE_Q4_K    = 12,
+         GGML_TYPE_Q5_K    = 13,
+         GGML_TYPE_Q6_K    = 14,
+         GGML_TYPE_Q8_K    = 15,
+         GGML_TYPE_IQ2_XXS = 16,
+         GGML_TYPE_IQ2_XS  = 17,
+         GGML_TYPE_IQ3_XXS = 18,
+         GGML_TYPE_IQ1_S   = 19,
+         GGML_TYPE_IQ4_NL  = 20,
+         GGML_TYPE_IQ3_S   = 21,
+         GGML_TYPE_IQ2_S   = 22,
+         GGML_TYPE_IQ4_XS  = 23,
+         GGML_TYPE_I8      = 24,
+         GGML_TYPE_I16     = 25,
+         GGML_TYPE_I32     = 26,
+         GGML_TYPE_I64     = 27,
+         GGML_TYPE_F64     = 28,
+         GGML_TYPE_IQ1_M   = 29,
+         GGML_TYPE_BF16    = 30,
+         // GGML_TYPE_Q4_0_4_4 = 31, support has been removed from gguf files
+         // GGML_TYPE_Q4_0_4_8 = 32,
+         // GGML_TYPE_Q4_0_8_8 = 33,
+         GGML_TYPE_TQ1_0   = 34,
+         GGML_TYPE_TQ2_0   = 35,
+         // GGML_TYPE_IQ4_NL_4_4 = 36,
+         // GGML_TYPE_IQ4_NL_4_8 = 37,
+         // GGML_TYPE_IQ4_NL_8_8 = 38,
+         GGML_TYPE_MXFP4   = 39, // MXFP4 (1 block)
+         GGML_TYPE_COUNT   = 40,
+     };
+
+     // precision
+     enum ggml_prec {
+         GGML_PREC_DEFAULT =  0, // stored as ggml_tensor.op_params, 0 by default
+         GGML_PREC_F32     = 10,
+     };
+
+     // model file types
+     enum ggml_ftype {
+         GGML_FTYPE_UNKNOWN              = -1,
+         GGML_FTYPE_ALL_F32              = 0,
+         GGML_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
+         GGML_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q3_K          = 11, // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q4_K          = 12, // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q5_K          = 13, // except 1d tensors
+         GGML_FTYPE_MOSTLY_Q6_K          = 14, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ2_XXS       = 15, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ2_XS        = 16, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ3_XXS       = 17, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ1_S         = 18, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ4_NL        = 19, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ3_S         = 20, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ2_S         = 21, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ4_XS        = 22, // except 1d tensors
+         GGML_FTYPE_MOSTLY_IQ1_M         = 23, // except 1d tensors
+         GGML_FTYPE_MOSTLY_BF16          = 24, // except 1d tensors
+         GGML_FTYPE_MOSTLY_MXFP4         = 25, // except 1d tensors
+     };
+
+     // available tensor operations:
+     enum ggml_op {
+         GGML_OP_NONE = 0,
+
+         GGML_OP_DUP,
+         GGML_OP_ADD,
+         GGML_OP_ADD_ID,
+         GGML_OP_ADD1,
+         GGML_OP_ACC,
+         GGML_OP_SUB,
+         GGML_OP_MUL,
+         GGML_OP_DIV,
+         GGML_OP_SQR,
+         GGML_OP_SQRT,
+         GGML_OP_LOG,
+         GGML_OP_SIN,
+         GGML_OP_COS,
+         GGML_OP_SUM,
+         GGML_OP_SUM_ROWS,
+         GGML_OP_MEAN,
+         GGML_OP_ARGMAX,
+         GGML_OP_COUNT_EQUAL,
+         GGML_OP_REPEAT,
+         GGML_OP_REPEAT_BACK,
+         GGML_OP_CONCAT,
+         GGML_OP_SILU_BACK,
+         GGML_OP_NORM, // normalize
+         GGML_OP_RMS_NORM,
+         GGML_OP_RMS_NORM_BACK,
+         GGML_OP_GROUP_NORM,
+         GGML_OP_L2_NORM,
+
+         GGML_OP_MUL_MAT,
+         GGML_OP_MUL_MAT_ID,
+         GGML_OP_OUT_PROD,
+
+         GGML_OP_SCALE,
+         GGML_OP_SET,
+         GGML_OP_CPY,
+         GGML_OP_CONT,
+         GGML_OP_RESHAPE,
+         GGML_OP_VIEW,
+         GGML_OP_PERMUTE,
+         GGML_OP_TRANSPOSE,
+         GGML_OP_GET_ROWS,
+         GGML_OP_GET_ROWS_BACK,
+         GGML_OP_SET_ROWS,
+         GGML_OP_DIAG,
+         GGML_OP_DIAG_MASK_INF,
+         GGML_OP_DIAG_MASK_ZERO,
+         GGML_OP_SOFT_MAX,
+         GGML_OP_SOFT_MAX_BACK,
+         GGML_OP_ROPE,
+         GGML_OP_ROPE_BACK,
+         GGML_OP_CLAMP,
+         GGML_OP_CONV_TRANSPOSE_1D,
+         GGML_OP_IM2COL,
+         GGML_OP_IM2COL_BACK,
+         GGML_OP_CONV_2D,
+         GGML_OP_CONV_2D_DW,
+         GGML_OP_CONV_TRANSPOSE_2D,
+         GGML_OP_POOL_1D,
+         GGML_OP_POOL_2D,
+         GGML_OP_POOL_2D_BACK,
+         GGML_OP_UPSCALE,
+         GGML_OP_PAD,
+         GGML_OP_PAD_REFLECT_1D,
+         GGML_OP_ROLL,
+         GGML_OP_ARANGE,
+         GGML_OP_TIMESTEP_EMBEDDING,
+         GGML_OP_ARGSORT,
+         GGML_OP_LEAKY_RELU,
+
+         GGML_OP_FLASH_ATTN_EXT,
+         GGML_OP_FLASH_ATTN_BACK,
+         GGML_OP_SSM_CONV,
+         GGML_OP_SSM_SCAN,
+         GGML_OP_WIN_PART,
+         GGML_OP_WIN_UNPART,
+         GGML_OP_GET_REL_POS,
+         GGML_OP_ADD_REL_POS,
+         GGML_OP_RWKV_WKV6,
+         GGML_OP_GATED_LINEAR_ATTN,
+         GGML_OP_RWKV_WKV7,
+
+         GGML_OP_UNARY,
+
+         GGML_OP_MAP_CUSTOM1,
+         GGML_OP_MAP_CUSTOM2,
+         GGML_OP_MAP_CUSTOM3,
+
+         GGML_OP_CUSTOM,
+
+         GGML_OP_CROSS_ENTROPY_LOSS,
+         GGML_OP_CROSS_ENTROPY_LOSS_BACK,
+         GGML_OP_OPT_STEP_ADAMW,
+         GGML_OP_OPT_STEP_SGD,
+
+         GGML_OP_GLU,
+
+         GGML_OP_COUNT,
+     };
+
+     enum ggml_unary_op {
+         GGML_UNARY_OP_ABS,
+         GGML_UNARY_OP_SGN,
+         GGML_UNARY_OP_NEG,
+         GGML_UNARY_OP_STEP,
+         GGML_UNARY_OP_TANH,
+         GGML_UNARY_OP_ELU,
+         GGML_UNARY_OP_RELU,
+         GGML_UNARY_OP_SIGMOID,
+         GGML_UNARY_OP_GELU,
+         GGML_UNARY_OP_GELU_QUICK,
+         GGML_UNARY_OP_SILU,
+         GGML_UNARY_OP_HARDSWISH,
+         GGML_UNARY_OP_HARDSIGMOID,
+         GGML_UNARY_OP_EXP,
+         GGML_UNARY_OP_GELU_ERF,
+
+         GGML_UNARY_OP_COUNT,
+     };
+
+     enum ggml_glu_op {
+         GGML_GLU_OP_REGLU,
+         GGML_GLU_OP_GEGLU,
+         GGML_GLU_OP_SWIGLU,
+         GGML_GLU_OP_SWIGLU_OAI,
+         GGML_GLU_OP_GEGLU_ERF,
+         GGML_GLU_OP_GEGLU_QUICK,
+
+         GGML_GLU_OP_COUNT,
+     };
+
+     enum ggml_object_type {
+         GGML_OBJECT_TYPE_TENSOR,
+         GGML_OBJECT_TYPE_GRAPH,
+         GGML_OBJECT_TYPE_WORK_BUFFER
+     };
+
+     enum ggml_log_level {
+         GGML_LOG_LEVEL_NONE  = 0,
+         GGML_LOG_LEVEL_DEBUG = 1,
+         GGML_LOG_LEVEL_INFO  = 2,
+         GGML_LOG_LEVEL_WARN  = 3,
+         GGML_LOG_LEVEL_ERROR = 4,
+         GGML_LOG_LEVEL_CONT  = 5, // continue previous log
+     };
+
+     // this tensor...
+     enum ggml_tensor_flag {
+         GGML_TENSOR_FLAG_INPUT  = 1, // ...is an input for the GGML compute graph
+         GGML_TENSOR_FLAG_OUTPUT = 2, // ...is an output for the GGML compute graph
+         GGML_TENSOR_FLAG_PARAM  = 4, // ...contains trainable parameters
+         GGML_TENSOR_FLAG_LOSS   = 8, // ...defines loss for numerical optimization (multiple loss tensors add up)
+     };
+
+     struct ggml_init_params {
+         // memory pool
+         size_t mem_size;   // bytes
+         void * mem_buffer; // if NULL, memory will be allocated internally
+         bool   no_alloc;   // don't allocate memory for the tensor data
+     };
+
+     // n-dimensional tensor
+     struct ggml_tensor {
+         enum ggml_type type;
+
+         struct ggml_backend_buffer * buffer;
+
+         int64_t ne[GGML_MAX_DIMS]; // number of elements
+         size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
+                                    // nb[0] = ggml_type_size(type)
+                                    // nb[1] = nb[0] * (ne[0] / ggml_blck_size(type)) + padding
+                                    // nb[i] = nb[i-1] * ne[i-1]
+
+         // compute data
+         enum ggml_op op;
+
+         // op params - allocated as int32_t for alignment
+         int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];
+
+         int32_t flags;
+
+         struct ggml_tensor * src[GGML_MAX_SRC];
+
+         // source tensor and offset for views
+         struct ggml_tensor * view_src;
+         size_t               view_offs;
+
+         void * data;
+
+         char name[GGML_MAX_NAME];
+
+         void * extra; // extra things e.g. for ggml-cuda.cu
+
+         char padding[8];
+     };
+
+     static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
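A worked example of the stride formula from the nb[] comment above, for a contiguous GGML_TYPE_F32 tensor (block size 1, no padding) with ne = {4, 3, 2, 1}:

    // nb[0] = ggml_type_size(GGML_TYPE_F32) =  4 bytes (one element)
    // nb[1] = nb[0] * (ne[0] / 1)           = 16 bytes (one row)
    // nb[2] = nb[1] * ne[1]                 = 48 bytes (one matrix)
    // nb[3] = nb[2] * ne[2]                 = 96 bytes (one 3d slice)
    // element (i0,i1,i2,i3) sits at:
    //   (char *) data + i0*nb[0] + i1*nb[1] + i2*nb[2] + i3*nb[3]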
+
+     // Abort callback
+     // If not NULL, called before ggml computation
+     // If it returns true, the computation is aborted
+     typedef bool (*ggml_abort_callback)(void * data);
+
+     //
+     // GUID
+     //
+
+     // GUID types
+     typedef uint8_t ggml_guid[16];
+     typedef ggml_guid * ggml_guid_t;
+
+     GGML_API bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b);
+
+     // misc
+
+     GGML_API const char * ggml_version(void);
+     GGML_API const char * ggml_commit(void);
+
+     GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
+     GGML_API int64_t ggml_time_ms(void);
+     GGML_API int64_t ggml_time_us(void);
+     GGML_API int64_t ggml_cycles(void);
+     GGML_API int64_t ggml_cycles_per_ms(void);
+
+     // accepts a UTF-8 path, even on Windows
+     GGML_API FILE * ggml_fopen(const char * fname, const char * mode);
+
+     GGML_API void ggml_print_object (const struct ggml_object * obj);
+     GGML_API void ggml_print_objects(const struct ggml_context * ctx);
+
+     GGML_API int64_t ggml_nelements (const struct ggml_tensor * tensor);
+     GGML_API int64_t ggml_nrows     (const struct ggml_tensor * tensor);
+     GGML_API size_t  ggml_nbytes    (const struct ggml_tensor * tensor);
+     GGML_API size_t  ggml_nbytes_pad(const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN
+
+     GGML_API int64_t ggml_blck_size(enum ggml_type type);
+     GGML_API size_t  ggml_type_size(enum ggml_type type);             // size in bytes for all elements in a block
+     GGML_API size_t  ggml_row_size (enum ggml_type type, int64_t ne); // size in bytes for all elements in a row
+
+     GGML_DEPRECATED(
+     GGML_API double ggml_type_sizef(enum ggml_type type), // ggml_type_size()/ggml_blck_size() as float
+     "use ggml_row_size() instead");
+
+     GGML_API const char * ggml_type_name(enum ggml_type type);
+     GGML_API const char * ggml_op_name  (enum ggml_op   op);
+     GGML_API const char * ggml_op_symbol(enum ggml_op   op);
+
+     GGML_API const char * ggml_unary_op_name(enum ggml_unary_op op);
+     GGML_API const char * ggml_glu_op_name(enum ggml_glu_op op);
+     GGML_API const char * ggml_op_desc(const struct ggml_tensor * t); // unary or op name
+
+     GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor);
+
+     GGML_API bool ggml_is_quantized(enum ggml_type type);
+
+     // TODO: temporary until model loading of ggml examples is refactored
+     GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);
+
+     GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor);
+     GGML_API bool ggml_is_permuted  (const struct ggml_tensor * tensor);
+     GGML_API bool ggml_is_empty     (const struct ggml_tensor * tensor);
+     GGML_API bool ggml_is_scalar    (const struct ggml_tensor * tensor);
+     GGML_API bool ggml_is_vector    (const struct ggml_tensor * tensor);
+     GGML_API bool ggml_is_matrix    (const struct ggml_tensor * tensor);
+     GGML_API bool ggml_is_3d        (const struct ggml_tensor * tensor);
+     GGML_API int  ggml_n_dims       (const struct ggml_tensor * tensor); // returns 1 for scalars
+
+     // returns whether the tensor elements can be iterated over with a flattened index (no gaps, no permutation)
+     GGML_API bool ggml_is_contiguous  (const struct ggml_tensor * tensor);
+     GGML_API bool ggml_is_contiguous_0(const struct ggml_tensor * tensor); // same as ggml_is_contiguous()
+     GGML_API bool ggml_is_contiguous_1(const struct ggml_tensor * tensor); // contiguous for dims >= 1
+     GGML_API bool ggml_is_contiguous_2(const struct ggml_tensor * tensor); // contiguous for dims >= 2
+
+     // returns whether the tensor elements are allocated as one contiguous block of memory (no gaps, but permutation ok)
+     GGML_API bool ggml_is_contiguously_allocated(const struct ggml_tensor * tensor);
+
+     // true for tensor that is stored in memory as CxWxHxN and has been permuted to WxHxCxN
+     GGML_API bool ggml_is_contiguous_channels(const struct ggml_tensor * tensor);
+
+     // true if the elements in dimension 0 are contiguous, or there is just 1 block of elements
+     GGML_API bool ggml_is_contiguous_rows(const struct ggml_tensor * tensor);
+
+     GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1);
+     GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1);
+
+     GGML_API bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1);
+
+     // use this to compute the memory overhead of a tensor
+     GGML_API size_t ggml_tensor_overhead(void);
+
+     GGML_API bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes);
+
+     // main
+
+     GGML_API struct ggml_context * ggml_init (struct ggml_init_params params);
+     GGML_API void                  ggml_reset(struct ggml_context * ctx);
+     GGML_API void                  ggml_free (struct ggml_context * ctx);
+
+     GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);
+
+     GGML_API bool ggml_get_no_alloc(struct ggml_context * ctx);
+     GGML_API void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);
+
+     GGML_API void * ggml_get_mem_buffer     (const struct ggml_context * ctx);
+     GGML_API size_t ggml_get_mem_size       (const struct ggml_context * ctx);
+     GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx);
+
+     GGML_API struct ggml_tensor * ggml_new_tensor(
+             struct ggml_context * ctx,
+             enum   ggml_type type,
+             int    n_dims,
+             const int64_t * ne);
+
+     GGML_API struct ggml_tensor * ggml_new_tensor_1d(
+             struct ggml_context * ctx,
+             enum   ggml_type type,
+             int64_t ne0);
+
+     GGML_API struct ggml_tensor * ggml_new_tensor_2d(
+             struct ggml_context * ctx,
+             enum   ggml_type type,
+             int64_t ne0,
+             int64_t ne1);
+
+     GGML_API struct ggml_tensor * ggml_new_tensor_3d(
+             struct ggml_context * ctx,
+             enum   ggml_type type,
+             int64_t ne0,
+             int64_t ne1,
+             int64_t ne2);
+
+     GGML_API struct ggml_tensor * ggml_new_tensor_4d(
+             struct ggml_context * ctx,
+             enum   ggml_type type,
+             int64_t ne0,
+             int64_t ne1,
+             int64_t ne2,
+             int64_t ne3);
+
+     GGML_API void * ggml_new_buffer(struct ggml_context * ctx, size_t nbytes);
+
+     GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
+     GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src);
+
+     // Context tensor enumeration and lookup
+     GGML_API struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx);
+     GGML_API struct ggml_tensor * ggml_get_next_tensor (const struct ggml_context * ctx, struct ggml_tensor * tensor);
+     GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);
+
+     // Converts a flat index into coordinates
+     GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);
+
+     GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor);
+     GGML_API enum ggml_glu_op   ggml_get_glu_op(const struct ggml_tensor * tensor);
+
+     GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
+     GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);
+
+     GGML_API const char *         ggml_get_name   (const struct ggml_tensor * tensor);
+     GGML_API struct ggml_tensor * ggml_set_name   (      struct ggml_tensor * tensor, const char * name);
+     GGML_ATTRIBUTE_FORMAT(2, 3)
+     GGML_API struct ggml_tensor * ggml_format_name(      struct ggml_tensor * tensor, const char * fmt, ...);
+
+     // Tensor flags
+     GGML_API void ggml_set_input(struct ggml_tensor * tensor);
+     GGML_API void ggml_set_output(struct ggml_tensor * tensor);
+     GGML_API void ggml_set_param(struct ggml_tensor * tensor);
+     GGML_API void ggml_set_loss(struct ggml_tensor * tensor);
+
+     //
+     // operations on tensors with backpropagation
+     //
+
+     GGML_API struct ggml_tensor * ggml_dup(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // in-place, returns view(a)
+     GGML_API struct ggml_tensor * ggml_dup_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_add(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_add_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_add_cast(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             enum   ggml_type      type);
+
+     // dst[i0, i1, i2] = a[i0, i1, i2] + b[i0, ids[i1, i2]]
+     GGML_API struct ggml_tensor * ggml_add_id(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             struct ggml_tensor  * ids);
+
+     GGML_API struct ggml_tensor * ggml_add1(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_add1_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     // dst = a
+     // view(dst, nb1, nb2, nb3, offset) += b
+     // return dst
+     GGML_API struct ggml_tensor * ggml_acc(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             size_t                nb1,
+             size_t                nb2,
+             size_t                nb3,
+             size_t                offset);
+
+     GGML_API struct ggml_tensor * ggml_acc_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             size_t                nb1,
+             size_t                nb2,
+             size_t                nb3,
+             size_t                offset);
+
+     GGML_API struct ggml_tensor * ggml_sub(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_sub_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_mul(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_mul_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_div(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_div_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_sqr(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sqr_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sqrt(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sqrt_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_log(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_log_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sin(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sin_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_cos(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_cos_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // return scalar
+     GGML_API struct ggml_tensor * ggml_sum(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // sums along rows; with input shape [a,b,c,d], returns shape [1,b,c,d]
+     GGML_API struct ggml_tensor * ggml_sum_rows(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // mean along rows
+     GGML_API struct ggml_tensor * ggml_mean(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // argmax along rows
+     GGML_API struct ggml_tensor * ggml_argmax(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // count number of equal elements in a and b
+     GGML_API struct ggml_tensor * ggml_count_equal(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     // if a is the same shape as b, and a is not a parameter, return a
+     // otherwise, return a new tensor: repeat(a) to fit in b
+     GGML_API struct ggml_tensor * ggml_repeat(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     // repeat a to the specified shape
+     GGML_API struct ggml_tensor * ggml_repeat_4d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t ne0,
+             int64_t ne1,
+             int64_t ne2,
+             int64_t ne3);
+
+     // sums repetitions in a into shape of b
+     GGML_API struct ggml_tensor * ggml_repeat_back(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b); // sum up values that are adjacent in dims > 0 instead of repeated with same stride
+
+     // concat a and b along dim
+     // used in stable-diffusion
+     GGML_API struct ggml_tensor * ggml_concat(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             int                   dim);
+
+     GGML_API struct ggml_tensor * ggml_abs(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_abs_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sgn(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sgn_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_neg(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_neg_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_step(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_step_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_tanh(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_tanh_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_elu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_elu_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_relu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_leaky_relu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a, float negative_slope, bool inplace);
+
+     GGML_API struct ggml_tensor * ggml_relu_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sigmoid(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_sigmoid_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_gelu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_gelu_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // GELU using erf (error function) when possible
+     // some backends may fall back to an approximation based on the Abramowitz and Stegun formula
+     GGML_API struct ggml_tensor * ggml_gelu_erf(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_gelu_erf_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_gelu_quick(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_gelu_quick_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_silu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_silu_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // a - x
+     // b - dy
+     GGML_API struct ggml_tensor * ggml_silu_back(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     // hardswish(x) = x * relu6(x + 3) / 6
+     GGML_API struct ggml_tensor * ggml_hardswish(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // hardsigmoid(x) = relu6(x + 3) / 6
+     GGML_API struct ggml_tensor * ggml_hardsigmoid(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_exp(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_exp_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // gated linear unit ops
+     // A: n columns, r rows,
+     // result is n / 2 columns, r rows,
+     // expects gate in second half of row, unless swapped is true
+     GGML_API struct ggml_tensor * ggml_glu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             enum   ggml_glu_op    op,
+             bool                  swapped);
+
+     GGML_API struct ggml_tensor * ggml_reglu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_reglu_swapped(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_geglu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_geglu_swapped(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_swiglu(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_swiglu_swapped(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_geglu_erf(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_geglu_erf_swapped(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_geglu_quick(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     GGML_API struct ggml_tensor * ggml_geglu_quick_swapped(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // A: n columns, r rows,
+     // B: n columns, r rows,
+     GGML_API struct ggml_tensor * ggml_glu_split(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             enum   ggml_glu_op    op);
+
+     GGML_API struct ggml_tensor * ggml_reglu_split(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_geglu_split(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_swiglu_split(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_geglu_erf_split(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_geglu_quick_split(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_swiglu_oai(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             float                 alpha,
+             float                 limit);
+
+     // normalize along rows
+     GGML_API struct ggml_tensor * ggml_norm(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 eps);
+
+     GGML_API struct ggml_tensor * ggml_norm_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 eps);
+
+     GGML_API struct ggml_tensor * ggml_rms_norm(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 eps);
+
+     GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 eps);
+
+     // group normalize along ne0*ne1*n_groups
+     // used in stable-diffusion
+     GGML_API struct ggml_tensor * ggml_group_norm(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int                   n_groups,
+             float                 eps);
+
+     GGML_API struct ggml_tensor * ggml_group_norm_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int                   n_groups,
+             float                 eps);
+
+     // l2 normalize along rows
+     // used in rwkv v7
+     GGML_API struct ggml_tensor * ggml_l2_norm(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 eps);
+
+     GGML_API struct ggml_tensor * ggml_l2_norm_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 eps);
+
+     // a - x
+     // b - dy
+     GGML_API struct ggml_tensor * ggml_rms_norm_back(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             float                 eps);
+
+     // A: k columns, n rows => [ne03, ne02, n, k]
+     // B: k columns, m rows (i.e. we transpose it internally) => [ne03 * x, ne02 * y, m, k]
+     // result is n columns, m rows => [ne03 * x, ne02 * y, m, n]
+     GGML_API struct ggml_tensor * ggml_mul_mat(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
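A shape-only sketch of the convention above; remember that ne[0] counts columns (a fragment, assuming an existing ctx):

    // A: [k, n] (k columns, n rows), B: [k, m]  ->  result: [n, m]
    const int k = 64, n = 32, m = 8;
    struct ggml_tensor * A = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, k, n);
    struct ggml_tensor * B = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, k, m);
    struct ggml_tensor * C = ggml_mul_mat(ctx, A, B);
    // C->ne[0] == n && C->ne[1] == m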
+
+     // change the precision of a matrix multiplication
+     // set to GGML_PREC_F32 for higher precision (useful for phi-2)
+     GGML_API void ggml_mul_mat_set_prec(
+             struct ggml_tensor * a,
+             enum   ggml_prec     prec);
+
+     // indirect matrix multiplication
+     GGML_API struct ggml_tensor * ggml_mul_mat_id(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * as,
+             struct ggml_tensor  * b,
+             struct ggml_tensor  * ids);
+
+     // A: m columns, n rows,
+     // B: p columns, n rows,
+     // result is m columns, p rows
+     GGML_API struct ggml_tensor * ggml_out_prod(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     //
+     // operations on tensors without backpropagation
+     //
+
+     GGML_API struct ggml_tensor * ggml_scale(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 s);
+
+     // in-place, returns view(a)
+     GGML_API struct ggml_tensor * ggml_scale_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 s);
+
+     // x = s * a + b
+     GGML_API struct ggml_tensor * ggml_scale_bias(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 s,
+             float                 b);
+
+     GGML_API struct ggml_tensor * ggml_scale_bias_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             float                 s,
+             float                 b);
+
+     // b -> view(a, offset, nb1, nb2, nb3), return modified a
+     GGML_API struct ggml_tensor * ggml_set(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             size_t                nb1,
+             size_t                nb2,
+             size_t                nb3,
+             size_t                offset); // in bytes
+
+     // b -> view(a, offset, nb1, nb2, nb3), return view(a)
+     GGML_API struct ggml_tensor * ggml_set_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             size_t                nb1,
+             size_t                nb2,
+             size_t                nb3,
+             size_t                offset); // in bytes
+
+     GGML_API struct ggml_tensor * ggml_set_1d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             size_t                offset); // in bytes
+
+     GGML_API struct ggml_tensor * ggml_set_1d_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             size_t                offset); // in bytes
+
+     // b -> view(a, offset, nb1, nb2, nb3), return modified a
+     GGML_API struct ggml_tensor * ggml_set_2d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             size_t                nb1,
+             size_t                offset); // in bytes
+
+     // b -> view(a, offset, nb1, nb2, nb3), return view(a)
+     GGML_API struct ggml_tensor * ggml_set_2d_inplace(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b,
+             size_t                nb1,
+             size_t                offset); // in bytes
+
+     // a -> b, return view(b)
+     GGML_API struct ggml_tensor * ggml_cpy(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     GGML_API struct ggml_tensor * ggml_cast(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             enum   ggml_type      type);
+
+     // make contiguous
+     GGML_API struct ggml_tensor * ggml_cont(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a);
+
+     // make contiguous, with new shape
+     GGML_API struct ggml_tensor * ggml_cont_1d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0);
+
+     GGML_API struct ggml_tensor * ggml_cont_2d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1);
+
+     GGML_API struct ggml_tensor * ggml_cont_3d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1,
+             int64_t               ne2);
+
+     GGML_API struct ggml_tensor * ggml_cont_4d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1,
+             int64_t               ne2,
+             int64_t               ne3);
+
+     // return view(a), b specifies the new shape
+     // TODO: when we start computing gradient, make a copy instead of view
+     GGML_API struct ggml_tensor * ggml_reshape(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             struct ggml_tensor  * b);
+
+     // return view(a)
+     // TODO: when we start computing gradient, make a copy instead of view
+     GGML_API struct ggml_tensor * ggml_reshape_1d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0);
+
+     GGML_API struct ggml_tensor * ggml_reshape_2d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1);
+
+     // return view(a)
+     // TODO: when we start computing gradient, make a copy instead of view
+     GGML_API struct ggml_tensor * ggml_reshape_3d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1,
+             int64_t               ne2);
+
+     GGML_API struct ggml_tensor * ggml_reshape_4d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1,
+             int64_t               ne2,
+             int64_t               ne3);
+
+     // offset in bytes
+     GGML_API struct ggml_tensor * ggml_view_1d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             size_t                offset);
+
+     GGML_API struct ggml_tensor * ggml_view_2d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1,
+             size_t                nb1, // row stride in bytes
+             size_t                offset);
+
+     GGML_API struct ggml_tensor * ggml_view_3d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1,
+             int64_t               ne2,
+             size_t                nb1, // row stride in bytes
+             size_t                nb2, // slice stride in bytes
+             size_t                offset);
+
+     GGML_API struct ggml_tensor * ggml_view_4d(
+             struct ggml_context * ctx,
+             struct ggml_tensor  * a,
+             int64_t               ne0,
+             int64_t               ne1,
+             int64_t               ne2,
+             int64_t               ne3,
+             size_t                nb1, // row stride in bytes
+             size_t                nb2, // slice stride in bytes
+             size_t                nb3,
+             size_t                offset);
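A sketch taking a single row of a contiguous F32 matrix as a 1d view; the offset argument is in bytes, so the row index is scaled by the row stride nb[1] (the variable names n_cols, n_rows, and r are illustrative):

    struct ggml_tensor * m   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_cols, n_rows);
    struct ggml_tensor * row = ggml_view_1d(ctx, m, n_cols, r*m->nb[1]); // row r, no copy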
1508
+
1509
+ GGML_API struct ggml_tensor * ggml_permute(
1510
+ struct ggml_context * ctx,
1511
+ struct ggml_tensor * a,
1512
+ int axis0,
1513
+ int axis1,
1514
+ int axis2,
1515
+ int axis3);
1516
+
1517
+ // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
1518
+ GGML_API struct ggml_tensor * ggml_transpose(
1519
+ struct ggml_context * ctx,
1520
+ struct ggml_tensor * a);
1521
+
1522
+ // supports 3D: a->ne[2] == b->ne[1]
1523
+ GGML_API struct ggml_tensor * ggml_get_rows(
1524
+ struct ggml_context * ctx,
1525
+ struct ggml_tensor * a, // data
1526
+ struct ggml_tensor * b); // row indices
1527
+
1528
+ GGML_API struct ggml_tensor * ggml_get_rows_back(
1529
+ struct ggml_context * ctx,
1530
+ struct ggml_tensor * a, // gradients of ggml_get_rows result
1531
+ struct ggml_tensor * b, // row indices
1532
+ struct ggml_tensor * c); // data for ggml_get_rows, only used for its shape
1533
+
1534
+ // a TD [n_embd, ne1, ne2, ne3]
1535
+ // b TS [n_embd, n_rows, ne02, ne03] | ne02 == ne2, ne03 == ne3
1536
+ // c I64 [n_rows, ne11, ne12, 1] | c[i] in [0, ne1)
1537
+ //
1538
+ // undefined behavior if destination rows overlap
1539
+ //
1540
+ // broadcast:
1541
+ // ne2 % ne11 == 0
1542
+ // ne3 % ne12 == 0
1543
+ //
1544
+ // return view(a)
1545
+ GGML_API struct ggml_tensor * ggml_set_rows(
1546
+ struct ggml_context * ctx,
1547
+ struct ggml_tensor * a, // destination
1548
+ struct ggml_tensor * b, // source
1549
+ struct ggml_tensor * c); // row indices
+
+ GGML_API struct ggml_tensor * ggml_diag(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ // set elements above the diagonal to -INF
+ GGML_API struct ggml_tensor * ggml_diag_mask_inf(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past);
+
+ // in-place, returns view(a)
+ GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past);
+
+ // set elements above the diagonal to 0
+ GGML_API struct ggml_tensor * ggml_diag_mask_zero(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past);
+
+ // in-place, returns view(a)
+ GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past);
+
+ GGML_API struct ggml_tensor * ggml_soft_max(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ // in-place, returns view(a)
+ GGML_API struct ggml_tensor * ggml_soft_max_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ // a [ne0, ne01, ne02, ne03]
+ // mask [ne0, ne11, ne12, ne13] | ne11 >= ne01, F16 or F32, optional
+ //
+ // broadcast:
+ // ne02 % ne12 == 0
+ // ne03 % ne13 == 0
+ //
+ // fused soft_max(a*scale + mask*(ALiBi slope))
+ // max_bias = 0.0f for no ALiBi
+ GGML_API struct ggml_tensor * ggml_soft_max_ext(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * mask,
+ float scale,
+ float max_bias);
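+
+ // [editor's note] usage sketch, not part of upstream ggml.h: the typical
+ // attention pattern, scaling KQ by 1/sqrt(head_dim) and applying an additive
+ // mask in one fused op. `kq` and `mask` are assumed tensors; sqrtf needs <math.h>.
+ //
+ //   const float scale = 1.0f/sqrtf((float) head_dim);
+ //   struct ggml_tensor * probs = ggml_soft_max_ext(ctx, kq, mask, scale, 0.0f);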
+
+ GGML_API void ggml_soft_max_add_sinks(
+ struct ggml_tensor * a,
+ struct ggml_tensor * sinks);
+
+ GGML_API struct ggml_tensor * ggml_soft_max_ext_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ float scale,
+ float max_bias);
+
+ // in-place, returns view(a)
+ GGML_API struct ggml_tensor * ggml_soft_max_ext_back_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ float scale,
+ float max_bias);
+
+ // rotary position embedding
+ // if (mode & 1) - skip n_past elements (NOT SUPPORTED)
+ // if (mode & GGML_ROPE_TYPE_NEOX) - GPT-NeoX style
+ //
+ // b is an int32 vector of size a->ne[2]; it contains the positions
+ GGML_API struct ggml_tensor * ggml_rope(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int n_dims,
+ int mode);
+
+ // in-place, returns view(a)
+ GGML_API struct ggml_tensor * ggml_rope_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int n_dims,
+ int mode);
+
+ // custom RoPE
+ // c is freq factors (optional, e.g. phi3-128k)
+ GGML_API struct ggml_tensor * ggml_rope_ext(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ struct ggml_tensor * c,
+ int n_dims,
+ int mode,
+ int n_ctx_orig,
+ float freq_base,
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow);
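+
+ // [editor's note] usage sketch, not part of upstream ggml.h: a NeoX-style
+ // RoPE call with common defaults (freq_base 10000, YaRN disabled since
+ // ext_factor = 0). `cur` is an assumed [head_dim, n_head, n_tokens] tensor
+ // and `pos` an I32 position vector of size n_tokens.
+ //
+ //   struct ggml_tensor * rot = ggml_rope_ext(ctx, cur, pos, NULL, head_dim,
+ //       GGML_ROPE_TYPE_NEOX, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 32.0f, 1.0f);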
+
+ GGML_API struct ggml_tensor * ggml_rope_multi(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ struct ggml_tensor * c,
+ int n_dims,
+ int sections[GGML_MROPE_SECTIONS],
+ int mode,
+ int n_ctx_orig,
+ float freq_base,
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow);
+
+ // in-place, returns view(a)
+ GGML_API struct ggml_tensor * ggml_rope_ext_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ struct ggml_tensor * c,
+ int n_dims,
+ int mode,
+ int n_ctx_orig,
+ float freq_base,
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow);
+
+ GGML_API struct ggml_tensor * ggml_rope_multi_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ struct ggml_tensor * c,
+ int n_dims,
+ int sections[GGML_MROPE_SECTIONS],
+ int mode,
+ int n_ctx_orig,
+ float freq_base,
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow);
+
+ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_rope_custom(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int n_dims,
+ int mode,
+ int n_ctx_orig,
+ float freq_base,
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow),
+ "use ggml_rope_ext instead");
+
+ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int n_dims,
+ int mode,
+ int n_ctx_orig,
+ float freq_base,
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow),
+ "use ggml_rope_ext_inplace instead");
+
+ // compute correction dims for YaRN RoPE scaling
+ GGML_API void ggml_rope_yarn_corr_dims(
+ int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2]);
+
+ // rotary position embedding backward, i.e. compute dx from dy
+ // a - dy
+ GGML_API struct ggml_tensor * ggml_rope_ext_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // gradients of ggml_rope result
+ struct ggml_tensor * b, // positions
+ struct ggml_tensor * c, // freq factors
+ int n_dims,
+ int mode,
+ int n_ctx_orig,
+ float freq_base,
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow);
+
+ GGML_API struct ggml_tensor * ggml_rope_multi_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ struct ggml_tensor * c,
+ int n_dims,
+ int sections[4],
+ int mode,
+ int n_ctx_orig,
+ float freq_base,
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow);
+
+
+ // clamp
+ // in-place, returns view(a)
+ GGML_API struct ggml_tensor * ggml_clamp(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ float min,
+ float max);
+
+ // im2col
+ // unfolds the input into columns so that a matrix multiplication with the
+ // kernel computes the convolution
+ GGML_API struct ggml_tensor * ggml_im2col(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // data
+ int s0, // stride dimension 0
+ int s1, // stride dimension 1
+ int p0, // padding dimension 0
+ int p1, // padding dimension 1
+ int d0, // dilation dimension 0
+ int d1, // dilation dimension 1
+ bool is_2D,
+ enum ggml_type dst_type);
+
+ GGML_API struct ggml_tensor * ggml_im2col_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // gradient of im2col output
+ int64_t * ne, // shape of im2col input
+ int s0, // stride dimension 0
+ int s1, // stride dimension 1
+ int p0, // padding dimension 0
+ int p1, // padding dimension 1
+ int d0, // dilation dimension 0
+ int d1, // dilation dimension 1
+ bool is_2D);
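+
+ // [editor's note] usage sketch, not part of upstream ggml.h: how im2col
+ // reduces a 2D convolution to a matrix product (this is what ggml_conv_2d
+ // does internally). Shapes below are illustrative assumptions.
+ //
+ //   // k: [KW, KH, IC, OC] kernel, x: [W, H, IC, N] input
+ //   struct ggml_tensor * col = ggml_im2col(ctx, k, x,
+ //       1, 1, 0, 0, 1, 1, /*is_2D=*/true, GGML_TYPE_F16);
+ //   // multiplying the kernel (reshaped to [KW*KH*IC, OC]) against `col`
+ //   // with ggml_mul_mat yields the convolution result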
+
+ GGML_API struct ggml_tensor * ggml_conv_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // data
+ int s0, // stride
+ int p0, // padding
+ int d0); // dilation
+
+ // conv_1d with padding = half
+ // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
+ GGML_API struct ggml_tensor * ggml_conv_1d_ph(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // data
+ int s, // stride
+ int d); // dilation
+
+ // depthwise
+ // TODO: this is very likely wrong for some cases! - needs more testing
+ GGML_API struct ggml_tensor * ggml_conv_1d_dw(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // data
+ int s0, // stride
+ int p0, // padding
+ int d0); // dilation
+
+ GGML_API struct ggml_tensor * ggml_conv_1d_dw_ph(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // data
+ int s0, // stride
+ int d0); // dilation
+
+ GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // data
+ int s0, // stride
+ int p0, // padding
+ int d0); // dilation
+
+ GGML_API struct ggml_tensor * ggml_conv_2d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // data
+ int s0, // stride dimension 0
+ int s1, // stride dimension 1
+ int p0, // padding dimension 0
+ int p1, // padding dimension 1
+ int d0, // dilation dimension 0
+ int d1); // dilation dimension 1
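+
+ // [editor's note] worked note, not part of upstream ggml.h: per spatial dim
+ // the output size follows the usual convolution arithmetic,
+ //   out0 = (in0 + 2*p0 - d0*(k0 - 1) - 1)/s0 + 1
+ // e.g. a hypothetical 3x3 kernel over a 64x64 input with s=1, p=1, d=1
+ // keeps the spatial size at 64x64:
+ //
+ //   // k: [3, 3, IC, OC], x: [64, 64, IC, N] -> y: [64, 64, OC, N]
+ //   struct ggml_tensor * y = ggml_conv_2d(ctx, k, x, 1, 1, 1, 1, 1, 1);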
+
+ // kernel size is a->ne[0] x a->ne[1]
+ // stride is equal to kernel size
+ // padding is zero
+ // example:
+ // a: 16 16 3 768
+ // b: 1024 1024 3 1
+ // res: 64 64 768 1
+ // used in sam
+ GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b);
+
+ // kernel size is a->ne[0] x a->ne[1]
+ // stride is 1
+ // padding is half
+ // example:
+ // a: 3 3 256 256
+ // b: 64 64 256 1
+ // res: 64 64 256 1
+ // used in sam
+ GGML_API struct ggml_tensor * ggml_conv_2d_s1_ph(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b);
+
+ // depthwise (via im2col and mul_mat)
+ GGML_API struct ggml_tensor * ggml_conv_2d_dw(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel
+ struct ggml_tensor * b, // data
+ int s0, // stride dimension 0
+ int s1, // stride dimension 1
+ int p0, // padding dimension 0
+ int p1, // padding dimension 1
+ int d0, // dilation dimension 0
+ int d1); // dilation dimension 1
+
+ // Depthwise 2D convolution
+ // may be faster than ggml_conv_2d_dw, but not available in all backends
+ // a: KW KH 1 C convolution kernel
+ // b: W H C N input data
+ // res: W_out H_out C N
+ GGML_API struct ggml_tensor * ggml_conv_2d_dw_direct(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int stride0,
+ int stride1,
+ int pad0,
+ int pad1,
+ int dilation0,
+ int dilation1);
+
+ GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int stride);
+
+ GGML_API struct ggml_tensor * ggml_conv_2d_direct(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // convolution kernel [KW, KH, IC, OC]
+ struct ggml_tensor * b, // input data [W, H, C, N]
+ int s0, // stride dimension 0
+ int s1, // stride dimension 1
+ int p0, // padding dimension 0
+ int p1, // padding dimension 1
+ int d0, // dilation dimension 0
+ int d1); // dilation dimension 1
+
+ enum ggml_op_pool {
+ GGML_OP_POOL_MAX,
+ GGML_OP_POOL_AVG,
+ GGML_OP_POOL_COUNT,
+ };
+
+ GGML_API struct ggml_tensor * ggml_pool_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ enum ggml_op_pool op,
+ int k0, // kernel size
+ int s0, // stride
+ int p0); // padding
+
+ // the result will have 2*p0 padding for the first dimension
+ // and 2*p1 padding for the second dimension
+ GGML_API struct ggml_tensor * ggml_pool_2d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ enum ggml_op_pool op,
+ int k0,
+ int k1,
+ int s0,
+ int s1,
+ float p0,
+ float p1);
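+
+ // [editor's note] usage sketch, not part of upstream ggml.h: a standard 2x2
+ // max pool with stride 2 and no padding, halving each spatial dim of an
+ // assumed [W, H, C, N] input `x`.
+ //
+ //   struct ggml_tensor * y = ggml_pool_2d(ctx, x, GGML_OP_POOL_MAX,
+ //                                         2, 2, 2, 2, 0.0f, 0.0f);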
+
+ GGML_API struct ggml_tensor * ggml_pool_2d_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * af, // "a"/input used in forward pass
+ enum ggml_op_pool op,
+ int k0,
+ int k1,
+ int s0,
+ int s1,
+ float p0,
+ float p1);
+
+ enum ggml_scale_mode {
+ GGML_SCALE_MODE_NEAREST = 0,
+ GGML_SCALE_MODE_BILINEAR = 1,
+
+ GGML_SCALE_MODE_COUNT
+ };
+
+ enum ggml_scale_flag {
+ GGML_SCALE_FLAG_ALIGN_CORNERS = (1 << 8)
+ };
+
+ // interpolate
+ // multiplies ne0 and ne1 by scale factor
+ GGML_API struct ggml_tensor * ggml_upscale(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int scale_factor,
+ enum ggml_scale_mode mode);
+
+ // interpolate
+ // interpolates to the specified dimensions
+ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_upscale_ext(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int ne0,
+ int ne1,
+ int ne2,
+ int ne3,
+ enum ggml_scale_mode mode),
+ "use ggml_interpolate instead");
+
+ // Up- or downsamples the input to the specified size.
+ // 2D scale modes (e.g. bilinear) are applied to the first two dimensions.
+ GGML_API struct ggml_tensor * ggml_interpolate(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2,
+ int64_t ne3,
+ uint32_t mode); // ggml_scale_mode [ | ggml_scale_flag...]
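+
+ // [editor's note] usage sketch, not part of upstream ggml.h: bilinear resize
+ // of an assumed [W, H, C, N] tensor `x` to 224x224, OR-ing in the optional
+ // align-corners flag as described above.
+ //
+ //   struct ggml_tensor * y = ggml_interpolate(ctx, x, 224, 224, x->ne[2], x->ne[3],
+ //       GGML_SCALE_MODE_BILINEAR | GGML_SCALE_FLAG_ALIGN_CORNERS);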
2016
+
2017
+ // pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0]
2018
+ GGML_API struct ggml_tensor * ggml_pad(
2019
+ struct ggml_context * ctx,
2020
+ struct ggml_tensor * a,
2021
+ int p0,
2022
+ int p1,
2023
+ int p2,
2024
+ int p3);
2025
+
2026
+ // pad each dimension with reflection: [a, b, c, d] -> [b, a, b, c, d, c]
2027
+ GGML_API struct ggml_tensor * ggml_pad_reflect_1d(
2028
+ struct ggml_context * ctx,
2029
+ struct ggml_tensor * a,
2030
+ int p0,
2031
+ int p1);
2032
+
2033
+ // Move tensor elements by an offset given for each dimension. Elements that
2034
+ // are shifted beyond the last position are wrapped around to the beginning.
2035
+ GGML_API struct ggml_tensor * ggml_roll(
2036
+ struct ggml_context * ctx,
2037
+ struct ggml_tensor * a,
2038
+ int shift0,
2039
+ int shift1,
2040
+ int shift2,
2041
+ int shift3);
2042
+
2043
+
2044
+ // Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151
2045
+ // timesteps: [N,]
2046
+ // return: [N, dim]
2047
+ GGML_API struct ggml_tensor * ggml_timestep_embedding(
2048
+ struct ggml_context * ctx,
2049
+ struct ggml_tensor * timesteps,
2050
+ int dim,
2051
+ int max_period);
+
+ // sort rows
+ enum ggml_sort_order {
+ GGML_SORT_ORDER_ASC,
+ GGML_SORT_ORDER_DESC,
+ };
+
+ GGML_API struct ggml_tensor * ggml_argsort(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ enum ggml_sort_order order);
+
+ GGML_API struct ggml_tensor * ggml_arange(
+ struct ggml_context * ctx,
+ float start,
+ float stop,
+ float step);
+
+ // top k elements per row
+ GGML_API struct ggml_tensor * ggml_top_k(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int k);
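+
+ // [editor's note] usage sketch, not part of upstream ggml.h: top_k is built
+ // on argsort, so the result holds the I32 indices of the k largest elements
+ // per row, e.g. for an assumed [n_vocab, n_tokens] logits tensor:
+ //
+ //   struct ggml_tensor * idx = ggml_top_k(ctx, logits, 40); // [40, n_tokens]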
+
+ #define GGML_KQ_MASK_PAD 64
+
+ // q: [n_embd_k, n_batch, n_head, ne3 ]
+ // k: [n_embd_k, n_kv, n_head_kv, ne3 ]
+ // v: [n_embd_v, n_kv, n_head_kv, ne3 ] !! not transposed !!
+ // mask: [n_kv, n_batch_pad, ne32, ne33] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !!
+ // res: [n_embd_v, n_head, n_batch, ne3 ] !! permuted !!
+ //
+ // broadcast:
+ // n_head % n_head_kv == 0
+ // n_head % ne32 == 0
+ // ne3 % ne33 == 0
+ //
+ GGML_API struct ggml_tensor * ggml_flash_attn_ext(
+ struct ggml_context * ctx,
+ struct ggml_tensor * q,
+ struct ggml_tensor * k,
+ struct ggml_tensor * v,
+ struct ggml_tensor * mask,
+ float scale,
+ float max_bias,
+ float logit_softcap);
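+
+ // [editor's note] usage sketch, not part of upstream ggml.h: fused attention
+ // over assumed q/k/v/mask tensors shaped as documented above; scale is the
+ // usual 1/sqrt(n_embd_head), max_bias 0 (no ALiBi), softcap 0 (disabled).
+ //
+ //   struct ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, mask,
+ //       1.0f/sqrtf((float) n_embd_head), 0.0f, 0.0f);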
+
+ GGML_API void ggml_flash_attn_ext_set_prec(
+ struct ggml_tensor * a,
+ enum ggml_prec prec);
+
+ GGML_API enum ggml_prec ggml_flash_attn_ext_get_prec(
+ const struct ggml_tensor * a);
+
+ GGML_API void ggml_flash_attn_ext_add_sinks(
+ struct ggml_tensor * a,
+ struct ggml_tensor * sinks);
+
+ // TODO: needs to be adapted to ggml_flash_attn_ext
+ GGML_API struct ggml_tensor * ggml_flash_attn_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * q,
+ struct ggml_tensor * k,
+ struct ggml_tensor * v,
+ struct ggml_tensor * d,
+ bool masked);
+
+ GGML_API struct ggml_tensor * ggml_ssm_conv(
+ struct ggml_context * ctx,
+ struct ggml_tensor * sx,
+ struct ggml_tensor * c);
+
+ GGML_API struct ggml_tensor * ggml_ssm_scan(
+ struct ggml_context * ctx,
+ struct ggml_tensor * s,
+ struct ggml_tensor * x,
+ struct ggml_tensor * dt,
+ struct ggml_tensor * A,
+ struct ggml_tensor * B,
+ struct ggml_tensor * C,
+ struct ggml_tensor * ids);
+
+ // partition into non-overlapping windows with padding if needed
+ // example:
+ // a: 768 64 64 1
+ // w: 14
+ // res: 768 14 14 25
+ // used in sam
+ GGML_API struct ggml_tensor * ggml_win_part(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int w);
+
+ // reverse of ggml_win_part
+ // used in sam
+ GGML_API struct ggml_tensor * ggml_win_unpart(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int w0,
+ int h0,
+ int w);
+
+ GGML_API struct ggml_tensor * ggml_unary(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ enum ggml_unary_op op);
+
+ GGML_API struct ggml_tensor * ggml_unary_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ enum ggml_unary_op op);
+
+ // used in sam
+ GGML_API struct ggml_tensor * ggml_get_rel_pos(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int qh,
+ int kh);
+
+ // used in sam
+ GGML_API struct ggml_tensor * ggml_add_rel_pos(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * pw,
+ struct ggml_tensor * ph);
+
+ GGML_API struct ggml_tensor * ggml_add_rel_pos_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * pw,
+ struct ggml_tensor * ph);
+
+ GGML_API struct ggml_tensor * ggml_rwkv_wkv6(
+ struct ggml_context * ctx,
+ struct ggml_tensor * k,
+ struct ggml_tensor * v,
+ struct ggml_tensor * r,
+ struct ggml_tensor * tf,
+ struct ggml_tensor * td,
+ struct ggml_tensor * state);
+
+ GGML_API struct ggml_tensor * ggml_gated_linear_attn(
+ struct ggml_context * ctx,
+ struct ggml_tensor * k,
+ struct ggml_tensor * v,
+ struct ggml_tensor * q,
+ struct ggml_tensor * g,
+ struct ggml_tensor * state,
+ float scale);
+
+ GGML_API struct ggml_tensor * ggml_rwkv_wkv7(
+ struct ggml_context * ctx,
+ struct ggml_tensor * r,
+ struct ggml_tensor * w,
+ struct ggml_tensor * k,
+ struct ggml_tensor * v,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ struct ggml_tensor * state);
+
+ // custom operators
+
+ typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, int ith, int nth, void * userdata);
+ typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata);
+ typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata);
+
+ #define GGML_N_TASKS_MAX (-1)
+ // n_tasks == GGML_N_TASKS_MAX means to use max number of tasks
+
+ GGML_API struct ggml_tensor * ggml_map_custom1(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ ggml_custom1_op_t fun,
+ int n_tasks,
+ void * userdata);
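+
+ // [editor's note] usage sketch, not part of upstream ggml.h: a custom
+ // element-wise op. The callback runs on nth threads; each thread ith
+ // processes its own slice of dst. Assumes contiguous F32 tensors.
+ //
+ //   static void my_neg(struct ggml_tensor * dst, const struct ggml_tensor * a,
+ //                      int ith, int nth, void * userdata) {
+ //       (void) userdata;
+ //       const int64_t n  = ggml_nelements(dst);
+ //       const int64_t dr = (n + nth - 1)/nth;         // elements per thread
+ //       const int64_t i0 = dr*ith;
+ //       const int64_t i1 = i0 + dr < n ? i0 + dr : n; // clamp to n
+ //       for (int64_t i = i0; i < i1; ++i) {
+ //           ((float *) dst->data)[i] = -((const float *) a->data)[i];
+ //       }
+ //   }
+ //
+ //   // struct ggml_tensor * y = ggml_map_custom1(ctx, x, my_neg, GGML_N_TASKS_MAX, NULL);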
+
+ GGML_API struct ggml_tensor * ggml_map_custom1_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ ggml_custom1_op_t fun,
+ int n_tasks,
+ void * userdata);
+
+ GGML_API struct ggml_tensor * ggml_map_custom2(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ ggml_custom2_op_t fun,
+ int n_tasks,
+ void * userdata);
+
+ GGML_API struct ggml_tensor * ggml_map_custom2_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ ggml_custom2_op_t fun,
+ int n_tasks,
+ void * userdata);
+
+ GGML_API struct ggml_tensor * ggml_map_custom3(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ struct ggml_tensor * c,
+ ggml_custom3_op_t fun,
+ int n_tasks,
+ void * userdata);
+
+ GGML_API struct ggml_tensor * ggml_map_custom3_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ struct ggml_tensor * c,
+ ggml_custom3_op_t fun,
+ int n_tasks,
+ void * userdata);
+
+ typedef void (*ggml_custom_op_t)(struct ggml_tensor * dst, int ith, int nth, void * userdata);
+
+ GGML_API struct ggml_tensor * ggml_custom_4d(
+ struct ggml_context * ctx,
+ enum ggml_type type,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2,
+ int64_t ne3,
+ struct ggml_tensor ** args,
+ int n_args,
+ ggml_custom_op_t fun,
+ int n_tasks,
+ void * userdata);
+
+ GGML_API struct ggml_tensor * ggml_custom_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor ** args,
+ int n_args,
+ ggml_custom_op_t fun,
+ int n_tasks,
+ void * userdata);
+
+ // loss functions
+
+ GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // logits
+ struct ggml_tensor * b); // labels
+
+ GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a, // logits
+ struct ggml_tensor * b, // labels
+ struct ggml_tensor * c); // gradients of cross_entropy_loss result
+
+ // AdamW optimizer step
+ // Paper: https://arxiv.org/pdf/1711.05101v3.pdf
+ // PyTorch: https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html
+ GGML_API struct ggml_tensor * ggml_opt_step_adamw(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * grad,
+ struct ggml_tensor * m,
+ struct ggml_tensor * v,
+ struct ggml_tensor * adamw_params); // parameters such as the learning rate
+
+ // stochastic gradient descent step (with weight decay)
+ GGML_API struct ggml_tensor * ggml_opt_step_sgd(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * grad,
+ struct ggml_tensor * sgd_params); // alpha, weight decay
+
+ //
+ // automatic differentiation
+ //
+
+ GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
+ GGML_API void ggml_build_backward_expand(
+ struct ggml_context * ctx, // context for gradient computation
+ struct ggml_cgraph * cgraph,
+ struct ggml_tensor ** grad_accs);
+
+ // graph allocation in a context
+ GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false
+ GGML_API struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads);
+ GGML_API struct ggml_cgraph * ggml_graph_dup (struct ggml_context * ctx, struct ggml_cgraph * cgraph, bool force_grads);
+ GGML_API void ggml_graph_cpy (struct ggml_cgraph * src, struct ggml_cgraph * dst);
+ GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph); // set regular grads + optimizer momenta to 0, set loss grad to 1
+ GGML_API void ggml_graph_clear (struct ggml_cgraph * cgraph);
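+
+ // [editor's note] usage sketch, not part of upstream ggml.h: recording an
+ // expression into a graph. Assumes `ctx` holds tensors `a` and `b`; actually
+ // running the graph is done through a backend (e.g. ggml-cpu.h).
+ //
+ //   struct ggml_tensor * c  = ggml_mul_mat(ctx, a, b);
+ //   struct ggml_cgraph * gf = ggml_new_graph(ctx);
+ //   ggml_build_forward_expand(gf, c); // adds c and all its parents as nodes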
+
+ GGML_API int ggml_graph_size (struct ggml_cgraph * cgraph);
+ GGML_API struct ggml_tensor * ggml_graph_node (struct ggml_cgraph * cgraph, int i); // if i < 0, returns nodes[n_nodes + i]
+ GGML_API struct ggml_tensor ** ggml_graph_nodes (struct ggml_cgraph * cgraph);
+ GGML_API int ggml_graph_n_nodes(struct ggml_cgraph * cgraph);
+
+ GGML_API void ggml_graph_add_node(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
+
+ GGML_API size_t ggml_graph_overhead(void);
+ GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads);
+
+ GGML_API struct ggml_tensor * ggml_graph_get_tensor (const struct ggml_cgraph * cgraph, const char * name);
+ GGML_API struct ggml_tensor * ggml_graph_get_grad (const struct ggml_cgraph * cgraph, const struct ggml_tensor * node);
+ GGML_API struct ggml_tensor * ggml_graph_get_grad_acc(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node);
+
+ // print structure and performance information for the graph
+ GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);
+
+ // dump the graph into a file using the dot format
+ GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);
+
+ // TODO: these functions were sandwiched in the old optimization interface; is there a better place for them?
+ typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
+
+ // Set callback for all future logging events.
+ // If this is not called, or NULL is supplied, everything is output on stderr.
+ GGML_API void ggml_log_set(ggml_log_callback log_callback, void * user_data);
+
+ GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
+
+ //
+ // quantization
+ //
+
+ // - ggml_quantize_init can be called multiple times with the same type
+ // it will only initialize the quantization tables for the first call or after ggml_quantize_free
+ // automatically called by ggml_quantize_chunk for convenience
+ //
+ // - ggml_quantize_free will free any memory allocated by ggml_quantize_init
+ // call this at the end of the program to avoid memory leaks
+ //
+ // note: these are thread-safe
+ //
+ GGML_API void ggml_quantize_init(enum ggml_type type);
+ GGML_API void ggml_quantize_free(void);
+
+ // some quantization types cannot be used without an importance matrix
+ GGML_API bool ggml_quantize_requires_imatrix(enum ggml_type type);
+
+ // calls ggml_quantize_init internally (i.e. can allocate memory)
+ GGML_API size_t ggml_quantize_chunk(
+ enum ggml_type type,
+ const float * src,
+ void * dst,
+ int64_t start,
+ int64_t nrows,
+ int64_t n_per_row,
+ const float * imatrix);
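+
+ // [editor's note] usage sketch, not part of upstream ggml.h: quantizing F32
+ // rows to Q8_0 with no importance matrix; `n_per_row` must be a multiple of
+ // the block size, and the return value is the number of bytes written.
+ //
+ //   size_t n_bytes = ggml_quantize_chunk(GGML_TYPE_Q8_0, src, dst,
+ //                                        0, nrows, n_per_row, NULL);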
+
+ #ifdef __cplusplus
+ // restrict not standard in C++
+ # if defined(__GNUC__)
+ # define GGML_RESTRICT __restrict__
+ # elif defined(__clang__)
+ # define GGML_RESTRICT __restrict
+ # elif defined(_MSC_VER)
+ # define GGML_RESTRICT __restrict
+ # else
+ # define GGML_RESTRICT
+ # endif
+ #else
+ # if defined (_MSC_VER) && (__STDC_VERSION__ < 201112L)
+ # define GGML_RESTRICT __restrict
+ # else
+ # define GGML_RESTRICT restrict
+ # endif
+ #endif
+ typedef void (*ggml_to_float_t) (const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+
+ struct ggml_type_traits {
+ const char * type_name;
+ int64_t blck_size;
+ int64_t blck_size_interleave; // interleave elements in blocks
+ size_t type_size;
+ bool is_quantized;
+ ggml_to_float_t to_float;
+ ggml_from_float_t from_float_ref;
+ };
+
+ GGML_API const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type);
+
+ // ggml threadpool
+ // TODO: currently, only a few functions are in the base ggml API, while the rest are in the CPU backend
+ // the goal should be to create an API that other backends can use, and to move everything to the ggml base
+
+ // scheduling priorities
+ enum ggml_sched_priority {
+ GGML_SCHED_PRIO_LOW = -1,
+ GGML_SCHED_PRIO_NORMAL,
+ GGML_SCHED_PRIO_MEDIUM,
+ GGML_SCHED_PRIO_HIGH,
+ GGML_SCHED_PRIO_REALTIME
+ };
+
+ // threadpool params
+ // Use ggml_threadpool_params_default() or ggml_threadpool_params_init() to populate the defaults
+ struct ggml_threadpool_params {
+ bool cpumask[GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings)
+ int n_threads; // number of threads
+ enum ggml_sched_priority prio; // thread priority
+ uint32_t poll; // polling level (0 - no polling, 100 - aggressive polling)
+ bool strict_cpu; // strict cpu placement
+ bool paused; // start in paused state
+ };
+
+ struct ggml_threadpool; // forward declaration, see ggml.c
+
+ typedef struct ggml_threadpool * ggml_threadpool_t;
+
+ GGML_API struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads);
+ GGML_API void ggml_threadpool_params_init (struct ggml_threadpool_params * p, int n_threads);
+ GGML_API bool ggml_threadpool_params_match (const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1);
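+
+ // [editor's note] usage sketch, not part of upstream ggml.h: populate the
+ // params with defaults, then tweak; the pool itself is created through the
+ // CPU backend API (see ggml-cpu.h).
+ //
+ //   struct ggml_threadpool_params tpp = ggml_threadpool_params_default(8);
+ //   tpp.prio = GGML_SCHED_PRIO_HIGH; // raise scheduling priority
+ //   tpp.poll = 50;                   // moderate polling level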
2464
+
2465
+ #ifdef __cplusplus
2466
+ }
2467
+ #endif