cui-llama.rn 1.0.3 → 1.0.6

This diff shows the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (62)
  1. package/README.md +35 -39
  2. package/android/src/main/CMakeLists.txt +12 -2
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +29 -9
  4. package/android/src/main/java/com/rnllama/RNLlama.java +33 -1
  5. package/android/src/main/jni.cpp +62 -8
  6. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +5 -0
  7. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +5 -0
  8. package/cpp/common.cpp +3237 -3231
  9. package/cpp/common.h +469 -468
  10. package/cpp/ggml-aarch64.c +2193 -2193
  11. package/cpp/ggml-aarch64.h +39 -39
  12. package/cpp/ggml-alloc.c +1036 -1042
  13. package/cpp/ggml-backend-impl.h +153 -153
  14. package/cpp/ggml-backend.c +2240 -2234
  15. package/cpp/ggml-backend.h +238 -238
  16. package/cpp/ggml-common.h +1833 -1829
  17. package/cpp/ggml-impl.h +755 -655
  18. package/cpp/ggml-metal.h +65 -65
  19. package/cpp/ggml-metal.m +3269 -3269
  20. package/cpp/ggml-quants.c +14872 -14860
  21. package/cpp/ggml-quants.h +132 -132
  22. package/cpp/ggml.c +22055 -22044
  23. package/cpp/ggml.h +2453 -2447
  24. package/cpp/llama-grammar.cpp +539 -0
  25. package/cpp/llama-grammar.h +39 -0
  26. package/cpp/llama-impl.h +26 -0
  27. package/cpp/llama-sampling.cpp +635 -0
  28. package/cpp/llama-sampling.h +56 -0
  29. package/cpp/llama-vocab.cpp +1721 -0
  30. package/cpp/llama-vocab.h +130 -0
  31. package/cpp/llama.cpp +19171 -21892
  32. package/cpp/llama.h +1240 -1217
  33. package/cpp/log.h +737 -737
  34. package/cpp/rn-llama.hpp +207 -29
  35. package/cpp/sampling.cpp +460 -460
  36. package/cpp/sgemm.cpp +1027 -1027
  37. package/cpp/sgemm.h +14 -14
  38. package/cpp/unicode.cpp +6 -0
  39. package/cpp/unicode.h +3 -0
  40. package/ios/RNLlama.mm +15 -6
  41. package/ios/RNLlamaContext.h +2 -8
  42. package/ios/RNLlamaContext.mm +41 -34
  43. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  44. package/lib/commonjs/chat.js +37 -0
  45. package/lib/commonjs/chat.js.map +1 -0
  46. package/lib/commonjs/index.js +14 -1
  47. package/lib/commonjs/index.js.map +1 -1
  48. package/lib/module/NativeRNLlama.js.map +1 -1
  49. package/lib/module/chat.js +31 -0
  50. package/lib/module/chat.js.map +1 -0
  51. package/lib/module/index.js +14 -1
  52. package/lib/module/index.js.map +1 -1
  53. package/lib/typescript/NativeRNLlama.d.ts +5 -1
  54. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  55. package/lib/typescript/chat.d.ts +10 -0
  56. package/lib/typescript/chat.d.ts.map +1 -0
  57. package/lib/typescript/index.d.ts +9 -2
  58. package/lib/typescript/index.d.ts.map +1 -1
  59. package/package.json +1 -1
  60. package/src/NativeRNLlama.ts +10 -1
  61. package/src/chat.ts +44 -0
  62. package/src/index.ts +31 -4
package/cpp/llama.h CHANGED
@@ -1,1217 +1,1240 @@
1
- #ifndef LLAMA_H
2
- #define LLAMA_H
3
-
4
- #include "ggml.h"
5
- #include "ggml-backend.h"
6
-
7
- #include <stddef.h>
8
- #include <stdint.h>
9
- #include <stdio.h>
10
- #include <stdbool.h>
11
-
12
- #ifdef LLAMA_SHARED
13
- # if defined(_WIN32) && !defined(__MINGW32__)
14
- # ifdef LLAMA_BUILD
15
- # define LLAMA_API __declspec(dllexport)
16
- # else
17
- # define LLAMA_API __declspec(dllimport)
18
- # endif
19
- # else
20
- # define LLAMA_API __attribute__ ((visibility ("default")))
21
- # endif
22
- #else
23
- # define LLAMA_API
24
- #endif
25
-
26
- #ifdef __GNUC__
27
- # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
28
- #elif defined(_MSC_VER)
29
- # define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
30
- #else
31
- # define DEPRECATED(func, hint) func
32
- #endif
33
-
34
- #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
35
-
36
- #define LLAMA_MAX_RNG_STATE (64*1024)
37
-
38
- #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
39
- #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
40
- #define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'
41
-
42
- #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
43
- #define LLAMA_SESSION_VERSION 7
44
-
45
- #define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ
46
- #define LLAMA_STATE_SEQ_VERSION 1
47
-
48
- #ifdef __cplusplus
49
- extern "C" {
50
- #endif
51
-
52
- //
53
- // C interface
54
- //
55
- // TODO: show sample usage
56
- //
57
-
58
- struct llama_model;
59
- struct llama_context;
60
-
61
- typedef int32_t llama_pos;
62
- typedef int32_t llama_token;
63
- typedef int32_t llama_seq_id;
64
-
65
- enum llama_vocab_type {
66
- LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
67
- LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
68
- LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE
69
- LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece
70
- LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram
71
- };
72
-
73
- // pre-tokenization types
74
- enum llama_vocab_pre_type {
75
- LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0,
76
- LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1,
77
- LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2,
78
- LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
79
- LLAMA_VOCAB_PRE_TYPE_FALCON = 4,
80
- LLAMA_VOCAB_PRE_TYPE_MPT = 5,
81
- LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
82
- LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
83
- LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
84
- LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
85
- LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10,
86
- LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11,
87
- LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
88
- LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
89
- LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
90
- LLAMA_VOCAB_PRE_TYPE_PORO = 15,
91
- LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16,
92
- LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17,
93
- LLAMA_VOCAB_PRE_TYPE_VIKING = 18,
94
- LLAMA_VOCAB_PRE_TYPE_JAIS = 19,
95
- LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20,
96
- LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21,
97
- LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22,
98
- };
99
-
100
- // note: these values should be synchronized with lm_ggml_rope
101
- // TODO: maybe move this enum to ggml.h (lm_ggml_rope_type)
102
- enum llama_rope_type {
103
- LLAMA_ROPE_TYPE_NONE = -1,
104
- LLAMA_ROPE_TYPE_NORM = 0,
105
- LLAMA_ROPE_TYPE_NEOX = 2,
106
- LLAMA_ROPE_TYPE_GLM = 4,
107
- };
108
-
109
- enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
110
- LLAMA_TOKEN_TYPE_UNDEFINED = 0,
111
- LLAMA_TOKEN_TYPE_NORMAL = 1,
112
- LLAMA_TOKEN_TYPE_UNKNOWN = 2,
113
- LLAMA_TOKEN_TYPE_CONTROL = 3,
114
- LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
115
- LLAMA_TOKEN_TYPE_UNUSED = 5,
116
- LLAMA_TOKEN_TYPE_BYTE = 6,
117
- };
118
-
119
- enum llama_token_attr {
120
- LLAMA_TOKEN_ATTR_UNDEFINED = 0,
121
- LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0,
122
- LLAMA_TOKEN_ATTR_UNUSED = 1 << 1,
123
- LLAMA_TOKEN_ATTR_NORMAL = 1 << 2,
124
- LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL?
125
- LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
126
- LLAMA_TOKEN_ATTR_BYTE = 1 << 5,
127
- LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6,
128
- LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7,
129
- LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8,
130
- LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9,
131
- };
132
-
133
- // model file types
134
- enum llama_ftype {
135
- LLAMA_FTYPE_ALL_F32 = 0,
136
- LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
137
- LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
138
- LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
139
- // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
140
- // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
141
- // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
142
- LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
143
- LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
144
- LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
145
- LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
146
- LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
147
- LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
148
- LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
149
- LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
150
- LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
151
- LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
152
- LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
153
- LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
154
- LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
155
- LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors
156
- LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors
157
- LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors
158
- LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
159
- LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors
160
- LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors
161
- LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors
162
- LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors
163
- LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors
164
- LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
165
- LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
166
- LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
167
- LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
168
- LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors
169
- LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors
170
- LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
171
-
172
- LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
173
- };
174
-
175
- enum llama_rope_scaling_type {
176
- LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1,
177
- LLAMA_ROPE_SCALING_TYPE_NONE = 0,
178
- LLAMA_ROPE_SCALING_TYPE_LINEAR = 1,
179
- LLAMA_ROPE_SCALING_TYPE_YARN = 2,
180
- LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN,
181
- };
182
-
183
- enum llama_pooling_type {
184
- LLAMA_POOLING_TYPE_UNSPECIFIED = -1,
185
- LLAMA_POOLING_TYPE_NONE = 0,
186
- LLAMA_POOLING_TYPE_MEAN = 1,
187
- LLAMA_POOLING_TYPE_CLS = 2,
188
- LLAMA_POOLING_TYPE_LAST = 3,
189
- };
190
-
191
- enum llama_attention_type {
192
- LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1,
193
- LLAMA_ATTENTION_TYPE_CAUSAL = 0,
194
- LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1,
195
- };
196
-
197
- enum llama_split_mode {
198
- LLAMA_SPLIT_MODE_NONE = 0, // single GPU
199
- LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
200
- LLAMA_SPLIT_MODE_ROW = 2, // split rows across GPUs
201
- };
202
-
203
- typedef struct llama_token_data {
204
- llama_token id; // token id
205
- float logit; // log-odds of the token
206
- float p; // probability of the token
207
- } llama_token_data;
208
-
209
- typedef struct llama_token_data_array {
210
- llama_token_data * data;
211
- size_t size;
212
- bool sorted;
213
- } llama_token_data_array;
214
-
215
- typedef bool (*llama_progress_callback)(float progress, void * user_data);
216
-
217
- // Input data for llama_decode
218
- // A llama_batch object can contain input about one or many sequences
219
- // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
220
- //
221
- // - token : the token ids of the input (used when embd is NULL)
222
- // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
223
- // - pos : the positions of the respective token in the sequence
224
- // - seq_id : the sequence to which the respective token belongs
225
- // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
226
- //
227
- typedef struct llama_batch {
228
- int32_t n_tokens;
229
-
230
- llama_token * token;
231
- float * embd;
232
- llama_pos * pos;
233
- int32_t * n_seq_id;
234
- llama_seq_id ** seq_id;
235
- int8_t * logits; // TODO: rename this to "output"
236
-
237
- // NOTE: helpers for smooth API transition - can be deprecated in the future
238
- // for future-proof code, use the above fields instead and ignore everything below
239
- //
240
- // pos[i] = all_pos_0 + i*all_pos_1
241
- //
242
- llama_pos all_pos_0; // used if pos == NULL
243
- llama_pos all_pos_1; // used if pos == NULL
244
- llama_seq_id all_seq_id; // used if seq_id == NULL
245
- } llama_batch;
246
-
247
- enum llama_model_kv_override_type {
248
- LLAMA_KV_OVERRIDE_TYPE_INT,
249
- LLAMA_KV_OVERRIDE_TYPE_FLOAT,
250
- LLAMA_KV_OVERRIDE_TYPE_BOOL,
251
- LLAMA_KV_OVERRIDE_TYPE_STR,
252
- };
253
-
254
- struct llama_model_kv_override {
255
- enum llama_model_kv_override_type tag;
256
-
257
- char key[128];
258
-
259
- union {
260
- int64_t val_i64;
261
- double val_f64;
262
- bool val_bool;
263
- char val_str[128];
264
- };
265
- };
266
-
267
- struct llama_model_params {
268
- int32_t n_gpu_layers; // number of layers to store in VRAM
269
- enum llama_split_mode split_mode; // how to split the model across multiple GPUs
270
-
271
- // main_gpu interpretation depends on split_mode:
272
- // LLAMA_SPLIT_NONE: the GPU that is used for the entire model
273
- // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results
274
- // LLAMA_SPLIT_LAYER: ignored
275
- int32_t main_gpu;
276
-
277
- // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
278
- const float * tensor_split;
279
-
280
- // comma separated list of RPC servers to use for offloading
281
- const char * rpc_servers;
282
-
283
- // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
284
- // If the provided progress_callback returns true, model loading continues.
285
- // If it returns false, model loading is immediately aborted.
286
- llama_progress_callback progress_callback;
287
-
288
- // context pointer passed to the progress callback
289
- void * progress_callback_user_data;
290
-
291
- // override key-value pairs of the model meta data
292
- const struct llama_model_kv_override * kv_overrides;
293
-
294
- // Keep the booleans together to avoid misalignment during copy-by-value.
295
- bool vocab_only; // only load the vocabulary, no weights
296
- bool use_mmap; // use mmap if possible
297
- bool use_mlock; // force system to keep model in RAM
298
- bool check_tensors; // validate model tensor data
299
- };
300
-
301
- // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
302
- // https://github.com/ggerganov/llama.cpp/pull/7544
303
- struct llama_context_params {
304
- uint32_t seed; // RNG seed, -1 for random
305
- uint32_t n_ctx; // text context, 0 = from model
306
- uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode
307
- uint32_t n_ubatch; // physical maximum batch size
308
- uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
309
- uint32_t n_threads; // number of threads to use for generation
310
- uint32_t n_threads_batch; // number of threads to use for batch processing
311
-
312
- enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
313
- enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
314
- enum llama_attention_type attention_type; // attention type to use for embeddings
315
-
316
- // ref: https://github.com/ggerganov/llama.cpp/pull/2054
317
- float rope_freq_base; // RoPE base frequency, 0 = from model
318
- float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
319
- float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model
320
- float yarn_attn_factor; // YaRN magnitude scaling factor
321
- float yarn_beta_fast; // YaRN low correction dim
322
- float yarn_beta_slow; // YaRN high correction dim
323
- uint32_t yarn_orig_ctx; // YaRN original context size
324
- float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default)
325
-
326
- lm_ggml_backend_sched_eval_callback cb_eval;
327
- void * cb_eval_user_data;
328
-
329
- enum lm_ggml_type type_k; // data type for K cache [EXPERIMENTAL]
330
- enum lm_ggml_type type_v; // data type for V cache [EXPERIMENTAL]
331
-
332
- // Keep the booleans together to avoid misalignment during copy-by-value.
333
- bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
334
- bool embeddings; // if true, extract embeddings (together with logits)
335
- bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
336
- bool flash_attn; // whether to use flash attention [EXPERIMENTAL]
337
-
338
- // Abort callback
339
- // if it returns true, execution of llama_decode() will be aborted
340
- // currently works only with CPU execution
341
- lm_ggml_abort_callback abort_callback;
342
- void * abort_callback_data;
343
- };
344
-
345
- // model quantization parameters
346
- typedef struct llama_model_quantize_params {
347
- int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
348
- enum llama_ftype ftype; // quantize to this llama_ftype
349
- enum lm_ggml_type output_tensor_type; // output tensor type
350
- enum lm_ggml_type token_embedding_type; // itoken embeddings tensor type
351
- bool allow_requantize; // allow quantizing non-f32/f16 tensors
352
- bool quantize_output_tensor; // quantize output.weight
353
- bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
354
- bool pure; // quantize all tensors to the default type
355
- bool keep_split; // quantize to the same number of shards
356
- void * imatrix; // pointer to importance matrix data
357
- void * kv_overrides; // pointer to vector containing overrides
358
- } llama_model_quantize_params;
359
-
360
- // grammar types
361
- struct llama_grammar;
362
-
363
- // grammar element type
364
- enum llama_gretype {
365
- // end of rule definition
366
- LLAMA_GRETYPE_END = 0,
367
-
368
- // start of alternate definition for rule
369
- LLAMA_GRETYPE_ALT = 1,
370
-
371
- // non-terminal element: reference to rule
372
- LLAMA_GRETYPE_RULE_REF = 2,
373
-
374
- // terminal element: character (code point)
375
- LLAMA_GRETYPE_CHAR = 3,
376
-
377
- // inverse char(s) ([^a], [^a-b] [^abc])
378
- LLAMA_GRETYPE_CHAR_NOT = 4,
379
-
380
- // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
381
- // be an inclusive range ([a-z])
382
- LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
383
-
384
- // modifies a preceding LLAMA_GRETYPE_CHAR or
385
- // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
386
- LLAMA_GRETYPE_CHAR_ALT = 6,
387
-
388
- // any character (.)
389
- LLAMA_GRETYPE_CHAR_ANY = 7,
390
- };
391
-
392
- typedef struct llama_grammar_element {
393
- enum llama_gretype type;
394
- uint32_t value; // Unicode code point or rule ID
395
- } llama_grammar_element;
396
-
397
- // performance timing information
398
- struct llama_timings {
399
- double t_start_ms;
400
- double t_end_ms;
401
- double t_load_ms;
402
- double t_sample_ms;
403
- double t_p_eval_ms;
404
- double t_eval_ms;
405
-
406
- int32_t n_sample;
407
- int32_t n_p_eval;
408
- int32_t n_eval;
409
- };
410
-
411
- // used in chat template
412
- typedef struct llama_chat_message {
413
- const char * role;
414
- const char * content;
415
- } llama_chat_message;
416
-
417
- // lora adapter
418
- struct llama_lora_adapter;
419
-
420
- // Helpers for getting default parameters
421
- LLAMA_API struct llama_model_params llama_model_default_params(void);
422
- LLAMA_API struct llama_context_params llama_context_default_params(void);
423
- LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
424
-
425
- // Initialize the llama + ggml backend
426
- // If numa is true, use NUMA optimizations
427
- // Call once at the start of the program
428
- LLAMA_API void llama_backend_init(void);
429
-
430
- //optional:
431
- LLAMA_API void llama_numa_init(enum lm_ggml_numa_strategy numa);
432
-
433
- // Call once at the end of the program - currently only used for MPI
434
- LLAMA_API void llama_backend_free(void);
435
-
436
- LLAMA_API struct llama_model * llama_load_model_from_file(
437
- const char * path_model,
438
- struct llama_model_params params);
439
-
440
- LLAMA_API void llama_free_model(struct llama_model * model);
441
-
442
- LLAMA_API struct llama_context * llama_new_context_with_model(
443
- struct llama_model * model,
444
- struct llama_context_params params);
445
-
446
- // Frees all allocated memory
447
- LLAMA_API void llama_free(struct llama_context * ctx);
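// [Editor's note: illustrative sketch, not part of llama.h or of this diff.
//  It strings together the lifecycle functions declared above; "model.gguf"
//  is a placeholder path and error handling is kept minimal.]
#include "llama.h"
#include <stdio.h>

int example_load_and_free(void) {
    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        llama_backend_free();
        return 1;
    }

    struct llama_context_params cparams = llama_context_default_params();
    struct llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        llama_free_model(model);
        llama_backend_free();
        return 1;
    }

    // ... tokenize, llama_decode(), sampling ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}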
448
-
449
- LLAMA_API int64_t llama_time_us(void);
450
-
451
- LLAMA_API size_t llama_max_devices(void);
452
-
453
- LLAMA_API bool llama_supports_mmap (void);
454
- LLAMA_API bool llama_supports_mlock (void);
455
- LLAMA_API bool llama_supports_gpu_offload(void);
456
-
457
- LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
458
-
459
- LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
460
- LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
461
- LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
462
- LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);
463
-
464
- LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
465
-
466
- LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
467
- LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);
468
-
469
- LLAMA_API int32_t llama_n_vocab (const struct llama_model * model);
470
- LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
471
- LLAMA_API int32_t llama_n_embd (const struct llama_model * model);
472
- LLAMA_API int32_t llama_n_layer (const struct llama_model * model);
473
-
474
- // Get the model's RoPE frequency scaling factor
475
- LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
476
-
477
- // Functions to access the model's GGUF metadata scalar values
478
- // - The functions return the length of the string on success, or -1 on failure
479
- // - The output string is always null-terminated and cleared on failure
480
- // - GGUF array values are not supported by these functions
481
-
482
- // Get metadata value as a string by key name
483
- LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
484
-
485
- // Get the number of metadata key/value pairs
486
- LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
487
-
488
- // Get metadata key name by index
489
- LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
490
-
491
- // Get metadata value as a string by index
492
- LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
493
-
494
- // Get a string describing the model type
495
- LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
496
-
497
- // Returns the total size of all the tensors in the model in bytes
498
- LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
499
-
500
- // Returns the total number of parameters in the model
501
- LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
502
-
503
- // Get a llama model tensor
504
- LLAMA_API struct lm_ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);
505
-
506
- // Returns true if the model contains an encoder that requires llama_encode() call
507
- LLAMA_API bool llama_model_has_encoder(const struct llama_model * model);
508
-
509
- // For encoder-decoder models, this function returns id of the token that must be provided
510
- // to the decoder to start generating output sequence. For other models, it returns -1.
511
- LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
512
-
513
- // Returns 0 on success
514
- LLAMA_API uint32_t llama_model_quantize(
515
- const char * fname_inp,
516
- const char * fname_out,
517
- const llama_model_quantize_params * params);
518
-
519
- // Load a LoRA adapter from file
520
- // The loaded adapter will be associated to the given model, and will be free when the model is deleted
521
- LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init(
522
- struct llama_model * model,
523
- const char * path_lora);
524
-
525
- // Add a loaded LoRA adapter to given context
526
- // This will not modify model's weight
527
- LLAMA_API int32_t llama_lora_adapter_set(
528
- struct llama_context * ctx,
529
- struct llama_lora_adapter * adapter,
530
- float scale);
531
-
532
- // Remove a LoRA adapter from given context
533
- // Return -1 if the adapter is not present in the context
534
- LLAMA_API int32_t llama_lora_adapter_remove(
535
- struct llama_context * ctx,
536
- struct llama_lora_adapter * adapter);
537
-
538
- // Manually free a LoRA adapter
539
- // Note: loaded adapters will be free when the associated model is deleted
540
- LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter);
541
-
542
- // Apply a loaded control vector to a llama_context, or if data is NULL, clear
543
- // the currently loaded vector.
544
- // n_embd should be the size of a single layer's control, and data should point
545
- // to an n_embd x n_layers buffer starting from layer 1.
546
- // il_start and il_end are the layer range the vector should apply to (both inclusive)
547
- // See llama_control_vector_load in common to load a control vector.
548
- LLAMA_API int32_t llama_control_vector_apply(
549
- struct llama_context * lctx,
550
- const float * data,
551
- size_t len,
552
- int32_t n_embd,
553
- int32_t il_start,
554
- int32_t il_end);
555
-
556
- //
557
- // KV cache
558
- //
559
-
560
- // Information associated with an individual cell in the KV cache view.
561
- struct llama_kv_cache_view_cell {
562
- // The position for this cell. Takes KV cache shifts into account.
563
- // May be negative if the cell is not populated.
564
- llama_pos pos;
565
- };
566
-
567
- // An updateable view of the KV cache.
568
- struct llama_kv_cache_view {
569
- // Number of KV cache cells. This will be the same as the context size.
570
- int32_t n_cells;
571
-
572
- // Maximum number of sequences that can exist in a cell. It's not an error
573
- // if there are more sequences in a cell than this value, however they will
574
- // not be visible in the view cells_sequences.
575
- int32_t n_seq_max;
576
-
577
- // Number of tokens in the cache. For example, if there are two populated
578
- // cells, the first with 1 sequence id in it and the second with 2 sequence
579
- // ids then you'll have 3 tokens.
580
- int32_t token_count;
581
-
582
- // Number of populated cache cells.
583
- int32_t used_cells;
584
-
585
- // Maximum contiguous empty slots in the cache.
586
- int32_t max_contiguous;
587
-
588
- // Index to the start of the max_contiguous slot range. Can be negative
589
- // when cache is full.
590
- int32_t max_contiguous_idx;
591
-
592
- // Information for an individual cell.
593
- struct llama_kv_cache_view_cell * cells;
594
-
595
- // The sequences for each cell. There will be n_seq_max items per cell.
596
- llama_seq_id * cells_sequences;
597
- };
598
-
599
- // Create an empty KV cache view. (use only for debugging purposes)
600
- LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max);
601
-
602
- // Free a KV cache view. (use only for debugging purposes)
603
- LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);
604
-
605
- // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
606
- LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);
607
-
608
- // Returns the number of tokens in the KV cache (slow, use only for debug)
609
- // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
610
- LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx);
611
-
612
- // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
613
- LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx);
614
-
615
- // Clear the KV cache - both cell info is erased and KV data is zeroed
616
- LLAMA_API void llama_kv_cache_clear(
617
- struct llama_context * ctx);
618
-
619
- // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
620
- // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
621
- // seq_id < 0 : match any sequence
622
- // p0 < 0 : [0, p1]
623
- // p1 < 0 : [p0, inf)
624
- LLAMA_API bool llama_kv_cache_seq_rm(
625
- struct llama_context * ctx,
626
- llama_seq_id seq_id,
627
- llama_pos p0,
628
- llama_pos p1);
629
-
630
- // Copy all tokens that belong to the specified sequence to another sequence
631
- // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
632
- // p0 < 0 : [0, p1]
633
- // p1 < 0 : [p0, inf)
634
- LLAMA_API void llama_kv_cache_seq_cp(
635
- struct llama_context * ctx,
636
- llama_seq_id seq_id_src,
637
- llama_seq_id seq_id_dst,
638
- llama_pos p0,
639
- llama_pos p1);
640
-
641
- // Removes all tokens that do not belong to the specified sequence
642
- LLAMA_API void llama_kv_cache_seq_keep(
643
- struct llama_context * ctx,
644
- llama_seq_id seq_id);
645
-
646
- // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
647
- // If the KV cache is RoPEd, the KV data is updated accordingly:
648
- // - lazily on next llama_decode()
649
- // - explicitly with llama_kv_cache_update()
650
- // p0 < 0 : [0, p1]
651
- // p1 < 0 : [p0, inf)
652
- LLAMA_API void llama_kv_cache_seq_add(
653
- struct llama_context * ctx,
654
- llama_seq_id seq_id,
655
- llama_pos p0,
656
- llama_pos p1,
657
- llama_pos delta);
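// [Editor's note: illustrative sketch, not part of llama.h or of this diff.
//  A common "context shift" built from llama_kv_cache_seq_rm/seq_add above:
//  drop the oldest n_discard tokens of sequence 0 after the first n_keep
//  tokens, then slide the remainder back so new tokens fit. n_keep, n_discard
//  and n_past are caller-side counters introduced here for clarity.]
#include "llama.h"

static void example_context_shift(struct llama_context * ctx,
                                  llama_pos n_keep, llama_pos n_discard, llama_pos n_past) {
    const llama_seq_id seq = 0;

    // erase positions [n_keep, n_keep + n_discard) of sequence 0
    llama_kv_cache_seq_rm (ctx, seq, n_keep, n_keep + n_discard);

    // shift positions [n_keep + n_discard, n_past) back by n_discard
    llama_kv_cache_seq_add(ctx, seq, n_keep + n_discard, n_past, -n_discard);
}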
658
-
659
- // Integer division of the positions by factor of `d > 1`
660
- // If the KV cache is RoPEd, the KV data is updated accordingly:
661
- // - lazily on next llama_decode()
662
- // - explicitly with llama_kv_cache_update()
663
- // p0 < 0 : [0, p1]
664
- // p1 < 0 : [p0, inf)
665
- LLAMA_API void llama_kv_cache_seq_div(
666
- struct llama_context * ctx,
667
- llama_seq_id seq_id,
668
- llama_pos p0,
669
- llama_pos p1,
670
- int d);
671
-
672
- // Returns the largest position present in the KV cache for the specified sequence
673
- LLAMA_API llama_pos llama_kv_cache_seq_pos_max(
674
- struct llama_context * ctx,
675
- llama_seq_id seq_id);
676
-
677
- // Defragment the KV cache
678
- // This will be applied:
679
- // - lazily on next llama_decode()
680
- // - explicitly with llama_kv_cache_update()
681
- LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx);
682
-
683
- // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
684
- LLAMA_API void llama_kv_cache_update(struct llama_context * ctx);
685
-
686
- //
687
- // State / sessions
688
- //
689
-
690
- // Returns the maximum size in bytes of the state (rng, logits, embedding
691
- // and kv_cache) - will often be smaller after compacting tokens
692
- LLAMA_API size_t llama_state_get_size(const struct llama_context * ctx);
693
- LLAMA_API DEPRECATED(size_t llama_get_state_size(const struct llama_context * ctx),
694
- "use llama_state_get_size instead");
695
-
696
- // Copies the state to the specified destination address.
697
- // Destination needs to have allocated enough memory.
698
- // Returns the number of bytes copied
699
- LLAMA_API size_t llama_state_get_data(
700
- struct llama_context * ctx,
701
- uint8_t * dst);
702
- LLAMA_API DEPRECATED(size_t llama_copy_state_data(
703
- struct llama_context * ctx,
704
- uint8_t * dst),
705
- "use llama_state_get_data instead");
706
-
707
- // Set the state reading from the specified address
708
- // Returns the number of bytes read
709
- LLAMA_API size_t llama_state_set_data(
710
- struct llama_context * ctx,
711
- const uint8_t * src);
712
- LLAMA_API DEPRECATED(size_t llama_set_state_data(
713
- struct llama_context * ctx,
714
- const uint8_t * src),
715
- "use llama_state_set_data instead");
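// [Editor's note: illustrative sketch, not part of llama.h or of this diff.
//  In-memory snapshot/restore using the non-deprecated state functions above.]
#include "llama.h"
#include <stdlib.h>

static uint8_t * example_state_snapshot(struct llama_context * ctx, size_t * size_out) {
    const size_t size = llama_state_get_size(ctx);
    uint8_t * buf = (uint8_t *) malloc(size);
    if (buf != NULL) {
        llama_state_get_data(ctx, buf); // returns the number of bytes copied
        *size_out = size;
    }
    return buf; // caller frees
}

static void example_state_restore(struct llama_context * ctx, const uint8_t * snapshot) {
    llama_state_set_data(ctx, snapshot); // returns the number of bytes read
}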
716
-
717
- // Save/load session file
718
- LLAMA_API bool llama_state_load_file(
719
- struct llama_context * ctx,
720
- const char * path_session,
721
- llama_token * tokens_out,
722
- size_t n_token_capacity,
723
- size_t * n_token_count_out);
724
- LLAMA_API DEPRECATED(bool llama_load_session_file(
725
- struct llama_context * ctx,
726
- const char * path_session,
727
- llama_token * tokens_out,
728
- size_t n_token_capacity,
729
- size_t * n_token_count_out),
730
- "use llama_state_load_file instead");
731
-
732
- LLAMA_API bool llama_state_save_file(
733
- struct llama_context * ctx,
734
- const char * path_session,
735
- const llama_token * tokens,
736
- size_t n_token_count);
737
- LLAMA_API DEPRECATED(bool llama_save_session_file(
738
- struct llama_context * ctx,
739
- const char * path_session,
740
- const llama_token * tokens,
741
- size_t n_token_count),
742
- "use llama_state_save_file instead");
743
-
744
- // Get the exact size needed to copy the KV cache of a single sequence
745
- LLAMA_API size_t llama_state_seq_get_size(
746
- struct llama_context * ctx,
747
- llama_seq_id seq_id);
748
-
749
- // Copy the KV cache of a single sequence into the specified buffer
750
- LLAMA_API size_t llama_state_seq_get_data(
751
- struct llama_context * ctx,
752
- uint8_t * dst,
753
- llama_seq_id seq_id);
754
-
755
- // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
756
- // Returns:
757
- // - Positive: Ok
758
- // - Zero: Failed to load
759
- LLAMA_API size_t llama_state_seq_set_data(
760
- struct llama_context * ctx,
761
- const uint8_t * src,
762
- llama_seq_id dest_seq_id);
763
-
764
- LLAMA_API size_t llama_state_seq_save_file(
765
- struct llama_context * ctx,
766
- const char * filepath,
767
- llama_seq_id seq_id,
768
- const llama_token * tokens,
769
- size_t n_token_count);
770
-
771
- LLAMA_API size_t llama_state_seq_load_file(
772
- struct llama_context * ctx,
773
- const char * filepath,
774
- llama_seq_id dest_seq_id,
775
- llama_token * tokens_out,
776
- size_t n_token_capacity,
777
- size_t * n_token_count_out);
778
-
779
- //
780
- // Decoding
781
- //
782
-
783
- // Return batch for single sequence of tokens starting at pos_0
784
- //
785
- // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
786
- //
787
- LLAMA_API struct llama_batch llama_batch_get_one(
788
- llama_token * tokens,
789
- int32_t n_tokens,
790
- llama_pos pos_0,
791
- llama_seq_id seq_id);
792
-
793
- // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
794
- // Each token can be assigned up to n_seq_max sequence ids
795
- // The batch has to be freed with llama_batch_free()
796
- // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
797
- // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
798
- // The rest of the llama_batch members are allocated with size n_tokens
799
- // All members are left uninitialized
800
- LLAMA_API struct llama_batch llama_batch_init(
801
- int32_t n_tokens,
802
- int32_t embd,
803
- int32_t n_seq_max);
804
-
805
- // Frees a batch of tokens allocated with llama_batch_init()
806
- LLAMA_API void llama_batch_free(struct llama_batch batch);
807
-
808
- // Processes a batch of tokens with the ecoder part of the encoder-decoder model.
809
- // Stores the encoder output internally for later use by the decoder cross-attention layers.
810
- // 0 - success
811
- // < 0 - error
812
- LLAMA_API int32_t llama_encode(
813
- struct llama_context * ctx,
814
- struct llama_batch batch);
815
-
816
- // Positive return values does not mean a fatal error, but rather a warning.
817
- // 0 - success
818
- // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
819
- // < 0 - error
820
- LLAMA_API int32_t llama_decode(
821
- struct llama_context * ctx,
822
- struct llama_batch batch);
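// [Editor's note: illustrative sketch, not part of llama.h or of this diff.
//  Decoding a prompt with llama_batch_init()/llama_decode()/llama_batch_free().
//  llama_batch_init() leaves all members uninitialized, so every field is
//  filled explicitly; only the last token requests logits.]
#include "llama.h"

static int32_t example_decode_prompt(struct llama_context * ctx,
                                     const llama_token * tokens, int32_t n_tokens) {
    struct llama_batch batch = llama_batch_init(n_tokens, /*embd=*/0, /*n_seq_max=*/1);

    batch.n_tokens = n_tokens;
    for (int32_t i = 0; i < n_tokens; ++i) {
        batch.token   [i]    = tokens[i];
        batch.pos     [i]    = i;
        batch.n_seq_id[i]    = 1;
        batch.seq_id  [i][0] = 0;                    // everything belongs to sequence 0
        batch.logits  [i]    = (i == n_tokens - 1);  // logits only for the last token
    }

    const int32_t ret = llama_decode(ctx, batch);    // 0 = ok, 1 = no KV slot, < 0 = error
    llama_batch_free(batch);
    return ret;
}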
823
-
824
- // Set the number of threads used for decoding
825
- // n_threads is the number of threads used for generation (single token)
826
- // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
827
- LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);
828
-
829
- // Get the number of threads used for generation of a single token.
830
- LLAMA_API uint32_t llama_n_threads(struct llama_context * ctx);
831
-
832
- // Get the number of threads used for prompt and batch processing (multiple token).
833
- LLAMA_API uint32_t llama_n_threads_batch(struct llama_context * ctx);
834
-
835
- // Set whether the model is in embeddings mode or not
836
- // If true, embeddings will be returned but logits will not
837
- LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings);
838
-
839
- // Set whether to use causal attention or not
840
- // If set to true, the model will only attend to the past tokens
841
- LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
842
-
843
- // Set abort callback
844
- LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, lm_ggml_abort_callback abort_callback, void * abort_callback_data);
845
-
846
- // Wait until all computations are finished
847
- // This is automatically done when using one of the functions below to obtain the computation results
848
- // and is not necessary to call it explicitly in most cases
849
- LLAMA_API void llama_synchronize(struct llama_context * ctx);
850
-
851
- // Token logits obtained from the last call to llama_decode()
852
- // The logits for which llama_batch.logits[i] != 0 are stored contiguously
853
- // in the order they have appeared in the batch.
854
- // Rows: number of tokens for which llama_batch.logits[i] != 0
855
- // Cols: n_vocab
856
- LLAMA_API float * llama_get_logits(struct llama_context * ctx);
857
-
858
- // Logits for the ith token. For positive indices, Equivalent to:
859
- // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
860
- // Negative indicies can be used to access logits in reverse order, -1 is the last logit.
861
- // returns NULL for invalid ids.
862
- LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);
863
-
864
- // Get all output token embeddings.
865
- // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
866
- // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
867
- // in the order they have appeared in the batch.
868
- // shape: [n_outputs*n_embd]
869
- // Otherwise, returns NULL.
870
- LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
871
-
872
- // Get the embeddings for the ith token. For positive indices, Equivalent to:
873
- // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
874
- // Negative indicies can be used to access embeddings in reverse order, -1 is the last embedding.
875
- // shape: [n_embd] (1-dimensional)
876
- // returns NULL for invalid ids.
877
- LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
878
-
879
- // Get the embeddings for a sequence id
880
- // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
881
- // shape: [n_embd] (1-dimensional)
882
- LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
883
-
884
- //
885
- // Vocab
886
- //
887
-
888
- LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token);
889
-
890
- LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);
891
-
892
- LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token);
893
-
894
- // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)
895
- LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token);
896
-
897
- // Identify if Token Id is a control token or a render-able token
898
- LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token);
899
-
900
- // Special tokens
901
- LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
902
- LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
903
- LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification
904
- LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator
905
- LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
906
- LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding
907
-
908
- // Returns -1 if unknown, 1 for true or 0 for false.
909
- LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);
910
-
911
- // Returns -1 if unknown, 1 for true or 0 for false.
912
- LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);
913
-
914
- // Codellama infill tokens
915
- LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
916
- LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
917
- LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
918
- LLAMA_API llama_token llama_token_eot (const struct llama_model * model); // End of infill middle
919
-
920
- //
921
- // Tokenization
922
- //
923
-
924
- /// @details Convert the provided text into tokens.
925
- /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
926
- /// @return Returns the number of tokens on success, no more than n_tokens_max
927
- /// @return Returns a negative number on failure - the number of tokens that would have been returned
928
- /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so.
929
- /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
930
- /// as plaintext. Does not insert a leading space.
931
- LLAMA_API int32_t llama_tokenize(
932
- const struct llama_model * model,
933
- const char * text,
934
- int32_t text_len,
935
- llama_token * tokens,
936
- int32_t n_tokens_max,
937
- bool add_special,
938
- bool parse_special);
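// [Editor's note: illustrative sketch, not part of llama.h or of this diff.
//  Two-pass tokenization using the return contract documented above: a negative
//  return value is the token count that would have been produced, so the buffer
//  can be grown once and the call repeated. Allocation failures are not handled.]
#include "llama.h"
#include <stdlib.h>
#include <string.h>

static llama_token * example_tokenize(const struct llama_model * model,
                                      const char * text, int32_t * n_out) {
    const int32_t text_len = (int32_t) strlen(text);

    int32_t cap = 64;
    llama_token * tokens = (llama_token *) malloc(cap * sizeof(llama_token));

    int32_t n = llama_tokenize(model, text, text_len, tokens, cap,
                               /*add_special=*/true, /*parse_special=*/false);
    if (n < 0) {
        cap    = -n; // the exact number of tokens needed
        tokens = (llama_token *) realloc(tokens, cap * sizeof(llama_token));
        n      = llama_tokenize(model, text, text_len, tokens, cap, true, false);
    }

    *n_out = n;
    return tokens; // caller frees
}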
939
-
940
- // Token Id -> Piece.
941
- // Uses the vocabulary in the provided context.
942
- // Does not write null terminator to the buffer.
943
- // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix')
944
- // @param special If true, special tokens are rendered in the output.
945
- LLAMA_API int32_t llama_token_to_piece(
946
- const struct llama_model * model,
947
- llama_token token,
948
- char * buf,
949
- int32_t length,
950
- int32_t lstrip,
951
- bool special);
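// [Editor's note: illustrative sketch, not part of llama.h or of this diff.
//  Printing a single token as text; as documented above, no null terminator is
//  written, so the returned length is used directly.]
#include "llama.h"
#include <stdio.h>

static void example_print_token(const struct llama_model * model, llama_token tok) {
    char buf[128];
    const int32_t n = llama_token_to_piece(model, tok, buf, (int32_t) sizeof(buf),
                                           /*lstrip=*/0, /*special=*/false);
    if (n > 0) {
        fwrite(buf, 1, (size_t) n, stdout);
    }
}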
952
-
953
- /// @details Convert the provided tokens into text (inverse of llama_tokenize()).
954
- /// @param text The char pointer must be large enough to hold the resulting text.
955
- /// @return Returns the number of chars/bytes on success, no more than text_len_max.
956
- /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned.
957
- /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so.
958
- /// @param unparse_special If true, special tokens are rendered in the output.
959
- LLAMA_API int32_t llama_detokenize(
960
- const struct llama_model * model,
961
- const llama_token * tokens,
962
- int32_t n_tokens,
963
- char * text,
964
- int32_t text_len_max,
965
- bool remove_special,
966
- bool unparse_special);
967
-
968
- /// Apply chat template. Inspired by hf apply_chat_template() on python.
969
- /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
970
- /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
971
- /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.
972
- /// @param chat Pointer to a list of multiple llama_chat_message
973
- /// @param n_msg Number of llama_chat_message in this chat
974
- /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
975
- /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)
976
- /// @param length The size of the allocated buffer
977
- /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template.
978
- LLAMA_API int32_t llama_chat_apply_template(
979
- const struct llama_model * model,
980
- const char * tmpl,
981
- const struct llama_chat_message * chat,
982
- size_t n_msg,
983
- bool add_ass,
984
- char * buf,
985
- int32_t length);
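// [Editor's note: illustrative sketch, not part of llama.h or of this diff.
//  Applying the model's built-in template (tmpl == NULL) with the grow-and-retry
//  behaviour described above. The messages are placeholders; the output should be
//  used as (buf, n) since null termination is not guaranteed when the buffer is full.]
#include "llama.h"
#include <stdlib.h>

static char * example_format_chat(const struct llama_model * model, int32_t * n_out) {
    const struct llama_chat_message chat[] = {
        { "system", "You are a helpful assistant." },
        { "user",   "Hello!"                       },
    };
    const size_t n_msg = sizeof(chat) / sizeof(chat[0]);

    int32_t cap = 1024;
    char *  buf = (char *) malloc(cap);

    int32_t n = llama_chat_apply_template(model, NULL, chat, n_msg, /*add_ass=*/true, buf, cap);
    if (n > cap) {
        cap = n; // the return value is the required size - grow and re-apply
        buf = (char *) realloc(buf, cap);
        n   = llama_chat_apply_template(model, NULL, chat, n_msg, true, buf, cap);
    }

    *n_out = n;
    return buf; // caller frees
}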
986
-
987
- //
988
- // Grammar
989
- //
990
-
991
- /// Initialize a llama_grammar.
992
- ///
993
- /// @param rules The rule elements of the grammar to initialize.
994
- /// @param n_rules The number of rules.
995
- /// @param start_rule_index The index of the root rule (the starting point of the grammar).
996
- /// @return The initialized llama_grammar or nullptr if initialization failed.
997
- LLAMA_API struct llama_grammar * llama_grammar_init(
998
- const llama_grammar_element ** rules,
999
- size_t n_rules,
1000
- size_t start_rule_index);
1001
-
1002
- LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
1003
-
1004
- LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);
1005
-
1006
- //
1007
- // Sampling functions
1008
- //
1009
-
1010
- // Sets the current rng seed.
1011
- LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
1012
-
1013
- /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
1014
- /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
1015
- LLAMA_API void llama_sample_repetition_penalties(
1016
- struct llama_context * ctx,
1017
- llama_token_data_array * candidates,
1018
- const llama_token * last_tokens,
1019
- size_t penalty_last_n,
1020
- float penalty_repeat,
1021
- float penalty_freq,
1022
- float penalty_present);
1023
-
1024
- /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
1025
- /// @param logits Logits extracted from the original generation context.
1026
- /// @param logits_guidance Logits extracted from a separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
1027
- /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
1028
- LLAMA_API void llama_sample_apply_guidance(
1029
- struct llama_context * ctx,
1030
- float * logits,
1031
- float * logits_guidance,
1032
- float scale);
1033
-
1034
- /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
1035
- LLAMA_API void llama_sample_softmax(
1036
- struct llama_context * ctx,
1037
- llama_token_data_array * candidates);
1038
-
1039
- /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
1040
- LLAMA_API void llama_sample_top_k(
1041
- struct llama_context * ctx,
1042
- llama_token_data_array * candidates,
1043
- int32_t k,
1044
- size_t min_keep);
1045
-
1046
- /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
1047
- LLAMA_API void llama_sample_top_p(
1048
- struct llama_context * ctx,
1049
- llama_token_data_array * candidates,
1050
- float p,
1051
- size_t min_keep);
1052
-
1053
- /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
1054
- LLAMA_API void llama_sample_min_p(
1055
- struct llama_context * ctx,
1056
- llama_token_data_array * candidates,
1057
- float p,
1058
- size_t min_keep);
1059
-
1060
- /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
1061
- LLAMA_API void llama_sample_tail_free(
1062
- struct llama_context * ctx,
1063
- llama_token_data_array * candidates,
1064
- float z,
1065
- size_t min_keep);
1066
-
1067
- /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
1068
- LLAMA_API void llama_sample_typical(
1069
- struct llama_context * ctx,
1070
- llama_token_data_array * candidates,
1071
- float p,
1072
- size_t min_keep);
1073
-
1074
- /// @details Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772.
1075
- LLAMA_API void llama_sample_entropy(
1076
- struct llama_context * ctx,
1077
- llama_token_data_array * candidates_p,
1078
- float min_temp,
1079
- float max_temp,
1080
- float exponent_val);
1081
-
1082
- LLAMA_API void llama_sample_temp(
1083
- struct llama_context * ctx,
1084
- llama_token_data_array * candidates,
1085
- float temp);
1086
-
1087
- /// @details Apply constraints from grammar
1088
- LLAMA_API void llama_sample_grammar(
1089
- struct llama_context * ctx,
1090
- llama_token_data_array * candidates,
1091
- const struct llama_grammar * grammar);
1092
-
1093
- /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
1094
- /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
1095
- /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
1096
- /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
1097
- /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
1098
- /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
1099
- LLAMA_API llama_token llama_sample_token_mirostat(
1100
- struct llama_context * ctx,
1101
- llama_token_data_array * candidates,
1102
- float tau,
1103
- float eta,
1104
- int32_t m,
1105
- float * mu);
1106
-
1107
- /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
1108
- /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
1109
- /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
1110
- /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
1111
- /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
1112
- LLAMA_API llama_token llama_sample_token_mirostat_v2(
1113
- struct llama_context * ctx,
1114
- llama_token_data_array * candidates,
1115
- float tau,
1116
- float eta,
1117
- float * mu);
1118
-
1119
- /// @details Selects the token with the highest probability.
1120
- /// Does not compute the token probabilities. Use llama_sample_softmax() instead.
1121
- LLAMA_API llama_token llama_sample_token_greedy(
1122
- struct llama_context * ctx,
1123
- llama_token_data_array * candidates);
1124
-
1125
- /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
1126
- LLAMA_API llama_token llama_sample_token(
1127
- struct llama_context * ctx,
1128
- llama_token_data_array * candidates);
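// [Editor's note: illustrative sketch, not part of llama.h or of this diff.
//  A typical sampling chain built from the primitives above: gather the logits of
//  the last decoded token into a llama_token_data_array, apply top-k, top-p and
//  temperature, then draw a token. The cut-off values are arbitrary examples.]
#include "llama.h"
#include <stdlib.h>

static llama_token example_sample(struct llama_context * ctx, const struct llama_model * model) {
    const int32_t n_vocab = llama_n_vocab(model);
    const float * logits  = llama_get_logits_ith(ctx, -1); // -1 = last token's logits

    llama_token_data * cand = (llama_token_data *) malloc(n_vocab * sizeof(llama_token_data));
    for (llama_token id = 0; id < n_vocab; ++id) {
        cand[id].id    = id;
        cand[id].logit = logits[id];
        cand[id].p     = 0.0f;
    }
    llama_token_data_array cand_p = { cand, (size_t) n_vocab, /*sorted=*/false };

    llama_sample_top_k(ctx, &cand_p, /*k=*/40,    /*min_keep=*/1);
    llama_sample_top_p(ctx, &cand_p, /*p=*/0.95f, /*min_keep=*/1);
    llama_sample_temp (ctx, &cand_p, /*temp=*/0.80f);

    const llama_token tok = llama_sample_token(ctx, &cand_p);
    free(cand);
    return tok;
}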
1129
-
1130
- /// @details Accepts the sampled token into the grammar
1131
- LLAMA_API void llama_grammar_accept_token(
1132
- struct llama_context * ctx,
1133
- struct llama_grammar * grammar,
1134
- llama_token token);
1135
-
1136
- //
1137
- // Model split
1138
- //
1139
-
1140
- /// @details Build a split GGUF final path for this chunk.
1141
- /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
1142
- // Returns the split_path length.
1143
- LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);
1144
-
1145
- /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
1146
- /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
1147
- // Returns the split_prefix length.
1148
- LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
1149
-
1150
- // Performance information
1151
- LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
1152
-
1153
- LLAMA_API void llama_print_timings(struct llama_context * ctx);
1154
- LLAMA_API void llama_reset_timings(struct llama_context * ctx);
1155
-
1156
- // Print system information
1157
- LLAMA_API const char * llama_print_system_info(void);
1158
-
1159
- // Set callback for all future logging events.
1160
- // If this is not called, or NULL is supplied, everything is output on stderr.
1161
- LLAMA_API void llama_log_set(lm_ggml_log_callback log_callback, void * user_data);
1162
-
1163
- LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
1164
-
1165
- #ifdef __cplusplus
1166
- }
1167
- #endif
1168
-
1169
- // Internal API to be implemented by llama.cpp and used by tests/benchmarks only
1170
- #ifdef LLAMA_API_INTERNAL
1171
-
1172
- #include <random>
1173
- #include <string>
1174
- #include <vector>
1175
-
1176
- struct lm_ggml_tensor;
1177
-
1178
- struct llama_partial_utf8 {
1179
- uint32_t value; // bit value so far (unshifted)
1180
- int n_remain; // num bytes remaining; -1 indicates invalid sequence
1181
- };
1182
-
1183
- struct llama_grammar {
1184
- const std::vector<std::vector<llama_grammar_element>> rules;
1185
- std::vector<std::vector<const llama_grammar_element *>> stacks;
1186
-
1187
- // buffer for partially generated UTF-8 sequence from accepted tokens
1188
- llama_partial_utf8 partial_utf8;
1189
- };
1190
-
1191
- struct llama_grammar_candidate {
1192
- size_t index;
1193
- const uint32_t * code_points;
1194
- llama_partial_utf8 partial_utf8;
1195
- };
1196
-
1197
- const std::vector<std::pair<std::string, struct lm_ggml_tensor *>> & llama_internal_get_tensor_map(
1198
- struct llama_context * ctx
1199
- );
1200
-
1201
- void llama_grammar_accept(
1202
- const std::vector<std::vector<llama_grammar_element>> & rules,
1203
- const std::vector<std::vector<const llama_grammar_element *>> & stacks,
1204
- const uint32_t chr,
1205
- std::vector<std::vector<const llama_grammar_element *>> & new_stacks);
1206
-
1207
- std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
1208
- const std::string & src,
1209
- llama_partial_utf8 partial_start);
1210
-
1211
- // Randomly selects a token from the candidates based on their probabilities using given std::mt19937.
1212
- // This is a temporary workaround in order to fix race conditions when sampling with multiple sequences.
1213
- llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng);
1214
-
1215
- #endif // LLAMA_API_INTERNAL
1216
-
1217
- #endif // LLAMA_H
1
+ #ifndef LLAMA_H
2
+ #define LLAMA_H
3
+
4
+ #include "ggml.h"
5
+ #include "ggml-backend.h"
6
+
7
+ #include <stddef.h>
8
+ #include <stdint.h>
9
+ #include <stdio.h>
10
+ #include <stdbool.h>
11
+
12
+ #ifdef LLAMA_SHARED
13
+ # if defined(_WIN32) && !defined(__MINGW32__)
14
+ # ifdef LLAMA_BUILD
15
+ # define LLAMA_API __declspec(dllexport)
16
+ # else
17
+ # define LLAMA_API __declspec(dllimport)
18
+ # endif
19
+ # else
20
+ # define LLAMA_API __attribute__ ((visibility ("default")))
21
+ # endif
22
+ #else
23
+ # define LLAMA_API
24
+ #endif
25
+
26
+ #ifdef __GNUC__
27
+ # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
28
+ #elif defined(_MSC_VER)
29
+ # define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
30
+ #else
31
+ # define DEPRECATED(func, hint) func
32
+ #endif
33
+
34
+ #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
35
+
36
+ #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
37
+ #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
38
+ #define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'
39
+
40
+ #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
41
+ #define LLAMA_SESSION_VERSION 8
42
+
43
+ #define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ
44
+ #define LLAMA_STATE_SEQ_VERSION 2
45
+
46
+ #ifdef __cplusplus
47
+ extern "C" {
48
+ #endif
49
+
50
+ //
51
+ // C interface
52
+ //
53
+ // TODO: show sample usage
54
+ //
55
+
56
+ struct llama_model;
57
+ struct llama_context;
58
+
59
+ typedef int32_t llama_pos;
60
+ typedef int32_t llama_token;
61
+ typedef int32_t llama_seq_id;
62
+
63
+ enum llama_vocab_type {
64
+ LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
65
+ LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
66
+ LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE
67
+ LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece
68
+ LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram
69
+ };
70
+
71
+ // pre-tokenization types
72
+ enum llama_vocab_pre_type {
73
+ LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0,
74
+ LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1,
75
+ LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2,
76
+ LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
77
+ LLAMA_VOCAB_PRE_TYPE_FALCON = 4,
78
+ LLAMA_VOCAB_PRE_TYPE_MPT = 5,
79
+ LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
80
+ LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
81
+ LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
82
+ LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
83
+ LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10,
84
+ LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11,
85
+ LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
86
+ LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
87
+ LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
88
+ LLAMA_VOCAB_PRE_TYPE_PORO = 15,
89
+ LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16,
90
+ LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17,
91
+ LLAMA_VOCAB_PRE_TYPE_VIKING = 18,
92
+ LLAMA_VOCAB_PRE_TYPE_JAIS = 19,
93
+ LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20,
94
+ LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21,
95
+ LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22,
96
+ };
97
+
98
+ // note: these values should be synchronized with lm_ggml_rope
99
+ // TODO: maybe move this enum to ggml.h (lm_ggml_rope_type)
100
+ enum llama_rope_type {
101
+ LLAMA_ROPE_TYPE_NONE = -1,
102
+ LLAMA_ROPE_TYPE_NORM = 0,
103
+ LLAMA_ROPE_TYPE_NEOX = 2,
104
+ LLAMA_ROPE_TYPE_GLM = 4,
105
+ };
106
+
107
+ enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
108
+ LLAMA_TOKEN_TYPE_UNDEFINED = 0,
109
+ LLAMA_TOKEN_TYPE_NORMAL = 1,
110
+ LLAMA_TOKEN_TYPE_UNKNOWN = 2,
111
+ LLAMA_TOKEN_TYPE_CONTROL = 3,
112
+ LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
113
+ LLAMA_TOKEN_TYPE_UNUSED = 5,
114
+ LLAMA_TOKEN_TYPE_BYTE = 6,
115
+ };
116
+
117
+ enum llama_token_attr {
118
+ LLAMA_TOKEN_ATTR_UNDEFINED = 0,
119
+ LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0,
120
+ LLAMA_TOKEN_ATTR_UNUSED = 1 << 1,
121
+ LLAMA_TOKEN_ATTR_NORMAL = 1 << 2,
122
+ LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL?
123
+ LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
124
+ LLAMA_TOKEN_ATTR_BYTE = 1 << 5,
125
+ LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6,
126
+ LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7,
127
+ LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8,
128
+ LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9,
129
+ };
130
+
131
+ // model file types
132
+ enum llama_ftype {
133
+ LLAMA_FTYPE_ALL_F32 = 0,
134
+ LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
135
+ LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
136
+ LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
137
+ // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
138
+ // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
139
+ // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
140
+ LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
141
+ LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
142
+ LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
143
+ LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
144
+ LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
145
+ LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
146
+ LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
147
+ LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
148
+ LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
149
+ LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
150
+ LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
151
+ LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
152
+ LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
153
+ LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors
154
+ LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors
155
+ LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors
156
+ LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
157
+ LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors
158
+ LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors
159
+ LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors
160
+ LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors
161
+ LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors
162
+ LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
163
+ LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
164
+ LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
165
+ LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
166
+ LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors
167
+ LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors
168
+ LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
169
+
170
+ LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
171
+ };
172
+
173
+ enum llama_rope_scaling_type {
174
+ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1,
175
+ LLAMA_ROPE_SCALING_TYPE_NONE = 0,
176
+ LLAMA_ROPE_SCALING_TYPE_LINEAR = 1,
177
+ LLAMA_ROPE_SCALING_TYPE_YARN = 2,
178
+ LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN,
179
+ };
180
+
181
+ enum llama_pooling_type {
182
+ LLAMA_POOLING_TYPE_UNSPECIFIED = -1,
183
+ LLAMA_POOLING_TYPE_NONE = 0,
184
+ LLAMA_POOLING_TYPE_MEAN = 1,
185
+ LLAMA_POOLING_TYPE_CLS = 2,
186
+ LLAMA_POOLING_TYPE_LAST = 3,
187
+ };
188
+
189
+ enum llama_attention_type {
190
+ LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1,
191
+ LLAMA_ATTENTION_TYPE_CAUSAL = 0,
192
+ LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1,
193
+ };
194
+
195
+ enum llama_split_mode {
196
+ LLAMA_SPLIT_MODE_NONE = 0, // single GPU
197
+ LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
198
+ LLAMA_SPLIT_MODE_ROW = 2, // split rows across GPUs
199
+ };
200
+
201
+ typedef struct llama_token_data {
202
+ llama_token id; // token id
203
+ float logit; // log-odds of the token
204
+ float p; // probability of the token
205
+ } llama_token_data;
206
+
207
+ typedef struct llama_token_data_array {
208
+ llama_token_data * data;
209
+ size_t size;
210
+ bool sorted;
211
+ } llama_token_data_array;
212
+
213
+ typedef bool (*llama_progress_callback)(float progress, void * user_data);
214
+
215
+ // Input data for llama_decode
216
+ // A llama_batch object can contain input about one or many sequences
217
+ // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
218
+ //
219
+ // - token : the token ids of the input (used when embd is NULL)
220
+ // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
221
+ // - pos : the positions of the respective token in the sequence
222
+ // - seq_id : the sequence to which the respective token belongs
223
+ // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
224
+ //
225
+ typedef struct llama_batch {
226
+ int32_t n_tokens;
227
+
228
+ llama_token * token;
229
+ float * embd;
230
+ llama_pos * pos;
231
+ int32_t * n_seq_id;
232
+ llama_seq_id ** seq_id;
233
+ int8_t * logits; // TODO: rename this to "output"
234
+
235
+ // NOTE: helpers for smooth API transition - can be deprecated in the future
236
+ // for future-proof code, use the above fields instead and ignore everything below
237
+ //
238
+ // pos[i] = all_pos_0 + i*all_pos_1
239
+ //
240
+ llama_pos all_pos_0; // used if pos == NULL
241
+ llama_pos all_pos_1; // used if pos == NULL
242
+ llama_seq_id all_seq_id; // used if seq_id == NULL
243
+ } llama_batch;
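// Illustrative usage sketch (not from the package sources): filling a llama_batch by hand for a
// single prompt sequence, following the field descriptions above. Assumes the batch was created
// with llama_batch_init(n_prompt, 0, 1) and that `prompt_tokens` came from llama_tokenize().
static void example_fill_batch(struct llama_batch * batch,
                               const llama_token * prompt_tokens, int32_t n_prompt) {
    batch->n_tokens = n_prompt;
    for (int32_t i = 0; i < n_prompt; ++i) {
        batch->token   [i]    = prompt_tokens[i];
        batch->pos     [i]    = i;  // position of the token within the sequence
        batch->n_seq_id[i]    = 1;  // each token belongs to a single sequence
        batch->seq_id  [i][0] = 0;  // ... namely sequence 0
        batch->logits  [i]    = 0;  // no output for prompt tokens ...
    }
    batch->logits[n_prompt - 1] = 1; // ... except the last one
}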
244
+
245
+ enum llama_model_kv_override_type {
246
+ LLAMA_KV_OVERRIDE_TYPE_INT,
247
+ LLAMA_KV_OVERRIDE_TYPE_FLOAT,
248
+ LLAMA_KV_OVERRIDE_TYPE_BOOL,
249
+ LLAMA_KV_OVERRIDE_TYPE_STR,
250
+ };
251
+
252
+ struct llama_model_kv_override {
253
+ enum llama_model_kv_override_type tag;
254
+
255
+ char key[128];
256
+
257
+ union {
258
+ int64_t val_i64;
259
+ double val_f64;
260
+ bool val_bool;
261
+ char val_str[128];
262
+ };
263
+ };
264
+
265
+ struct llama_model_params {
266
+ int32_t n_gpu_layers; // number of layers to store in VRAM
267
+ enum llama_split_mode split_mode; // how to split the model across multiple GPUs
268
+
269
+ // main_gpu interpretation depends on split_mode:
270
+ // LLAMA_SPLIT_MODE_NONE: the GPU that is used for the entire model
271
+ // LLAMA_SPLIT_MODE_ROW: the GPU that is used for small tensors and intermediate results
272
+ // LLAMA_SPLIT_MODE_LAYER: ignored
273
+ int32_t main_gpu;
274
+
275
+ // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
276
+ const float * tensor_split;
277
+
278
+ // comma separated list of RPC servers to use for offloading
279
+ const char * rpc_servers;
280
+
281
+ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
282
+ // If the provided progress_callback returns true, model loading continues.
283
+ // If it returns false, model loading is immediately aborted.
284
+ llama_progress_callback progress_callback;
285
+
286
+ // context pointer passed to the progress callback
287
+ void * progress_callback_user_data;
288
+
289
+ // override key-value pairs of the model meta data
290
+ const struct llama_model_kv_override * kv_overrides;
291
+
292
+ // Keep the booleans together to avoid misalignment during copy-by-value.
293
+ bool vocab_only; // only load the vocabulary, no weights
294
+ bool use_mmap; // use mmap if possible
295
+ bool use_mlock; // force system to keep model in RAM
296
+ bool check_tensors; // validate model tensor data
297
+ };
298
+
299
+ // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
300
+ // https://github.com/ggerganov/llama.cpp/pull/7544
301
+ struct llama_context_params {
302
+ uint32_t seed; // RNG seed, -1 for random
303
+ uint32_t n_ctx; // text context, 0 = from model
304
+ uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode
305
+ uint32_t n_ubatch; // physical maximum batch size
306
+ uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
307
+ uint32_t n_threads; // number of threads to use for generation
308
+ uint32_t n_threads_batch; // number of threads to use for batch processing
309
+
310
+ enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
311
+ enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
312
+ enum llama_attention_type attention_type; // attention type to use for embeddings
313
+
314
+ // ref: https://github.com/ggerganov/llama.cpp/pull/2054
315
+ float rope_freq_base; // RoPE base frequency, 0 = from model
316
+ float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
317
+ float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model
318
+ float yarn_attn_factor; // YaRN magnitude scaling factor
319
+ float yarn_beta_fast; // YaRN low correction dim
320
+ float yarn_beta_slow; // YaRN high correction dim
321
+ uint32_t yarn_orig_ctx; // YaRN original context size
322
+ float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default)
323
+
324
+ lm_ggml_backend_sched_eval_callback cb_eval;
325
+ void * cb_eval_user_data;
326
+
327
+ enum lm_ggml_type type_k; // data type for K cache [EXPERIMENTAL]
328
+ enum lm_ggml_type type_v; // data type for V cache [EXPERIMENTAL]
329
+
330
+ // Keep the booleans together to avoid misalignment during copy-by-value.
331
+ bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
332
+ bool embeddings; // if true, extract embeddings (together with logits)
333
+ bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
334
+ bool flash_attn; // whether to use flash attention [EXPERIMENTAL]
335
+
336
+ // Abort callback
337
+ // if it returns true, execution of llama_decode() will be aborted
338
+ // currently works only with CPU execution
339
+ lm_ggml_abort_callback abort_callback;
340
+ void * abort_callback_data;
341
+ };
342
+
343
+ // model quantization parameters
344
+ typedef struct llama_model_quantize_params {
345
+ int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
346
+ enum llama_ftype ftype; // quantize to this llama_ftype
347
+ enum lm_ggml_type output_tensor_type; // output tensor type
348
+ enum lm_ggml_type token_embedding_type; // token embeddings tensor type
349
+ bool allow_requantize; // allow quantizing non-f32/f16 tensors
350
+ bool quantize_output_tensor; // quantize output.weight
351
+ bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
352
+ bool pure; // quantize all tensors to the default type
353
+ bool keep_split; // quantize to the same number of shards
354
+ void * imatrix; // pointer to importance matrix data
355
+ void * kv_overrides; // pointer to vector containing overrides
356
+ } llama_model_quantize_params;
357
+
358
+ // grammar types
359
+ struct llama_grammar;
360
+
361
+ // grammar element type
362
+ enum llama_gretype {
363
+ // end of rule definition
364
+ LLAMA_GRETYPE_END = 0,
365
+
366
+ // start of alternate definition for rule
367
+ LLAMA_GRETYPE_ALT = 1,
368
+
369
+ // non-terminal element: reference to rule
370
+ LLAMA_GRETYPE_RULE_REF = 2,
371
+
372
+ // terminal element: character (code point)
373
+ LLAMA_GRETYPE_CHAR = 3,
374
+
375
+ // inverse char(s) ([^a], [^a-b] [^abc])
376
+ LLAMA_GRETYPE_CHAR_NOT = 4,
377
+
378
+ // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
379
+ // be an inclusive range ([a-z])
380
+ LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
381
+
382
+ // modifies a preceding LLAMA_GRETYPE_CHAR or
383
+ // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
384
+ LLAMA_GRETYPE_CHAR_ALT = 6,
385
+
386
+ // any character (.)
387
+ LLAMA_GRETYPE_CHAR_ANY = 7,
388
+ };
389
+
390
+ typedef struct llama_grammar_element {
391
+ enum llama_gretype type;
392
+ uint32_t value; // Unicode code point or rule ID
393
+ } llama_grammar_element;
394
+
395
+ // performance timing information
396
+ struct llama_timings {
397
+ double t_start_ms;
398
+ double t_end_ms;
399
+ double t_load_ms;
400
+ double t_sample_ms;
401
+ double t_p_eval_ms;
402
+ double t_eval_ms;
403
+
404
+ int32_t n_sample;
405
+ int32_t n_p_eval;
406
+ int32_t n_eval;
407
+ };
408
+
409
+ // used in chat template
410
+ typedef struct llama_chat_message {
411
+ const char * role;
412
+ const char * content;
413
+ } llama_chat_message;
414
+
415
+ // lora adapter
416
+ struct llama_lora_adapter;
417
+
418
+ // Helpers for getting default parameters
419
+ LLAMA_API struct llama_model_params llama_model_default_params(void);
420
+ LLAMA_API struct llama_context_params llama_context_default_params(void);
421
+ LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
422
+
423
+ // Initialize the llama + ggml backend
424
+ // If numa is true, use NUMA optimizations
425
+ // Call once at the start of the program
426
+ LLAMA_API void llama_backend_init(void);
427
+
428
+ //optional:
429
+ LLAMA_API void llama_numa_init(enum lm_ggml_numa_strategy numa);
430
+
431
+ // Call once at the end of the program - currently only used for MPI
432
+ LLAMA_API void llama_backend_free(void);
433
+
434
+ LLAMA_API struct llama_model * llama_load_model_from_file(
435
+ const char * path_model,
436
+ struct llama_model_params params);
437
+
438
+ LLAMA_API void llama_free_model(struct llama_model * model);
439
+
440
+ LLAMA_API struct llama_context * llama_new_context_with_model(
441
+ struct llama_model * model,
442
+ struct llama_context_params params);
443
+
444
+ // Frees all allocated memory
445
+ LLAMA_API void llama_free(struct llama_context * ctx);
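// Illustrative usage sketch (not from the package sources): the minimal load/free lifecycle
// implied by the declarations above. "model.gguf" is a hypothetical path; error checks are omitted.
static void example_lifecycle(void) {
    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);

    struct llama_context_params cparams = llama_context_default_params();
    struct llama_context * ctx = llama_new_context_with_model(model, cparams);

    // ... tokenize, llama_decode(), sampling ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
}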
446
+
447
+ LLAMA_API int64_t llama_time_us(void);
448
+
449
+ LLAMA_API size_t llama_max_devices(void);
450
+
451
+ LLAMA_API bool llama_supports_mmap (void);
452
+ LLAMA_API bool llama_supports_mlock (void);
453
+ LLAMA_API bool llama_supports_gpu_offload(void);
454
+
455
+ LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
456
+
457
+ LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
458
+ LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
459
+ LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
460
+ LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);
461
+
462
+ LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
463
+
464
+ LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
465
+ LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);
466
+
467
+ LLAMA_API int32_t llama_n_vocab (const struct llama_model * model);
468
+ LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
469
+ LLAMA_API int32_t llama_n_embd (const struct llama_model * model);
470
+ LLAMA_API int32_t llama_n_layer (const struct llama_model * model);
471
+
472
+ // Get the model's RoPE frequency scaling factor
473
+ LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
474
+
475
+ // Functions to access the model's GGUF metadata scalar values
476
+ // - The functions return the length of the string on success, or -1 on failure
477
+ // - The output string is always null-terminated and cleared on failure
478
+ // - GGUF array values are not supported by these functions
479
+
480
+ // Get metadata value as a string by key name
481
+ LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
482
+
483
+ // Get the number of metadata key/value pairs
484
+ LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
485
+
486
+ // Get metadata key name by index
487
+ LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
488
+
489
+ // Get metadata value as a string by index
490
+ LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
491
+
492
+ // Get a string describing the model type
493
+ LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
494
+
495
+ // Returns the total size of all the tensors in the model in bytes
496
+ LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
497
+
498
+ // Returns the total number of parameters in the model
499
+ LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
500
+
501
+ // Get a llama model tensor
502
+ LLAMA_API struct lm_ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);
503
+
504
+ // Returns true if the model contains an encoder that requires llama_encode() call
505
+ LLAMA_API bool llama_model_has_encoder(const struct llama_model * model);
506
+
507
+ // For encoder-decoder models, this function returns id of the token that must be provided
508
+ // to the decoder to start generating output sequence. For other models, it returns -1.
509
+ LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
510
+
511
+ // Returns 0 on success
512
+ LLAMA_API uint32_t llama_model_quantize(
513
+ const char * fname_inp,
514
+ const char * fname_out,
515
+ const llama_model_quantize_params * params);
516
+
517
+ // Load a LoRA adapter from file
518
+ // The loaded adapter will be associated to the given model, and will be free when the model is deleted
519
+ LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init(
520
+ struct llama_model * model,
521
+ const char * path_lora);
522
+
523
+ // Add a loaded LoRA adapter to given context
524
+ // This will not modify model's weight
525
+ LLAMA_API int32_t llama_lora_adapter_set(
526
+ struct llama_context * ctx,
527
+ struct llama_lora_adapter * adapter,
528
+ float scale);
529
+
530
+ // Remove a specific LoRA adapter from given context
531
+ // Return -1 if the adapter is not present in the context
532
+ LLAMA_API int32_t llama_lora_adapter_remove(
533
+ struct llama_context * ctx,
534
+ struct llama_lora_adapter * adapter);
535
+
536
+ // Remove all LoRA adapters from given context
537
+ LLAMA_API void llama_lora_adapter_clear(
538
+ struct llama_context * ctx);
539
+
540
+ // Manually free a LoRA adapter
541
+ // Note: loaded adapters will be free when the associated model is deleted
542
+ LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter);
543
+
544
+ // Apply a loaded control vector to a llama_context, or if data is NULL, clear
545
+ // the currently loaded vector.
546
+ // n_embd should be the size of a single layer's control, and data should point
547
+ // to an n_embd x n_layers buffer starting from layer 1.
548
+ // il_start and il_end are the layer range the vector should apply to (both inclusive)
549
+ // See llama_control_vector_load in common to load a control vector.
550
+ LLAMA_API int32_t llama_control_vector_apply(
551
+ struct llama_context * lctx,
552
+ const float * data,
553
+ size_t len,
554
+ int32_t n_embd,
555
+ int32_t il_start,
556
+ int32_t il_end);
557
+
558
+ //
559
+ // KV cache
560
+ //
561
+
562
+ // Information associated with an individual cell in the KV cache view.
563
+ struct llama_kv_cache_view_cell {
564
+ // The position for this cell. Takes KV cache shifts into account.
565
+ // May be negative if the cell is not populated.
566
+ llama_pos pos;
567
+ };
568
+
569
+ // An updateable view of the KV cache.
570
+ struct llama_kv_cache_view {
571
+ // Number of KV cache cells. This will be the same as the context size.
572
+ int32_t n_cells;
573
+
574
+ // Maximum number of sequences that can exist in a cell. It's not an error
575
+ // if there are more sequences in a cell than this value, however they will
576
+ // not be visible in the view cells_sequences.
577
+ int32_t n_seq_max;
578
+
579
+ // Number of tokens in the cache. For example, if there are two populated
580
+ // cells, the first with 1 sequence id in it and the second with 2 sequence
581
+ // ids then you'll have 3 tokens.
582
+ int32_t token_count;
583
+
584
+ // Number of populated cache cells.
585
+ int32_t used_cells;
586
+
587
+ // Maximum contiguous empty slots in the cache.
588
+ int32_t max_contiguous;
589
+
590
+ // Index to the start of the max_contiguous slot range. Can be negative
591
+ // when cache is full.
592
+ int32_t max_contiguous_idx;
593
+
594
+ // Information for an individual cell.
595
+ struct llama_kv_cache_view_cell * cells;
596
+
597
+ // The sequences for each cell. There will be n_seq_max items per cell.
598
+ llama_seq_id * cells_sequences;
599
+ };
600
+
601
+ // Create an empty KV cache view. (use only for debugging purposes)
602
+ LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max);
603
+
604
+ // Free a KV cache view. (use only for debugging purposes)
605
+ LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);
606
+
607
+ // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
608
+ LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);
609
+
610
+ // Returns the number of tokens in the KV cache (slow, use only for debug)
611
+ // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
612
+ LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx);
613
+
614
+ // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
615
+ LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx);
616
+
617
+ // Clear the KV cache - both cell info is erased and KV data is zeroed
618
+ LLAMA_API void llama_kv_cache_clear(
619
+ struct llama_context * ctx);
620
+
621
+ // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
622
+ // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
623
+ // seq_id < 0 : match any sequence
624
+ // p0 < 0 : [0, p1]
625
+ // p1 < 0 : [p0, inf)
626
+ LLAMA_API bool llama_kv_cache_seq_rm(
627
+ struct llama_context * ctx,
628
+ llama_seq_id seq_id,
629
+ llama_pos p0,
630
+ llama_pos p1);
631
+
632
+ // Copy all tokens that belong to the specified sequence to another sequence
633
+ // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
634
+ // p0 < 0 : [0, p1]
635
+ // p1 < 0 : [p0, inf)
636
+ LLAMA_API void llama_kv_cache_seq_cp(
637
+ struct llama_context * ctx,
638
+ llama_seq_id seq_id_src,
639
+ llama_seq_id seq_id_dst,
640
+ llama_pos p0,
641
+ llama_pos p1);
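// Illustrative usage sketch (not from the package sources): forking one sequence into another
// and trimming its tail, using the p0/p1 conventions documented above (p0 < 0 means "from 0",
// p1 < 0 means "to infinity").
static void example_fork_and_trim(struct llama_context * ctx, llama_pos n_keep) {
    // duplicate all of sequence 0 into sequence 1 (no extra KV cache memory is allocated)
    llama_kv_cache_seq_cp(ctx, 0, 1, -1, -1);

    // drop everything in sequence 1 from position n_keep onwards
    llama_kv_cache_seq_rm(ctx, 1, n_keep, -1);
}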
642
+
643
+ // Removes all tokens that do not belong to the specified sequence
644
+ LLAMA_API void llama_kv_cache_seq_keep(
645
+ struct llama_context * ctx,
646
+ llama_seq_id seq_id);
647
+
648
+ // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
649
+ // If the KV cache is RoPEd, the KV data is updated accordingly:
650
+ // - lazily on next llama_decode()
651
+ // - explicitly with llama_kv_cache_update()
652
+ // p0 < 0 : [0, p1]
653
+ // p1 < 0 : [p0, inf)
654
+ LLAMA_API void llama_kv_cache_seq_add(
655
+ struct llama_context * ctx,
656
+ llama_seq_id seq_id,
657
+ llama_pos p0,
658
+ llama_pos p1,
659
+ llama_pos delta);
660
+
661
+ // Integer division of the positions by factor of `d > 1`
662
+ // If the KV cache is RoPEd, the KV data is updated accordingly:
663
+ // - lazily on next llama_decode()
664
+ // - explicitly with llama_kv_cache_update()
665
+ // p0 < 0 : [0, p1]
666
+ // p1 < 0 : [p0, inf)
667
+ LLAMA_API void llama_kv_cache_seq_div(
668
+ struct llama_context * ctx,
669
+ llama_seq_id seq_id,
670
+ llama_pos p0,
671
+ llama_pos p1,
672
+ int d);
673
+
674
+ // Returns the largest position present in the KV cache for the specified sequence
675
+ LLAMA_API llama_pos llama_kv_cache_seq_pos_max(
676
+ struct llama_context * ctx,
677
+ llama_seq_id seq_id);
678
+
679
+ // Defragment the KV cache
680
+ // This will be applied:
681
+ // - lazily on next llama_decode()
682
+ // - explicitly with llama_kv_cache_update()
683
+ LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx);
684
+
685
+ // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
686
+ LLAMA_API void llama_kv_cache_update(struct llama_context * ctx);
687
+
688
+ //
689
+ // State / sessions
690
+ //
691
+
692
+ // Returns the *actual* size in bytes of the state
693
+ // (rng, logits, embedding and kv_cache)
694
+ // Only use when saving the state, not when restoring it, otherwise the size may be too small.
695
+ LLAMA_API size_t llama_state_get_size(struct llama_context * ctx);
696
+ LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx),
697
+ "use llama_state_get_size instead");
698
+
699
+ // Copies the state to the specified destination address.
700
+ // Destination needs to have allocated enough memory.
701
+ // Returns the number of bytes copied
702
+ LLAMA_API size_t llama_state_get_data(
703
+ struct llama_context * ctx,
704
+ uint8_t * dst,
705
+ size_t size);
706
+ LLAMA_API DEPRECATED(size_t llama_copy_state_data(
707
+ struct llama_context * ctx,
708
+ uint8_t * dst),
709
+ "use llama_state_get_data instead");
710
+
711
+ // Set the state reading from the specified address
712
+ // Returns the number of bytes read
713
+ LLAMA_API size_t llama_state_set_data(
714
+ struct llama_context * ctx,
715
+ const uint8_t * src,
716
+ size_t size);
717
+ LLAMA_API DEPRECATED(size_t llama_set_state_data(
718
+ struct llama_context * ctx,
719
+ const uint8_t * src),
720
+ "use llama_state_set_data instead");
721
+
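// Illustrative usage sketch (not from the package sources): taking and restoring an in-memory
// snapshot of the full context state. Requires <stdlib.h>; error checks are omitted.
static uint8_t * example_state_snapshot(struct llama_context * ctx, size_t * out_size) {
    const size_t size = llama_state_get_size(ctx);    // only valid at save time (see note above)
    uint8_t * buf = (uint8_t *) malloc(size);
    *out_size = llama_state_get_data(ctx, buf, size); // number of bytes actually copied
    return buf;
}

static void example_state_restore(struct llama_context * ctx, const uint8_t * buf, size_t size) {
    llama_state_set_data(ctx, buf, size);             // returns the number of bytes read
}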
722
+ // Save/load session file
723
+ LLAMA_API bool llama_state_load_file(
724
+ struct llama_context * ctx,
725
+ const char * path_session,
726
+ llama_token * tokens_out,
727
+ size_t n_token_capacity,
728
+ size_t * n_token_count_out);
729
+ LLAMA_API DEPRECATED(bool llama_load_session_file(
730
+ struct llama_context * ctx,
731
+ const char * path_session,
732
+ llama_token * tokens_out,
733
+ size_t n_token_capacity,
734
+ size_t * n_token_count_out),
735
+ "use llama_state_load_file instead");
736
+
737
+ LLAMA_API bool llama_state_save_file(
738
+ struct llama_context * ctx,
739
+ const char * path_session,
740
+ const llama_token * tokens,
741
+ size_t n_token_count);
742
+ LLAMA_API DEPRECATED(bool llama_save_session_file(
743
+ struct llama_context * ctx,
744
+ const char * path_session,
745
+ const llama_token * tokens,
746
+ size_t n_token_count),
747
+ "use llama_state_save_file instead");
748
+
749
+ // Get the exact size needed to copy the KV cache of a single sequence
750
+ LLAMA_API size_t llama_state_seq_get_size(
751
+ struct llama_context * ctx,
752
+ llama_seq_id seq_id);
753
+
754
+ // Copy the KV cache of a single sequence into the specified buffer
755
+ LLAMA_API size_t llama_state_seq_get_data(
756
+ struct llama_context * ctx,
757
+ uint8_t * dst,
758
+ size_t size,
759
+ llama_seq_id seq_id);
760
+
761
+ // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
762
+ // Returns:
763
+ // - Positive: Ok
764
+ // - Zero: Failed to load
765
+ LLAMA_API size_t llama_state_seq_set_data(
766
+ struct llama_context * ctx,
767
+ const uint8_t * src,
768
+ size_t size,
769
+ llama_seq_id dest_seq_id);
770
+
771
+ LLAMA_API size_t llama_state_seq_save_file(
772
+ struct llama_context * ctx,
773
+ const char * filepath,
774
+ llama_seq_id seq_id,
775
+ const llama_token * tokens,
776
+ size_t n_token_count);
777
+
778
+ LLAMA_API size_t llama_state_seq_load_file(
779
+ struct llama_context * ctx,
780
+ const char * filepath,
781
+ llama_seq_id dest_seq_id,
782
+ llama_token * tokens_out,
783
+ size_t n_token_capacity,
784
+ size_t * n_token_count_out);
785
+
786
+ //
787
+ // Decoding
788
+ //
789
+
790
+ // Return batch for single sequence of tokens starting at pos_0
791
+ //
792
+ // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
793
+ //
794
+ LLAMA_API struct llama_batch llama_batch_get_one(
795
+ llama_token * tokens,
796
+ int32_t n_tokens,
797
+ llama_pos pos_0,
798
+ llama_seq_id seq_id);
799
+
800
+ // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
801
+ // Each token can be assigned up to n_seq_max sequence ids
802
+ // The batch has to be freed with llama_batch_free()
803
+ // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
804
+ // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
805
+ // The rest of the llama_batch members are allocated with size n_tokens
806
+ // All members are left uninitialized
807
+ LLAMA_API struct llama_batch llama_batch_init(
808
+ int32_t n_tokens,
809
+ int32_t embd,
810
+ int32_t n_seq_max);
811
+
812
+ // Frees a batch of tokens allocated with llama_batch_init()
813
+ LLAMA_API void llama_batch_free(struct llama_batch batch);
814
+
815
+ // Processes a batch of tokens with the encoder part of the encoder-decoder model.
816
+ // Stores the encoder output internally for later use by the decoder cross-attention layers.
817
+ // 0 - success
818
+ // < 0 - error
819
+ LLAMA_API int32_t llama_encode(
820
+ struct llama_context * ctx,
821
+ struct llama_batch batch);
822
+
823
+ // Positive return values do not mean a fatal error, but rather a warning.
824
+ // 0 - success
825
+ // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
826
+ // < 0 - error
827
+ LLAMA_API int32_t llama_decode(
828
+ struct llama_context * ctx,
829
+ struct llama_batch batch);
830
+
831
+ // Set the number of threads used for decoding
832
+ // n_threads is the number of threads used for generation (single token)
833
+ // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
834
+ LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);
835
+
836
+ // Get the number of threads used for generation of a single token.
837
+ LLAMA_API uint32_t llama_n_threads(struct llama_context * ctx);
838
+
839
+ // Get the number of threads used for prompt and batch processing (multiple token).
840
+ LLAMA_API uint32_t llama_n_threads_batch(struct llama_context * ctx);
841
+
842
+ // Set whether the model is in embeddings mode or not
843
+ // If true, embeddings will be returned but logits will not
844
+ LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings);
845
+
846
+ // Set whether to use causal attention or not
847
+ // If set to true, the model will only attend to the past tokens
848
+ LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
849
+
850
+ // Set abort callback
851
+ LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, lm_ggml_abort_callback abort_callback, void * abort_callback_data);
852
+
853
+ // Wait until all computations are finished
854
+ // This is automatically done when using one of the functions below to obtain the computation results
855
+ // and is not necessary to call it explicitly in most cases
856
+ LLAMA_API void llama_synchronize(struct llama_context * ctx);
857
+
858
+ // Token logits obtained from the last call to llama_decode()
859
+ // The logits for which llama_batch.logits[i] != 0 are stored contiguously
860
+ // in the order they have appeared in the batch.
861
+ // Rows: number of tokens for which llama_batch.logits[i] != 0
862
+ // Cols: n_vocab
863
+ LLAMA_API float * llama_get_logits(struct llama_context * ctx);
864
+
865
+ // Logits for the ith token. For positive indices, equivalent to:
866
+ // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
867
+ // Negative indices can be used to access logits in reverse order, -1 is the last logit.
868
+ // returns NULL for invalid ids.
869
+ LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);
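// Illustrative usage sketch (not from the package sources): one decode step for a single token,
// followed by a greedy argmax over the logits of the last output, using only functions declared
// in this header. Error handling is minimal.
static llama_token example_greedy_step(struct llama_context * ctx, llama_token prev, llama_pos pos) {
    struct llama_batch batch = llama_batch_get_one(&prev, 1, pos, 0);
    if (llama_decode(ctx, batch) != 0) {
        return -1; // 1 = no KV slot available, < 0 = error (see llama_decode above)
    }

    const float * logits  = llama_get_logits_ith(ctx, -1); // -1 = last output
    const int32_t n_vocab = llama_n_vocab(llama_get_model(ctx));

    llama_token best = 0;
    for (llama_token id = 1; id < n_vocab; ++id) {
        if (logits[id] > logits[best]) {
            best = id;
        }
    }
    return best;
}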
870
+
871
+ // Get all output token embeddings.
872
+ // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
873
+ // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
874
+ // in the order they have appeared in the batch.
875
+ // shape: [n_outputs*n_embd]
876
+ // Otherwise, returns NULL.
877
+ LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
878
+
879
+ // Get the embeddings for the ith token. For positive indices, equivalent to:
880
+ // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
881
+ // Negative indices can be used to access embeddings in reverse order, -1 is the last embedding.
882
+ // shape: [n_embd] (1-dimensional)
883
+ // returns NULL for invalid ids.
884
+ LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
885
+
886
+ // Get the embeddings for a sequence id
887
+ // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
888
+ // shape: [n_embd] (1-dimensional)
889
+ LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
890
+
891
+ //
892
+ // Vocab
893
+ //
894
+
895
+ LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token);
896
+
897
+ LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);
898
+
899
+ LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token);
900
+
901
+ // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)
902
+ LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token);
903
+
904
+ // Identifies whether the token id is a control token or a renderable token
905
+ LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token);
906
+
907
+ // Special tokens
908
+ LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
909
+ LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
910
+ LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification
911
+ LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator
912
+ LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
913
+ LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding
914
+
915
+ // Returns -1 if unknown, 1 for true or 0 for false.
916
+ LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);
917
+
918
+ // Returns -1 if unknown, 1 for true or 0 for false.
919
+ LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);
920
+
921
+ // Codellama infill tokens
922
+ LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
923
+ LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
924
+ LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
925
+ LLAMA_API llama_token llama_token_eot (const struct llama_model * model); // End of infill middle
926
+
927
+ //
928
+ // Tokenization
929
+ //
930
+
931
+ /// @details Convert the provided text into tokens.
932
+ /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
933
+ /// @return Returns the number of tokens on success, no more than n_tokens_max
934
+ /// @return Returns a negative number on failure - the number of tokens that would have been returned
935
+ /// @param add_special Allow adding BOS and EOS tokens if the model is configured to do so.
936
+ /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
937
+ /// as plaintext. Does not insert a leading space.
938
+ LLAMA_API int32_t llama_tokenize(
939
+ const struct llama_model * model,
940
+ const char * text,
941
+ int32_t text_len,
942
+ llama_token * tokens,
943
+ int32_t n_tokens_max,
944
+ bool add_special,
945
+ bool parse_special);
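// Illustrative usage sketch (not from the package sources): the "call, then resize on negative
// return" pattern implied by the return value contract above. Requires <string.h>.
static int32_t example_tokenize(const struct llama_model * model, const char * text,
                                llama_token * tokens, int32_t n_tokens_max) {
    const int32_t n = llama_tokenize(model, text, (int32_t) strlen(text),
                                     tokens, n_tokens_max,
                                     /*add_special  =*/ true,
                                     /*parse_special=*/ false);
    if (n < 0) {
        return -n; // buffer too small: -n tokens would have been written
    }
    return n;      // number of tokens actually written
}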
946
+
947
+ // Token Id -> Piece.
948
+ // Uses the vocabulary in the provided context.
949
+ // Does not write null terminator to the buffer.
950
+ // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix')
951
+ // @param special If true, special tokens are rendered in the output.
952
+ LLAMA_API int32_t llama_token_to_piece(
953
+ const struct llama_model * model,
954
+ llama_token token,
955
+ char * buf,
956
+ int32_t length,
957
+ int32_t lstrip,
958
+ bool special);
959
+
960
+ /// @details Convert the provided tokens into text (inverse of llama_tokenize()).
961
+ /// @param text The char pointer must be large enough to hold the resulting text.
962
+ /// @return Returns the number of chars/bytes on success, no more than text_len_max.
963
+ /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned.
964
+ /// @param remove_special Allow removing BOS and EOS tokens if the model is configured to do so.
965
+ /// @param unparse_special If true, special tokens are rendered in the output.
966
+ LLAMA_API int32_t llama_detokenize(
967
+ const struct llama_model * model,
968
+ const llama_token * tokens,
969
+ int32_t n_tokens,
970
+ char * text,
971
+ int32_t text_len_max,
972
+ bool remove_special,
973
+ bool unparse_special);
974
+
975
+ //
976
+ // Chat templates
977
+ //
978
+
979
+ /// Apply chat template. Inspired by hf apply_chat_template() on python.
980
+ /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
981
+ /// NOTE: This function does not use a jinja parser. It only supports a pre-defined list of templates. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
982
+ /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.
983
+ /// @param chat Pointer to a list of multiple llama_chat_message
984
+ /// @param n_msg Number of llama_chat_message in this chat
985
+ /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
986
+ /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)
987
+ /// @param length The size of the allocated buffer
988
+ /// @return The total number of bytes of the formatted prompt. If it is larger than the size of the buffer, you may need to re-alloc it and then re-apply the template.
989
+ LLAMA_API int32_t llama_chat_apply_template(
990
+ const struct llama_model * model,
991
+ const char * tmpl,
992
+ const struct llama_chat_message * chat,
993
+ size_t n_msg,
994
+ bool add_ass,
995
+ char * buf,
996
+ int32_t length);
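// Illustrative usage sketch (not from the package sources): applying the model's built-in chat
// template to a short conversation, with the re-allocation path described in the @return note
// above. Requires <stdlib.h>; error checks are omitted and the message texts are placeholders.
static int32_t example_chat_prompt(const struct llama_model * model, char ** out_buf) {
    struct llama_chat_message msgs[] = {
        { "system", "You are a helpful assistant." },
        { "user",   "Hello!"                       },
    };
    const size_t n_msg = sizeof(msgs) / sizeof(msgs[0]);

    int32_t size = 256; // rough initial guess
    char  * buf  = (char *) malloc(size);

    int32_t n = llama_chat_apply_template(model, NULL /* use the model's template */,
                                          msgs, n_msg, /*add_ass=*/ true, buf, size);
    if (n > size) {
        buf = (char *) realloc(buf, n);
        n   = llama_chat_apply_template(model, NULL, msgs, n_msg, true, buf, n);
    }
    *out_buf = buf;
    return n; // length of the formatted prompt in bytes
}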
997
+
998
+ //
999
+ // Grammar
1000
+ //
1001
+
1002
+ /// Initialize a llama_grammar.
1003
+ ///
1004
+ /// @param rules The rule elements of the grammar to initialize.
1005
+ /// @param n_rules The number of rules.
1006
+ /// @param start_rule_index The index of the root rule (the starting point of the grammar).
1007
+ /// @return The initialized llama_grammar or nullptr if initialization failed.
1008
+ LLAMA_API struct llama_grammar * llama_grammar_init(
1009
+ const llama_grammar_element ** rules,
1010
+ size_t n_rules,
1011
+ size_t start_rule_index);
1012
+
1013
+ LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
1014
+
1015
+ LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);
1016
+
1017
+ /// @details Apply constraints from grammar
1018
+ LLAMA_API void llama_grammar_sample(
1019
+ const struct llama_grammar * grammar,
1020
+ const struct llama_context * ctx,
1021
+ llama_token_data_array * candidates);
1022
+ LLAMA_API DEPRECATED(void llama_sample_grammar(
1023
+ struct llama_context * ctx,
1024
+ llama_token_data_array * candidates,
1025
+ const struct llama_grammar * grammar),
1026
+ "use llama_grammar_sample instead");
1027
+
1028
+ /// @details Accepts the sampled token into the grammar
1029
+ LLAMA_API void llama_grammar_accept_token(
1030
+ struct llama_grammar * grammar,
1031
+ struct llama_context * ctx,
1032
+ llama_token token);
1033
+
1034
+ //
1035
+ // Sampling functions
1036
+ //
1037
+
1038
+ // Sets the current rng seed.
1039
+ LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
1040
+
1041
+ /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
1042
+ /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
1043
+ LLAMA_API void llama_sample_repetition_penalties(
1044
+ struct llama_context * ctx,
1045
+ llama_token_data_array * candidates,
1046
+ const llama_token * last_tokens,
1047
+ size_t penalty_last_n,
1048
+ float penalty_repeat,
1049
+ float penalty_freq,
1050
+ float penalty_present);
1051
+
1052
+ /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
1053
+ /// @param logits Logits extracted from the original generation context.
1054
+ /// @param logits_guidance Logits extracted from a separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
1055
+ /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
1056
+ LLAMA_API void llama_sample_apply_guidance(
1057
+ struct llama_context * ctx,
1058
+ float * logits,
1059
+ float * logits_guidance,
1060
+ float scale);
1061
+
1062
+ /// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.
1063
+ LLAMA_API void llama_sample_softmax(
1064
+ struct llama_context * ctx,
1065
+ llama_token_data_array * candidates);
1066
+
1067
+ /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
1068
+ LLAMA_API void llama_sample_top_k(
1069
+ struct llama_context * ctx,
1070
+ llama_token_data_array * candidates,
1071
+ int32_t k,
1072
+ size_t min_keep);
1073
+
1074
+ /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
1075
+ LLAMA_API void llama_sample_top_p(
1076
+ struct llama_context * ctx,
1077
+ llama_token_data_array * candidates,
1078
+ float p,
1079
+ size_t min_keep);
1080
+
1081
+ /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
1082
+ LLAMA_API void llama_sample_min_p(
1083
+ struct llama_context * ctx,
1084
+ llama_token_data_array * candidates,
1085
+ float p,
1086
+ size_t min_keep);
1087
+
1088
+ /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
1089
+ LLAMA_API void llama_sample_tail_free(
1090
+ struct llama_context * ctx,
1091
+ llama_token_data_array * candidates,
1092
+ float z,
1093
+ size_t min_keep);
1094
+
1095
+ /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
1096
+ LLAMA_API void llama_sample_typical(
1097
+ struct llama_context * ctx,
1098
+ llama_token_data_array * candidates,
1099
+ float p,
1100
+ size_t min_keep);
1101
+
1102
+ /// @details Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772.
1103
+ LLAMA_API void llama_sample_entropy(
1104
+ struct llama_context * ctx,
1105
+ llama_token_data_array * candidates_p,
1106
+ float min_temp,
1107
+ float max_temp,
1108
+ float exponent_val);
1109
+
1110
+ LLAMA_API void llama_sample_temp(
1111
+ struct llama_context * ctx,
1112
+ llama_token_data_array * candidates,
1113
+ float temp);
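// Illustrative usage sketch (not from the package sources): building a llama_token_data_array
// from the last output's logits and applying a conventional top-k / top-p / temperature chain.
// `cands` must have room for llama_n_vocab(model) entries; llama_sample_token() is declared
// further below. The 40 / 0.95 / 0.8 values are illustrative defaults only.
static llama_token example_sample_chain(struct llama_context * ctx, llama_token_data * cands) {
    const int32_t n_vocab = llama_n_vocab(llama_get_model(ctx));
    const float * logits  = llama_get_logits_ith(ctx, -1);

    for (llama_token id = 0; id < n_vocab; ++id) {
        cands[id].id    = id;
        cands[id].logit = logits[id];
        cands[id].p     = 0.0f;
    }
    llama_token_data_array arr = { cands, (size_t) n_vocab, /*sorted=*/ false };

    llama_sample_top_k(ctx, &arr, /*k=*/ 40,    /*min_keep=*/ 1);
    llama_sample_top_p(ctx, &arr, /*p=*/ 0.95f, /*min_keep=*/ 1);
    llama_sample_temp (ctx, &arr, /*temp=*/ 0.8f);

    return llama_sample_token(ctx, &arr);
}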
1114
+
1115
+ /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
1116
+ /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
1117
+ /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
1118
+ /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
1119
+ /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
1120
+ /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
1121
+ LLAMA_API llama_token llama_sample_token_mirostat(
1122
+ struct llama_context * ctx,
1123
+ llama_token_data_array * candidates,
1124
+ float tau,
1125
+ float eta,
1126
+ int32_t m,
1127
+ float * mu);
1128
+
1129
+ /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
1130
+ /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
1131
+ /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
1132
+ /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
1133
+ /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
1134
+ LLAMA_API llama_token llama_sample_token_mirostat_v2(
1135
+ struct llama_context * ctx,
1136
+ llama_token_data_array * candidates,
1137
+ float tau,
1138
+ float eta,
1139
+ float * mu);
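// Illustrative usage sketch (not from the package sources): persisting the Mirostat v2 state
// across sampling calls. Per the @param docs above, `mu` is initialized to 2 * tau and then
// updated in place by the sampler. The tau/eta values here are illustrative only.
static llama_token example_mirostat_v2(struct llama_context * ctx,
                                       llama_token_data_array * candidates,
                                       float * mu, bool first_call) {
    const float tau = 5.0f; // target surprise
    const float eta = 0.1f; // learning rate

    if (first_call) {
        *mu = 2.0f * tau;   // initial value per the documentation above
    }
    return llama_sample_token_mirostat_v2(ctx, candidates, tau, eta, mu);
}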
1140
+
1141
+ /// @details Selects the token with the highest probability.
1142
+ /// Does not compute the token probabilities. Use llama_sample_softmax() instead if probabilities are needed.
1143
+ LLAMA_API llama_token llama_sample_token_greedy(
1144
+ struct llama_context * ctx,
1145
+ llama_token_data_array * candidates);
1146
+
1147
+ /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
1148
+ LLAMA_API llama_token llama_sample_token(
1149
+ struct llama_context * ctx,
1150
+ llama_token_data_array * candidates);
1151
+
1152
+ //
1153
+ // Model split
1154
+ //
1155
+
1156
+ /// @details Build a split GGUF final path for this chunk.
1157
+ /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
1158
+ // Returns the split_path length.
1159
+ LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);
1160
+
1161
+ /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
1162
+ /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
1163
+ // Returns the split_prefix length.
1164
+ LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
1165
+
1166
+ // Performance information
1167
+ LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
1168
+
1169
+ LLAMA_API void llama_print_timings(struct llama_context * ctx);
1170
+ LLAMA_API void llama_reset_timings(struct llama_context * ctx);
1171
+
1172
+ // Print system information
1173
+ LLAMA_API const char * llama_print_system_info(void);
1174
+
1175
+ // Set callback for all future logging events.
1176
+ // If this is not called, or NULL is supplied, everything is output on stderr.
1177
+ LLAMA_API void llama_log_set(lm_ggml_log_callback log_callback, void * user_data);
1178
+
1179
+ LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
1180
+
1181
+ #ifdef __cplusplus
1182
+ }
1183
+ #endif
1184
+
1185
+ // Internal API to be implemented by llama.cpp and used by tests/benchmarks only
1186
+ #ifdef LLAMA_API_INTERNAL
1187
+
1188
+ #include <random>
1189
+ #include <string>
1190
+ #include <vector>
1191
+
1192
+ struct lm_ggml_tensor;
1193
+
1194
+ const std::vector<std::pair<std::string, struct lm_ggml_tensor *>> & llama_internal_get_tensor_map(
1195
+ struct llama_context * ctx
1196
+ );
1197
+
1198
+ struct llama_partial_utf8 {
1199
+ uint32_t value; // bit value so far (unshifted)
1200
+ int n_remain; // num bytes remaining; -1 indicates invalid sequence
1201
+ };
1202
+
1203
+ struct llama_grammar_candidate {
1204
+ size_t index;
1205
+ const uint32_t * code_points;
1206
+ llama_partial_utf8 partial_utf8;
1207
+ };
1208
+
1209
+ using llama_grammar_rule = std::vector< llama_grammar_element>;
1210
+ using llama_grammar_stack = std::vector<const llama_grammar_element *>;
1211
+
1212
+ using llama_grammar_rules = std::vector<llama_grammar_rule>;
1213
+ using llama_grammar_stacks = std::vector<llama_grammar_stack>;
1214
+ using llama_grammar_candidates = std::vector<llama_grammar_candidate>;
1215
+
1216
+ const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar * grammar);
1217
+ llama_grammar_stacks & llama_grammar_get_stacks( struct llama_grammar * grammar);
1218
+
1219
+ void llama_grammar_accept(
1220
+ const llama_grammar_rules & rules,
1221
+ const llama_grammar_stacks & stacks,
1222
+ const uint32_t chr,
1223
+ llama_grammar_stacks & new_stacks);
1224
+
1225
+ std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
1226
+ const llama_grammar_rules & rules,
1227
+ const llama_grammar_stack & stack,
1228
+ const llama_grammar_candidates & candidates);
1229
+
1230
+ std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
1231
+ const std::string & src,
1232
+ llama_partial_utf8 partial_start);
1233
+
1234
+ // Randomly selects a token from the candidates based on their probabilities using given std::mt19937.
1235
+ // This is a temporary workaround in order to fix race conditions when sampling with multiple sequences.
1236
+ llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng);
1237
+
1238
+ #endif // LLAMA_API_INTERNAL
1239
+
1240
+ #endif // LLAMA_H