cui-llama.rn 0.2.0 → 1.0.1

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
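For orientation, the header being diffed below is the library's plain C API. Its core flow is: initialize the backend, load a model, create a context, tokenize a prompt, decode it, then read the output logits. The sketch below is illustrative only and is not part of the published package; it uses only declarations that appear in the header below, while the model path, the fixed 512-token buffer, and the greedy argmax pick are assumptions made for brevity.

#include <stdio.h>
#include <string.h>
#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 3) {
        fprintf(stderr, "usage: %s <model.gguf> <prompt>\n", argv[0]);
        return 1;
    }

    llama_backend_init(); // call once at the start of the program

    struct llama_model * model =
        llama_load_model_from_file(argv[1], llama_model_default_params());
    if (model == NULL) return 1;

    struct llama_context * ctx =
        llama_new_context_with_model(model, llama_context_default_params());
    if (ctx == NULL) return 1;

    // Tokenize the prompt; add_special = true lets the model insert BOS if configured.
    llama_token tokens[512]; // assumed capacity for this sketch, not an API limit
    const int32_t n_tokens = llama_tokenize(model, argv[2], (int32_t) strlen(argv[2]),
                                            tokens, 512, true, false);
    if (n_tokens < 0) return 1; // buffer too small: -n_tokens would be required

    // Evaluate the prompt as a single sequence starting at position 0.
    if (llama_decode(ctx, llama_batch_get_one(tokens, n_tokens, 0, 0)) != 0) return 1;

    // Logits of the last evaluated token (negative index -1 = last output).
    const float * logits = llama_get_logits_ith(ctx, -1);
    const int32_t n_vocab = llama_n_vocab(model);

    // Trivial greedy pick: argmax over the vocabulary (a stand-in for the sampling API).
    llama_token best = 0;
    for (llama_token t = 1; t < n_vocab; t++) {
        if (logits[t] > logits[best]) best = t;
    }

    char piece[64];
    const int32_t len = llama_token_to_piece(model, best, piece, (int32_t) sizeof(piece), 0, false);
    printf("next token: %.*s\n", len > 0 ? len : 0, piece);

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}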
package/cpp/llama.h CHANGED
@@ -1,1201 +1,1201 @@
- #ifndef LLAMA_H
- #define LLAMA_H
-
- #include "ggml.h"
- #include "ggml-backend.h"
-
- #include <stddef.h>
- #include <stdint.h>
- #include <stdio.h>
- #include <stdbool.h>
-
- #ifdef LLAMA_SHARED
- # if defined(_WIN32) && !defined(__MINGW32__)
- # ifdef LLAMA_BUILD
- # define LLAMA_API __declspec(dllexport)
- # else
- # define LLAMA_API __declspec(dllimport)
- # endif
- # else
- # define LLAMA_API __attribute__ ((visibility ("default")))
- # endif
- #else
- # define LLAMA_API
- #endif
-
- #ifdef __GNUC__
- # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
- #elif defined(_MSC_VER)
- # define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
- #else
- # define DEPRECATED(func, hint) func
- #endif
-
- #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
-
- #define LLAMA_MAX_RNG_STATE (64*1024)
-
- #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
- #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
- #define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'
-
- #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
- #define LLAMA_SESSION_VERSION 6
-
- #define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ
- #define LLAMA_STATE_SEQ_VERSION 1
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- //
- // C interface
- //
- // TODO: show sample usage
- //
-
- struct llama_model;
- struct llama_context;
-
- typedef int32_t llama_pos;
- typedef int32_t llama_token;
- typedef int32_t llama_seq_id;
-
- enum llama_vocab_type {
- LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
- LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
- LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE
- LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece
- LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram
- };
-
- // pre-tokenization types
- enum llama_vocab_pre_type {
- LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0,
- LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1,
- LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2,
- LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
- LLAMA_VOCAB_PRE_TYPE_FALCON = 4,
- LLAMA_VOCAB_PRE_TYPE_MPT = 5,
- LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
- LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
- LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
- LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
- LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10,
- LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11,
- LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
- LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
- LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
- LLAMA_VOCAB_PRE_TYPE_PORO = 15,
- LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16,
- LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17,
- LLAMA_VOCAB_PRE_TYPE_VIKING = 18,
- LLAMA_VOCAB_PRE_TYPE_JAIS = 19,
- };
-
- // note: these values should be synchronized with lm_ggml_rope
- // TODO: maybe move this enum to ggml.h (lm_ggml_rope_type)
- enum llama_rope_type {
- LLAMA_ROPE_TYPE_NONE = -1,
- LLAMA_ROPE_TYPE_NORM = 0,
- LLAMA_ROPE_TYPE_NEOX = 2,
- LLAMA_ROPE_TYPE_GLM = 4,
- };
-
- enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
- LLAMA_TOKEN_TYPE_UNDEFINED = 0,
- LLAMA_TOKEN_TYPE_NORMAL = 1,
- LLAMA_TOKEN_TYPE_UNKNOWN = 2,
- LLAMA_TOKEN_TYPE_CONTROL = 3,
- LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
- LLAMA_TOKEN_TYPE_UNUSED = 5,
- LLAMA_TOKEN_TYPE_BYTE = 6,
- };
-
- enum llama_token_attr {
- LLAMA_TOKEN_ATTR_UNDEFINED = 0,
- LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0,
- LLAMA_TOKEN_ATTR_UNUSED = 1 << 1,
- LLAMA_TOKEN_ATTR_NORMAL = 1 << 2,
- LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL?
- LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
- LLAMA_TOKEN_ATTR_BYTE = 1 << 5,
- LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6,
- LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7,
- LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8,
- LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9,
- };
-
- // model file types
- enum llama_ftype {
- LLAMA_FTYPE_ALL_F32 = 0,
- LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
- // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
- // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
- LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
-
- LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
- };
-
- enum llama_rope_scaling_type {
- LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1,
- LLAMA_ROPE_SCALING_TYPE_NONE = 0,
- LLAMA_ROPE_SCALING_TYPE_LINEAR = 1,
- LLAMA_ROPE_SCALING_TYPE_YARN = 2,
- LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN,
- };
-
- enum llama_pooling_type {
- LLAMA_POOLING_TYPE_UNSPECIFIED = -1,
- LLAMA_POOLING_TYPE_NONE = 0,
- LLAMA_POOLING_TYPE_MEAN = 1,
- LLAMA_POOLING_TYPE_CLS = 2,
- LLAMA_POOLING_TYPE_LAST = 3,
- };
-
- enum llama_attention_type {
- LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1,
- LLAMA_ATTENTION_TYPE_CAUSAL = 0,
- LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1,
- };
-
- enum llama_split_mode {
- LLAMA_SPLIT_MODE_NONE = 0, // single GPU
- LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
- LLAMA_SPLIT_MODE_ROW = 2, // split rows across GPUs
- };
-
- typedef struct llama_token_data {
- llama_token id; // token id
- float logit; // log-odds of the token
- float p; // probability of the token
- } llama_token_data;
-
- typedef struct llama_token_data_array {
- llama_token_data * data;
- size_t size;
- bool sorted;
- } llama_token_data_array;
-
- typedef bool (*llama_progress_callback)(float progress, void * user_data);
-
- // Input data for llama_decode
- // A llama_batch object can contain input about one or many sequences
- // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
- //
- // - token : the token ids of the input (used when embd is NULL)
- // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
- // - pos : the positions of the respective token in the sequence
- // - seq_id : the sequence to which the respective token belongs
- // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
- //
- typedef struct llama_batch {
- int32_t n_tokens;
-
- llama_token * token;
- float * embd;
- llama_pos * pos;
- int32_t * n_seq_id;
- llama_seq_id ** seq_id;
- int8_t * logits; // TODO: rename this to "output"
-
- // NOTE: helpers for smooth API transition - can be deprecated in the future
- // for future-proof code, use the above fields instead and ignore everything below
- //
- // pos[i] = all_pos_0 + i*all_pos_1
- //
- llama_pos all_pos_0; // used if pos == NULL
- llama_pos all_pos_1; // used if pos == NULL
- llama_seq_id all_seq_id; // used if seq_id == NULL
- } llama_batch;
-
- enum llama_model_kv_override_type {
- LLAMA_KV_OVERRIDE_TYPE_INT,
- LLAMA_KV_OVERRIDE_TYPE_FLOAT,
- LLAMA_KV_OVERRIDE_TYPE_BOOL,
- LLAMA_KV_OVERRIDE_TYPE_STR,
- };
-
- struct llama_model_kv_override {
- enum llama_model_kv_override_type tag;
-
- char key[128];
-
- union {
- int64_t val_i64;
- double val_f64;
- bool val_bool;
- char val_str[128];
- };
- };
-
- struct llama_model_params {
- int32_t n_gpu_layers; // number of layers to store in VRAM
- enum llama_split_mode split_mode; // how to split the model across multiple GPUs
-
- // main_gpu interpretation depends on split_mode:
- // LLAMA_SPLIT_NONE: the GPU that is used for the entire model
- // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results
- // LLAMA_SPLIT_LAYER: ignored
- int32_t main_gpu;
-
- // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
- const float * tensor_split;
-
- // comma separated list of RPC servers to use for offloading
- const char * rpc_servers;
-
- // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
- // If the provided progress_callback returns true, model loading continues.
- // If it returns false, model loading is immediately aborted.
- llama_progress_callback progress_callback;
-
- // context pointer passed to the progress callback
- void * progress_callback_user_data;
-
- // override key-value pairs of the model meta data
- const struct llama_model_kv_override * kv_overrides;
-
- // Keep the booleans together to avoid misalignment during copy-by-value.
- bool vocab_only; // only load the vocabulary, no weights
- bool use_mmap; // use mmap if possible
- bool use_mlock; // force system to keep model in RAM
- bool check_tensors; // validate model tensor data
- };
-
- // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
- // https://github.com/ggerganov/llama.cpp/pull/7544
- struct llama_context_params {
- uint32_t seed; // RNG seed, -1 for random
- uint32_t n_ctx; // text context, 0 = from model
- uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode
- uint32_t n_ubatch; // physical maximum batch size
- uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
- uint32_t n_threads; // number of threads to use for generation
- uint32_t n_threads_batch; // number of threads to use for batch processing
-
- enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
- enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
- enum llama_attention_type attention_type; // attention type to use for embeddings
-
- // ref: https://github.com/ggerganov/llama.cpp/pull/2054
- float rope_freq_base; // RoPE base frequency, 0 = from model
- float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
- float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model
- float yarn_attn_factor; // YaRN magnitude scaling factor
- float yarn_beta_fast; // YaRN low correction dim
- float yarn_beta_slow; // YaRN high correction dim
- uint32_t yarn_orig_ctx; // YaRN original context size
- float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default)
-
- lm_ggml_backend_sched_eval_callback cb_eval;
- void * cb_eval_user_data;
-
- enum lm_ggml_type type_k; // data type for K cache [EXPERIMENTAL]
- enum lm_ggml_type type_v; // data type for V cache [EXPERIMENTAL]
-
- // Keep the booleans together to avoid misalignment during copy-by-value.
- bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
- bool embeddings; // if true, extract embeddings (together with logits)
- bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
- bool flash_attn; // whether to use flash attention [EXPERIMENTAL]
-
- // Abort callback
- // if it returns true, execution of llama_decode() will be aborted
- // currently works only with CPU execution
- lm_ggml_abort_callback abort_callback;
- void * abort_callback_data;
- };
-
- // model quantization parameters
- typedef struct llama_model_quantize_params {
- int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
- enum llama_ftype ftype; // quantize to this llama_ftype
- enum lm_ggml_type output_tensor_type; // output tensor type
- enum lm_ggml_type token_embedding_type; // token embeddings tensor type
- bool allow_requantize; // allow quantizing non-f32/f16 tensors
- bool quantize_output_tensor; // quantize output.weight
- bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
- bool pure; // quantize all tensors to the default type
- bool keep_split; // quantize to the same number of shards
- void * imatrix; // pointer to importance matrix data
- void * kv_overrides; // pointer to vector containing overrides
- } llama_model_quantize_params;
-
- // grammar types
- struct llama_grammar;
-
- // grammar element type
- enum llama_gretype {
- // end of rule definition
- LLAMA_GRETYPE_END = 0,
-
- // start of alternate definition for rule
- LLAMA_GRETYPE_ALT = 1,
-
- // non-terminal element: reference to rule
- LLAMA_GRETYPE_RULE_REF = 2,
-
- // terminal element: character (code point)
- LLAMA_GRETYPE_CHAR = 3,
-
- // inverse char(s) ([^a], [^a-b] [^abc])
- LLAMA_GRETYPE_CHAR_NOT = 4,
-
- // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
- // be an inclusive range ([a-z])
- LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
-
- // modifies a preceding LLAMA_GRETYPE_CHAR or
- // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
- LLAMA_GRETYPE_CHAR_ALT = 6,
-
- // any character (.)
- LLAMA_GRETYPE_CHAR_ANY = 7,
- };
-
- typedef struct llama_grammar_element {
- enum llama_gretype type;
- uint32_t value; // Unicode code point or rule ID
- } llama_grammar_element;
-
- // performance timing information
- struct llama_timings {
- double t_start_ms;
- double t_end_ms;
- double t_load_ms;
- double t_sample_ms;
- double t_p_eval_ms;
- double t_eval_ms;
-
- int32_t n_sample;
- int32_t n_p_eval;
- int32_t n_eval;
- };
-
- // used in chat template
- typedef struct llama_chat_message {
- const char * role;
- const char * content;
- } llama_chat_message;
-
- // Helpers for getting default parameters
- LLAMA_API struct llama_model_params llama_model_default_params(void);
- LLAMA_API struct llama_context_params llama_context_default_params(void);
- LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
-
- // Initialize the llama + ggml backend
- // If numa is true, use NUMA optimizations
- // Call once at the start of the program
- LLAMA_API void llama_backend_init(void);
-
- // optional:
- LLAMA_API void llama_numa_init(enum lm_ggml_numa_strategy numa);
-
- // Call once at the end of the program - currently only used for MPI
- LLAMA_API void llama_backend_free(void);
-
- LLAMA_API struct llama_model * llama_load_model_from_file(
- const char * path_model,
- struct llama_model_params params);
-
- LLAMA_API void llama_free_model(struct llama_model * model);
-
- LLAMA_API struct llama_context * llama_new_context_with_model(
- struct llama_model * model,
- struct llama_context_params params);
-
- // Frees all allocated memory
- LLAMA_API void llama_free(struct llama_context * ctx);
-
- LLAMA_API int64_t llama_time_us(void);
-
- LLAMA_API size_t llama_max_devices(void);
-
- LLAMA_API bool llama_supports_mmap (void);
- LLAMA_API bool llama_supports_mlock (void);
- LLAMA_API bool llama_supports_gpu_offload(void);
-
- LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
-
- LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
- LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
- LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
- LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);
-
- LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
-
- LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
- LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);
-
- LLAMA_API int32_t llama_n_vocab (const struct llama_model * model);
- LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
- LLAMA_API int32_t llama_n_embd (const struct llama_model * model);
- LLAMA_API int32_t llama_n_layer (const struct llama_model * model);
-
- // Get the model's RoPE frequency scaling factor
- LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
-
- // Functions to access the model's GGUF metadata scalar values
- // - The functions return the length of the string on success, or -1 on failure
- // - The output string is always null-terminated and cleared on failure
- // - GGUF array values are not supported by these functions
-
- // Get metadata value as a string by key name
- LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
-
- // Get the number of metadata key/value pairs
- LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
-
- // Get metadata key name by index
- LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
-
- // Get metadata value as a string by index
- LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
-
- // Get a string describing the model type
- LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
-
- // Returns the total size of all the tensors in the model in bytes
- LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
-
- // Returns the total number of parameters in the model
- LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
-
- // Get a llama model tensor
- LLAMA_API struct lm_ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);
-
- // Returns true if the model contains an encoder that requires llama_encode() call
- LLAMA_API bool llama_model_has_encoder(const struct llama_model * model);
-
- // For encoder-decoder models, this function returns the id of the token that must be provided
- // to the decoder to start generating the output sequence. For other models, it returns -1.
- LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
-
- // Returns 0 on success
- LLAMA_API uint32_t llama_model_quantize(
- const char * fname_inp,
- const char * fname_out,
- const llama_model_quantize_params * params);
-
- // Apply a LoRA adapter to a loaded model
- // path_base_model is the path to a higher quality model to use as a base for
- // the layers modified by the adapter. Can be NULL to use the current loaded model.
- // The model needs to be reloaded before applying a new adapter, otherwise the adapter
- // will be applied on top of the previous one
- // Returns 0 on success
- LLAMA_API int32_t llama_model_apply_lora_from_file(
- const struct llama_model * model,
- const char * path_lora,
- float scale,
- const char * path_base_model,
- int32_t n_threads);
-
- // Apply a loaded control vector to a llama_context, or if data is NULL, clear
- // the currently loaded vector.
- // n_embd should be the size of a single layer's control, and data should point
- // to an n_embd x n_layers buffer starting from layer 1.
- // il_start and il_end are the layer range the vector should apply to (both inclusive)
- // See llama_control_vector_load in common to load a control vector.
- LLAMA_API int32_t llama_control_vector_apply(
- struct llama_context * lctx,
- const float * data,
- size_t len,
- int32_t n_embd,
- int32_t il_start,
- int32_t il_end);
-
- //
- // KV cache
- //
-
- // Information associated with an individual cell in the KV cache view.
- struct llama_kv_cache_view_cell {
- // The position for this cell. Takes KV cache shifts into account.
- // May be negative if the cell is not populated.
- llama_pos pos;
- };
-
- // An updateable view of the KV cache.
- struct llama_kv_cache_view {
- // Number of KV cache cells. This will be the same as the context size.
- int32_t n_cells;
-
- // Maximum number of sequences that can exist in a cell. It's not an error
- // if there are more sequences in a cell than this value, however they will
- // not be visible in the view cells_sequences.
- int32_t n_seq_max;
-
- // Number of tokens in the cache. For example, if there are two populated
- // cells, the first with 1 sequence id in it and the second with 2 sequence
- // ids then you'll have 3 tokens.
- int32_t token_count;
-
- // Number of populated cache cells.
- int32_t used_cells;
-
- // Maximum contiguous empty slots in the cache.
- int32_t max_contiguous;
-
- // Index to the start of the max_contiguous slot range. Can be negative
- // when cache is full.
- int32_t max_contiguous_idx;
-
- // Information for an individual cell.
- struct llama_kv_cache_view_cell * cells;
-
- // The sequences for each cell. There will be n_seq_max items per cell.
- llama_seq_id * cells_sequences;
- };
-
- // Create an empty KV cache view. (use only for debugging purposes)
- LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max);
-
- // Free a KV cache view. (use only for debugging purposes)
- LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);
-
- // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
- LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);
-
- // Returns the number of tokens in the KV cache (slow, use only for debug)
- // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
- LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx);
-
- // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
- LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx);
-
- // Clear the KV cache - both cell info is erased and KV data is zeroed
- LLAMA_API void llama_kv_cache_clear(
- struct llama_context * ctx);
-
- // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
- // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
- // seq_id < 0 : match any sequence
- // p0 < 0 : [0, p1]
- // p1 < 0 : [p0, inf)
- LLAMA_API bool llama_kv_cache_seq_rm(
- struct llama_context * ctx,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1);
-
- // Copy all tokens that belong to the specified sequence to another sequence
- // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
- // p0 < 0 : [0, p1]
- // p1 < 0 : [p0, inf)
- LLAMA_API void llama_kv_cache_seq_cp(
- struct llama_context * ctx,
- llama_seq_id seq_id_src,
- llama_seq_id seq_id_dst,
- llama_pos p0,
- llama_pos p1);
-
- // Removes all tokens that do not belong to the specified sequence
- LLAMA_API void llama_kv_cache_seq_keep(
- struct llama_context * ctx,
- llama_seq_id seq_id);
-
- // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
- // If the KV cache is RoPEd, the KV data is updated accordingly:
- // - lazily on next llama_decode()
- // - explicitly with llama_kv_cache_update()
- // p0 < 0 : [0, p1]
- // p1 < 0 : [p0, inf)
- LLAMA_API void llama_kv_cache_seq_add(
- struct llama_context * ctx,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1,
- llama_pos delta);
-
- // Integer division of the positions by factor of `d > 1`
- // If the KV cache is RoPEd, the KV data is updated accordingly:
- // - lazily on next llama_decode()
- // - explicitly with llama_kv_cache_update()
- // p0 < 0 : [0, p1]
- // p1 < 0 : [p0, inf)
- LLAMA_API void llama_kv_cache_seq_div(
- struct llama_context * ctx,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1,
- int d);
-
- // Returns the largest position present in the KV cache for the specified sequence
- LLAMA_API llama_pos llama_kv_cache_seq_pos_max(
- struct llama_context * ctx,
- llama_seq_id seq_id);
-
- // Defragment the KV cache
- // This will be applied:
- // - lazily on next llama_decode()
- // - explicitly with llama_kv_cache_update()
- LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx);
-
- // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
- LLAMA_API void llama_kv_cache_update(struct llama_context * ctx);
-
- //
- // State / sessions
- //
-
- // Returns the maximum size in bytes of the state (rng, logits, embedding
- // and kv_cache) - will often be smaller after compacting tokens
- LLAMA_API size_t llama_state_get_size(const struct llama_context * ctx);
- LLAMA_API DEPRECATED(size_t llama_get_state_size(const struct llama_context * ctx),
- "use llama_state_get_size instead");
-
- // Copies the state to the specified destination address.
- // Destination needs to have allocated enough memory.
- // Returns the number of bytes copied
- LLAMA_API size_t llama_state_get_data(
- struct llama_context * ctx,
- uint8_t * dst);
- LLAMA_API DEPRECATED(size_t llama_copy_state_data(
- struct llama_context * ctx,
- uint8_t * dst),
- "use llama_state_get_data instead");
-
- // Set the state reading from the specified address
- // Returns the number of bytes read
- LLAMA_API size_t llama_state_set_data(
- struct llama_context * ctx,
- const uint8_t * src);
- LLAMA_API DEPRECATED(size_t llama_set_state_data(
- struct llama_context * ctx,
- const uint8_t * src),
- "use llama_state_set_data instead");
-
- // Save/load session file
- LLAMA_API bool llama_state_load_file(
- struct llama_context * ctx,
- const char * path_session,
- llama_token * tokens_out,
- size_t n_token_capacity,
- size_t * n_token_count_out);
- LLAMA_API DEPRECATED(bool llama_load_session_file(
- struct llama_context * ctx,
- const char * path_session,
- llama_token * tokens_out,
- size_t n_token_capacity,
- size_t * n_token_count_out),
- "use llama_state_load_file instead");
-
- LLAMA_API bool llama_state_save_file(
- struct llama_context * ctx,
- const char * path_session,
- const llama_token * tokens,
- size_t n_token_count);
- LLAMA_API DEPRECATED(bool llama_save_session_file(
- struct llama_context * ctx,
- const char * path_session,
- const llama_token * tokens,
- size_t n_token_count),
- "use llama_state_save_file instead");
-
- // Get the exact size needed to copy the KV cache of a single sequence
- LLAMA_API size_t llama_state_seq_get_size(
- struct llama_context * ctx,
- llama_seq_id seq_id);
-
- // Copy the KV cache of a single sequence into the specified buffer
- LLAMA_API size_t llama_state_seq_get_data(
- struct llama_context * ctx,
- uint8_t * dst,
- llama_seq_id seq_id);
-
- // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
- // Returns:
- // - Positive: Ok
- // - Zero: Failed to load
- LLAMA_API size_t llama_state_seq_set_data(
- struct llama_context * ctx,
- const uint8_t * src,
- llama_seq_id dest_seq_id);
-
- LLAMA_API size_t llama_state_seq_save_file(
- struct llama_context * ctx,
- const char * filepath,
- llama_seq_id seq_id,
- const llama_token * tokens,
- size_t n_token_count);
-
- LLAMA_API size_t llama_state_seq_load_file(
- struct llama_context * ctx,
- const char * filepath,
- llama_seq_id dest_seq_id,
- llama_token * tokens_out,
- size_t n_token_capacity,
- size_t * n_token_count_out);
-
- //
- // Decoding
- //
-
- // Return batch for single sequence of tokens starting at pos_0
- //
- // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
- //
- LLAMA_API struct llama_batch llama_batch_get_one(
- llama_token * tokens,
- int32_t n_tokens,
- llama_pos pos_0,
- llama_seq_id seq_id);
-
- // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
- // Each token can be assigned up to n_seq_max sequence ids
- // The batch has to be freed with llama_batch_free()
- // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
- // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
- // The rest of the llama_batch members are allocated with size n_tokens
- // All members are left uninitialized
- LLAMA_API struct llama_batch llama_batch_init(
- int32_t n_tokens,
- int32_t embd,
- int32_t n_seq_max);
-
- // Frees a batch of tokens allocated with llama_batch_init()
- LLAMA_API void llama_batch_free(struct llama_batch batch);
-
- // Processes a batch of tokens with the encoder part of the encoder-decoder model.
- // Stores the encoder output internally for later use by the decoder cross-attention layers.
- // 0 - success
- // < 0 - error
- LLAMA_API int32_t llama_encode(
- struct llama_context * ctx,
- struct llama_batch batch);
-
- // Positive return values do not mean a fatal error, but rather a warning.
- // 0 - success
- // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
- // < 0 - error
- LLAMA_API int32_t llama_decode(
- struct llama_context * ctx,
- struct llama_batch batch);
-
- // Set the number of threads used for decoding
- // n_threads is the number of threads used for generation (single token)
- // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
- LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);
-
- // Get the number of threads used for generation of a single token.
- LLAMA_API uint32_t llama_n_threads(struct llama_context * ctx);
-
- // Get the number of threads used for prompt and batch processing (multiple tokens).
- LLAMA_API uint32_t llama_n_threads_batch(struct llama_context * ctx);
-
- // Set whether the model is in embeddings mode or not
- // If true, embeddings will be returned but logits will not
- LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings);
-
- // Set whether to use causal attention or not
- // If set to true, the model will only attend to the past tokens
- LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
-
- // Set abort callback
- LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, lm_ggml_abort_callback abort_callback, void * abort_callback_data);
-
- // Wait until all computations are finished
- // This is automatically done when using one of the functions below to obtain the computation results
- // and is not necessary to call it explicitly in most cases
- LLAMA_API void llama_synchronize(struct llama_context * ctx);
-
- // Token logits obtained from the last call to llama_decode()
- // The logits for which llama_batch.logits[i] != 0 are stored contiguously
- // in the order they have appeared in the batch.
- // Rows: number of tokens for which llama_batch.logits[i] != 0
- // Cols: n_vocab
- LLAMA_API float * llama_get_logits(struct llama_context * ctx);
-
- // Logits for the ith token. For positive indices, equivalent to:
- // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
- // Negative indices can be used to access logits in reverse order, -1 is the last logit.
- // returns NULL for invalid ids.
- LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);
-
- // Get all output token embeddings.
- // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
- // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
- // in the order they have appeared in the batch.
- // shape: [n_outputs*n_embd]
- // Otherwise, returns NULL.
- LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
-
- // Get the embeddings for the ith token. For positive indices, equivalent to:
- // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
- // Negative indices can be used to access embeddings in reverse order, -1 is the last embedding.
- // shape: [n_embd] (1-dimensional)
- // returns NULL for invalid ids.
- LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
-
- // Get the embeddings for a sequence id
- // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
- // shape: [n_embd] (1-dimensional)
- LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
-
- //
- // Vocab
- //
-
- LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token);
-
- LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);
-
- LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token);
-
- // Check if the token is supposed to end generation (end-of-generation, e.g. EOS, EOT, etc.)
- LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token);
-
- // Identify whether a token id is a control token or a renderable token
- LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token);
-
- // Special tokens
- LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
- LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
- LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification
- LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator
- LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
- LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding
-
- // Returns -1 if unknown, 1 for true or 0 for false.
- LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);
-
- // Returns -1 if unknown, 1 for true or 0 for false.
- LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);
-
- // Codellama infill tokens
- LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
- LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
- LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
- LLAMA_API llama_token llama_token_eot (const struct llama_model * model); // End of infill middle
-
- //
- // Tokenization
- //
-
- /// @details Convert the provided text into tokens.
- /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
- /// @return Returns the number of tokens on success, no more than n_tokens_max
- /// @return Returns a negative number on failure - the number of tokens that would have been returned
- /// @param add_special Allow adding BOS and EOS tokens if the model is configured to do so.
- /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
- /// as plaintext. Does not insert a leading space.
- LLAMA_API int32_t llama_tokenize(
- const struct llama_model * model,
- const char * text,
- int32_t text_len,
- llama_token * tokens,
- int32_t n_tokens_max,
- bool add_special,
- bool parse_special);
-
- // Token Id -> Piece.
- // Uses the vocabulary in the provided context.
- // Does not write null terminator to the buffer.
- // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix')
- // @param special If true, special tokens are rendered in the output.
- LLAMA_API int32_t llama_token_to_piece(
- const struct llama_model * model,
- llama_token token,
- char * buf,
- int32_t length,
- int32_t lstrip,
- bool special);
-
- /// @details Convert the provided tokens into text (inverse of llama_tokenize()).
- /// @param text The char pointer must be large enough to hold the resulting text.
- /// @return Returns the number of chars/bytes on success, no more than text_len_max.
- /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned.
- /// @param remove_special Allow removing BOS and EOS tokens if the model is configured to do so.
- /// @param unparse_special If true, special tokens are rendered in the output.
- LLAMA_API int32_t llama_detokenize(
- const struct llama_model * model,
- const llama_token * tokens,
- int32_t n_tokens,
- char * text,
- int32_t text_len_max,
- bool remove_special,
- bool unparse_special);
-
- /// Apply chat template. Inspired by hf apply_chat_template() in Python.
- /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
- /// NOTE: This function does not use a jinja parser. It only supports a pre-defined list of templates. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
- /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.
- /// @param chat Pointer to a list of multiple llama_chat_message
- /// @param n_msg Number of llama_chat_message in this chat
- /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
- /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)
- /// @param length The size of the allocated buffer
- /// @return The total number of bytes of the formatted prompt. If it is larger than the size of the buffer, you may need to re-alloc it and then re-apply the template.
- LLAMA_API int32_t llama_chat_apply_template(
- const struct llama_model * model,
- const char * tmpl,
- const struct llama_chat_message * chat,
- size_t n_msg,
- bool add_ass,
- char * buf,
- int32_t length);
-
- //
- // Grammar
- //
-
- /// Initialize a llama_grammar.
- ///
- /// @param rules The rule elements of the grammar to initialize.
- /// @param n_rules The number of rules.
- /// @param start_rule_index The index of the root rule (the starting point of the grammar).
- /// @return The initialized llama_grammar or nullptr if initialization failed.
- LLAMA_API struct llama_grammar * llama_grammar_init(
- const llama_grammar_element ** rules,
- size_t n_rules,
- size_t start_rule_index);
-
- LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
-
- LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);
-
- //
- // Sampling functions
- //
-
- // Sets the current rng seed.
- LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
-
- /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
- /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
- LLAMA_API void llama_sample_repetition_penalties(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- const llama_token * last_tokens,
- size_t penalty_last_n,
- float penalty_repeat,
- float penalty_freq,
- float penalty_present);
-
- /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
- /// @param logits Logits extracted from the original generation context.
- /// @param logits_guidance Logits extracted from a separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
- /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
- LLAMA_API void llama_sample_apply_guidance(
- struct llama_context * ctx,
- float * logits,
- float * logits_guidance,
- float scale);
-
- /// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.
- LLAMA_API void llama_sample_softmax(
- struct llama_context * ctx,
- llama_token_data_array * candidates);
-
- /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
- LLAMA_API void llama_sample_top_k(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- int32_t k,
- size_t min_keep);
-
- /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
- LLAMA_API void llama_sample_top_p(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float p,
- size_t min_keep);
-
- /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
- LLAMA_API void llama_sample_min_p(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float p,
- size_t min_keep);
-
- /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
- LLAMA_API void llama_sample_tail_free(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float z,
- size_t min_keep);
-
- /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
- LLAMA_API void llama_sample_typical(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float p,
- size_t min_keep);
-
- /// @details Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772.
- LLAMA_API void llama_sample_entropy(
- struct llama_context * ctx,
- llama_token_data_array * candidates_p,
- float min_temp,
- float max_temp,
- float exponent_val);
-
- LLAMA_API void llama_sample_temp(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float temp);
-
- /// @details Apply constraints from grammar
- LLAMA_API void llama_sample_grammar(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- const struct llama_grammar * grammar);
-
- /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
- /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
- /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
- /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
- /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
- /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
- LLAMA_API llama_token llama_sample_token_mirostat(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float tau,
- float eta,
- int32_t m,
- float * mu);
-
- /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
- /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
- /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
- /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
- /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
- LLAMA_API llama_token llama_sample_token_mirostat_v2(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float tau,
- float eta,
- float * mu);
-
- /// @details Selects the token with the highest probability.
- /// Does not compute the token probabilities. Use llama_sample_softmax() instead.
- LLAMA_API llama_token llama_sample_token_greedy(
- struct llama_context * ctx,
- llama_token_data_array * candidates);
-
- /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
- LLAMA_API llama_token llama_sample_token(
- struct llama_context * ctx,
- llama_token_data_array * candidates);
-
- /// @details Accepts the sampled token into the grammar
- LLAMA_API void llama_grammar_accept_token(
- struct llama_context * ctx,
- struct llama_grammar * grammar,
- llama_token token);
-
- //
- // Model split
- //
-
- /// @details Build a split GGUF final path for this chunk.
- /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
- // Returns the split_path length.
- LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);
-
- /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
- /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
- // Returns the split_prefix length.
- LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
-
- // Performance information
- LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
-
- LLAMA_API void llama_print_timings(struct llama_context * ctx);
- LLAMA_API void llama_reset_timings(struct llama_context * ctx);
-
- // Print system information
- LLAMA_API const char * llama_print_system_info(void);
-
- // Set callback for all future logging events.
- // If this is not called, or NULL is supplied, everything is output on stderr.
- LLAMA_API void llama_log_set(lm_ggml_log_callback log_callback, void * user_data);
-
- LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
-
- #ifdef __cplusplus
- }
- #endif
-
- // Internal API to be implemented by llama.cpp and used by tests/benchmarks only
- #ifdef LLAMA_API_INTERNAL
-
- #include <random>
- #include <string>
- #include <vector>
-
- struct lm_ggml_tensor;
-
- struct llama_partial_utf8 {
- uint32_t value; // bit value so far (unshifted)
- int n_remain; // num bytes remaining; -1 indicates invalid sequence
- };
-
- struct llama_grammar {
- const std::vector<std::vector<llama_grammar_element>> rules;
- std::vector<std::vector<const llama_grammar_element *>> stacks;
-
- // buffer for partially generated UTF-8 sequence from accepted tokens
- llama_partial_utf8 partial_utf8;
- };
-
- struct llama_grammar_candidate {
- size_t index;
- const uint32_t * code_points;
- llama_partial_utf8 partial_utf8;
- };
-
- const std::vector<std::pair<std::string, struct lm_ggml_tensor *>> & llama_internal_get_tensor_map(
- struct llama_context * ctx
- );
-
- void llama_grammar_accept(
- const std::vector<std::vector<llama_grammar_element>> & rules,
- const std::vector<std::vector<const llama_grammar_element *>> & stacks,
- const uint32_t chr,
- std::vector<std::vector<const llama_grammar_element *>> & new_stacks);
-
- std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
- const std::string & src,
- llama_partial_utf8 partial_start);
-
- // Randomly selects a token from the candidates based on their probabilities using the given std::mt19937.
- // This is a temporary workaround in order to fix race conditions when sampling with multiple sequences.
- llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng);
-
- #endif // LLAMA_API_INTERNAL
-
- #endif // LLAMA_H
+ #ifndef LLAMA_H
+ #define LLAMA_H
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdbool.h>
+
+ #ifdef LLAMA_SHARED
+ # if defined(_WIN32) && !defined(__MINGW32__)
+ # ifdef LLAMA_BUILD
+ # define LLAMA_API __declspec(dllexport)
+ # else
+ # define LLAMA_API __declspec(dllimport)
+ # endif
+ # else
+ # define LLAMA_API __attribute__ ((visibility ("default")))
+ # endif
+ #else
+ # define LLAMA_API
+ #endif
+
+ #ifdef __GNUC__
+ # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
+ #elif defined(_MSC_VER)
+ # define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
+ #else
+ # define DEPRECATED(func, hint) func
+ #endif
+
+ #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
+
+ #define LLAMA_MAX_RNG_STATE (64*1024)
+
+ #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
+ #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
+ #define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'
+
+ #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
+ #define LLAMA_SESSION_VERSION 6
+
+ #define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ
+ #define LLAMA_STATE_SEQ_VERSION 1
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ //
+ // C interface
+ //
+ // TODO: show sample usage
+ //
+
+ struct llama_model;
+ struct llama_context;
+
+ typedef int32_t llama_pos;
+ typedef int32_t llama_token;
+ typedef int32_t llama_seq_id;
+
+ enum llama_vocab_type {
+ LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
+ LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
+ LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE
+ LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece
+ LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram
+ };
+
+ // pre-tokenization types
+ enum llama_vocab_pre_type {
+ LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0,
+ LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1,
+ LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2,
+ LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
+ LLAMA_VOCAB_PRE_TYPE_FALCON = 4,
+ LLAMA_VOCAB_PRE_TYPE_MPT = 5,
+ LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
+ LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
+ LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
+ LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
+ LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10,
+ LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11,
+ LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
+ LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
+ LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
+ LLAMA_VOCAB_PRE_TYPE_PORO = 15,
+ LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16,
+ LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17,
+ LLAMA_VOCAB_PRE_TYPE_VIKING = 18,
+ LLAMA_VOCAB_PRE_TYPE_JAIS = 19,
+ };
+
+ // note: these values should be synchronized with lm_ggml_rope
+ // TODO: maybe move this enum to ggml.h (lm_ggml_rope_type)
+ enum llama_rope_type {
+ LLAMA_ROPE_TYPE_NONE = -1,
+ LLAMA_ROPE_TYPE_NORM = 0,
+ LLAMA_ROPE_TYPE_NEOX = 2,
+ LLAMA_ROPE_TYPE_GLM = 4,
+ };
+
+ enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
+ LLAMA_TOKEN_TYPE_UNDEFINED = 0,
+ LLAMA_TOKEN_TYPE_NORMAL = 1,
+ LLAMA_TOKEN_TYPE_UNKNOWN = 2,
+ LLAMA_TOKEN_TYPE_CONTROL = 3,
+ LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
+ LLAMA_TOKEN_TYPE_UNUSED = 5,
+ LLAMA_TOKEN_TYPE_BYTE = 6,
+ };
+
+ enum llama_token_attr {
+ LLAMA_TOKEN_ATTR_UNDEFINED = 0,
+ LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0,
+ LLAMA_TOKEN_ATTR_UNUSED = 1 << 1,
+ LLAMA_TOKEN_ATTR_NORMAL = 1 << 2,
+ LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL?
+ LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
+ LLAMA_TOKEN_ATTR_BYTE = 1 << 5,
+ LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6,
+ LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7,
+ LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8,
+ LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9,
+ };
+
+ // model file types
+ enum llama_ftype {
+ LLAMA_FTYPE_ALL_F32 = 0,
+ LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
+ // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
+ // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
+ LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors
166
+ LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors
167
+ LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
168
+
169
+ LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
170
+ };
171
+
172
+ enum llama_rope_scaling_type {
173
+ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1,
174
+ LLAMA_ROPE_SCALING_TYPE_NONE = 0,
175
+ LLAMA_ROPE_SCALING_TYPE_LINEAR = 1,
176
+ LLAMA_ROPE_SCALING_TYPE_YARN = 2,
177
+ LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN,
178
+ };
179
+
180
+ enum llama_pooling_type {
181
+ LLAMA_POOLING_TYPE_UNSPECIFIED = -1,
182
+ LLAMA_POOLING_TYPE_NONE = 0,
183
+ LLAMA_POOLING_TYPE_MEAN = 1,
184
+ LLAMA_POOLING_TYPE_CLS = 2,
185
+ LLAMA_POOLING_TYPE_LAST = 3,
186
+ };
187
+
188
+ enum llama_attention_type {
189
+ LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1,
190
+ LLAMA_ATTENTION_TYPE_CAUSAL = 0,
191
+ LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1,
192
+ };
193
+
194
+ enum llama_split_mode {
195
+ LLAMA_SPLIT_MODE_NONE = 0, // single GPU
196
+ LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
197
+ LLAMA_SPLIT_MODE_ROW = 2, // split rows across GPUs
198
+ };
199
+
200
+ typedef struct llama_token_data {
201
+ llama_token id; // token id
202
+ float logit; // log-odds of the token
203
+ float p; // probability of the token
204
+ } llama_token_data;
205
+
206
+ typedef struct llama_token_data_array {
207
+ llama_token_data * data;
208
+ size_t size;
209
+ bool sorted;
210
+ } llama_token_data_array;
211
+
212
+ typedef bool (*llama_progress_callback)(float progress, void * user_data);
213
+
214
+ // Input data for llama_decode
215
+ // A llama_batch object can contain input about one or many sequences
216
+ // The provided arrays (i.e. token, embd, pos, etc.) must have a size of n_tokens
217
+ //
218
+ // - token : the token ids of the input (used when embd is NULL)
219
+ // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
220
+ // - pos : the positions of the respective token in the sequence
221
+ // - seq_id : the sequence to which the respective token belongs
222
+ // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
223
+ //
224
+ typedef struct llama_batch {
225
+ int32_t n_tokens;
226
+
227
+ llama_token * token;
228
+ float * embd;
229
+ llama_pos * pos;
230
+ int32_t * n_seq_id;
231
+ llama_seq_id ** seq_id;
232
+ int8_t * logits; // TODO: rename this to "output"
233
+
234
+ // NOTE: helpers for smooth API transition - can be deprecated in the future
235
+ // for future-proof code, use the above fields instead and ignore everything below
236
+ //
237
+ // pos[i] = all_pos_0 + i*all_pos_1
238
+ //
239
+ llama_pos all_pos_0; // used if pos == NULL
240
+ llama_pos all_pos_1; // used if pos == NULL
241
+ llama_seq_id all_seq_id; // used if seq_id == NULL
242
+ } llama_batch;
243
+
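A minimal usage sketch (illustrative only, not part of llama.h) of how these parallel arrays are typically filled for a prompt on sequence 0, assuming the batch was allocated with llama_batch_init(n_prompt, 0, 1), declared further below:

```c
#include "llama.h"

// Fill a batch for an n_prompt-token prompt on sequence 0, requesting
// logits only for the last token. Assumes the batch was allocated with
// llama_batch_init(n_prompt, 0, 1).
static void fill_prompt_batch(struct llama_batch * batch,
                              const llama_token * prompt, int32_t n_prompt) {
    batch->n_tokens = n_prompt;
    for (int32_t i = 0; i < n_prompt; i++) {
        batch->token   [i]    = prompt[i];
        batch->pos     [i]    = i;                   // position in the sequence
        batch->n_seq_id[i]    = 1;                   // token belongs to one sequence...
        batch->seq_id  [i][0] = 0;                   // ...namely sequence 0
        batch->logits  [i]    = (i == n_prompt - 1); // output only the last token
    }
}
```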
244
+ enum llama_model_kv_override_type {
245
+ LLAMA_KV_OVERRIDE_TYPE_INT,
246
+ LLAMA_KV_OVERRIDE_TYPE_FLOAT,
247
+ LLAMA_KV_OVERRIDE_TYPE_BOOL,
248
+ LLAMA_KV_OVERRIDE_TYPE_STR,
249
+ };
250
+
251
+ struct llama_model_kv_override {
252
+ enum llama_model_kv_override_type tag;
253
+
254
+ char key[128];
255
+
256
+ union {
257
+ int64_t val_i64;
258
+ double val_f64;
259
+ bool val_bool;
260
+ char val_str[128];
261
+ };
262
+ };
263
+
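A hedged sketch of how an override might be supplied (illustrative only; the key name is a placeholder, and it assumes the loader treats a zeroed, empty-key entry as the end of the array, which is llama.cpp's convention):

```c
#include <string.h>
#include "llama.h"

// Build model params that override one integer metadata value at load time.
// Assumption: the loader stops at an entry whose key is empty, so the array
// is terminated with a zeroed element. "some.model.key" is a placeholder.
static struct llama_model_params params_with_override(void) {
    static struct llama_model_kv_override overrides[2];
    memset(overrides, 0, sizeof(overrides));
    strcpy(overrides[0].key, "some.model.key");
    overrides[0].tag     = LLAMA_KV_OVERRIDE_TYPE_INT;
    overrides[0].val_i64 = 8192;

    struct llama_model_params mparams = llama_model_default_params();
    mparams.kv_overrides = overrides; // must stay valid for the load call
    return mparams;
}
```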
264
+ struct llama_model_params {
265
+ int32_t n_gpu_layers; // number of layers to store in VRAM
266
+ enum llama_split_mode split_mode; // how to split the model across multiple GPUs
267
+
268
+ // main_gpu interpretation depends on split_mode:
269
+ // LLAMA_SPLIT_NONE: the GPU that is used for the entire model
270
+ // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results
271
+ // LLAMA_SPLIT_LAYER: ignored
272
+ int32_t main_gpu;
273
+
274
+ // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
275
+ const float * tensor_split;
276
+
277
+ // comma separated list of RPC servers to use for offloading
278
+ const char * rpc_servers;
279
+
280
+ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
281
+ // If the provided progress_callback returns true, model loading continues.
282
+ // If it returns false, model loading is immediately aborted.
283
+ llama_progress_callback progress_callback;
284
+
285
+ // context pointer passed to the progress callback
286
+ void * progress_callback_user_data;
287
+
288
+ // override key-value pairs of the model meta data
289
+ const struct llama_model_kv_override * kv_overrides;
290
+
291
+ // Keep the booleans together to avoid misalignment during copy-by-value.
292
+ bool vocab_only; // only load the vocabulary, no weights
293
+ bool use_mmap; // use mmap if possible
294
+ bool use_mlock; // force system to keep model in RAM
295
+ bool check_tensors; // validate model tensor data
296
+ };
297
+
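A minimal sketch (illustrative only) of customizing these params with a progress callback; the offload count is an example value:

```c
#include <stdio.h>
#include "llama.h"

// A progress callback that reports loading progress and never aborts;
// returning false here would abort the load immediately.
static bool on_progress(float progress, void * user_data) {
    (void) user_data;
    fprintf(stderr, "\rloading: %3.0f%%", progress * 100.0f);
    return true;
}

static struct llama_model_params make_model_params(void) {
    struct llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers      = 32;   // example value: layers to offload to VRAM
    mparams.progress_callback = on_progress;
    return mparams;
}
```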
298
+ // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
299
+ // https://github.com/ggerganov/llama.cpp/pull/7544
300
+ struct llama_context_params {
301
+ uint32_t seed; // RNG seed, -1 for random
302
+ uint32_t n_ctx; // text context, 0 = from model
303
+ uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode
304
+ uint32_t n_ubatch; // physical maximum batch size
305
+ uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
306
+ uint32_t n_threads; // number of threads to use for generation
307
+ uint32_t n_threads_batch; // number of threads to use for batch processing
308
+
309
+ enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
310
+ enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
311
+ enum llama_attention_type attention_type; // attention type to use for embeddings
312
+
313
+ // ref: https://github.com/ggerganov/llama.cpp/pull/2054
314
+ float rope_freq_base; // RoPE base frequency, 0 = from model
315
+ float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
316
+ float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model
317
+ float yarn_attn_factor; // YaRN magnitude scaling factor
318
+ float yarn_beta_fast; // YaRN low correction dim
319
+ float yarn_beta_slow; // YaRN high correction dim
320
+ uint32_t yarn_orig_ctx; // YaRN original context size
321
+ float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default)
322
+
323
+ lm_ggml_backend_sched_eval_callback cb_eval;
324
+ void * cb_eval_user_data;
325
+
326
+ enum lm_ggml_type type_k; // data type for K cache [EXPERIMENTAL]
327
+ enum lm_ggml_type type_v; // data type for V cache [EXPERIMENTAL]
328
+
329
+ // Keep the booleans together to avoid misalignment during copy-by-value.
330
+ bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
331
+ bool embeddings; // if true, extract embeddings (together with logits)
332
+ bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
333
+ bool flash_attn; // whether to use flash attention [EXPERIMENTAL]
334
+
335
+ // Abort callback
336
+ // if it returns true, execution of llama_decode() will be aborted
337
+ // currently works only with CPU execution
338
+ lm_ggml_abort_callback abort_callback;
339
+ void * abort_callback_data;
340
+ };
341
+
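A minimal sketch (illustrative only) of a typical context configuration; the numbers are example values, not recommendations, and 0 would take the model defaults where documented above:

```c
#include "llama.h"

static struct llama_context_params make_ctx_params(void) {
    struct llama_context_params cparams = llama_context_default_params();
    cparams.seed            = 42;    // fixed seed for reproducible sampling
    cparams.n_ctx           = 4096;  // context size (0 = from model)
    cparams.n_batch         = 512;   // logical batch size for llama_decode
    cparams.n_threads       = 8;     // generation threads
    cparams.n_threads_batch = 8;     // prompt/batch processing threads
    return cparams;
}
```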
342
+ // model quantization parameters
343
+ typedef struct llama_model_quantize_params {
344
+ int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
345
+ enum llama_ftype ftype; // quantize to this llama_ftype
346
+ enum lm_ggml_type output_tensor_type; // output tensor type
347
+ enum lm_ggml_type token_embedding_type; // token embeddings tensor type
348
+ bool allow_requantize; // allow quantizing non-f32/f16 tensors
349
+ bool quantize_output_tensor; // quantize output.weight
350
+ bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
351
+ bool pure; // quantize all tensors to the default type
352
+ bool keep_split; // quantize to the same number of shards
353
+ void * imatrix; // pointer to importance matrix data
354
+ void * kv_overrides; // pointer to vector containing overrides
355
+ } llama_model_quantize_params;
356
+
357
+ // grammar types
358
+ struct llama_grammar;
359
+
360
+ // grammar element type
361
+ enum llama_gretype {
362
+ // end of rule definition
363
+ LLAMA_GRETYPE_END = 0,
364
+
365
+ // start of alternate definition for rule
366
+ LLAMA_GRETYPE_ALT = 1,
367
+
368
+ // non-terminal element: reference to rule
369
+ LLAMA_GRETYPE_RULE_REF = 2,
370
+
371
+ // terminal element: character (code point)
372
+ LLAMA_GRETYPE_CHAR = 3,
373
+
374
+ // inverse char(s) ([^a], [^a-b] [^abc])
375
+ LLAMA_GRETYPE_CHAR_NOT = 4,
376
+
377
+ // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
378
+ // be an inclusive range ([a-z])
379
+ LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
380
+
381
+ // modifies a preceding LLAMA_GRETYPE_CHAR or
382
+ // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
383
+ LLAMA_GRETYPE_CHAR_ALT = 6,
384
+
385
+ // any character (.)
386
+ LLAMA_GRETYPE_CHAR_ANY = 7,
387
+ };
388
+
389
+ typedef struct llama_grammar_element {
390
+ enum llama_gretype type;
391
+ uint32_t value; // Unicode code point or rule ID
392
+ } llama_grammar_element;
393
+
394
+ // performance timing information
395
+ struct llama_timings {
396
+ double t_start_ms;
397
+ double t_end_ms;
398
+ double t_load_ms;
399
+ double t_sample_ms;
400
+ double t_p_eval_ms;
401
+ double t_eval_ms;
402
+
403
+ int32_t n_sample;
404
+ int32_t n_p_eval;
405
+ int32_t n_eval;
406
+ };
407
+
408
+ // used in chat template
409
+ typedef struct llama_chat_message {
410
+ const char * role;
411
+ const char * content;
412
+ } llama_chat_message;
413
+
414
+ // Helpers for getting default parameters
415
+ LLAMA_API struct llama_model_params llama_model_default_params(void);
416
+ LLAMA_API struct llama_context_params llama_context_default_params(void);
417
+ LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
418
+
419
+ // Initialize the llama + ggml backend
420
+ // NUMA optimizations can be enabled separately with llama_numa_init() below
421
+ // Call once at the start of the program
422
+ LLAMA_API void llama_backend_init(void);
423
+
424
+ // optional:
425
+ LLAMA_API void llama_numa_init(enum lm_ggml_numa_strategy numa);
426
+
427
+ // Call once at the end of the program - currently only used for MPI
428
+ LLAMA_API void llama_backend_free(void);
429
+
430
+ LLAMA_API struct llama_model * llama_load_model_from_file(
431
+ const char * path_model,
432
+ struct llama_model_params params);
433
+
434
+ LLAMA_API void llama_free_model(struct llama_model * model);
435
+
436
+ LLAMA_API struct llama_context * llama_new_context_with_model(
437
+ struct llama_model * model,
438
+ struct llama_context_params params);
439
+
440
+ // Frees all allocated memory
441
+ LLAMA_API void llama_free(struct llama_context * ctx);
442
+
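A minimal lifecycle sketch (illustrative only; "model.gguf" is a placeholder path): backend, then model, then context, freed in reverse order:

```c
#include <stdio.h>
#include "llama.h"

int main(void) {
    llama_backend_init();

    struct llama_model * model = llama_load_model_from_file(
            "model.gguf", llama_model_default_params());
    if (model == NULL) { fprintf(stderr, "failed to load model\n"); return 1; }

    struct llama_context * ctx = llama_new_context_with_model(
            model, llama_context_default_params());
    if (ctx == NULL) { llama_free_model(model); return 1; }

    // ... tokenize, decode, sample ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```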
443
+ LLAMA_API int64_t llama_time_us(void);
444
+
445
+ LLAMA_API size_t llama_max_devices(void);
446
+
447
+ LLAMA_API bool llama_supports_mmap (void);
448
+ LLAMA_API bool llama_supports_mlock (void);
449
+ LLAMA_API bool llama_supports_gpu_offload(void);
450
+
451
+ LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
452
+
453
+ LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
454
+ LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
455
+ LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
456
+ LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);
457
+
458
+ LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
459
+
460
+ LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
461
+ LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);
462
+
463
+ LLAMA_API int32_t llama_n_vocab (const struct llama_model * model);
464
+ LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
465
+ LLAMA_API int32_t llama_n_embd (const struct llama_model * model);
466
+ LLAMA_API int32_t llama_n_layer (const struct llama_model * model);
467
+
468
+ // Get the model's RoPE frequency scaling factor
469
+ LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
470
+
471
+ // Functions to access the model's GGUF metadata scalar values
472
+ // - The functions return the length of the string on success, or -1 on failure
473
+ // - The output string is always null-terminated and cleared on failure
474
+ // - GGUF array values are not supported by these functions
475
+
476
+ // Get metadata value as a string by key name
477
+ LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
478
+
479
+ // Get the number of metadata key/value pairs
480
+ LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
481
+
482
+ // Get metadata key name by index
483
+ LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
484
+
485
+ // Get metadata value as a string by index
486
+ LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
487
+
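A short sketch (illustrative only) of iterating the metadata with the index-based accessors above; buffer sizes are arbitrary and a negative return indicates failure:

```c
#include <stdio.h>
#include "llama.h"

// Dump all GGUF metadata key/value pairs of a loaded model.
static void dump_meta(const struct llama_model * model) {
    char key[256], val[256];
    const int32_t n = llama_model_meta_count(model);
    for (int32_t i = 0; i < n; i++) {
        if (llama_model_meta_key_by_index    (model, i, key, sizeof(key)) >= 0 &&
            llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) >= 0) {
            printf("%s = %s\n", key, val);
        }
    }
}
```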
488
+ // Get a string describing the model type
489
+ LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
490
+
491
+ // Returns the total size of all the tensors in the model in bytes
492
+ LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
493
+
494
+ // Returns the total number of parameters in the model
495
+ LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
496
+
497
+ // Get a llama model tensor
498
+ LLAMA_API struct lm_ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);
499
+
500
+ // Returns true if the model contains an encoder that requires llama_encode() call
501
+ LLAMA_API bool llama_model_has_encoder(const struct llama_model * model);
502
+
503
+ // For encoder-decoder models, this function returns id of the token that must be provided
504
+ // to the decoder to start generating output sequence. For other models, it returns -1.
505
+ LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
506
+
507
+ // Returns 0 on success
508
+ LLAMA_API uint32_t llama_model_quantize(
509
+ const char * fname_inp,
510
+ const char * fname_out,
511
+ const llama_model_quantize_params * params);
512
+
513
+ // Apply a LoRA adapter to a loaded model
514
+ // path_base_model is the path to a higher quality model to use as a base for
515
+ // the layers modified by the adapter. Can be NULL to use the currently loaded model.
516
+ // The model needs to be reloaded before applying a new adapter, otherwise the adapter
517
+ // will be applied on top of the previous one
518
+ // Returns 0 on success
519
+ LLAMA_API int32_t llama_model_apply_lora_from_file(
520
+ const struct llama_model * model,
521
+ const char * path_lora,
522
+ float scale,
523
+ const char * path_base_model,
524
+ int32_t n_threads);
525
+
526
+ // Apply a loaded control vector to a llama_context, or if data is NULL, clear
527
+ // the currently loaded vector.
528
+ // n_embd should be the size of a single layer's control, and data should point
529
+ // to an n_embd x n_layers buffer starting from layer 1.
530
+ // il_start and il_end are the layer range the vector should apply to (both inclusive)
531
+ // See llama_control_vector_load in common to load a control vector.
532
+ LLAMA_API int32_t llama_control_vector_apply(
533
+ struct llama_context * lctx,
534
+ const float * data,
535
+ size_t len,
536
+ int32_t n_embd,
537
+ int32_t il_start,
538
+ int32_t il_end);
539
+
540
+ //
541
+ // KV cache
542
+ //
543
+
544
+ // Information associated with an individual cell in the KV cache view.
545
+ struct llama_kv_cache_view_cell {
546
+ // The position for this cell. Takes KV cache shifts into account.
547
+ // May be negative if the cell is not populated.
548
+ llama_pos pos;
549
+ };
550
+
551
+ // An updateable view of the KV cache.
552
+ struct llama_kv_cache_view {
553
+ // Number of KV cache cells. This will be the same as the context size.
554
+ int32_t n_cells;
555
+
556
+ // Maximum number of sequences that can exist in a cell. It's not an error
557
+ // if there are more sequences in a cell than this value, however they will
558
+ // not be visible in the view cells_sequences.
559
+ int32_t n_seq_max;
560
+
561
+ // Number of tokens in the cache. For example, if there are two populated
562
+ // cells, the first with 1 sequence id in it and the second with 2 sequence
563
+ // ids, then you'll have 3 tokens.
564
+ int32_t token_count;
565
+
566
+ // Number of populated cache cells.
567
+ int32_t used_cells;
568
+
569
+ // Maximum contiguous empty slots in the cache.
570
+ int32_t max_contiguous;
571
+
572
+ // Index to the start of the max_contiguous slot range. Can be negative
573
+ // when cache is full.
574
+ int32_t max_contiguous_idx;
575
+
576
+ // Information for an individual cell.
577
+ struct llama_kv_cache_view_cell * cells;
578
+
579
+ // The sequences for each cell. There will be n_seq_max items per cell.
580
+ llama_seq_id * cells_sequences;
581
+ };
582
+
583
+ // Create an empty KV cache view. (use only for debugging purposes)
584
+ LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max);
585
+
586
+ // Free a KV cache view. (use only for debugging purposes)
587
+ LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);
588
+
589
+ // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
590
+ LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);
591
+
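A small sketch (illustrative only, debugging use as noted above) of inspecting cache occupancy with the view API:

```c
#include <stdio.h>
#include "llama.h"

// Print KV cache occupancy counters from a freshly updated view.
static void print_kv_usage(const struct llama_context * ctx) {
    struct llama_kv_cache_view view = llama_kv_cache_view_init(ctx, 1);
    llama_kv_cache_view_update(ctx, &view);
    printf("cells: %d  used: %d  tokens: %d  max contiguous free: %d\n",
           view.n_cells, view.used_cells, view.token_count, view.max_contiguous);
    llama_kv_cache_view_free(&view);
}
```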
592
+ // Returns the number of tokens in the KV cache (slow, use only for debug)
593
+ // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
594
+ LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx);
595
+
596
+ // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
597
+ LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx);
598
+
599
+ // Clear the KV cache - both cell info is erased and KV data is zeroed
600
+ LLAMA_API void llama_kv_cache_clear(
601
+ struct llama_context * ctx);
602
+
603
+ // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
604
+ // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
605
+ // seq_id < 0 : match any sequence
606
+ // p0 < 0 : [0, p1]
607
+ // p1 < 0 : [p0, inf)
608
+ LLAMA_API bool llama_kv_cache_seq_rm(
609
+ struct llama_context * ctx,
610
+ llama_seq_id seq_id,
611
+ llama_pos p0,
612
+ llama_pos p1);
613
+
614
+ // Copy all tokens that belong to the specified sequence to another sequence
615
+ // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
616
+ // p0 < 0 : [0, p1]
617
+ // p1 < 0 : [p0, inf)
618
+ LLAMA_API void llama_kv_cache_seq_cp(
619
+ struct llama_context * ctx,
620
+ llama_seq_id seq_id_src,
621
+ llama_seq_id seq_id_dst,
622
+ llama_pos p0,
623
+ llama_pos p1);
624
+
625
+ // Removes all tokens that do not belong to the specified sequence
626
+ LLAMA_API void llama_kv_cache_seq_keep(
627
+ struct llama_context * ctx,
628
+ llama_seq_id seq_id);
629
+
630
+ // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
631
+ // If the KV cache is RoPEd, the KV data is updated accordingly:
632
+ // - lazily on next llama_decode()
633
+ // - explicitly with llama_kv_cache_update()
634
+ // p0 < 0 : [0, p1]
635
+ // p1 < 0 : [p0, inf)
636
+ LLAMA_API void llama_kv_cache_seq_add(
637
+ struct llama_context * ctx,
638
+ llama_seq_id seq_id,
639
+ llama_pos p0,
640
+ llama_pos p1,
641
+ llama_pos delta);
642
+
643
+ // Integer division of the positions by factor of `d > 1`
644
+ // If the KV cache is RoPEd, the KV data is updated accordingly:
645
+ // - lazily on next llama_decode()
646
+ // - explicitly with llama_kv_cache_update()
647
+ // p0 < 0 : [0, p1]
648
+ // p1 < 0 : [p0, inf)
649
+ LLAMA_API void llama_kv_cache_seq_div(
650
+ struct llama_context * ctx,
651
+ llama_seq_id seq_id,
652
+ llama_pos p0,
653
+ llama_pos p1,
654
+ int d);
655
+
656
+ // Returns the largest position present in the KV cache for the specified sequence
657
+ LLAMA_API llama_pos llama_kv_cache_seq_pos_max(
658
+ struct llama_context * ctx,
659
+ llama_seq_id seq_id);
660
+
661
+ // Defragment the KV cache
662
+ // This will be applied:
663
+ // - lazily on next llama_decode()
664
+ // - explicitly with llama_kv_cache_update()
665
+ LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx);
666
+
667
+ // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
668
+ LLAMA_API void llama_kv_cache_update(struct llama_context * ctx);
669
+
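A sketch (illustrative only) of a "context shift" built from the sequence operations above, a pattern used by llama.cpp's examples when the cache fills up: drop n_discard tokens of sequence 0 after the first n_keep, then slide the remaining range back so generation can continue:

```c
#include "llama.h"

static void context_shift(struct llama_context * ctx,
                          llama_pos n_keep, llama_pos n_past, llama_pos n_discard) {
    // remove [n_keep, n_keep + n_discard) from sequence 0
    llama_kv_cache_seq_rm (ctx, 0, n_keep, n_keep + n_discard);
    // shift [n_keep + n_discard, n_past) back by n_discard positions
    llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, n_past, -n_discard);
    // apply now, or let the next llama_decode() apply it lazily
    llama_kv_cache_update(ctx);
}
```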
670
+ //
671
+ // State / sessions
672
+ //
673
+
674
+ // Returns the maximum size in bytes of the state (rng, logits, embedding
675
+ // and kv_cache) - will often be smaller after compacting tokens
676
+ LLAMA_API size_t llama_state_get_size(const struct llama_context * ctx);
677
+ LLAMA_API DEPRECATED(size_t llama_get_state_size(const struct llama_context * ctx),
678
+ "use llama_state_get_size instead");
679
+
680
+ // Copies the state to the specified destination address.
681
+ // Destination needs to have allocated enough memory.
682
+ // Returns the number of bytes copied
683
+ LLAMA_API size_t llama_state_get_data(
684
+ struct llama_context * ctx,
685
+ uint8_t * dst);
686
+ LLAMA_API DEPRECATED(size_t llama_copy_state_data(
687
+ struct llama_context * ctx,
688
+ uint8_t * dst),
689
+ "use llama_state_get_data instead");
690
+
691
+ // Set the state reading from the specified address
692
+ // Returns the number of bytes read
693
+ LLAMA_API size_t llama_state_set_data(
694
+ struct llama_context * ctx,
695
+ const uint8_t * src);
696
+ LLAMA_API DEPRECATED(size_t llama_set_state_data(
697
+ struct llama_context * ctx,
698
+ const uint8_t * src),
699
+ "use llama_state_set_data instead");
700
+
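A minimal snapshot/restore sketch (illustrative only): llama_state_get_size() gives an upper bound, and the value returned by llama_state_get_data() is the actual number of bytes written:

```c
#include <stdlib.h>
#include "llama.h"

// Snapshot the full context state into a heap buffer (e.g. to rewind
// generation later). The caller owns and frees the buffer.
static uint8_t * snapshot_state(struct llama_context * ctx, size_t * n_out) {
    uint8_t * buf = malloc(llama_state_get_size(ctx));
    if (buf != NULL) {
        *n_out = llama_state_get_data(ctx, buf);
    }
    return buf;
}

static void restore_state(struct llama_context * ctx, const uint8_t * buf) {
    llama_state_set_data(ctx, buf);
}
```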
701
+ // Save/load session file
702
+ LLAMA_API bool llama_state_load_file(
703
+ struct llama_context * ctx,
704
+ const char * path_session,
705
+ llama_token * tokens_out,
706
+ size_t n_token_capacity,
707
+ size_t * n_token_count_out);
708
+ LLAMA_API DEPRECATED(bool llama_load_session_file(
709
+ struct llama_context * ctx,
710
+ const char * path_session,
711
+ llama_token * tokens_out,
712
+ size_t n_token_capacity,
713
+ size_t * n_token_count_out),
714
+ "use llama_state_load_file instead");
715
+
716
+ LLAMA_API bool llama_state_save_file(
717
+ struct llama_context * ctx,
718
+ const char * path_session,
719
+ const llama_token * tokens,
720
+ size_t n_token_count);
721
+ LLAMA_API DEPRECATED(bool llama_save_session_file(
722
+ struct llama_context * ctx,
723
+ const char * path_session,
724
+ const llama_token * tokens,
725
+ size_t n_token_count),
726
+ "use llama_state_save_file instead");
727
+
728
+ // Get the exact size needed to copy the KV cache of a single sequence
729
+ LLAMA_API size_t llama_state_seq_get_size(
730
+ struct llama_context * ctx,
731
+ llama_seq_id seq_id);
732
+
733
+ // Copy the KV cache of a single sequence into the specified buffer
734
+ LLAMA_API size_t llama_state_seq_get_data(
735
+ struct llama_context * ctx,
736
+ uint8_t * dst,
737
+ llama_seq_id seq_id);
738
+
739
+ // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
740
+ // Returns:
741
+ // - Positive: Ok
742
+ // - Zero: Failed to load
743
+ LLAMA_API size_t llama_state_seq_set_data(
744
+ struct llama_context * ctx,
745
+ const uint8_t * src,
746
+ llama_seq_id dest_seq_id);
747
+
748
+ LLAMA_API size_t llama_state_seq_save_file(
749
+ struct llama_context * ctx,
750
+ const char * filepath,
751
+ llama_seq_id seq_id,
752
+ const llama_token * tokens,
753
+ size_t n_token_count);
754
+
755
+ LLAMA_API size_t llama_state_seq_load_file(
756
+ struct llama_context * ctx,
757
+ const char * filepath,
758
+ llama_seq_id dest_seq_id,
759
+ llama_token * tokens_out,
760
+ size_t n_token_capacity,
761
+ size_t * n_token_count_out);
762
+
763
+ //
764
+ // Decoding
765
+ //
766
+
767
+ // Return a batch for a single sequence of tokens starting at pos_0
768
+ //
769
+ // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
770
+ //
771
+ LLAMA_API struct llama_batch llama_batch_get_one(
772
+ llama_token * tokens,
773
+ int32_t n_tokens,
774
+ llama_pos pos_0,
775
+ llama_seq_id seq_id);
776
+
777
+ // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
778
+ // Each token can be assigned up to n_seq_max sequence ids
779
+ // The batch has to be freed with llama_batch_free()
780
+ // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
781
+ // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
782
+ // The rest of the llama_batch members are allocated with size n_tokens
783
+ // All members are left uninitialized
784
+ LLAMA_API struct llama_batch llama_batch_init(
785
+ int32_t n_tokens,
786
+ int32_t embd,
787
+ int32_t n_seq_max);
788
+
789
+ // Frees a batch of tokens allocated with llama_batch_init()
790
+ LLAMA_API void llama_batch_free(struct llama_batch batch);
791
+
792
+ // Processes a batch of tokens with the encoder part of the encoder-decoder model.
793
+ // Stores the encoder output internally for later use by the decoder cross-attention layers.
794
+ // 0 - success
795
+ // < 0 - error
796
+ LLAMA_API int32_t llama_encode(
797
+ struct llama_context * ctx,
798
+ struct llama_batch batch);
799
+
800
+ // Positive return values do not indicate a fatal error, but rather a warning.
801
+ // 0 - success
802
+ // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
803
+ // < 0 - error
804
+ LLAMA_API int32_t llama_decode(
805
+ struct llama_context * ctx,
806
+ struct llama_batch batch);
807
+
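A short sketch (illustrative only) of evaluating a tokenized prompt as one batch via the transitional llama_batch_get_one() helper, checking the return codes documented above:

```c
#include <stdio.h>
#include "llama.h"

static int eval_prompt(struct llama_context * ctx,
                       llama_token * tokens, int32_t n_tokens) {
    // single sequence (id 0) starting at position 0
    struct llama_batch batch = llama_batch_get_one(tokens, n_tokens, 0, 0);
    const int32_t ret = llama_decode(ctx, batch);
    if (ret == 1) {
        fprintf(stderr, "no KV slot: reduce the batch or increase the context\n");
    } else if (ret < 0) {
        fprintf(stderr, "llama_decode failed: %d\n", ret);
    }
    return ret;
}
```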
808
+ // Set the number of threads used for decoding
809
+ // n_threads is the number of threads used for generation (single token)
810
+ // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
811
+ LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);
812
+
813
+ // Get the number of threads used for generation of a single token.
814
+ LLAMA_API uint32_t llama_n_threads(struct llama_context * ctx);
815
+
816
+ // Get the number of threads used for prompt and batch processing (multiple tokens).
817
+ LLAMA_API uint32_t llama_n_threads_batch(struct llama_context * ctx);
818
+
819
+ // Set whether the model is in embeddings mode or not
820
+ // If true, embeddings will be returned but logits will not
821
+ LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings);
822
+
823
+ // Set whether to use causal attention or not
824
+ // If set to true, the model will only attend to the past tokens
825
+ LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
826
+
827
+ // Set abort callback
828
+ LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, lm_ggml_abort_callback abort_callback, void * abort_callback_data);
829
+
830
+ // Wait until all computations are finished
831
+ // This is automatically done when using one of the functions below to obtain the computation results
832
+ // and is not necessary to call it explicitly in most cases
833
+ LLAMA_API void llama_synchronize(struct llama_context * ctx);
834
+
835
+ // Token logits obtained from the last call to llama_decode()
836
+ // The logits for which llama_batch.logits[i] != 0 are stored contiguously
837
+ // in the order they have appeared in the batch.
838
+ // Rows: number of tokens for which llama_batch.logits[i] != 0
839
+ // Cols: n_vocab
840
+ LLAMA_API float * llama_get_logits(struct llama_context * ctx);
841
+
842
+ // Logits for the ith token. For positive indices, equivalent to:
843
+ // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
844
+ // Negative indices can be used to access logits in reverse order, -1 is the last logit.
845
+ // returns NULL for invalid ids.
846
+ LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);
847
+
848
+ // Get all output token embeddings.
849
+ // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
850
+ // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
851
+ // in the order they have appeared in the batch.
852
+ // shape: [n_outputs*n_embd]
853
+ // Otherwise, returns NULL.
854
+ LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
855
+
856
+ // Get the embeddings for the ith token. For positive indices, equivalent to:
857
+ // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
858
+ // Negative indices can be used to access embeddings in reverse order, -1 is the last embedding.
859
+ // shape: [n_embd] (1-dimensional)
860
+ // returns NULL for invalid ids.
861
+ LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
862
+
863
+ // Get the embeddings for a sequence id
864
+ // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
865
+ // shape: [n_embd] (1-dimensional)
866
+ LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
867
+
868
+ //
869
+ // Vocab
870
+ //
871
+
872
+ LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token);
873
+
874
+ LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);
875
+
876
+ LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token);
877
+
878
+ // Check if the token is supposed to end generation (end-of-generation, e.g. EOS, EOT, etc.)
879
+ LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token);
880
+
881
+ // Identify whether the token id is a control token or a renderable token
882
+ LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token);
883
+
884
+ // Special tokens
885
+ LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
886
+ LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
887
+ LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification
888
+ LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator
889
+ LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
890
+ LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding
891
+
892
+ // Returns -1 if unknown, 1 for true or 0 for false.
893
+ LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);
894
+
895
+ // Returns -1 if unknown, 1 for true or 0 for false.
896
+ LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);
897
+
898
+ // Codellama infill tokens
899
+ LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
900
+ LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
901
+ LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
902
+ LLAMA_API llama_token llama_token_eot (const struct llama_model * model); // End of infill middle
903
+
904
+ //
905
+ // Tokenization
906
+ //
907
+
908
+ /// @details Convert the provided text into tokens.
909
+ /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
910
+ /// @return Returns the number of tokens on success, no more than n_tokens_max
911
+ /// @return Returns a negative number on failure - the number of tokens that would have been returned
912
+ /// @param add_special Allow adding BOS and EOS tokens if the model is configured to do so.
913
+ /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and are treated
914
+ /// as plaintext. Does not insert a leading space.
915
+ LLAMA_API int32_t llama_tokenize(
916
+ const struct llama_model * model,
917
+ const char * text,
918
+ int32_t text_len,
919
+ llama_token * tokens,
920
+ int32_t n_tokens_max,
921
+ bool add_special,
922
+ bool parse_special);
923
+
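A sketch (illustrative only) of the negative-return convention above: on overflow the function returns the negated number of tokens that would have been produced, so a single retry always suffices:

```c
#include <stdlib.h>
#include <string.h>
#include "llama.h"

// Tokenize with a deliberately small buffer, then retry once with the
// exact required capacity. The caller owns and frees the returned array.
static llama_token * tokenize_text(const struct llama_model * model,
                                   const char * text, int32_t * n_out) {
    const int32_t text_len = (int32_t) strlen(text);
    int32_t       cap      = 64;
    llama_token * tok      = malloc(cap * sizeof(llama_token));
    int32_t n = llama_tokenize(model, text, text_len, tok, cap,
                               /*add_special=*/true, /*parse_special=*/false);
    if (n < 0) {
        cap = -n; // exact number of tokens required
        tok = realloc(tok, cap * sizeof(llama_token));
        n   = llama_tokenize(model, text, text_len, tok, cap, true, false);
    }
    *n_out = n;
    return tok;
}
```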
924
+ // Token Id -> Piece.
925
+ // Uses the vocabulary in the provided context.
926
+ // Does not write null terminator to the buffer.
927
+ // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix')
928
+ // @param special If true, special tokens are rendered in the output.
929
+ LLAMA_API int32_t llama_token_to_piece(
930
+ const struct llama_model * model,
931
+ llama_token token,
932
+ char * buf,
933
+ int32_t length,
934
+ int32_t lstrip,
935
+ bool special);
936
+
937
+ /// @details Convert the provided tokens into text (inverse of llama_tokenize()).
938
+ /// @param text The char pointer must be large enough to hold the resulting text.
939
+ /// @return Returns the number of chars/bytes on success, no more than text_len_max.
940
+ /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned.
941
+ /// @param remove_special Allow removing BOS and EOS tokens if the model is configured to do so.
942
+ /// @param unparse_special If true, special tokens are rendered in the output.
943
+ LLAMA_API int32_t llama_detokenize(
944
+ const struct llama_model * model,
945
+ const llama_token * tokens,
946
+ int32_t n_tokens,
947
+ char * text,
948
+ int32_t text_len_max,
949
+ bool remove_special,
950
+ bool unparse_special);
951
+
952
+ /// Apply chat template. Inspired by hf apply_chat_template() in Python.
953
+ /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
954
+ /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
955
+ /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.
956
+ /// @param chat Pointer to a list of multiple llama_chat_message
957
+ /// @param n_msg Number of llama_chat_message in this chat
958
+ /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
959
+ /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)
960
+ /// @param length The size of the allocated buffer
961
+ /// @return The total number of bytes of the formatted prompt. If it is larger than the size of the buffer, you may need to re-alloc it and then re-apply the template.
962
+ LLAMA_API int32_t llama_chat_apply_template(
963
+ const struct llama_model * model,
964
+ const char * tmpl,
965
+ const struct llama_chat_message * chat,
966
+ size_t n_msg,
967
+ bool add_ass,
968
+ char * buf,
969
+ int32_t length);
970
+
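A sketch (illustrative only) of the re-alloc-and-retry pattern the return contract above implies, using the model's built-in template (tmpl == NULL); the initial buffer size is arbitrary:

```c
#include <stdlib.h>
#include "llama.h"

// Format a chat, growing the buffer once if the return value says it was
// too small. The caller owns and frees the returned buffer; *n_out is the
// length in bytes (the output is not guaranteed to be null-terminated).
static char * format_chat(const struct llama_model * model,
                          const struct llama_chat_message * msgs,
                          size_t n_msg, int32_t * n_out) {
    int32_t cap = 1024;
    char *  buf = malloc(cap);
    int32_t n   = llama_chat_apply_template(model, NULL, msgs, n_msg,
                                            /*add_ass=*/true, buf, cap);
    if (n > cap) {
        buf = realloc(buf, n);
        n   = llama_chat_apply_template(model, NULL, msgs, n_msg, true, buf, n);
    }
    *n_out = n;
    return buf;
}
```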
971
+ //
972
+ // Grammar
973
+ //
974
+
975
+ /// Initialize a llama_grammar.
976
+ ///
977
+ /// @param rules The rule elements of the grammar to initialize.
978
+ /// @param n_rules The number of rules.
979
+ /// @param start_rule_index The index of the root rule (the starting point of the grammar).
980
+ /// @return The initialized llama_grammar or nullptr if initialization failed.
981
+ LLAMA_API struct llama_grammar * llama_grammar_init(
982
+ const llama_grammar_element ** rules,
983
+ size_t n_rules,
984
+ size_t start_rule_index);
985
+
986
+ LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
987
+
988
+ LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);
989
+
990
+ //
991
+ // Sampling functions
992
+ //
993
+
994
+ // Sets the current rng seed.
995
+ LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
996
+
997
+ /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
998
+ /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
999
+ LLAMA_API void llama_sample_repetition_penalties(
1000
+ struct llama_context * ctx,
1001
+ llama_token_data_array * candidates,
1002
+ const llama_token * last_tokens,
1003
+ size_t penalty_last_n,
1004
+ float penalty_repeat,
1005
+ float penalty_freq,
1006
+ float penalty_present);
1007
+
1008
+ /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
1009
+ /// @param logits Logits extracted from the original generation context.
1010
+ /// @param logits_guidance Logits extracted from a separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
1011
+ /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
1012
+ LLAMA_API void llama_sample_apply_guidance(
1013
+ struct llama_context * ctx,
1014
+ float * logits,
1015
+ float * logits_guidance,
1016
+ float scale);
1017
+
1018
+ /// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.
1019
+ LLAMA_API void llama_sample_softmax(
1020
+ struct llama_context * ctx,
1021
+ llama_token_data_array * candidates);
1022
+
1023
+ /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
1024
+ LLAMA_API void llama_sample_top_k(
1025
+ struct llama_context * ctx,
1026
+ llama_token_data_array * candidates,
1027
+ int32_t k,
1028
+ size_t min_keep);
1029
+
1030
+ /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
1031
+ LLAMA_API void llama_sample_top_p(
1032
+ struct llama_context * ctx,
1033
+ llama_token_data_array * candidates,
1034
+ float p,
1035
+ size_t min_keep);
1036
+
1037
+ /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
1038
+ LLAMA_API void llama_sample_min_p(
1039
+ struct llama_context * ctx,
1040
+ llama_token_data_array * candidates,
1041
+ float p,
1042
+ size_t min_keep);
1043
+
1044
+ /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
1045
+ LLAMA_API void llama_sample_tail_free(
1046
+ struct llama_context * ctx,
1047
+ llama_token_data_array * candidates,
1048
+ float z,
1049
+ size_t min_keep);
1050
+
1051
+ /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
1052
+ LLAMA_API void llama_sample_typical(
1053
+ struct llama_context * ctx,
1054
+ llama_token_data_array * candidates,
1055
+ float p,
1056
+ size_t min_keep);
1057
+
1058
+ /// @details Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772.
1059
+ LLAMA_API void llama_sample_entropy(
1060
+ struct llama_context * ctx,
1061
+ llama_token_data_array * candidates_p,
1062
+ float min_temp,
1063
+ float max_temp,
1064
+ float exponent_val);
1065
+
1066
+ LLAMA_API void llama_sample_temp(
1067
+ struct llama_context * ctx,
1068
+ llama_token_data_array * candidates,
1069
+ float temp);
1070
+
1071
+ /// @details Apply constraints from grammar
1072
+ LLAMA_API void llama_sample_grammar(
1073
+ struct llama_context * ctx,
1074
+ llama_token_data_array * candidates,
1075
+ const struct llama_grammar * grammar);
1076
+
1077
+ /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
1078
+ /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
1079
+ /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
1080
+ /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
1081
+ /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
1082
+ /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
1083
+ LLAMA_API llama_token llama_sample_token_mirostat(
1084
+ struct llama_context * ctx,
1085
+ llama_token_data_array * candidates,
1086
+ float tau,
1087
+ float eta,
1088
+ int32_t m,
1089
+ float * mu);
1090
+
1091
+ /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
1092
+ /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
1093
+ /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
1094
+ /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
1095
+ /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
1096
+ LLAMA_API llama_token llama_sample_token_mirostat_v2(
1097
+ struct llama_context * ctx,
1098
+ llama_token_data_array * candidates,
1099
+ float tau,
1100
+ float eta,
1101
+ float * mu);
1102
+
1103
+ /// @details Selects the token with the highest probability.
1104
+ /// Does not compute the token probabilities. Use llama_sample_softmax() first if probabilities are needed.
1105
+ LLAMA_API llama_token llama_sample_token_greedy(
1106
+ struct llama_context * ctx,
1107
+ llama_token_data_array * candidates);
1108
+
1109
+ /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
1110
+ LLAMA_API llama_token llama_sample_token(
1111
+ struct llama_context * ctx,
1112
+ llama_token_data_array * candidates);
1113
+
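A sketch (illustrative only) tying the sampling functions together: build a candidates array from the last token's logits, run a common top-k → top-p → temperature chain, then sample; k, p, and temp are example values:

```c
#include <stdlib.h>
#include "llama.h"

static llama_token sample_next(struct llama_context * ctx,
                               const struct llama_model * model) {
    const int32_t n_vocab = llama_n_vocab(model);
    const float * logits  = llama_get_logits_ith(ctx, -1); // last token's logits

    // one candidate per vocab entry; p is filled in by the samplers
    llama_token_data * data = malloc(n_vocab * sizeof(llama_token_data));
    for (llama_token tid = 0; tid < n_vocab; tid++) {
        data[tid].id = tid; data[tid].logit = logits[tid]; data[tid].p = 0.0f;
    }
    llama_token_data_array candidates = { data, (size_t) n_vocab, false };

    llama_sample_top_k(ctx, &candidates, 40, 1);
    llama_sample_top_p(ctx, &candidates, 0.95f, 1);
    llama_sample_temp (ctx, &candidates, 0.80f);
    const llama_token id = llama_sample_token(ctx, &candidates);

    free(data);
    return id;
}
```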
1114
+ /// @details Accepts the sampled token into the grammar
1115
+ LLAMA_API void llama_grammar_accept_token(
1116
+ struct llama_context * ctx,
1117
+ struct llama_grammar * grammar,
1118
+ llama_token token);
1119
+
1120
+ //
1121
+ // Model split
1122
+ //
1123
+
1124
+ /// @details Build a split GGUF final path for this chunk.
1125
+ /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
1126
+ // Returns the split_path length.
1127
+ LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);
1128
+
1129
+ /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
1130
+ /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
1131
+ // Returns the split_prefix length.
1132
+ LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
1133
+
1134
+ // Performance information
1135
+ LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
1136
+
1137
+ LLAMA_API void llama_print_timings(struct llama_context * ctx);
1138
+ LLAMA_API void llama_reset_timings(struct llama_context * ctx);
1139
+
1140
+ // Print system information
1141
+ LLAMA_API const char * llama_print_system_info(void);
1142
+
1143
+ // Set callback for all future logging events.
1144
+ // If this is not called, or NULL is supplied, everything is output on stderr.
1145
+ LLAMA_API void llama_log_set(lm_ggml_log_callback log_callback, void * user_data);
1146
+
1147
+ LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
1148
+
1149
+ #ifdef __cplusplus
1150
+ }
1151
+ #endif
1152
+
1153
+ // Internal API to be implemented by llama.cpp and used by tests/benchmarks only
1154
+ #ifdef LLAMA_API_INTERNAL
1155
+
1156
+ #include <random>
1157
+ #include <string>
1158
+ #include <vector>
1159
+
1160
+ struct lm_ggml_tensor;
1161
+
1162
+ struct llama_partial_utf8 {
1163
+ uint32_t value; // bit value so far (unshifted)
1164
+ int n_remain; // num bytes remaining; -1 indicates invalid sequence
1165
+ };
1166
+
1167
+ struct llama_grammar {
1168
+ const std::vector<std::vector<llama_grammar_element>> rules;
1169
+ std::vector<std::vector<const llama_grammar_element *>> stacks;
1170
+
1171
+ // buffer for partially generated UTF-8 sequence from accepted tokens
1172
+ llama_partial_utf8 partial_utf8;
1173
+ };
1174
+
1175
+ struct llama_grammar_candidate {
1176
+ size_t index;
1177
+ const uint32_t * code_points;
1178
+ llama_partial_utf8 partial_utf8;
1179
+ };
1180
+
1181
+ const std::vector<std::pair<std::string, struct lm_ggml_tensor *>> & llama_internal_get_tensor_map(
1182
+ struct llama_context * ctx
1183
+ );
1184
+
1185
+ void llama_grammar_accept(
1186
+ const std::vector<std::vector<llama_grammar_element>> & rules,
1187
+ const std::vector<std::vector<const llama_grammar_element *>> & stacks,
1188
+ const uint32_t chr,
1189
+ std::vector<std::vector<const llama_grammar_element *>> & new_stacks);
1190
+
1191
+ std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
1192
+ const std::string & src,
1193
+ llama_partial_utf8 partial_start);
1194
+
1195
+ // Randomly selects a token from the candidates based on their probabilities using the given std::mt19937.
1196
+ // This is a temporary workaround in order to fix race conditions when sampling with multiple sequences.
1197
+ llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng);
1198
+
1199
+ #endif // LLAMA_API_INTERNAL
1200
+
1201
+ #endif // LLAMA_H