@fugood/llama.node 0.3.0 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +1 -10
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/package.json +6 -4
- package/src/LlamaCompletionWorker.cpp +6 -6
- package/src/LlamaContext.cpp +7 -9
- package/src/common.hpp +2 -1
- package/src/llama.cpp/.github/workflows/build.yml +98 -24
- package/src/llama.cpp/.github/workflows/close-issue.yml +5 -0
- package/src/llama.cpp/.github/workflows/docker.yml +43 -34
- package/src/llama.cpp/.github/workflows/nix-ci-aarch64.yml +7 -0
- package/src/llama.cpp/.github/workflows/nix-ci.yml +7 -0
- package/src/llama.cpp/.github/workflows/python-check-requirements.yml +2 -4
- package/src/llama.cpp/.github/workflows/python-type-check.yml +3 -1
- package/src/llama.cpp/.github/workflows/server.yml +7 -0
- package/src/llama.cpp/CMakeLists.txt +20 -8
- package/src/llama.cpp/common/CMakeLists.txt +12 -10
- package/src/llama.cpp/common/arg.cpp +2006 -0
- package/src/llama.cpp/common/arg.h +77 -0
- package/src/llama.cpp/common/common.cpp +496 -1632
- package/src/llama.cpp/common/common.h +161 -63
- package/src/llama.cpp/common/console.cpp +3 -0
- package/src/llama.cpp/common/log.cpp +401 -0
- package/src/llama.cpp/common/log.h +66 -698
- package/src/llama.cpp/common/ngram-cache.cpp +3 -0
- package/src/llama.cpp/common/sampling.cpp +348 -350
- package/src/llama.cpp/common/sampling.h +62 -139
- package/src/llama.cpp/common/stb_image.h +5990 -6398
- package/src/llama.cpp/common/train.cpp +2 -0
- package/src/llama.cpp/docs/build.md +36 -1
- package/src/llama.cpp/examples/CMakeLists.txt +0 -1
- package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +1 -2
- package/src/llama.cpp/examples/batched/batched.cpp +39 -55
- package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +34 -44
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +55 -52
- package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +15 -15
- package/src/llama.cpp/examples/cvector-generator/pca.hpp +3 -13
- package/src/llama.cpp/examples/embedding/embedding.cpp +143 -87
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +33 -33
- package/src/llama.cpp/examples/export-lora/export-lora.cpp +36 -35
- package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +14 -39
- package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +83 -0
- package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +58 -39
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +34 -27
- package/src/llama.cpp/examples/imatrix/imatrix.cpp +59 -62
- package/src/llama.cpp/examples/infill/infill.cpp +117 -132
- package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +265 -58
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +29 -22
- package/src/llama.cpp/examples/llava/CMakeLists.txt +7 -0
- package/src/llama.cpp/examples/llava/clip.cpp +685 -150
- package/src/llama.cpp/examples/llava/clip.h +11 -2
- package/src/llama.cpp/examples/llava/llava-cli.cpp +47 -58
- package/src/llama.cpp/examples/llava/llava.cpp +110 -24
- package/src/llama.cpp/examples/llava/llava.h +2 -3
- package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +323 -0
- package/src/llama.cpp/examples/llava/requirements.txt +1 -0
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +42 -43
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +10 -8
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +23 -22
- package/src/llama.cpp/examples/lookup/lookup.cpp +40 -43
- package/src/llama.cpp/examples/main/main.cpp +210 -262
- package/src/llama.cpp/examples/parallel/parallel.cpp +49 -49
- package/src/llama.cpp/examples/passkey/passkey.cpp +42 -50
- package/src/llama.cpp/examples/perplexity/perplexity.cpp +187 -200
- package/src/llama.cpp/examples/quantize/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/quantize/quantize.cpp +27 -9
- package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +2 -3
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +49 -44
- package/src/llama.cpp/examples/rpc/rpc-server.cpp +24 -1
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +32 -35
- package/src/llama.cpp/examples/server/CMakeLists.txt +3 -5
- package/src/llama.cpp/examples/server/server.cpp +1027 -1073
- package/src/llama.cpp/examples/server/tests/requirements.txt +2 -1
- package/src/llama.cpp/examples/server/utils.hpp +107 -105
- package/src/llama.cpp/examples/simple/simple.cpp +35 -41
- package/src/llama.cpp/examples/speculative/speculative.cpp +129 -103
- package/src/llama.cpp/examples/sycl/run-llama2.sh +10 -19
- package/src/llama.cpp/examples/sycl/win-run-llama2.bat +1 -1
- package/src/llama.cpp/examples/tokenize/tokenize.cpp +25 -27
- package/src/llama.cpp/ggml/CMakeLists.txt +14 -3
- package/src/llama.cpp/ggml/include/ggml-alloc.h +3 -3
- package/src/llama.cpp/ggml/include/ggml-backend.h +145 -60
- package/src/llama.cpp/ggml/include/ggml-blas.h +3 -3
- package/src/llama.cpp/ggml/include/ggml-cann.h +15 -19
- package/src/llama.cpp/ggml/include/ggml-cuda.h +16 -16
- package/src/llama.cpp/ggml/include/ggml-metal.h +5 -8
- package/src/llama.cpp/ggml/include/ggml-rpc.h +5 -5
- package/src/llama.cpp/ggml/include/ggml-sycl.h +8 -8
- package/src/llama.cpp/ggml/include/ggml-vulkan.h +7 -7
- package/src/llama.cpp/ggml/include/ggml.h +293 -186
- package/src/llama.cpp/ggml/src/CMakeLists.txt +86 -44
- package/src/llama.cpp/ggml/src/ggml-aarch64.c +2135 -1119
- package/src/llama.cpp/ggml/src/ggml-alloc.c +6 -0
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +152 -70
- package/src/llama.cpp/ggml/src/{ggml-backend.c → ggml-backend.cpp} +606 -286
- package/src/llama.cpp/ggml/src/ggml-blas.cpp +9 -10
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +4 -27
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +32 -4
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +179 -41
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +1 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +2 -1
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/ascendc_kernels.h +2 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +278 -0
- package/src/llama.cpp/ggml/src/ggml-cann.cpp +215 -216
- package/src/llama.cpp/ggml/src/ggml-common.h +20 -0
- package/src/llama.cpp/ggml/src/ggml-cpu-impl.h +614 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +14 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +178 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +134 -0
- package/src/llama.cpp/ggml/src/ggml-impl.h +49 -603
- package/src/llama.cpp/ggml/src/ggml-kompute.cpp +4 -24
- package/src/llama.cpp/ggml/src/ggml-quants.c +972 -92
- package/src/llama.cpp/ggml/src/ggml-quants.h +15 -0
- package/src/llama.cpp/ggml/src/ggml-rpc.cpp +116 -66
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +3 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +11 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +52 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +99 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +21 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +57 -57
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +106 -106
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +4 -4
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +16 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +101 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +125 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +23 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +6 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +2 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +71 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +21 -0
- package/src/llama.cpp/ggml/src/ggml-sycl.cpp +97 -169
- package/src/llama.cpp/ggml/src/ggml-vulkan.cpp +1508 -1124
- package/src/llama.cpp/ggml/src/ggml.c +3001 -1647
- package/src/llama.cpp/ggml/src/llamafile/sgemm.cpp +192 -0
- package/src/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt +2 -0
- package/src/llama.cpp/ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp +88 -40
- package/src/llama.cpp/include/llama.h +241 -264
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +112 -0
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +46 -0
- package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +1 -1
- package/src/llama.cpp/src/llama-grammar.cpp +721 -122
- package/src/llama.cpp/src/llama-grammar.h +120 -15
- package/src/llama.cpp/src/llama-impl.h +156 -1
- package/src/llama.cpp/src/llama-sampling.cpp +1375 -303
- package/src/llama.cpp/src/llama-sampling.h +20 -47
- package/src/llama.cpp/src/llama-vocab.cpp +343 -120
- package/src/llama.cpp/src/llama-vocab.h +33 -17
- package/src/llama.cpp/src/llama.cpp +4247 -1525
- package/src/llama.cpp/src/unicode-data.cpp +6 -4
- package/src/llama.cpp/src/unicode-data.h +4 -4
- package/src/llama.cpp/src/unicode.cpp +15 -7
- package/src/llama.cpp/tests/CMakeLists.txt +3 -0
- package/src/llama.cpp/tests/test-arg-parser.cpp +131 -0
- package/src/llama.cpp/tests/test-backend-ops.cpp +1592 -289
- package/src/llama.cpp/tests/test-barrier.cpp +93 -0
- package/src/llama.cpp/tests/test-grad0.cpp +187 -70
- package/src/llama.cpp/tests/test-grammar-integration.cpp +23 -38
- package/src/llama.cpp/tests/test-grammar-parser.cpp +6 -4
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +6 -4
- package/src/llama.cpp/tests/test-llama-grammar.cpp +9 -8
- package/src/llama.cpp/tests/test-log.cpp +39 -0
- package/src/llama.cpp/tests/test-quantize-fns.cpp +6 -0
- package/src/llama.cpp/tests/test-rope.cpp +1 -1
- package/src/llama.cpp/tests/test-sampling.cpp +157 -98
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +55 -35
- package/patches/llama.patch +0 -22
- package/src/llama.cpp/.github/workflows/bench.yml +0 -310
- package/src/llama.cpp/common/grammar-parser.cpp +0 -536
- package/src/llama.cpp/common/grammar-parser.h +0 -29
- package/src/llama.cpp/examples/benchmark/CMakeLists.txt +0 -6
- package/src/llama.cpp/examples/benchmark/benchmark-matmult.cpp +0 -275
--- a/package/src/llama.cpp/include/llama.h
+++ b/package/src/llama.cpp/include/llama.h
@@ -33,12 +33,15 @@
 
 #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
 
+// TODO: use everywhere in the implementation
+#define LLAMA_TOKEN_NULL -1
+
 #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
 #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
 #define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'
 
 #define LLAMA_SESSION_MAGIC   LLAMA_FILE_MAGIC_GGSN
-#define LLAMA_SESSION_VERSION 8
+#define LLAMA_SESSION_VERSION 9
 
 #define LLAMA_STATE_SEQ_MAGIC   LLAMA_FILE_MAGIC_GGSQ
 #define LLAMA_STATE_SEQ_VERSION 2
@@ -53,8 +56,10 @@ extern "C" {
     // TODO: show sample usage
     //
 
+    // struct llama_vocab; // TODO: add in the future
     struct llama_model;
     struct llama_context;
+    struct llama_sampler;
 
     typedef int32_t llama_pos;
     typedef int32_t llama_token;
@@ -66,6 +71,7 @@ extern "C" {
         LLAMA_VOCAB_TYPE_BPE  = 2, // GPT-2 tokenizer based on byte-level BPE
         LLAMA_VOCAB_TYPE_WPM  = 3, // BERT tokenizer based on WordPiece
         LLAMA_VOCAB_TYPE_UGM  = 4, // T5 tokenizer based on Unigram
+        LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization
     };
@@ -93,15 +99,16 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_TEKKEN       = 20,
         LLAMA_VOCAB_PRE_TYPE_SMOLLM       = 21,
         LLAMA_VOCAB_PRE_TYPE_CODESHELL    = 22,
+        LLAMA_VOCAB_PRE_TYPE_BLOOM        = 23,
+        LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24,
+        LLAMA_VOCAB_PRE_TYPE_EXAONE       = 25,
+        LLAMA_VOCAB_PRE_TYPE_CHAMELEON    = 26,
     };
 
-    // note: these values should be synchronized with ggml_rope
-    // TODO: maybe move this enum to ggml.h (ggml_rope_type)
     enum llama_rope_type {
         LLAMA_ROPE_TYPE_NONE = -1,
-        LLAMA_ROPE_TYPE_NORM =  0,
-        LLAMA_ROPE_TYPE_NEOX =  2,
-        LLAMA_ROPE_TYPE_GLM  =  4,
+        LLAMA_ROPE_TYPE_NORM = 0,
+        LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX,
     };
 
     enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
@@ -166,6 +173,8 @@ extern "C" {
         LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_TQ1_0    = 36, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_TQ2_0    = 37, // except 1d tensors
 
         LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
     };
@@ -184,6 +193,7 @@ extern "C" {
         LLAMA_POOLING_TYPE_MEAN = 1,
         LLAMA_POOLING_TYPE_CLS  = 2,
         LLAMA_POOLING_TYPE_LAST = 3,
+        LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph
     };
 
     enum llama_attention_type {
@@ -193,11 +203,12 @@ extern "C" {
     };
 
     enum llama_split_mode {
-        LLAMA_SPLIT_MODE_NONE    = 0, // single GPU
-        LLAMA_SPLIT_MODE_LAYER   = 1, // split layers and KV across GPUs
-        LLAMA_SPLIT_MODE_ROW     = 2, // split rows across GPUs
+        LLAMA_SPLIT_MODE_NONE  = 0, // single GPU
+        LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
+        LLAMA_SPLIT_MODE_ROW   = 2, // split rows across GPUs
     };
 
+    // TODO: simplify (https://github.com/ggerganov/llama.cpp/pull/9294#pullrequestreview-2286561979)
     typedef struct llama_token_data {
         llama_token id;    // token id
         float       logit; // log-odds of the token
@@ -205,8 +216,10 @@ extern "C" {
     } llama_token_data;
 
     typedef struct llama_token_data_array {
+        // TODO: consider SoA
        llama_token_data * data;
        size_t size;
+       int64_t selected; // this is the index in the data array (i.e. not the token id)
        bool sorted;
     } llama_token_data_array;
 
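The new `selected` field records which candidate a sampler chose (an index into `data`, not a token id), so the array can round-trip through `llama_sampler_apply()`. A minimal sketch of filling one of these arrays by hand; `n_vocab` and `logits` are assumed to come from `llama_n_vocab(model)` and `llama_get_logits_ith(ctx, -1)` in the surrounding program:

```c
// hypothetical setup: n_vocab candidate tokens with their raw logits
llama_token_data * cur = malloc(n_vocab * sizeof(llama_token_data));
for (llama_token id = 0; id < n_vocab; id++) {
    cur[id] = (llama_token_data){ id, logits[id], 0.0f };
}
// selected = -1: nothing chosen yet; a sampler's apply() fills it in
llama_token_data_array cur_p = { cur, (size_t) n_vocab, -1, false };
```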
@@ -267,9 +280,9 @@ extern "C" {
         enum llama_split_mode split_mode; // how to split the model across multiple GPUs
 
         // main_gpu interpretation depends on split_mode:
-        // LLAMA_SPLIT_NONE: the GPU that is used for the entire model
-        // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results
-        // LLAMA_SPLIT_LAYER: ignored
+        // LLAMA_SPLIT_MODE_NONE: the GPU that is used for the entire model
+        // LLAMA_SPLIT_MODE_ROW: the GPU that is used for small tensors and intermediate results
+        // LLAMA_SPLIT_MODE_LAYER: ignored
         int32_t main_gpu;
 
         // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
@@ -299,13 +312,12 @@ extern "C" {
     // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
     // https://github.com/ggerganov/llama.cpp/pull/7544
     struct llama_context_params {
-        uint32_t seed;            // RNG seed, -1 for random
         uint32_t n_ctx;           // text context, 0 = from model
         uint32_t n_batch;         // logical maximum batch size that can be submitted to llama_decode
         uint32_t n_ubatch;        // physical maximum batch size
         uint32_t n_seq_max;       // max number of sequences (i.e. distinct states for recurrent models)
-        uint32_t n_threads;       // number of threads to use for generation
-        uint32_t n_threads_batch; // number of threads to use for batch processing
+        int32_t  n_threads;       // number of threads to use for generation
+        int32_t  n_threads_batch; // number of threads to use for batch processing
 
         enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
         enum llama_pooling_type      pooling_type;      // whether to pool (sum) embedding results by sequence id
@@ -327,11 +339,13 @@ extern "C" {
         enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
         enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]
 
-        // Keep the booleans together to avoid misalignment during copy-by-value.
+        // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
+        // TODO: move at the end of the struct
         bool logits_all;  // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
         bool embeddings;  // if true, extract embeddings (together with logits)
         bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
         bool flash_attn;  // whether to use flash attention [EXPERIMENTAL]
+        bool no_perf;     // whether to measure performance timings
 
         // Abort callback
         // if it returns true, execution of llama_decode() will be aborted
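With the RNG seed gone from `llama_context_params` (seeding now lives in the sampler, see the sampling hunks below) and the thread counts switched to `int32_t`, context setup in 0.3.2 looks roughly like this sketch (model loading omitted, field values are illustrative):

```c
struct llama_context_params cparams = llama_context_default_params();
cparams.n_ctx           = 4096;
cparams.n_threads       = 8;     // now int32_t rather than uint32_t
cparams.n_threads_batch = 8;
cparams.no_perf         = false; // keep the new performance timings enabled
// note: there is no cparams.seed anymore - randomness is configured on the sampler instead
struct llama_context * ctx = llama_new_context_with_model(model, cparams);
```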
@@ -345,7 +359,7 @@ extern "C" {
         int32_t nthread;                     // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
         enum llama_ftype ftype;              // quantize to this llama_ftype
         enum ggml_type output_tensor_type;   // output tensor type
-        enum ggml_type token_embedding_type; // itoken embeddings tensor type
+        enum ggml_type token_embedding_type; // token embeddings tensor type
         bool allow_requantize;               // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor;         // quantize output.weight
         bool only_copy;                      // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
@@ -355,56 +369,14 @@ extern "C" {
         void * kv_overrides;                 // pointer to vector containing overrides
     } llama_model_quantize_params;
 
-    // grammar types
-    struct llama_grammar;
-
-    // grammar element type
-    enum llama_gretype {
-        // end of rule definition
-        LLAMA_GRETYPE_END            = 0,
-
-        // start of alternate definition for rule
-        LLAMA_GRETYPE_ALT            = 1,
-
-        // non-terminal element: reference to rule
-        LLAMA_GRETYPE_RULE_REF       = 2,
-
-        // terminal element: character (code point)
-        LLAMA_GRETYPE_CHAR           = 3,
-
-        // inverse char(s) ([^a], [^a-b] [^abc])
-        LLAMA_GRETYPE_CHAR_NOT       = 4,
-
-        // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
-        // be an inclusive range ([a-z])
-        LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
-
-        // modifies a preceding LLAMA_GRETYPE_CHAR or
-        // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
-        LLAMA_GRETYPE_CHAR_ALT       = 6,
-
-        // any character (.)
-        LLAMA_GRETYPE_CHAR_ANY       = 7,
-    };
-
-    typedef struct llama_grammar_element {
-        enum llama_gretype type;
-        uint32_t           value; // Unicode code point or rule ID
-    } llama_grammar_element;
-
-    // performance timing information
-    struct llama_timings {
-        double t_start_ms;
-        double t_end_ms;
-        double t_load_ms;
-        double t_sample_ms;
-        double t_p_eval_ms;
-        double t_eval_ms;
+    typedef struct llama_logit_bias {
+        llama_token token;
+        float bias;
+    } llama_logit_bias;
 
-        int32_t n_sample;
-        int32_t n_p_eval;
-        int32_t n_eval;
-    };
+    typedef struct llama_sampler_chain_params {
+        bool no_perf; // whether to measure performance timings
+    } llama_sampler_chain_params;
 
     // used in chat template
     typedef struct llama_chat_message {
@@ -416,8 +388,10 @@ extern "C" {
     struct llama_lora_adapter;
 
     // Helpers for getting default parameters
-    LLAMA_API struct llama_model_params llama_model_default_params(void);
-    LLAMA_API struct llama_context_params llama_context_default_params(void);
+    // TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172)
+    LLAMA_API struct llama_model_params          llama_model_default_params(void);
+    LLAMA_API struct llama_context_params        llama_context_default_params(void);
+    LLAMA_API struct llama_sampler_chain_params  llama_sampler_chain_default_params(void);
     LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
 
     // Initialize the llama + ggml backend
@@ -428,15 +402,23 @@ extern "C" {
     //optional:
     LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
 
+    // Optional: an auto threadpool gets created in ggml if not passed explicitly
+    LLAMA_API void llama_attach_threadpool(
+            struct llama_context * ctx,
+               ggml_threadpool_t   threadpool,
+               ggml_threadpool_t   threadpool_batch);
+    LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);
+
     // Call once at the end of the program - currently only used for MPI
     LLAMA_API void llama_backend_free(void);
 
     LLAMA_API struct llama_model * llama_load_model_from_file(
                              const char * path_model,
-            struct llama_model_params     params);
+              struct llama_model_params   params);
 
     LLAMA_API void llama_free_model(struct llama_model * model);
 
+    // TODO: rename to llama_init_from_model
     LLAMA_API struct llama_context * llama_new_context_with_model(
                  struct llama_model * model,
         struct llama_context_params   params);
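The new hooks pair with the ggml threadpool type brought in by the same llama.cpp sync. A sketch, assuming the `ggml_threadpool_params_default`, `ggml_threadpool_new`, and `ggml_threadpool_free` helpers from the bundled ggml.h; if no pool is attached, ggml creates one automatically as the comment above says:

```c
// build one 8-thread pool and reuse it for generation and batch processing
struct ggml_threadpool_params tpp = ggml_threadpool_params_default(8);
ggml_threadpool_t threadpool = ggml_threadpool_new(&tpp);

llama_attach_threadpool(ctx, threadpool, threadpool);

// ... llama_decode() calls ...

llama_detach_threadpool(ctx);
ggml_threadpool_free(threadpool);
```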
@@ -452,22 +434,22 @@ extern "C" {
     LLAMA_API bool llama_supports_mlock      (void);
     LLAMA_API bool llama_supports_gpu_offload(void);
 
-    LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
-
     LLAMA_API uint32_t llama_n_ctx      (const struct llama_context * ctx);
     LLAMA_API uint32_t llama_n_batch    (const struct llama_context * ctx);
     LLAMA_API uint32_t llama_n_ubatch   (const struct llama_context * ctx);
     LLAMA_API uint32_t llama_n_seq_max  (const struct llama_context * ctx);
 
-    LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
-
-    LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
-    LLAMA_API enum llama_rope_type  llama_rope_type  (const struct llama_model * model);
-
     LLAMA_API int32_t llama_n_vocab    (const struct llama_model * model);
     LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
     LLAMA_API int32_t llama_n_embd     (const struct llama_model * model);
     LLAMA_API int32_t llama_n_layer    (const struct llama_model * model);
+    LLAMA_API int32_t llama_n_head     (const struct llama_model * model);
+
+    LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
+
+    LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
+    LLAMA_API enum llama_vocab_type   llama_vocab_type  (const struct llama_model * model);
+    LLAMA_API enum llama_rope_type    llama_rope_type   (const struct llama_model * model);
 
     // Get the model's RoPE frequency scaling factor
     LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
@@ -504,10 +486,16 @@ extern "C" {
     // Returns true if the model contains an encoder that requires llama_encode() call
     LLAMA_API bool llama_model_has_encoder(const struct llama_model * model);
 
+    // Returns true if the model contains a decoder that requires llama_decode() call
+    LLAMA_API bool llama_model_has_decoder(const struct llama_model * model);
+
     // For encoder-decoder models, this function returns id of the token that must be provided
     // to the decoder to start generating output sequence. For other models, it returns -1.
     LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
 
+    // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
+    LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
+
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize(
             const char * fname_inp,
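These predicates let callers branch on model architecture instead of hard-coding it. A sketch of the encoder-decoder path (batch construction omitted; `llama_encode` is the existing call from earlier in this header):

```c
if (llama_model_has_encoder(model)) {
    llama_encode(ctx, batch); // run the encoder over the prompt first
    llama_token dec_start = llama_model_decoder_start_token(model);
    // feed dec_start as the first decoder token, then llama_decode() as usual
}
if (llama_model_is_recurrent(model)) {
    // Mamba/RWKV-style state: sequences carry recurrent state rather than a KV window
}
```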
@@ -690,7 +678,7 @@ extern "C" {
     //
 
     // Returns the *actual* size in bytes of the state
-    // (rng, logits, embedding and kv_cache)
+    // (logits, embedding and kv_cache)
     // Only use when saving the state, not when restoring it, otherwise the size may be too small.
     LLAMA_API size_t llama_state_get_size(struct llama_context * ctx);
     LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx),
@@ -831,13 +819,13 @@ extern "C" {
     // Set the number of threads used for decoding
     // n_threads is the number of threads used for generation (single token)
     // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
-    LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);
+    LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch);
 
     // Get the number of threads used for generation of a single token.
-    LLAMA_API uint32_t llama_n_threads(struct llama_context * ctx);
+    LLAMA_API int32_t llama_n_threads(struct llama_context * ctx);
 
     // Get the number of threads used for prompt and batch processing (multiple token).
-    LLAMA_API uint32_t llama_n_threads_batch(struct llama_context * ctx);
+    LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx);
 
     // Set whether the model is in embeddings mode or not
     // If true, embeddings will be returned but logits will not
@@ -885,7 +873,8 @@ extern "C" {
 
     // Get the embeddings for a sequence id
     // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
-    // shape: [n_embd] (1-dimensional)
+    // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence
+    // otherwise: float[n_embd] (1-dimensional)
     LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
 
     //
@@ -912,11 +901,8 @@ extern "C" {
     LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
     LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding
 
-    // Returns -1 if unknown, 1 for true or 0 for false.
-    LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);
-
-    // Returns -1 if unknown, 1 for true or 0 for false.
-    LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);
+    LLAMA_API bool llama_add_bos_token(const struct llama_model * model);
+    LLAMA_API bool llama_add_eos_token(const struct llama_model * model);
 
     // Codellama infill tokens
     LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
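This is a small breaking change: the old functions returned `int32_t` with -1 meaning "unknown", so callers that compared against -1 need updating. A sketch of the 0.3.2-style call:

```c
// previously: int32_t r = llama_add_bos_token(model); with r == -1 for "unknown"
const bool add_bos = llama_add_bos_token(model); // unknown now resolves to plain true/false
if (add_bos) {
    // prepend the BOS token when building the prompt manually
}
```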
@@ -927,6 +913,8 @@ extern "C" {
     //
     // Tokenization
     //
+    // The API is thread-safe.
+    //
 
     /// @details Convert the provided text into tokens.
     /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
@@ -996,121 +984,114 @@ extern "C" {
             int32_t   length);
 
     //
-    // Grammar
+    // Sampling API
+    //
+    // Sample usage:
+    //
+    //    // prepare the sampling chain at the start
+    //    auto sparams = llama_sampler_chain_default_params();
+    //
+    //    llama_sampler * smpl = llama_sampler_chain_init(sparams);
+    //
+    //    llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50));
+    //    llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1));
+    //    llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8));
+    //
+    //    // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat"
+    //    // this sampler will be responsible to select the actual token
+    //    llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed));
+    //
+    //    ...
+    //
+    //    // decoding loop:
+    //    while (...) {
+    //        ...
+    //
+    //        llama_decode(ctx, batch);
+    //
+    //        // sample from the logits of the last token in the batch
+    //        const llama_token id = llama_sampler_sample(smpl, ctx, -1);
+    //
+    //        // accepting the token updates the internal state of certain samplers (e.g. grammar, repetition, etc.)
+    //        llama_sampler_accept(smpl, id);
+    //        ...
+    //    }
+    //
+    //    llama_sampler_free(smpl);
+    //
+    // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU).
+    // TODO: in the future, the entire sampling API that uses llama_model should start using llama_vocab
     //
 
-    /// Initialize a llama_grammar.
-    ///
-    /// @param rules The rule elements of the grammar to initialize.
-    /// @param n_rules The number of rules.
-    /// @param start_rule_index The index of the root rule (the starting point of the grammar).
-    /// @return The initialized llama_grammar or nullptr if initialization failed.
-    LLAMA_API struct llama_grammar * llama_grammar_init(
-            const llama_grammar_element ** rules,
-                                 size_t    n_rules,
-                                 size_t    start_rule_index);
-
-    LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
-
-    LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);
-
-    /// @details Apply constraints from grammar
-    LLAMA_API void llama_grammar_sample(
-            const struct llama_grammar * grammar,
-            const struct llama_context * ctx,
-                llama_token_data_array * candidates);
-    LLAMA_API DEPRECATED(void llama_sample_grammar(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-      const struct llama_grammar * grammar),
-        "use llama_grammar_sample instead");
+    typedef void * llama_sampler_context_t;
 
-    /// @details Accepts the sampled token into the grammar
-    LLAMA_API void llama_grammar_accept_token(
-            struct llama_grammar * grammar,
-            struct llama_context * ctx,
-                     llama_token   token);
+    // user code can implement the interface below in order to create custom llama_sampler
+    struct llama_sampler_i {
+        const char *           (*name)  (const struct llama_sampler * smpl);                                 // can be NULL
+        void                   (*accept)(      struct llama_sampler * smpl, llama_token token);              // can be NULL
+        void                   (*apply) (      struct llama_sampler * smpl, llama_token_data_array * cur_p); // required
+        void                   (*reset) (      struct llama_sampler * smpl);                                 // can be NULL
+        struct llama_sampler * (*clone) (const struct llama_sampler * smpl);                                 // can be NULL if ctx is NULL
+        void                   (*free)  (      struct llama_sampler * smpl);                                 // can be NULL if ctx is NULL
 
-    //
-    // Sampling functions
-    //
+        // TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph
+        //void (*apply_ggml) (struct llama_sampler * smpl, ...);
+    };
 
-    // Sets the current rng seed.
-    LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
+    struct llama_sampler {
+        struct llama_sampler_i * iface;
+        llama_sampler_context_t  ctx;
+    };
 
-    /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
-    /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
-    LLAMA_API void llama_sample_repetition_penalties(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-               const llama_token * last_tokens,
-                          size_t   penalty_last_n,
-                           float   penalty_repeat,
-                           float   penalty_freq,
-                           float   penalty_present);
-
-    /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
-    /// @param logits Logits extracted from the original generation context.
-    /// @param logits_guidance Logits extracted from a separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
-    /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
-    LLAMA_API void llama_sample_apply_guidance(
-            struct llama_context * ctx,
-                           float * logits,
-                           float * logits_guidance,
-                           float   scale);
+    // mirror of llama_sampler_i:
+    LLAMA_API const char *           llama_sampler_name  (const struct llama_sampler * smpl);
+    LLAMA_API void                   llama_sampler_accept(      struct llama_sampler * smpl, llama_token token);
+    LLAMA_API void                   llama_sampler_apply (      struct llama_sampler * smpl, llama_token_data_array * cur_p);
+    LLAMA_API void                   llama_sampler_reset (      struct llama_sampler * smpl);
+    LLAMA_API struct llama_sampler * llama_sampler_clone (const struct llama_sampler * smpl);
+    // important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add)
+    LLAMA_API void                   llama_sampler_free  (      struct llama_sampler * smpl);
+
+    // llama_sampler_chain
+    // a type of llama_sampler that can chain multiple samplers one after another
+
+    LLAMA_API struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params);
+
+    // important: takes ownership of the sampler object and will free it when llama_sampler_free is called
+    LLAMA_API void                   llama_sampler_chain_add(      struct llama_sampler * chain, struct llama_sampler * smpl);
+    LLAMA_API struct llama_sampler * llama_sampler_chain_get(const struct llama_sampler * chain, int32_t i);
+    LLAMA_API int                    llama_sampler_chain_n  (const struct llama_sampler * chain);
+
+    // after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed
+    LLAMA_API struct llama_sampler * llama_sampler_chain_remove(struct llama_sampler * chain, int32_t i);
+
+    // available samplers:
+
+    LLAMA_API struct llama_sampler * llama_sampler_init_greedy   (void);
+    LLAMA_API struct llama_sampler * llama_sampler_init_dist     (uint32_t seed);
 
     /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
-    LLAMA_API void llama_sample_softmax(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates);
+    /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first.
+    LLAMA_API struct llama_sampler * llama_sampler_init_softmax  (void);
 
     /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
-    LLAMA_API void llama_sample_top_k(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-                         int32_t   k,
-                          size_t   min_keep);
+    LLAMA_API struct llama_sampler * llama_sampler_init_top_k    (int32_t k);
 
     /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
-    LLAMA_API void llama_sample_top_p(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-                           float   p,
-                          size_t   min_keep);
+    LLAMA_API struct llama_sampler * llama_sampler_init_top_p    (float p, size_t min_keep);
 
     /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
-    LLAMA_API void llama_sample_min_p(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-                           float   p,
-                          size_t   min_keep);
+    LLAMA_API struct llama_sampler * llama_sampler_init_min_p    (float p, size_t min_keep);
 
     /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
-    LLAMA_API void llama_sample_tail_free(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-                           float   z,
-                          size_t   min_keep);
+    LLAMA_API struct llama_sampler * llama_sampler_init_tail_free(float z, size_t min_keep);
 
     /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
-    LLAMA_API void llama_sample_typical(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-                           float   p,
-                          size_t   min_keep);
+    LLAMA_API struct llama_sampler * llama_sampler_init_typical  (float p, size_t min_keep);
+    LLAMA_API struct llama_sampler * llama_sampler_init_temp     (float t);
 
-    /// @details Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772.
-    LLAMA_API void llama_sample_entropy(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates_p,
-                           float   min_temp,
-                           float   max_temp,
-                           float   exponent_val);
-
-    LLAMA_API void llama_sample_temp(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-                           float   temp);
+    /// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772.
+    LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext (float t, float delta, float exponent);
 
     /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
     /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
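Since `llama_sampler_i` and `llama_sampler` are public structs, user code can supply its own sampler. A sketch of a minimal custom "pick the max logit" sampler; only `name` and `apply` are wired up, and the remaining members stay NULL, which the interface comments above allow when `ctx` is NULL:

```c
static const char * my_sampler_name(const struct llama_sampler * smpl) {
    (void) smpl;
    return "my-argmax";
}

static void my_sampler_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
    (void) smpl;
    size_t best = 0;
    for (size_t i = 1; i < cur_p->size; i++) {
        if (cur_p->data[i].logit > cur_p->data[best].logit) {
            best = i;
        }
    }
    cur_p->selected = (int64_t) best; // index into data, not a token id
}

static struct llama_sampler_i my_sampler_iface = {
    /* .name   = */ my_sampler_name,
    /* .accept = */ NULL,
    /* .apply  = */ my_sampler_apply,
    /* .reset  = */ NULL,
    /* .clone  = */ NULL,
    /* .free   = */ NULL,
};

static struct llama_sampler my_sampler = { &my_sampler_iface, /* ctx = */ NULL };
```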
@@ -1118,36 +1099,62 @@
     /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
     /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
     /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
-    LLAMA_API llama_token llama_sample_token_mirostat(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-                           float   tau,
-                           float   eta,
-                         int32_t   m,
-                           float * mu);
+    LLAMA_API struct llama_sampler * llama_sampler_init_mirostat(
+                             int32_t   n_vocab,
+                            uint32_t   seed,
+                               float   tau,
+                               float   eta,
+                             int32_t   m);
 
     /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
     /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
     /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
     /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
     /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
-    LLAMA_API llama_token llama_sample_token_mirostat_v2(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates,
-                           float   tau,
-                           float   eta,
-                           float * mu);
-
-    /// @details Selects the token with the highest probability.
-    ///          Does not compute the token probabilities. Use llama_sample_softmax() instead.
-    LLAMA_API llama_token llama_sample_token_greedy(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates);
-
-    /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
-    LLAMA_API llama_token llama_sample_token(
-            struct llama_context * ctx,
-          llama_token_data_array * candidates);
+    LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2(
+                            uint32_t   seed,
+                               float   tau,
+                               float   eta);
+
+    LLAMA_API struct llama_sampler * llama_sampler_init_grammar(
+            const struct llama_model * model,
+                          const char * grammar_str,
+                          const char * grammar_root);
+
+    LLAMA_API struct llama_sampler * llama_sampler_init_penalties(
+                             int32_t   n_vocab,         // llama_n_vocab()
+                         llama_token   special_eos_id,  // llama_token_eos()
+                         llama_token   linefeed_id,     // llama_token_nl()
+                             int32_t   penalty_last_n,  // last n tokens to penalize (0 = disable penalty, -1 = context size)
+                               float   penalty_repeat,  // 1.0 = disabled
+                               float   penalty_freq,    // 0.0 = disabled
+                               float   penalty_present, // 0.0 = disabled
+                                bool   penalize_nl,     // consider newlines as a repeatable token
+                                bool   ignore_eos);     // ignore the end-of-sequence token
+
+    LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias(
+                             int32_t   n_vocab,
+                             int32_t   n_logit_bias,
+              const llama_logit_bias * logit_bias);
+
+
+    // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise
+    LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl);
+
+    /// @details Sample and accept a token from the idx-th output of the last evaluation
+    //
+    // Shorthand for:
+    //    const auto * logits = llama_get_logits_ith(ctx, idx);
+    //    llama_token_data_array cur_p = { ... init from logits ... };
+    //    llama_sampler_apply(smpl, &cur_p);
+    //    auto token = cur_p.data[cur_p.selected].id;
+    //    llama_sampler_accept(smpl, token);
+    //    return token;
+    // Returns the sampled token
+    LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx);
+
+    // TODO: extend in the future
+    //LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...);
 
     //
     // Model split
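Putting the pieces together: token selection in 0.3.2 goes through a sampler chain rather than the removed `llama_sample_*` calls. A sketch mirroring the sample-usage comment above (`ctx` and `batch` assumed set up elsewhere):

```c
struct llama_sampler_chain_params sparams = llama_sampler_chain_default_params();
struct llama_sampler * smpl = llama_sampler_chain_init(sparams);

llama_sampler_chain_add(smpl, llama_sampler_init_top_k(40));
llama_sampler_chain_add(smpl, llama_sampler_init_temp(0.8f));
llama_sampler_chain_add(smpl, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

llama_decode(ctx, batch);

// applies the chain to the last output's logits and accepts the chosen token
const llama_token id = llama_sampler_sample(smpl, ctx, -1);

llama_sampler_free(smpl); // also frees the samplers owned by the chain
```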
@@ -1163,12 +1170,6 @@
     // Returns the split_prefix length.
     LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
 
-    // Performance information
-    LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
-
-    LLAMA_API void llama_print_timings(struct llama_context * ctx);
-    LLAMA_API void llama_reset_timings(struct llama_context * ctx);
-
     // Print system information
     LLAMA_API const char * llama_print_system_info(void);
 
@@ -1176,65 +1177,41 @@ extern "C" {
     // If this is not called, or NULL is supplied, everything is output on stderr.
     LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
 
-    LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
-
-#ifdef __cplusplus
-}
-#endif
-
-// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
-#ifdef LLAMA_API_INTERNAL
-
-#include <random>
-#include <string>
-#include <vector>
-
-struct ggml_tensor;
-
-const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
-    struct llama_context * ctx
-);
-
-struct llama_partial_utf8 {
-    uint32_t value;    // bit value so far (unshifted)
-    int      n_remain; // num bytes remaining; -1 indicates invalid sequence
-};
-
-struct llama_grammar_candidate {
-    size_t               index;
-    const uint32_t     * code_points;
-    llama_partial_utf8   partial_utf8;
-};
+    //
+    // Performance utils
+    //
+    // NOTE: Used by llama.cpp examples, avoid using in third-party apps. Instead, do your own performance measurements.
+    //
 
-using llama_grammar_rule  = std::vector<      llama_grammar_element>;
-using llama_grammar_stack = std::vector<const llama_grammar_element *>;
+    struct llama_perf_context_data {
+        double t_start_ms;
+        double t_load_ms;
+        double t_p_eval_ms;
+        double t_eval_ms;
 
-using llama_grammar_rules      = std::vector<llama_grammar_rule>;
-using llama_grammar_stacks     = std::vector<llama_grammar_stack>;
-using llama_grammar_candidates = std::vector<llama_grammar_candidate>;
+        int32_t n_p_eval;
+        int32_t n_eval;
+    };
 
-const llama_grammar_rules  & llama_grammar_get_rules (const struct llama_grammar * grammar);
-      llama_grammar_stacks & llama_grammar_get_stacks(      struct llama_grammar * grammar);
+    struct llama_perf_sampler_data {
+        double t_sample_ms;
 
-void llama_grammar_accept(
-        const llama_grammar_rules  & rules,
-        const llama_grammar_stacks & stacks,
-        const uint32_t chr,
-        llama_grammar_stacks & new_stacks);
+        int32_t n_sample;
+    };
 
-std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
-        const llama_grammar_rules & rules,
-        const llama_grammar_stack & stack,
-        const llama_grammar_candidates & candidates);
+    LLAMA_API struct llama_perf_context_data llama_perf_context      (const struct llama_context * ctx);
+    LLAMA_API void                           llama_perf_context_print(const struct llama_context * ctx);
+    LLAMA_API void                           llama_perf_context_reset(      struct llama_context * ctx);
 
-std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
-        const std::string & src,
-        llama_partial_utf8 partial_start);
+    // NOTE: the following work only with samplers constructed via llama_sampler_chain_init
+    LLAMA_API struct llama_perf_sampler_data llama_perf_sampler      (const struct llama_sampler * chain);
+    LLAMA_API void                           llama_perf_sampler_print(const struct llama_sampler * chain);
+    LLAMA_API void                           llama_perf_sampler_reset(      struct llama_sampler * chain);
 
-// Randomly selects a token from the candidates based on their probabilities using given std::mt19937.
-// This is a temporary workaround in order to fix race conditions when sampling with multiple sequences.
-llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng);
+    LLAMA_API void llama_perf_dump_yaml(FILE * stream, const struct llama_context * ctx);
 
-#endif // LLAMA_API_INTERNAL
+#ifdef __cplusplus
+}
+#endif
 
 #endif // LLAMA_H