llama-cpp-capacitor 0.0.5 → 0.0.7
This diff shows the content of publicly released package versions as published to their respective public registries. It is provided for informational purposes only and reflects the changes between those versions.
- package/cpp/LICENSE +21 -0
- package/cpp/README.md +4 -0
- package/cpp/anyascii.c +22223 -0
- package/cpp/anyascii.h +42 -0
- package/cpp/chat-parser.cpp +393 -0
- package/cpp/chat-parser.h +120 -0
- package/cpp/chat.cpp +2315 -0
- package/cpp/chat.h +221 -0
- package/cpp/common.cpp +1619 -0
- package/cpp/common.h +744 -0
- package/cpp/ggml-alloc.c +1028 -0
- package/cpp/ggml-alloc.h +76 -0
- package/cpp/ggml-backend-impl.h +255 -0
- package/cpp/ggml-backend-reg.cpp +600 -0
- package/cpp/ggml-backend.cpp +2118 -0
- package/cpp/ggml-backend.h +354 -0
- package/cpp/ggml-common.h +1878 -0
- package/cpp/ggml-cpp.h +39 -0
- package/cpp/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/ggml-cpu/amx/amx.h +8 -0
- package/cpp/ggml-cpu/amx/common.h +91 -0
- package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
- package/cpp/ggml-cpu/amx/mmq.h +10 -0
- package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
- package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
- package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
- package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
- package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
- package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
- package/cpp/ggml-cpu/arch-fallback.h +215 -0
- package/cpp/ggml-cpu/binary-ops.cpp +158 -0
- package/cpp/ggml-cpu/binary-ops.h +16 -0
- package/cpp/ggml-cpu/common.h +73 -0
- package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
- package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
- package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
- package/cpp/ggml-cpu/ops.cpp +10587 -0
- package/cpp/ggml-cpu/ops.h +114 -0
- package/cpp/ggml-cpu/quants.c +1193 -0
- package/cpp/ggml-cpu/quants.h +97 -0
- package/cpp/ggml-cpu/repack.cpp +1982 -0
- package/cpp/ggml-cpu/repack.h +120 -0
- package/cpp/ggml-cpu/simd-mappings.h +1184 -0
- package/cpp/ggml-cpu/traits.cpp +36 -0
- package/cpp/ggml-cpu/traits.h +38 -0
- package/cpp/ggml-cpu/unary-ops.cpp +186 -0
- package/cpp/ggml-cpu/unary-ops.h +28 -0
- package/cpp/ggml-cpu/vec.cpp +348 -0
- package/cpp/ggml-cpu/vec.h +1121 -0
- package/cpp/ggml-cpu.h +145 -0
- package/cpp/ggml-impl.h +622 -0
- package/cpp/ggml-metal-impl.h +688 -0
- package/cpp/ggml-metal.h +66 -0
- package/cpp/ggml-metal.m +6833 -0
- package/cpp/ggml-opt.cpp +1093 -0
- package/cpp/ggml-opt.h +256 -0
- package/cpp/ggml-quants.c +5324 -0
- package/cpp/ggml-quants.h +106 -0
- package/cpp/ggml-threading.cpp +12 -0
- package/cpp/ggml-threading.h +14 -0
- package/cpp/ggml.c +7108 -0
- package/cpp/ggml.h +2492 -0
- package/cpp/gguf.cpp +1358 -0
- package/cpp/gguf.h +202 -0
- package/cpp/json-partial.cpp +256 -0
- package/cpp/json-partial.h +38 -0
- package/cpp/json-schema-to-grammar.cpp +985 -0
- package/cpp/json-schema-to-grammar.h +21 -0
- package/cpp/llama-adapter.cpp +388 -0
- package/cpp/llama-adapter.h +76 -0
- package/cpp/llama-arch.cpp +2355 -0
- package/cpp/llama-arch.h +499 -0
- package/cpp/llama-batch.cpp +875 -0
- package/cpp/llama-batch.h +160 -0
- package/cpp/llama-chat.cpp +783 -0
- package/cpp/llama-chat.h +65 -0
- package/cpp/llama-context.cpp +2748 -0
- package/cpp/llama-context.h +306 -0
- package/cpp/llama-cparams.cpp +5 -0
- package/cpp/llama-cparams.h +41 -0
- package/cpp/llama-cpp.h +30 -0
- package/cpp/llama-grammar.cpp +1229 -0
- package/cpp/llama-grammar.h +173 -0
- package/cpp/llama-graph.cpp +1891 -0
- package/cpp/llama-graph.h +810 -0
- package/cpp/llama-hparams.cpp +180 -0
- package/cpp/llama-hparams.h +233 -0
- package/cpp/llama-impl.cpp +167 -0
- package/cpp/llama-impl.h +61 -0
- package/cpp/llama-io.cpp +15 -0
- package/cpp/llama-io.h +35 -0
- package/cpp/llama-kv-cache-iswa.cpp +318 -0
- package/cpp/llama-kv-cache-iswa.h +135 -0
- package/cpp/llama-kv-cache.cpp +2059 -0
- package/cpp/llama-kv-cache.h +374 -0
- package/cpp/llama-kv-cells.h +491 -0
- package/cpp/llama-memory-hybrid.cpp +258 -0
- package/cpp/llama-memory-hybrid.h +137 -0
- package/cpp/llama-memory-recurrent.cpp +1146 -0
- package/cpp/llama-memory-recurrent.h +179 -0
- package/cpp/llama-memory.cpp +59 -0
- package/cpp/llama-memory.h +119 -0
- package/cpp/llama-mmap.cpp +600 -0
- package/cpp/llama-mmap.h +68 -0
- package/cpp/llama-model-loader.cpp +1164 -0
- package/cpp/llama-model-loader.h +170 -0
- package/cpp/llama-model-saver.cpp +282 -0
- package/cpp/llama-model-saver.h +37 -0
- package/cpp/llama-model.cpp +19042 -0
- package/cpp/llama-model.h +491 -0
- package/cpp/llama-sampling.cpp +2575 -0
- package/cpp/llama-sampling.h +32 -0
- package/cpp/llama-vocab.cpp +3792 -0
- package/cpp/llama-vocab.h +176 -0
- package/cpp/llama.cpp +358 -0
- package/cpp/llama.h +1373 -0
- package/cpp/log.cpp +427 -0
- package/cpp/log.h +103 -0
- package/cpp/minja/chat-template.hpp +550 -0
- package/cpp/minja/minja.hpp +3009 -0
- package/cpp/nlohmann/json.hpp +25526 -0
- package/cpp/nlohmann/json_fwd.hpp +187 -0
- package/cpp/regex-partial.cpp +204 -0
- package/cpp/regex-partial.h +56 -0
- package/cpp/rn-completion.cpp +681 -0
- package/cpp/rn-completion.h +116 -0
- package/cpp/rn-llama.cpp +345 -0
- package/cpp/rn-llama.h +149 -0
- package/cpp/rn-mtmd.hpp +602 -0
- package/cpp/rn-tts.cpp +591 -0
- package/cpp/rn-tts.h +59 -0
- package/cpp/sampling.cpp +579 -0
- package/cpp/sampling.h +107 -0
- package/cpp/tools/mtmd/clip-impl.h +473 -0
- package/cpp/tools/mtmd/clip.cpp +4322 -0
- package/cpp/tools/mtmd/clip.h +106 -0
- package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
- package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
- package/cpp/tools/mtmd/mtmd-audio.h +47 -0
- package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
- package/cpp/tools/mtmd/mtmd-helper.h +91 -0
- package/cpp/tools/mtmd/mtmd.cpp +1066 -0
- package/cpp/tools/mtmd/mtmd.h +298 -0
- package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
- package/cpp/unicode-data.cpp +7034 -0
- package/cpp/unicode-data.h +20 -0
- package/cpp/unicode.cpp +1061 -0
- package/cpp/unicode.h +68 -0
- package/package.json +2 -1
package/cpp/llama-context.h
ADDED
@@ -0,0 +1,306 @@
+#pragma once
+
+#include "llama.h"
+#include "llama-cparams.h"
+#include "llama-graph.h"
+#include "llama-adapter.h"
+
+#include "ggml-cpp.h"
+#include "ggml-opt.h"
+
+#include <map>
+#include <vector>
+
+struct llama_model;
+class llama_batch_allocr;
+
+class llama_io_read_i;
+class llama_io_write_i;
+
+struct llama_memory_i;
+struct llama_memory_context_i;
+
+struct llama_context {
+    // init scheduler and compute buffers, reserve worst-case graphs
+    llama_context(
+            const llama_model & model,
+            llama_context_params params);
+
+    ~llama_context();
+
+    void synchronize();
+
+    const llama_model & get_model() const;
+    const llama_cparams & get_cparams() const;
+
+    lm_ggml_backend_sched_t get_sched() const;
+
+    uint32_t n_ctx() const;
+    uint32_t n_ctx_per_seq() const;
+    uint32_t n_batch() const;
+    uint32_t n_ubatch() const;
+    uint32_t n_seq_max() const;
+
+    uint32_t n_threads() const;
+    uint32_t n_threads_batch() const;
+
+    llama_memory_t get_memory() const;
+
+    // return true if the memory was updated
+    bool memory_update(bool optimize);
+
+    enum llama_pooling_type pooling_type() const;
+
+    float * get_logits();
+    float * get_logits_ith(int32_t i);
+
+    float * get_embeddings();
+    float * get_embeddings_ith(int32_t i);
+    float * get_embeddings_seq(llama_seq_id seq_id);
+
+    void attach_threadpool(
+            lm_ggml_threadpool_t threadpool,
+            lm_ggml_threadpool_t threadpool_batch);
+
+    void detach_threadpool();
+
+    void set_n_threads(int32_t n_threads, int32_t n_threads_batch);
+
+    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data);
+
+    void set_embeddings (bool value);
+    void set_causal_attn(bool value);
+    void set_warmup(bool value);
+
+    void set_adapter_lora(
+            llama_adapter_lora * adapter,
+            float scale);
+
+    bool rm_adapter_lora(
+            llama_adapter_lora * adapter);
+
+    void clear_adapter_lora();
+
+    bool apply_adapter_cvec(
+            const float * data,
+            size_t len,
+            int32_t n_embd,
+            int32_t il_start,
+            int32_t il_end);
+
+    // process a single ubatch with a specific graph type
+    // if memory_context is provided, it will be applied first to the context's memory
+    // ret contains the status of the graph computation
+    // returns nullptr only if ret != LM_GGML_STATUS_SUCCESS
+    llm_graph_result * process_ubatch(
+            const llama_ubatch & ubatch,
+            llm_graph_type gtype,
+            llama_memory_context_i * mctx,
+            lm_ggml_status & ret);
+
+    int encode(const llama_batch & batch_inp);
+    int decode(const llama_batch & batch_inp);
+
+    //
+    // state save/load
+    //
+
+    size_t state_get_size();
+    size_t state_get_data( uint8_t * dst, size_t size);
+    size_t state_set_data(const uint8_t * src, size_t size);
+
+    size_t state_seq_get_size(llama_seq_id seq_id, llama_state_seq_flags flags);
+    size_t state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size, llama_state_seq_flags flags);
+    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size, llama_state_seq_flags flags);
+
+    bool state_load_file(
+            const char * filepath,
+            llama_token * tokens_out,
+            size_t n_token_capacity,
+            size_t * n_token_count_out);
+
+    bool state_save_file(
+            const char * filepath,
+            const llama_token * tokens,
+            size_t n_token_count);
+
+    size_t state_seq_load_file(
+            llama_seq_id seq_id,
+            const char * filepath,
+            llama_token * tokens_out,
+            size_t n_token_capacity,
+            size_t * n_token_count_out);
+
+    size_t state_seq_save_file(
+            llama_seq_id seq_id,
+            const char * filepath,
+            const llama_token * tokens,
+            size_t n_token_count);
+
+    //
+    // perf
+    //
+
+    llama_perf_context_data perf_get_data() const;
+    void perf_reset();
+
+    //
+    // training
+    //
+
+    void opt_init(struct llama_model * model, struct llama_opt_params lopt_params);
+
+    // TODO: more flexible combinations of logical/physical batch size and context size
+    void opt_epoch(
+            lm_ggml_opt_dataset_t dataset,
+            lm_ggml_opt_result_t result_train,
+            lm_ggml_opt_result_t result_eval,
+            int64_t idata_split,
+            lm_ggml_opt_epoch_callback callback_train,
+            lm_ggml_opt_epoch_callback callback_eval);
+
+    void opt_epoch_iter(
+            lm_ggml_opt_dataset_t dataset,
+            lm_ggml_opt_result_t result,
+            const std::vector<llama_token> & tokens,
+            const std::vector<llama_token> & labels_sparse,
+            llama_batch & batch,
+            lm_ggml_opt_epoch_callback callback,
+            bool train,
+            int64_t idata_in_loop,
+            int64_t ndata_in_loop,
+            int64_t t_loop_start);
+
+private:
+    //
+    // output
+    //
+
+    // Make sure enough space is available for outputs.
+    // Returns max number of outputs for which space was reserved.
+    uint32_t output_reserve(int32_t n_outputs);
+
+    void output_reorder();
+
+    //
+    // graph
+    //
+
+public:
+    uint32_t graph_max_nodes() const;
+
+    // can reuse the llm_graph_result instance of the context (for example to update a memory module)
+    llm_graph_result * get_gf_res_reserve() const;
+
+    // returns the result of lm_ggml_backend_sched_graph_compute_async execution
+    lm_ggml_status graph_compute(lm_ggml_cgraph * gf, bool batched);
+
+    // reserve a graph with a dummy ubatch of the specified size
+    lm_ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx);
+
+private:
+    llm_graph_params graph_params(
+            llm_graph_result * res,
+            const llama_ubatch & ubatch,
+            const llama_memory_context_i * mctx,
+            llm_graph_type gtype) const;
+
+    llm_graph_cb graph_get_cb() const;
+
+    // TODO: read/write lora adapters and cvec
+    size_t state_write_data(llama_io_write_i & io);
+    size_t state_read_data (llama_io_read_i & io);
+
+    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags);
+    size_t state_seq_read_data (llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags);
+
+    //
+    // members
+    //
+
+    const llama_model & model;
+
+    llama_cparams cparams;
+    llama_adapter_cvec cvec;
+    llama_adapter_loras loras;
+
+    llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably
+
+    std::unique_ptr<llama_memory_i> memory;
+
+    // decode output (2-dimensional array: [n_outputs][n_vocab])
+    size_t logits_size = 0; // capacity (of floats) for logits
+    float * logits = nullptr;
+
+    // embeddings output (2-dimensional array: [n_outputs][n_embd])
+    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
+    size_t embd_size = 0; // capacity (of floats) for embeddings
+    float * embd = nullptr;
+
+    // sequence embeddings output (map of [n_embd] vectors)
+    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
+    std::map<llama_seq_id, std::vector<float>> embd_seq;
+
+    // reuse the batch_allocr to avoid unnecessary memory allocations
+    std::unique_ptr<llama_batch_allocr> balloc;
+
+    uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch
+
+    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
+
+    struct swap_info {
+        uint32_t i0;
+        uint32_t i1;
+    };
+
+    std::vector<swap_info> output_swaps;
+
+    lm_ggml_backend_sched_ptr sched;
+
+    lm_ggml_backend_t backend_cpu = nullptr;
+    std::vector<lm_ggml_backend_ptr> backends;
+
+    // training
+    lm_ggml_opt_context_t opt_ctx = nullptr;
+
+    lm_ggml_threadpool_t threadpool = nullptr;
+    lm_ggml_threadpool_t threadpool_batch = nullptr;
+
+    lm_ggml_abort_callback abort_callback = nullptr;
+    void * abort_callback_data = nullptr;
+
+    std::vector<std::pair<lm_ggml_backend_t, lm_ggml_backend_set_n_threads_t>> set_n_threads_fns;
+
+    // buffer types used for the compute buffer of each backend
+    std::vector<lm_ggml_backend_t> backend_ptrs;
+    std::vector<lm_ggml_backend_buffer_type_t> backend_buft;
+
+    llm_graph_result_ptr gf_res_prev;
+    llm_graph_result_ptr gf_res_reserve;
+
+    // host buffer for the model output (logits and embeddings)
+    lm_ggml_backend_buffer_ptr buf_output;
+
+    bool has_evaluated_once = false;
+
+    // env: LLAMA_SET_ROWS (temporary)
+    // ref: https://github.com/ggml-org/llama.cpp/pull/14285
+    bool supports_set_rows = true;
+
+    // env: LLAMA_GRAPH_REUSE_DISABLE
+    bool graph_reuse_disable = false;
+
+    // perf
+    mutable int64_t t_start_us = 0;
+    mutable int64_t t_load_us = 0;
+    mutable int64_t t_p_eval_us = 0;
+    mutable int64_t t_eval_us = 0;
+
+    mutable int64_t t_compute_start_us = 0;
+    mutable int64_t n_queued_tokens = 0;
+
+    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
+    mutable int32_t n_eval = 0; // number of eval calls
+
+    mutable int32_t n_reused = 0; // number of times the previous graph was reused
+};
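The llama_context declaration above is internal to the library; applications reach decode(), get_logits_ith() and state_save_file() through the llama.h C API bundled in this package. The following is a minimal sketch of that path, assuming the standard llama.h entry points; the model path, token setup and output file name are illustrative assumptions, not part of the diff.

// Sketch only (not part of the package): exercising the public C API that
// forwards to llama_context::decode(), get_logits_ith() and state_save_file().
// "model.gguf" and "session.bin" are placeholder paths.
#include "llama.h"
#include <vector>

int main() {
    llama_backend_init();

    llama_model * model = llama_model_load_from_file("model.gguf", llama_model_default_params());
    if (!model) { return 1; }

    llama_context_params cparams = llama_context_default_params();
    llama_context * ctx = llama_init_from_model(model, cparams);
    if (!ctx) { llama_model_free(model); return 1; }

    // decode a single BOS token; llama_decode() ends up in llama_context::decode()
    std::vector<llama_token> tokens = { llama_vocab_bos(llama_model_get_vocab(model)) };
    llama_batch batch = llama_batch_get_one(tokens.data(), (int32_t) tokens.size());
    if (llama_decode(ctx, batch) == 0) {
        const float * logits = llama_get_logits_ith(ctx, -1); // -> get_logits_ith()
        (void) logits;
    }

    // llama_state_save_file() forwards to llama_context::state_save_file()
    llama_state_save_file(ctx, "session.bin", tokens.data(), tokens.size());

    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}
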
package/cpp/llama-cparams.h
ADDED
@@ -0,0 +1,41 @@
+#pragma once
+
+#include "llama.h"
+
+#include <cstdint>
+
+#define LLAMA_MAX_SEQ 64
+
+struct llama_cparams {
+    uint32_t n_ctx; // context size used during inference
+    uint32_t n_batch;
+    uint32_t n_ubatch;
+    uint32_t n_seq_max;
+    int32_t n_threads; // number of threads to use for generation
+    int32_t n_threads_batch; // number of threads to use for batch processing
+
+    float rope_freq_base;
+    float rope_freq_scale;
+
+    uint32_t n_ctx_orig_yarn;
+    // These hyperparameters are not exposed in GGUF, because all
+    // existing YaRN models use the same values for them.
+    float yarn_ext_factor;
+    float yarn_attn_factor;
+    float yarn_beta_fast;
+    float yarn_beta_slow;
+
+    bool embeddings;
+    bool causal_attn;
+    bool offload_kqv;
+    bool flash_attn;
+    bool no_perf;
+    bool warmup;
+    bool op_offload;
+    bool kv_unified;
+
+    enum llama_pooling_type pooling_type;
+
+    lm_ggml_backend_sched_eval_callback cb_eval;
+    void * cb_eval_user_data;
+};
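llama_cparams is the per-context copy of the values an application passes in llama_context_params. As a rough sketch of that mapping (the public-side field names are assumed from the standard llama.h API, not taken from this diff):

// Sketch only: public llama_context_params fields that feed the internal
// llama_cparams above. Values are arbitrary examples.
#include "llama.h"

static llama_context_params make_params() {
    llama_context_params p = llama_context_default_params();
    p.n_ctx           = 8192;     // -> llama_cparams::n_ctx
    p.n_batch         = 2048;     // -> n_batch (logical batch size)
    p.n_ubatch        = 512;      // -> n_ubatch (physical micro-batch size)
    p.n_threads       = 4;        // -> n_threads (generation)
    p.n_threads_batch = 8;        // -> n_threads_batch (prompt processing)
    p.rope_freq_base  = 10000.0f; // -> rope_freq_base
    p.rope_freq_scale = 1.0f;     // -> rope_freq_scale
    p.embeddings      = false;    // -> embeddings
    return p;
}
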
package/cpp/llama-cpp.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+#ifndef __cplusplus
+#error "This header is for C++ only"
+#endif
+
+#include <memory>
+
+#include "llama.h"
+
+struct llama_model_deleter {
+    void operator()(llama_model * model) { llama_model_free(model); }
+};
+
+struct llama_context_deleter {
+    void operator()(llama_context * context) { llama_free(context); }
+};
+
+struct llama_sampler_deleter {
+    void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); }
+};
+
+struct llama_adapter_lora_deleter {
+    void operator()(llama_adapter_lora * adapter) { llama_adapter_lora_free(adapter); }
+};
+
+typedef std::unique_ptr<llama_model, llama_model_deleter> llama_model_ptr;
+typedef std::unique_ptr<llama_context, llama_context_deleter> llama_context_ptr;
+typedef std::unique_ptr<llama_sampler, llama_sampler_deleter> llama_sampler_ptr;
+typedef std::unique_ptr<llama_adapter_lora, llama_adapter_lora_deleter> llama_adapter_lora_ptr;