cui-llama.rn 1.0.2 → 1.0.3
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the differences between the versions as they appear in their public registries.
- package/android/src/main/CMakeLists.txt +22 -19
- package/android/src/main/java/com/rnllama/LlamaContext.java +28 -21
- package/cpp/common.cpp +4 -11
- package/cpp/common.h +1 -1
- package/cpp/ggml-aarch64.c +2193 -2193
- package/cpp/ggml-aarch64.h +39 -39
- package/cpp/ggml-alloc.c +1042 -1041
- package/cpp/ggml-backend-impl.h +153 -153
- package/cpp/ggml-backend.c +2234 -2225
- package/cpp/ggml-backend.h +238 -236
- package/cpp/ggml-common.h +1829 -1829
- package/cpp/ggml-impl.h +655 -655
- package/cpp/ggml-metal.h +65 -65
- package/cpp/ggml-metal.m +3269 -3273
- package/cpp/ggml-quants.c +14860 -15022
- package/cpp/ggml-quants.h +132 -132
- package/cpp/ggml.c +16 -6
- package/cpp/ggml.h +2447 -2444
- package/cpp/llama.cpp +634 -531
- package/cpp/llama.h +30 -14
- package/cpp/log.h +737 -737
- package/cpp/rn-llama.hpp +9 -1
- package/cpp/sampling.cpp +460 -460
- package/cpp/sgemm.cpp +1027 -1027
- package/cpp/sgemm.h +14 -14
- package/package.json +1 -1
package/cpp/sampling.cpp
CHANGED
@@ -1,460 +1,460 @@
(As rendered, every line of the file is marked as removed and then re-added with identical content, so the 460 lines are shown once below.)

#define LLAMA_API_INTERNAL
#include "sampling.h"
#include <random>

struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_params & params) {
    struct llama_sampling_context * result = new llama_sampling_context();

    result->params = params;
    result->grammar = nullptr;

    // if there is a grammar, parse it
    if (!params.grammar.empty()) {
        result->parsed_grammar = grammar_parser::parse(params.grammar.c_str());

        // will be empty (default) if there are parse errors
        if (result->parsed_grammar.rules.empty()) {
            fprintf(stderr, "%s: failed to parse grammar\n", __func__);
            delete result;
            return nullptr;
        }

        // Ensure that there is a "root" node.
        if (result->parsed_grammar.symbol_ids.find("root") == result->parsed_grammar.symbol_ids.end()) {
            fprintf(stderr, "%s: grammar does not contain a 'root' symbol\n", __func__);
            delete result;
            return nullptr;
        }

        std::vector<const llama_grammar_element *> grammar_rules(result->parsed_grammar.c_rules());

        struct llama_grammar * grammar = llama_grammar_init(
                grammar_rules.data(),
                grammar_rules.size(), result->parsed_grammar.symbol_ids.at("root"));
        if (grammar == nullptr) {
            throw std::runtime_error("Failed to initialize llama_grammar");
        }
        result->grammar = grammar;
    }

    result->prev.resize(params.n_prev);

    result->n_valid = 0;

    llama_sampling_set_rng_seed(result, params.seed);

    return result;
}

void llama_sampling_free(struct llama_sampling_context * ctx) {
    if (ctx->grammar != NULL) {
        llama_grammar_free(ctx->grammar);
    }

    delete ctx;
}

void llama_sampling_reset(llama_sampling_context * ctx) {
    if (ctx->grammar != NULL) {
        llama_grammar_free(ctx->grammar);
        ctx->grammar = NULL;
    }

    if (!ctx->parsed_grammar.rules.empty()) {
        std::vector<const llama_grammar_element *> grammar_rules(ctx->parsed_grammar.c_rules());

        struct llama_grammar * grammar = llama_grammar_init(
                grammar_rules.data(),
                grammar_rules.size(), ctx->parsed_grammar.symbol_ids.at("root"));
        if (grammar == nullptr) {
            throw std::runtime_error("Failed to initialize llama_grammar");
        }
        ctx->grammar = grammar;
    }

    std::fill(ctx->prev.begin(), ctx->prev.end(), 0);
    ctx->cur.clear();
    ctx->n_valid = 0;
}

void llama_sampling_set_rng_seed(struct llama_sampling_context * ctx, uint32_t seed) {
    if (seed == LLAMA_DEFAULT_SEED) {
        seed = std::random_device{}();
    }
    ctx->rng.seed(seed);
}

void llama_sampling_cp(llama_sampling_context * src, llama_sampling_context * dst) {
    if (dst->grammar) {
        llama_grammar_free(dst->grammar);
        dst->grammar = nullptr;
    }

    if (src->grammar) {
        dst->grammar = llama_grammar_copy(src->grammar);
    }

    dst->prev = src->prev;
}

llama_token llama_sampling_last(llama_sampling_context * ctx) {
    return ctx->prev.back();
}

std::string llama_sampling_prev_str(llama_sampling_context * ctx_sampling, llama_context * ctx_main, int n) {
    const int size = ctx_sampling->prev.size();

    n = std::min(n, size);

    std::string result;

    for (int i = size - n; i < size; i++) {
        result += llama_token_to_piece(ctx_main, ctx_sampling->prev[i]);
    }

    return result;
}

std::string llama_sampling_print(const llama_sampling_params & params) {
    char result[1024];

    snprintf(result, sizeof(result),
            "\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n"
            "\ttop_k = %d, tfs_z = %.3f, top_p = %.3f, min_p = %.3f, typical_p = %.3f, temp = %.3f\n"
            "\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f",
            params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present,
            params.top_k, params.tfs_z, params.top_p, params.min_p, params.typical_p, params.temp,
            params.mirostat, params.mirostat_eta, params.mirostat_tau);

    return std::string(result);
}

std::string llama_sampling_order_print(const llama_sampling_params & params) {
    std::string result = "CFG -> Penalties ";
    if (params.mirostat == 0) {
        for (auto sampler_type : params.samplers_sequence) {
            const auto sampler_type_name = llama_sampling_type_to_str(sampler_type);
            if (!sampler_type_name.empty()) {
                result += "-> " + sampler_type_name + " ";
            }
        }
    } else {
        result += "-> mirostat ";
    }

    return result;
}

std::string llama_sampling_type_to_str(llama_sampler_type sampler_type) {
    switch (sampler_type) {
        case llama_sampler_type::TOP_K: return "top_k";
        case llama_sampler_type::TFS_Z: return "tfs_z";
        case llama_sampler_type::TYPICAL_P: return "typical_p";
        case llama_sampler_type::TOP_P: return "top_p";
        case llama_sampler_type::MIN_P: return "min_p";
        case llama_sampler_type::TEMPERATURE: return "temperature";
        default : return "";
    }
}

std::vector<llama_sampler_type> llama_sampling_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) {
    std::unordered_map<std::string, llama_sampler_type> sampler_canonical_name_map {
        {"top_k",       llama_sampler_type::TOP_K},
        {"top_p",       llama_sampler_type::TOP_P},
        {"typical_p",   llama_sampler_type::TYPICAL_P},
        {"min_p",       llama_sampler_type::MIN_P},
        {"tfs_z",       llama_sampler_type::TFS_Z},
        {"temperature", llama_sampler_type::TEMPERATURE}
    };

    // since samplers names are written multiple ways
    // make it ready for both system names and input names
    std::unordered_map<std::string, llama_sampler_type> sampler_alt_name_map {
        {"top-k",       llama_sampler_type::TOP_K},
        {"top-p",       llama_sampler_type::TOP_P},
        {"nucleus",     llama_sampler_type::TOP_P},
        {"typical-p",   llama_sampler_type::TYPICAL_P},
        {"typical",     llama_sampler_type::TYPICAL_P},
        {"min-p",       llama_sampler_type::MIN_P},
        {"tfs-z",       llama_sampler_type::TFS_Z},
        {"tfs",         llama_sampler_type::TFS_Z},
        {"temp",        llama_sampler_type::TEMPERATURE}
    };

    std::vector<llama_sampler_type> sampler_types;
    sampler_types.reserve(names.size());
    for (const auto & name : names)
    {
        auto sampler_item = sampler_canonical_name_map.find(name);
        if (sampler_item != sampler_canonical_name_map.end())
        {
            sampler_types.push_back(sampler_item->second);
        }
        else
        {
            if (allow_alt_names)
            {
                sampler_item = sampler_alt_name_map.find(name);
                if (sampler_item != sampler_alt_name_map.end())
                {
                    sampler_types.push_back(sampler_item->second);
                }
            }
        }
    }
    return sampler_types;
}

std::vector<llama_sampler_type> llama_sampling_types_from_chars(const std::string & names_string) {
    std::unordered_map<char, llama_sampler_type> sampler_name_map {
        {'k', llama_sampler_type::TOP_K},
        {'p', llama_sampler_type::TOP_P},
        {'y', llama_sampler_type::TYPICAL_P},
        {'m', llama_sampler_type::MIN_P},
        {'f', llama_sampler_type::TFS_Z},
        {'t', llama_sampler_type::TEMPERATURE}
    };

    std::vector<llama_sampler_type> sampler_types;
    sampler_types.reserve(names_string.size());
    for (const auto & c : names_string) {
        const auto sampler_item = sampler_name_map.find(c);
        if (sampler_item != sampler_name_map.end()) {
            sampler_types.push_back(sampler_item->second);
        }
    }
    return sampler_types;
}

// no reasons to expose this function in header
static void sampler_queue(
                   struct llama_context * ctx_main,
            const llama_sampling_params & params,
                 llama_token_data_array & cur_p,
                                 size_t   min_keep) {
    const float         temp              = params.temp;
    const float         dynatemp_range    = params.dynatemp_range;
    const float         dynatemp_exponent = params.dynatemp_exponent;
    const int32_t       top_k             = params.top_k;
    const float         top_p             = params.top_p;
    const float         min_p             = params.min_p;
    const float         tfs_z             = params.tfs_z;
    const float         typical_p         = params.typical_p;
    const std::vector<llama_sampler_type> & samplers_sequence = params.samplers_sequence;

    for (auto sampler_type : samplers_sequence) {
        switch (sampler_type) {
            case llama_sampler_type::TOP_K    : llama_sample_top_k    (ctx_main, &cur_p, top_k,     min_keep); break;
            case llama_sampler_type::TFS_Z    : llama_sample_tail_free(ctx_main, &cur_p, tfs_z,     min_keep); break;
            case llama_sampler_type::TYPICAL_P: llama_sample_typical  (ctx_main, &cur_p, typical_p, min_keep); break;
            case llama_sampler_type::TOP_P    : llama_sample_top_p    (ctx_main, &cur_p, top_p,     min_keep); break;
            case llama_sampler_type::MIN_P    : llama_sample_min_p    (ctx_main, &cur_p, min_p,     min_keep); break;
            case llama_sampler_type::TEMPERATURE:
                if (dynatemp_range > 0) {
                    float dynatemp_min = std::max(0.0f, temp - dynatemp_range);
                    float dynatemp_max = std::max(0.0f, temp + dynatemp_range);
                    llama_sample_entropy(ctx_main, &cur_p, dynatemp_min, dynatemp_max, dynatemp_exponent);
                } else {
                    llama_sample_temp(ctx_main, &cur_p, temp);
                }
                break;
            default : break;
        }
    }
}

static llama_token llama_sampling_sample_impl(
                  struct llama_sampling_context * ctx_sampling,
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx,
                  bool is_resampling) {
    const llama_sampling_params & params = ctx_sampling->params;

    const float temp         = params.temp;
    const int   mirostat     = params.mirostat;
    const float mirostat_tau = params.mirostat_tau;
    const float mirostat_eta = params.mirostat_eta;

    std::vector<float> original_logits;
    auto cur_p = llama_sampling_prepare(ctx_sampling, ctx_main, ctx_cfg, idx, /* apply_grammar= */ is_resampling, &original_logits);
    if (ctx_sampling->grammar != NULL && !is_resampling) {
        LM_GGML_ASSERT(!original_logits.empty());
    }
    llama_token id = 0;

    if (temp < 0.0) {
        // greedy sampling, with probs
        llama_sample_softmax(ctx_main, &cur_p);
        id = cur_p.data[0].id;
    } else if (temp == 0.0) {
        // greedy sampling, no probs
        id = llama_sample_token_greedy(ctx_main, &cur_p);
    } else {
        if (mirostat == 1) {
            const int mirostat_m = 100;
            llama_sample_temp(ctx_main, &cur_p, temp);
            id = llama_sample_token_mirostat(ctx_main, &cur_p, mirostat_tau, mirostat_eta, mirostat_m, &ctx_sampling->mirostat_mu);
        } else if (mirostat == 2) {
            llama_sample_temp(ctx_main, &cur_p, temp);
            id = llama_sample_token_mirostat_v2(ctx_main, &cur_p, mirostat_tau, mirostat_eta, &ctx_sampling->mirostat_mu);
        } else {
            // temperature sampling
            size_t min_keep = std::max(1, params.min_keep);

            sampler_queue(ctx_main, params, cur_p, min_keep);

            id = llama_sample_token_with_rng(ctx_main, &cur_p, ctx_sampling->rng);

            //{
            //    const int n_top = 10;
            //    LOG("top %d candidates:\n", n_top);

            //    for (int i = 0; i < n_top; i++) {
            //        const llama_token id = cur_p.data[i].id;
            //        (void)id; // To avoid a warning that id is unused when logging is disabled.
            //        LOG(" - %5d: '%12s' (%.3f)\n", id, llama_token_to_piece(ctx_main, id).c_str(), cur_p.data[i].p);
            //    }
            //}

            //LOG("sampled token: %5d: '%s'\n", id, llama_token_to_piece(ctx_main, id).c_str());
        }
    }

    if (ctx_sampling->grammar != NULL && !is_resampling) {
        // Get a pointer to the logits
        float * logits = llama_get_logits_ith(ctx_main, idx);

        // Create an array with a single token data element for the sampled id
        llama_token_data single_token_data = {id, logits[id], 0.0f};
        llama_token_data_array single_token_data_array = { &single_token_data, 1, false };

        // Apply grammar constraints to the single token
        llama_sample_grammar(ctx_main, &single_token_data_array, ctx_sampling->grammar);

        // Check if the token is valid according to the grammar by seeing if its logit has been set to -INFINITY
        bool is_valid = single_token_data_array.data[0].logit != -INFINITY;

        // If the token is not valid according to the grammar, perform resampling
        if (!is_valid) {
            LOG("Resampling because token %d: '%s' does not meet grammar rules\n", id, llama_token_to_piece(ctx_main, id).c_str());

            // Restore logits from the copy
            std::copy(original_logits.begin(), original_logits.end(), logits);

            return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, /* is_resampling= */ true);
        }
    }

    ctx_sampling->n_valid = temp == 0.0f ? 0 : cur_p.size;

    return id;
}

static llama_token_data_array llama_sampling_prepare_impl(
                  struct llama_sampling_context * ctx_sampling,
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx,
                  bool apply_grammar,
                  std::vector<float> * original_logits) {
    const llama_sampling_params & params = ctx_sampling->params;

    const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));

    const int32_t penalty_last_n  = params.penalty_last_n < 0 ? params.n_prev : params.penalty_last_n;
    const float   penalty_repeat  = params.penalty_repeat;
    const float   penalty_freq    = params.penalty_freq;
    const float   penalty_present = params.penalty_present;

    const bool    penalize_nl     = params.penalize_nl;

    auto & prev = ctx_sampling->prev;
    auto & cur  = ctx_sampling->cur;

    // Get a pointer to the logits
    float * logits = llama_get_logits_ith(ctx_main, idx);

    if (ctx_sampling->grammar != NULL && !apply_grammar) {
        LM_GGML_ASSERT(original_logits != NULL);
        // Only make a copy of the original logits if we are not applying grammar checks, not sure if I actually have to do this.
        *original_logits = {logits, logits + n_vocab};
    }

    // apply params.logit_bias map
    for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
        logits[it->first] += it->second;
    }

    if (ctx_cfg) {
        float * logits_guidance = llama_get_logits_ith(ctx_cfg, idx);
        llama_sample_apply_guidance(ctx_main, logits, logits_guidance, params.cfg_scale);
    }

    cur.resize(n_vocab);

    for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
        cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
    }

    llama_token_data_array cur_p = { cur.data(), cur.size(), false };

    // apply penalties
    const auto& penalty_tokens = params.use_penalty_prompt_tokens ? params.penalty_prompt_tokens : prev;
    const int penalty_tokens_used_size = std::min((int)penalty_tokens.size(), penalty_last_n);
    if (penalty_tokens_used_size) {
        const float nl_logit = logits[llama_token_nl(llama_get_model(ctx_main))];

        llama_sample_repetition_penalties(ctx_main, &cur_p,
                penalty_tokens.data() + penalty_tokens.size() - penalty_tokens_used_size,
                penalty_tokens_used_size, penalty_repeat, penalty_freq, penalty_present);

        if (!penalize_nl) {
            for (size_t idx = 0; idx < cur_p.size; idx++) {
                if (cur_p.data[idx].id == llama_token_nl(llama_get_model(ctx_main))) {
                    cur_p.data[idx].logit = nl_logit;
                    break;
                }
            }
        }
    }

    // apply grammar checks before sampling logic
    if (apply_grammar && ctx_sampling->grammar != NULL) {
        llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
    }

    return cur_p;
}

llama_token llama_sampling_sample(
                  struct llama_sampling_context * ctx_sampling,
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx) {
    // Call the implementation function with is_resampling set to false by default
    return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, /* is_resampling= */ false);
}

llama_token_data_array llama_sampling_prepare(
                  struct llama_sampling_context * ctx_sampling,
                  struct llama_context * ctx_main,
                  struct llama_context * ctx_cfg,
                  const int idx,
                  bool apply_grammar,
                  std::vector<float> * original_logits) {
    return llama_sampling_prepare_impl(ctx_sampling,ctx_main, ctx_cfg, idx, apply_grammar, original_logits);
}

void llama_sampling_accept(
        struct llama_sampling_context * ctx_sampling,
        struct llama_context * ctx_main,
        llama_token id,
        bool apply_grammar) {
    ctx_sampling->prev.erase(ctx_sampling->prev.begin());
    ctx_sampling->prev.push_back(id);

    if (ctx_sampling->grammar != NULL && apply_grammar) {
        llama_grammar_accept_token(ctx_main, ctx_sampling->grammar, id);
    }
}
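For orientation, this file defines the high-level sampling loop used by the bindings: llama_sampling_init builds a sampling context (parsing an optional grammar), llama_sampling_sample runs the configured sampler queue over the current logits, llama_sampling_accept feeds the chosen token back into the penalty and grammar state, and llama_sampling_free releases everything. A minimal sketch of that lifecycle follows. It is hypothetical, not part of the package: generate_n, last_idx and n_predict are made-up names, the llama_context must already hold an evaluated prompt, and model loading / llama_decode belong to the wider llama.cpp API rather than to this file.

#include "sampling.h"

// Hypothetical driver loop around the API defined above.
// `ctx` must already hold an evaluated prompt; `last_idx` is the batch
// position whose logits we sample from.
static void generate_n(llama_context * ctx, int last_idx, int n_predict) {
    llama_sampling_params sparams; // defaults are assumed to come from sampling.h
    sparams.temp  = 0.8f;
    sparams.top_p = 0.95f;

    llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);

    for (int i = 0; i < n_predict; i++) {
        // sample the next token (no CFG context here, so ctx_cfg is null)
        const llama_token id = llama_sampling_sample(ctx_sampling, ctx, nullptr, last_idx);

        // record it so repetition penalties and the grammar (if any) track history
        llama_sampling_accept(ctx_sampling, ctx, id, /* apply_grammar= */ true);

        // ... feed `id` back through llama_decode and update last_idx here ...
    }

    llama_sampling_free(ctx_sampling);
}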