cui-llama.rn 1.4.6 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/CMakeLists.txt +9 -2
- package/android/src/main/jni.cpp +52 -34
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/cpp/binary-ops.cpp +158 -0
- package/cpp/binary-ops.h +16 -0
- package/cpp/chat.cpp +1769 -1779
- package/cpp/chat.h +9 -1
- package/cpp/common.cpp +20 -522
- package/cpp/common.h +13 -36
- package/cpp/cpu-common.h +72 -0
- package/cpp/ggml-common.h +12 -6
- package/cpp/ggml-cpu-aarch64.cpp +1557 -80
- package/cpp/ggml-cpu-impl.h +2 -21
- package/cpp/ggml-cpu-quants.c +904 -405
- package/cpp/ggml-cpu.c +909 -13237
- package/cpp/ggml-impl.h +50 -23
- package/cpp/ggml-metal-impl.h +77 -3
- package/cpp/ggml-metal.m +794 -580
- package/cpp/ggml.c +92 -3
- package/cpp/ggml.h +29 -5
- package/cpp/gguf.cpp +1 -0
- package/cpp/llama-adapter.cpp +55 -20
- package/cpp/llama-adapter.h +11 -9
- package/cpp/llama-arch.cpp +217 -16
- package/cpp/llama-arch.h +25 -0
- package/cpp/llama-batch.h +2 -2
- package/cpp/llama-chat.cpp +54 -2
- package/cpp/llama-chat.h +3 -0
- package/cpp/llama-context.cpp +2294 -1238
- package/cpp/llama-context.h +214 -77
- package/cpp/llama-cparams.h +1 -0
- package/cpp/llama-graph.cpp +1695 -0
- package/cpp/llama-graph.h +592 -0
- package/cpp/llama-hparams.cpp +8 -0
- package/cpp/llama-hparams.h +17 -0
- package/cpp/llama-io.cpp +15 -0
- package/cpp/llama-io.h +35 -0
- package/cpp/llama-kv-cache.cpp +965 -303
- package/cpp/llama-kv-cache.h +145 -151
- package/cpp/llama-memory.cpp +1 -0
- package/cpp/llama-memory.h +21 -0
- package/cpp/llama-mmap.cpp +1 -1
- package/cpp/llama-model-loader.cpp +10 -5
- package/cpp/llama-model-loader.h +5 -3
- package/cpp/llama-model.cpp +9194 -201
- package/cpp/llama-model.h +40 -1
- package/cpp/llama-sampling.cpp +5 -0
- package/cpp/llama-vocab.cpp +36 -5
- package/cpp/llama.cpp +51 -9984
- package/cpp/llama.h +102 -22
- package/cpp/log.cpp +34 -0
- package/cpp/minja/chat-template.hpp +15 -7
- package/cpp/minja/minja.hpp +120 -94
- package/cpp/ops.cpp +8723 -0
- package/cpp/ops.h +128 -0
- package/cpp/rn-llama.cpp +44 -53
- package/cpp/rn-llama.h +2 -12
- package/cpp/sampling.cpp +3 -0
- package/cpp/sgemm.cpp +533 -88
- package/cpp/simd-mappings.h +888 -0
- package/cpp/speculative.cpp +4 -4
- package/cpp/unary-ops.cpp +186 -0
- package/cpp/unary-ops.h +28 -0
- package/cpp/vec.cpp +258 -0
- package/cpp/vec.h +802 -0
- package/ios/CMakeLists.txt +5 -2
- package/ios/RNLlama.mm +2 -2
- package/ios/RNLlamaContext.mm +40 -24
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +6 -4
- package/src/index.ts +3 -1
- package/cpp/chat-template.hpp +0 -529
- package/cpp/minja.hpp +0 -2915
package/cpp/ops.h
ADDED
@@ -0,0 +1,128 @@
+#pragma once
+
+#include "ggml.h"
+
+//
+// cache line
+//
+
+#if defined(__cpp_lib_hardware_interference_size)
+#define CACHE_LINE_SIZE std::hardware_destructive_interference_size
+#else
+#if defined(__POWER9_VECTOR__)
+#define CACHE_LINE_SIZE 128
+#elif defined(__VXE__) || defined(__VXE2__)
+#define CACHE_LINE_SIZE 256
+#else
+#define CACHE_LINE_SIZE 64
+#endif
+#endif
+
+static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void lm_ggml_compute_forward_dup(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_add(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_add1(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_acc(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_sum(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_sum_rows(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_mean(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_argmax(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_count_equal(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_repeat(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_repeat_back(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_concat(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_silu_back(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_norm(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_rms_norm(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_rms_norm_back(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_group_norm(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_l2_norm(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_out_prod(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_scale(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_set(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_cpy(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_cont(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_reshape(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_view(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_permute(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_transpose(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_get_rows(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_get_rows_back(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_diag(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_diag_mask_inf(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_diag_mask_zero(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_soft_max(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_soft_max_ext_back(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_rope(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_rope_back(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_clamp(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_conv_transpose_1d(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_im2col(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_im2col_back_f32(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_conv_transpose_2d(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_pool_1d(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_pool_2d(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_pool_2d_back(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_upscale(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_pad(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_pad_reflect_1d(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_arange(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_timestep_embedding(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_argsort(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_leaky_relu(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_flash_attn_ext(
+        const struct lm_ggml_compute_params * params,
+        const struct lm_ggml_tensor * q,
+        const struct lm_ggml_tensor * k,
+        const struct lm_ggml_tensor * v,
+        const struct lm_ggml_tensor * mask,
+        struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_flash_attn_back(
+        const struct lm_ggml_compute_params * params,
+        const bool masked,
+        struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_ssm_conv(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_ssm_scan(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_win_part(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_win_unpart(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_unary(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_get_rel_pos(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_add_rel_pos(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_rwkv_wkv6(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_rwkv_wkv7(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_gla(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_map_unary(
+        const struct lm_ggml_compute_params * params,
+        struct lm_ggml_tensor * dst,
+        const lm_ggml_unary_op_f32_t fun);
+void lm_ggml_compute_forward_map_binary(
+        const struct lm_ggml_compute_params * params,
+        struct lm_ggml_tensor * dst,
+        const lm_ggml_binary_op_f32_t fun);
+void lm_ggml_compute_forward_map_custom1_f32(
+        const struct lm_ggml_compute_params * params,
+        struct lm_ggml_tensor * dst,
+        const lm_ggml_custom1_op_f32_t fun);
+void lm_ggml_compute_forward_map_custom2_f32(
+        const struct lm_ggml_compute_params * params,
+        struct lm_ggml_tensor * dst,
+        const lm_ggml_custom2_op_f32_t fun);
+void lm_ggml_compute_forward_map_custom3_f32(
+        const struct lm_ggml_compute_params * params,
+        struct lm_ggml_tensor * dst,
+        const lm_ggml_custom3_op_f32_t fun);
+void lm_ggml_compute_forward_map_custom1(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_map_custom2(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_map_custom3(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_cross_entropy_loss(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_cross_entropy_loss_back(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_opt_step_adamw(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+
+#ifdef __cplusplus
+}
+#endif
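The new ops.h/ops.cpp pair pulls the per-operator CPU kernels out of ggml-cpu.c (which shrinks by roughly 13k lines in this release) into their own translation unit; the header above only declares one forward-compute entry point per operator. The sketch below is an illustration of how a CPU backend could dispatch into these kernels, not code from this package: the dst->op field and the LM_GGML_OP_* constants are assumed from the prefixed ggml API that cui-llama.rn vendors.

    // Illustrative dispatch sketch (assumed API, not from this diff)
    #include "ops.h"

    static void compute_forward_example(const struct lm_ggml_compute_params * params,
                                        struct lm_ggml_tensor * dst) {
        switch (dst->op) {
            case LM_GGML_OP_DUP:      lm_ggml_compute_forward_dup(params, dst);      break;
            case LM_GGML_OP_ADD:      lm_ggml_compute_forward_add(params, dst);      break;
            case LM_GGML_OP_SOFT_MAX: lm_ggml_compute_forward_soft_max(params, dst); break;
            // ... one case per operator declared in ops.h ...
            default: break;
        }
    }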
package/cpp/rn-llama.cpp
CHANGED
@@ -191,10 +191,9 @@ bool llama_rn_context::loadModel(common_params &params_)
    ctx = llama_init.context.get();
    if (model == nullptr)
    {
-       LOG_ERROR("unable to load model: %s", params_.model.c_str());
+       LOG_ERROR("unable to load model: %s", params_.model.path.c_str());
        return false;
    }
-
    templates = common_chat_templates_init(model, params.chat_template);
    n_ctx = llama_n_ctx(ctx);

@@ -220,54 +219,46 @@ common_chat_params llama_rn_context::getFormattedChatWithJinja(
    const bool &parallel_tool_calls,
    const std::string &tool_choice
) const {
-   … (previous implementation, 23 lines, not captured in this extract)
+   common_chat_templates_inputs inputs;
+   inputs.use_jinja = true;
+   inputs.messages = common_chat_msgs_parse_oaicompat(json::parse(messages));
+   auto useTools = !tools.empty();
+   if (useTools) {
+       inputs.tools = common_chat_tools_parse_oaicompat(json::parse(tools));
+   }
+   inputs.parallel_tool_calls = parallel_tool_calls;
+   if (!tool_choice.empty()) {
+       inputs.tool_choice = common_chat_tool_choice_parse_oaicompat(tool_choice);
+   }
+   if (!json_schema.empty()) {
+       inputs.json_schema = json::parse(json_schema);
+   }
+   inputs.extract_reasoning = params.reasoning_format != COMMON_REASONING_FORMAT_NONE;
+
+   // If chat_template is provided, create new one and use it (probably slow)
+   if (!chat_template.empty()) {
+       auto tmps = common_chat_templates_init(model, chat_template);
+       return common_chat_templates_apply(tmps.get(), inputs);
+   } else {
+       return common_chat_templates_apply(templates.get(), inputs);
+   }
}

std::string llama_rn_context::getFormattedChat(
    const std::string &messages,
    const std::string &chat_template
) const {
-   … (12 removed lines not captured in this extract)
-   // If chat_template is provided, create new one and use it (probably slow)
-   if (!chat_template.empty()) {
-       auto tmp = common_chat_templates_init(model, chat_template);
-       return common_chat_templates_apply(tmp.get(), inputs).prompt;
-   } else {
-       return common_chat_templates_apply(templates.get(), inputs).prompt;
-   }
+   common_chat_templates_inputs inputs;
+   inputs.messages = common_chat_msgs_parse_oaicompat(json::parse(messages));
+   inputs.use_jinja = false;
+
+   // If chat_template is provided, create new one and use it (probably slow)
+   if (!chat_template.empty()) {
+       auto tmps = common_chat_templates_init(model, chat_template);
+       return common_chat_templates_apply(tmps.get(), inputs).prompt;
+   } else {
+       return common_chat_templates_apply(templates.get(), inputs).prompt;
+   }
}

void llama_rn_context::truncatePrompt(std::vector<llama_token> &prompt_tokens) {
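Both chat-formatting methods are rewritten on top of the common_chat_templates_* helpers from chat.h instead of the locally defined minja-based struct that rn-llama.h used to declare. A minimal, hedged sketch of that API shape follows; the json.hpp include and exact header layout are assumptions, while the calls themselves mirror the ones in the hunk above.

    // Minimal sketch of the chat.h template API used above (assumed includes)
    #include "chat.h"
    #include "json.hpp"

    using json = nlohmann::ordered_json;

    static std::string render_prompt(const llama_model * model, const std::string & messages_json) {
        // parse an OpenAI-style messages array and apply the model's built-in template
        common_chat_templates_ptr tmpls = common_chat_templates_init(model, /* chat_template */ "");

        common_chat_templates_inputs inputs;
        inputs.use_jinja = false;
        inputs.messages  = common_chat_msgs_parse_oaicompat(json::parse(messages_json));

        return common_chat_templates_apply(tmpls.get(), inputs).prompt;
    }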
@@ -342,7 +333,7 @@ void llama_rn_context::loadPrompt() {
    }

    // since #3228 we now have to manually manage the KV cache
-   …
+   llama_kv_self_seq_rm(ctx, 0, n_past, -1);

    LOG_VERBOSE("prompt ingested, n_past: %d, cached: %s, to_eval: %s",
        n_past,
@@ -372,8 +363,8 @@ completion_token_output llama_rn_context::nextToken()
    const int n_left = n_past - params.n_keep - 1;
    const int n_discard = n_left/2;

-   …
-   …
+   llama_kv_self_seq_rm (ctx, 0, params.n_keep + 1 , params.n_keep + n_discard + 1);
+   llama_kv_self_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard);

    for (size_t i = params.n_keep + 1 + n_discard; i < embd.size(); i++)
    {
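Throughout this file the old KV-cache calls are replaced with the llama_kv_self_* family. In the context shift above the two calls work as a pair: the first drops n_discard cells immediately after the params.n_keep prefix, the second slides the positions of everything that follows down by n_discard so the cache stays contiguous. A small sketch of the same pattern, using only the calls visible in this diff (illustration, not package code):

    // Context-shift helper mirroring the calls in the hunk above
    static void shift_kv(llama_context * ctx, int n_keep, int n_discard, int n_past) {
        // remove cells at positions [n_keep + 1, n_keep + 1 + n_discard)
        llama_kv_self_seq_rm (ctx, 0, n_keep + 1, n_keep + n_discard + 1);
        // shift positions [n_keep + 1 + n_discard, n_past) left by n_discard
        llama_kv_self_seq_add(ctx, 0, n_keep + 1 + n_discard, n_past, -n_discard);
    }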
@@ -627,7 +618,7 @@ std::string llama_rn_context::bench(int pp, int tg, int pl, int nr)
    }
    batch.logits[batch.n_tokens - 1] = 1; // true

-   …
+   llama_kv_self_clear(ctx);

    const int64_t t_pp_start = llama_time_us();
    if (llama_decode(ctx, batch) != 0)
@@ -635,7 +626,7 @@ std::string llama_rn_context::bench(int pp, int tg, int pl, int nr)
        LOG_ERROR("llama_decode() failed during prompt", "");
    }
    const int64_t t_pp_end = llama_time_us();
-   …
+   llama_kv_self_clear(ctx);

    if (is_interrupted) break;

@@ -659,7 +650,7 @@ std::string llama_rn_context::bench(int pp, int tg, int pl, int nr)

    const int64_t t_tg_end = llama_time_us();

-   …
+   llama_kv_self_clear(ctx);

    const double t_pp = (t_pp_end - t_pp_start) / 1000000.0;
    const double t_tg = (t_tg_end - t_tg_start) / 1000000.0;
@@ -685,7 +676,7 @@ std::string llama_rn_context::bench(int pp, int tg, int pl, int nr)
        tg_std = 0;
    }

-   if (is_interrupted)
+   if (is_interrupted) llama_kv_self_clear(ctx);
    is_predicting = false;

    char model_desc[128];
@@ -863,8 +854,8 @@ void llama_rn_context::purge_missing_tokens(llama_context * ctx, std::vector<int

    //extract the unwanted tokens out from context and KV
    int diff = found - trimstart;
-   …
-   …
+   llama_kv_self_seq_rm(ctx, 0, trimstart, trimstart + diff);
+   llama_kv_self_seq_add(ctx, 0, trimstart + diff, -1, -diff);

    for (size_t i = trimstart + diff; i < current_context_tokens.size() - 1; i++)
    {
package/cpp/rn-llama.h
CHANGED
@@ -3,9 +3,8 @@

#include <sstream>
#include <iostream>
-#include "chat-template.hpp"
-#include "common.h"
#include "chat.h"
+#include "common.h"
#include "ggml.h"
#include "gguf.h"
#include "llama.h"
@@ -15,15 +14,6 @@
#include <android/log.h>
#endif

-using json = nlohmann::ordered_json;
-typedef minja::chat_template common_chat_template;
-
-struct common_chat_templates {
-    bool has_explicit_template;
-    std::unique_ptr<common_chat_template> template_default;
-    std::unique_ptr<common_chat_template> template_tool_use;
-};
-
namespace rnllama {

@@ -75,7 +65,7 @@ struct llama_rn_context {

    llama_context *ctx = nullptr;
    common_sampler *ctx_sampling = nullptr;
-   common_chat_templates_ptr templates
+   common_chat_templates_ptr templates;

    int n_ctx;

package/cpp/sampling.cpp
CHANGED
@@ -208,6 +208,9 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
            trigger_patterns_c.data(), trigger_patterns_c.size(),
            trigger_tokens.data(), trigger_tokens.size())
        : llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root");
+       if (!grmr) {
+           return nullptr;
+       }
    }

    auto * result = new common_sampler {
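With the added check, a grammar that fails to compile now surfaces as a nullptr return from common_sampler_init instead of propagating an invalid sampler. A hedged caller-side sketch follows; the sampling.h include and the common_params::sampling field are assumed from the vendored llama.cpp common code rather than taken from this diff.

    // Caller-side handling sketch (assumed signature: common_sampler_init(model, params.sampling))
    #include "sampling.h"

    static bool init_sampler_checked(const llama_model * model, const common_params & params,
                                     common_sampler ** out) {
        common_sampler * smpl = common_sampler_init(model, params.sampling);
        if (smpl == nullptr) {
            return false; // e.g. invalid GBNF grammar string
        }
        *out = smpl;
        return true;
    }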