@fugood/llama.node 1.4.11 → 1.4.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +15 -15
- package/scripts/llama.cpp.patch +31 -31
- package/src/llama.cpp/common/arg.cpp +128 -59
- package/src/llama.cpp/common/arg.h +1 -0
- package/src/llama.cpp/common/chat-parser.cpp +11 -0
- package/src/llama.cpp/common/chat.cpp +36 -7
- package/src/llama.cpp/common/chat.h +1 -0
- package/src/llama.cpp/common/common.cpp +42 -23
- package/src/llama.cpp/common/common.h +11 -1
- package/src/llama.cpp/common/llguidance.cpp +10 -6
- package/src/llama.cpp/common/regex-partial.cpp +13 -13
- package/src/llama.cpp/common/sampling.cpp +58 -14
- package/src/llama.cpp/common/sampling.h +3 -1
- package/src/llama.cpp/ggml/CMakeLists.txt +13 -1
- package/src/llama.cpp/ggml/include/ggml-backend.h +1 -1
- package/src/llama.cpp/ggml/src/CMakeLists.txt +23 -9
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +12 -2
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +1 -1
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +86 -25
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +15 -8
- package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +768 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +0 -4
- package/src/llama.cpp/include/llama.h +100 -12
- package/src/llama.cpp/src/CMakeLists.txt +4 -0
- package/src/llama.cpp/src/llama-adapter.cpp +12 -3
- package/src/llama.cpp/src/llama-adapter.h +7 -1
- package/src/llama.cpp/src/llama-arch.cpp +78 -0
- package/src/llama.cpp/src/llama-arch.h +8 -0
- package/src/llama.cpp/src/llama-chat.cpp +11 -0
- package/src/llama.cpp/src/llama-chat.h +1 -0
- package/src/llama.cpp/src/llama-context.cpp +637 -49
- package/src/llama.cpp/src/llama-context.h +43 -1
- package/src/llama.cpp/src/llama-grammar.cpp +40 -13
- package/src/llama.cpp/src/llama-grammar.h +2 -0
- package/src/llama.cpp/src/llama-graph.cpp +173 -5
- package/src/llama.cpp/src/llama-graph.h +71 -6
- package/src/llama.cpp/src/llama-hparams.cpp +4 -0
- package/src/llama.cpp/src/llama-hparams.h +12 -5
- package/src/llama.cpp/src/llama-kv-cache.h +1 -1
- package/src/llama.cpp/src/llama-mmap.cpp +11 -4
- package/src/llama.cpp/src/llama-model-loader.cpp +23 -0
- package/src/llama.cpp/src/llama-model-loader.h +2 -0
- package/src/llama.cpp/src/llama-model-saver.cpp +3 -0
- package/src/llama.cpp/src/llama-model.cpp +337 -26
- package/src/llama.cpp/src/llama-model.h +13 -2
- package/src/llama.cpp/src/llama-sampling.cpp +1259 -186
- package/src/llama.cpp/src/llama-sampling.h +19 -7
- package/src/llama.cpp/src/llama-vocab.cpp +101 -33
- package/src/llama.cpp/src/llama-vocab.h +2 -0
- package/src/llama.cpp/src/llama.cpp +87 -64
- package/src/llama.cpp/src/models/afmoe.cpp +9 -5
- package/src/llama.cpp/src/models/bert.cpp +4 -2
- package/src/llama.cpp/src/models/cogvlm.cpp +5 -3
- package/src/llama.cpp/src/models/cohere2-iswa.cpp +3 -0
- package/src/llama.cpp/src/models/deepseek2.cpp +1 -1
- package/src/llama.cpp/src/models/gemma-embedding.cpp +2 -6
- package/src/llama.cpp/src/models/gemma2-iswa.cpp +5 -2
- package/src/llama.cpp/src/models/gemma3.cpp +3 -4
- package/src/llama.cpp/src/models/gemma3n-iswa.cpp +4 -7
- package/src/llama.cpp/src/models/llama-iswa.cpp +6 -2
- package/src/llama.cpp/src/models/llama.cpp +19 -6
- package/src/llama.cpp/src/models/maincoder.cpp +117 -0
- package/src/llama.cpp/src/models/mimo2-iswa.cpp +123 -0
- package/src/llama.cpp/src/models/models.h +18 -0
- package/src/llama.cpp/src/models/modern-bert.cpp +116 -0
- package/src/llama.cpp/src/models/openai-moe-iswa.cpp +5 -2
- package/src/llama.cpp/src/models/plamo3.cpp +128 -0
- package/src/llama.cpp/src/models/smallthinker.cpp +11 -5
- package/src/llama.cpp/src/unicode.cpp +23 -14

package/src/llama.cpp/src/models/maincoder.cpp
@@ -0,0 +1,117 @@
+#include "models.h"
+
+llm_build_maincoder::llm_build_maincoder(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+    const int64_t n_embd_head = hparams.n_embd_head_v;
+
+    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+    GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+    ggml_tensor * cur;
+    ggml_tensor * inpL;
+
+    inpL = build_inp_embd(model.tok_embd);
+
+    // inp_pos - contains the positions
+    ggml_tensor * inp_pos = build_inp_pos();
+
+    auto * inp_attn = build_attn_inp_kv();
+
+    ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+    for (int il = 0; il < n_layer; ++il) {
+        ggml_tensor * inpSA = inpL;
+
+        // norm
+        cur = build_norm(inpL,
+                model.layers[il].attn_norm, NULL,
+                LLM_NORM_RMS, il);
+        cb(cur, "attn_norm", il);
+
+        // self-attention
+        {
+            // compute Q and K and RoPE them
+            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+            cb(Qcur, "Qcur", il);
+
+            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+            cb(Kcur, "Kcur", il);
+
+            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+            cb(Vcur, "Vcur", il);
+
+            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+            Qcur = ggml_rope_ext(
+                    ctx0, Qcur, inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                    );
+
+            Kcur = ggml_rope_ext(
+                    ctx0, Kcur, inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                    );
+
+            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
+            cb(Qcur, "Qcur_normed", il);
+
+            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
+            cb(Kcur, "Kcur_normed", il);
+
+            cb(Qcur, "Qcur", il);
+            cb(Kcur, "Kcur", il);
+            cb(Vcur, "Vcur", il);
+
+            cur = build_attn(inp_attn,
+                    model.layers[il].wo, model.layers[il].bo,
+                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+        }
+        if (il == n_layer - 1 && inp_out_ids) {
+            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+        }
+        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+        cb(ffn_inp, "ffn_inp", il);
+
+        // feed-forward network
+        cur = build_norm(ffn_inp,
+                model.layers[il].ffn_norm, NULL,
+                LLM_NORM_RMS, il);
+        cb(cur, "ffn_norm", il);
+
+        cur = build_ffn(cur,
+                model.layers[il].ffn_up, NULL, NULL,
+                model.layers[il].ffn_gate, NULL, NULL,
+                model.layers[il].ffn_down, NULL, NULL,
+                NULL,
+                LLM_FFN_SILU, LLM_FFN_PAR, il);
+        cb(cur, "ffn_out", il);
+
+        cur = ggml_add(ctx0, cur, ffn_inp);
+
+        cur = build_cvec(cur, il);
+        cb(cur, "l_out", il);
+
+        // input for next layer
+        inpL = cur;
+    }
+    cur = inpL;
+
+    cur = build_norm(cur,
+            model.output_norm, NULL,
+            LLM_NORM_RMS, -1);
+
+    cb(cur, "result_norm", -1);
+    res->t_embd = cur;
+
+    // lm_head
+    cur = build_lora_mm(model.output, cur);
+
+    cb(cur, "result_output", -1);
+    res->t_logits = cur;
+
+    ggml_build_forward_expand(gf, cur);
+}

package/src/llama.cpp/src/models/mimo2-iswa.cpp
@@ -0,0 +1,123 @@
+
+#include "models.h"
+
+llm_build_mimo2_iswa::llm_build_mimo2_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+    ggml_tensor * cur;
+    ggml_tensor * inpL;
+
+    inpL = build_inp_embd(model.tok_embd);
+
+    ggml_tensor * inp_pos = build_inp_pos();
+    auto * inp_attn = build_attn_inp_kv_iswa();
+    ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+    for (int il = 0; il < n_layer; ++il) {
+        ggml_tensor * inpSA = inpL;
+
+        uint32_t n_head_l = hparams.n_head(il);
+        uint32_t n_head_kv_l = hparams.n_head_kv(il);
+        const float freq_base_l = model.get_rope_freq_base(cparams, il);
+        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
+
+        cur = inpL;
+
+        // self_attention
+        {
+            cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
+            cb(cur, "attn_norm", il);
+
+            // compute Q and K and RoPE them
+            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+            cb(Qcur, "Qcur", il);
+
+            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+            cb(Kcur, "Kcur", il);
+
+            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+            cb(Vcur, "Vcur", il);
+
+            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head_l, n_tokens);
+            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv_l, n_tokens);
+            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head_v, n_head_kv_l, n_tokens);
+
+            Qcur = ggml_rope_ext(
+                ctx0, Qcur, inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
+                ext_factor, attn_factor, beta_fast, beta_slow
+            );
+
+            Kcur = ggml_rope_ext(
+                ctx0, Kcur, inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
+                ext_factor, attn_factor, beta_fast, beta_slow
+            );
+
+            cb(Qcur, "Qcur", il);
+            cb(Kcur, "Kcur", il);
+            cb(Vcur, "Vcur", il);
+
+            ggml_tensor * sinks = model.layers[il].attn_sinks;
+
+            cur = build_attn(inp_attn,
+                model.layers[il].wo, NULL,
+                Qcur, Kcur, Vcur, nullptr, sinks, nullptr, 1.0f/sqrtf(float(n_embd_head_k)), il);
+        }
+
+        if (il == n_layer - 1 && inp_out_ids) {
+            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+        }
+
+        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+        cb(ffn_inp, "ffn_inp", il);
+
+        cur = build_norm(ffn_inp,
+                model.layers[il].ffn_norm, NULL,
+                LLM_NORM_RMS, il);
+        cb(cur, "ffn_norm", il);
+
+        // feed-forward network
+        if (model.layers[il].ffn_gate_inp == nullptr) {
+            // dense branch
+            cur = build_ffn(cur,
+                model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
+                model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
+                model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                NULL,
+                LLM_FFN_SILU, LLM_FFN_PAR, il);
+            cb(cur, "ffn_out", il);
+        } else {
+            // MoE branch
+            cur = build_moe_ffn(cur, model.layers[il].ffn_gate_inp, model.layers[il].ffn_up_exps,
+                model.layers[il].ffn_gate_exps, model.layers[il].ffn_down_exps,
+                model.layers[il].ffn_exp_probs_b, n_expert, n_expert_used, LLM_FFN_SILU, true, false,
+                0.0, LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID, il);
+            cb(cur, "ffn_moe_out", il);
+        }
+
+        cur = ggml_add(ctx0, cur, ffn_inp);
+
+        cur = build_cvec(cur, il);
+        cb(cur, "l_out", il);
+
+        // input for next layer
+        inpL = cur;
+    }
+
+    cur = inpL;
+
+    cur = build_norm(cur,
+            model.output_norm, NULL,
+            LLM_NORM_RMS, -1);
+
+    cb(cur, "result_norm", -1);
+    res->t_embd = cur;
+
+    // lm_head
+    cur = build_lora_mm(model.output, cur);
+
+    cb(cur, "result_output", -1);
+    res->t_logits = cur;
+
+    ggml_build_forward_expand(gf, cur);
+}

package/src/llama.cpp/src/models/models.h
@@ -303,6 +303,7 @@ struct llm_build_llada_moe : public llm_graph_context {
     llm_build_llada_moe(const llama_model & model, const llm_graph_params & params);
 };
 
+template <bool embed>
 struct llm_build_llama : public llm_graph_context {
     llm_build_llama(const llama_model & model, const llm_graph_params & params);
 };
@@ -311,10 +312,18 @@ struct llm_build_llama_iswa : public llm_graph_context {
     llm_build_llama_iswa(const llama_model & model, const llm_graph_params & params);
 };
 
+struct llm_build_maincoder : public llm_graph_context {
+    llm_build_maincoder(const llama_model & model, const llm_graph_params & params);
+};
+
 struct llm_build_mamba : public llm_graph_context_mamba {
     llm_build_mamba(const llama_model & model, const llm_graph_params & params);
 };
 
+struct llm_build_mimo2_iswa : public llm_graph_context {
+    llm_build_mimo2_iswa(const llama_model & model, const llm_graph_params & params);
+};
+
 struct llm_build_minicpm3 : public llm_graph_context {
     llm_build_minicpm3(const llama_model & model, const llm_graph_params & params);
 };
@@ -327,6 +336,10 @@ struct llm_build_mistral3 : public llm_graph_context {
     llm_build_mistral3(const llama_model & model, const llm_graph_params & params);
 };
 
+struct llm_build_modern_bert : public llm_graph_context {
+    llm_build_modern_bert(const llama_model & model, const llm_graph_params & params);
+};
+
 struct llm_build_mpt : public llm_graph_context {
     llm_build_mpt(const llama_model & model, const llm_graph_params & params);
 };
@@ -396,6 +409,11 @@ struct llm_build_plamo : public llm_graph_context {
     llm_build_plamo(const llama_model & model, const llm_graph_params & params);
 };
 
+template <bool iswa>
+struct llm_build_plamo3 : public llm_graph_context {
+    llm_build_plamo3(const llama_model & model, const llm_graph_params & params);
+};
+
 struct llm_build_plm : public llm_graph_context {
     llm_build_plm(const llama_model & model, const llm_graph_params & params);
 };
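
Note: the new `llm_build_llama<embed>` and `llm_build_plamo3<iswa>` declarations follow the pattern already visible in `llm_build_smallthinker<iswa>` below: a bool template parameter picks the graph variant at compile time, and the .cpp file provides explicit instantiations for both values. A minimal standalone sketch of that pattern, with illustrative names only (not the llama.cpp sources):

#include <cstdio>

struct graph_context_stub { };

// One constructor body serves two compile-time variants.
template <bool iswa>
struct build_example : graph_context_stub {
    build_example() {
        if constexpr (iswa) {
            std::printf("building sliding-window attention variant\n");
        } else {
            std::printf("building full-attention variant\n");
        }
    }
};

// Explicit instantiations keep the definition in one translation unit,
// as done for llm_build_plamo3<false> / llm_build_plamo3<true>.
template struct build_example<false>;
template struct build_example<true>;

int main() {
    build_example<false> dense;
    build_example<true>  swa;
}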

package/src/llama.cpp/src/models/modern-bert.cpp
@@ -0,0 +1,116 @@
+#include "models.h"
+
+llm_build_modern_bert::llm_build_modern_bert(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+    const int64_t n_embd_head = hparams.n_embd_head_v;
+    const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
+
+    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+
+    ggml_tensor * cur;
+    ggml_tensor * inpL;
+    ggml_tensor * inp_pos = build_inp_pos();
+
+    // construct input embeddings (token, type, position)
+    inpL = build_inp_embd(model.tok_embd);
+    cb(inpL, "inp_embd", -1);
+
+    // embed layer norm
+    inpL = build_norm(inpL, model.tok_norm, nullptr, LLM_NORM, -1);
+    cb(inpL, "inp_norm", -1);
+
+    ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+    auto * inp_attn = build_attn_inp_no_cache();
+
+    for (int il = 0; il < n_layer; ++il) {
+        const float freq_base_l = model.get_rope_freq_base(cparams, il);
+        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
+
+        cur = inpL;
+
+        // attention layer norm
+        if (model.layers[il].attn_norm) {
+            cur = build_norm(inpL,
+                    model.layers[il].attn_norm, NULL,
+                    LLM_NORM, il);
+            cb(cur, "attn_norm", il);
+        }
+
+        // self attention
+        cur = build_lora_mm(model.layers[il].wqkv, cur);
+        cb(cur, "wqkv", il);
+
+        const size_t type_size = ggml_type_size(cur->type);
+
+        ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*type_size, cur->nb[1], 0*type_size*(n_embd));
+        ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*type_size, cur->nb[1], 1*type_size*(n_embd));
+        ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*type_size, cur->nb[1], 1*type_size*(n_embd + n_embd_gqa));
+
+        // RoPE
+        Qcur = ggml_rope_ext(
+                ctx0, Qcur, inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
+                ext_factor, attn_factor, beta_fast, beta_slow
+                );
+
+        Kcur = ggml_rope_ext(
+                ctx0, Kcur, inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
+                ext_factor, attn_factor, beta_fast, beta_slow
+                );
+
+        cb(Qcur, "Qcur", il);
+        cb(Kcur, "Kcur", il);
+        cb(Vcur, "Vcur", il);
+
+        cur = build_attn(inp_attn,
+                model.layers[il].wo, nullptr,
+                Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+        cb(cur, "kqv_out", il);
+
+        if (il == n_layer - 1 && inp_out_ids) {
+            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
+        }
+
+        // re-add the layer input
+        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
+        cb(ffn_inp, "ffn_inp", il);
+
+        // attention layer norm
+        cur = build_norm(ffn_inp,
+                model.layers[il].ffn_norm, NULL,
+                LLM_NORM, il);
+        cb(cur, "ffn_norm", il);
+
+        cur = build_ffn(cur,
+                model.layers[il].ffn_up, NULL, NULL,
+                NULL, NULL, NULL,
+                model.layers[il].ffn_down, NULL, NULL,
+                NULL,
+                LLM_FFN_GEGLU, LLM_FFN_SEQ, il);
+
+        // attentions bypass the intermediate layer
+        cur = ggml_add(ctx0, cur, ffn_inp);
+
+        // input for next layer
+        inpL = cur;
+    }
+
+    cur = inpL;
+
+    cur = build_norm(cur,
+            model.output_norm, NULL,
+            LLM_NORM, -1);
+    cb(cur, "final_norm_out", -1);
+
+    if (hparams.pooling_type == LLAMA_POOLING_TYPE_CLS) {
+        // extracting cls token
+        cur = ggml_view_1d(ctx0, cur, hparams.n_embd, 0);
+        cb(cur, "cls_pooled_embd", -1);
+    }
+
+    cb(cur, "res_embd", -1);
+    res->t_embd = cur;
+    ggml_build_forward_expand(gf, cur);
+}

package/src/llama.cpp/src/models/openai-moe-iswa.cpp
@@ -14,6 +14,9 @@ llm_build_openai_moe_iswa::llm_build_openai_moe_iswa(const llama_model & model,
     ggml_tensor * inp_out_ids = build_inp_out_ids();
 
     for (int il = 0; il < n_layer; ++il) {
+        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
+        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
+
         ggml_tensor * inpSA = inpL;
 
         // norm
@@ -49,13 +52,13 @@ llm_build_openai_moe_iswa::llm_build_openai_moe_iswa(const llama_model & model,
 
             Qcur = ggml_rope_ext(
                     ctx0, Qcur, inp_pos, nullptr,
-                    n_rot, rope_type, n_ctx_orig,
+                    n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                     ext_factor, attn_factor, beta_fast, beta_slow
                     );
 
             Kcur = ggml_rope_ext(
                     ctx0, Kcur, inp_pos, nullptr,
-                    n_rot, rope_type, n_ctx_orig,
+                    n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                     ext_factor, attn_factor, beta_fast, beta_slow
                     );
 

package/src/llama.cpp/src/models/plamo3.cpp
@@ -0,0 +1,128 @@
+#include "models.h"
+
+template <bool iswa>
+llm_build_plamo3<iswa>::llm_build_plamo3(const llama_model & model, const llm_graph_params & params) :
+    llm_graph_context(params) {
+    const int64_t head_dim_q = hparams.n_embd_head_k;
+    const int64_t head_dim_v = hparams.n_embd_head_v;
+
+    ggml_tensor * cur;
+    ggml_tensor * inpL = build_inp_embd(model.tok_embd);
+    ggml_tensor * inp_pos = build_inp_pos();
+
+    using inp_attn_type = std::conditional_t<iswa, llm_graph_input_attn_kv_iswa, llm_graph_input_attn_kv>;
+    inp_attn_type * inp_attn = nullptr;
+
+    if constexpr (iswa) {
+        inp_attn = build_attn_inp_kv_iswa();
+    } else {
+        inp_attn = build_attn_inp_kv();
+    }
+
+    ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+    for (int il = 0; il < n_layer; ++il) {
+        ggml_tensor * residual = inpL;
+
+        float freq_base_l = 0.0f;
+        float freq_scale_l = 0.0f;
+        if constexpr (iswa) {
+            freq_base_l = model.get_rope_freq_base (cparams, il);
+            freq_scale_l = model.get_rope_freq_scale(cparams, il);
+        } else {
+            freq_base_l = freq_base;
+            freq_scale_l = freq_scale;
+        }
+
+        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
+        cb(cur, "attn_norm", il);
+
+        ggml_tensor * qkv = build_lora_mm(model.layers[il].wqkv, cur);
+        cb(cur, "wqkv", il);
+
+        const int32_t n_head    = hparams.n_head(il);
+        const int32_t n_head_kv = hparams.n_head_kv(il);
+
+        const int64_t q_offset = 0;
+        const int64_t k_offset = head_dim_q * n_head;
+        const int64_t v_offset = k_offset + head_dim_q * n_head_kv;
+
+        ggml_tensor * Qcur = ggml_view_3d(ctx0, qkv, head_dim_q, n_head, n_tokens,
+                head_dim_q * sizeof(float), qkv->nb[1], q_offset * ggml_element_size(qkv));
+        ggml_tensor * Kcur = ggml_view_3d(ctx0, qkv, head_dim_q, n_head_kv, n_tokens,
+                head_dim_q * sizeof(float), qkv->nb[1], k_offset * ggml_element_size(qkv));
+        ggml_tensor * Vcur = ggml_view_3d(ctx0, qkv, head_dim_v, n_head_kv, n_tokens,
+                head_dim_v * sizeof(float), qkv->nb[1], v_offset * ggml_element_size(qkv));
+
+        cb(Qcur, "Qcur", il);
+        cb(Kcur, "Kcur", il);
+        cb(Vcur, "Vcur", il);
+
+        Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
+        cb(Qcur, "attn_q_norm", il);
+        Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
+        cb(Kcur, "attn_k_norm", il);
+
+        Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
+                ext_factor, attn_factor, beta_fast, beta_slow);
+        Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
+                ext_factor, attn_factor, beta_fast, beta_slow);
+
+        const float attn_scale = 1.0f / sqrtf(float(head_dim_q));
+
+        cur = build_attn(inp_attn,
+                model.layers[il].wo, NULL,
+                Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, attn_scale, il);
+        cb(cur, "attn_out", il);
+
+        if (il == n_layer - 1 && inp_out_ids) {
+            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+            residual = ggml_get_rows(ctx0, residual, inp_out_ids);
+        }
+
+        cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il);
+        cb(cur, "attn_post_norm", il);
+
+        cur = ggml_add(ctx0, cur, residual);
+        cb(cur, "attn_residual", il);
+
+        residual = cur;
+
+        cur = build_norm(cur, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
+        cb(cur, "ffn_norm", il);
+
+        cur = build_ffn(cur,
+                model.layers[il].ffn_up, NULL, NULL,
+                NULL, NULL, NULL,
+                model.layers[il].ffn_down, NULL, NULL,
+                NULL,
+                LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
+        cb(cur, "ffn_out", il);
+
+        cur = build_norm(cur, model.layers[il].ffn_post_norm, NULL, LLM_NORM_RMS, il);
+        cb(cur, "ffn_post_norm", il);
+
+        cur = ggml_add(ctx0, cur, residual);
+        cb(cur, "ffn_residual", il);
+
+        cur = build_cvec(cur, il);
+        cb(cur, "l_out", il);
+        inpL = cur;
+    }
+
+    cur = inpL;
+
+    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);
+    res->t_embd = cur;
+
+    cur = build_lora_mm(model.output, cur);
+    res->t_logits = cur;
+
+    ggml_build_forward_expand(gf, cur);
+}
+
+// Explicit template instantiations
+template struct llm_build_plamo3<false>;
+template struct llm_build_plamo3<true>;
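
Note: the plamo3 graph slices a single fused QKV projection with ggml_view_3d, using element offsets derived from the per-layer head counts. A small worked example of that offset arithmetic, with made-up head sizes (the real values come from hparams):

#include <cstdio>
#include <cstdint>

int main() {
    // Illustrative sizes only; in the graph these come from hparams.
    const int64_t head_dim_q = 128, head_dim_v = 128;
    const int64_t n_head = 32, n_head_kv = 4;

    const int64_t q_offset = 0;                                  // Q block starts the fused row
    const int64_t k_offset = head_dim_q * n_head;                // K block follows all Q heads
    const int64_t v_offset = k_offset + head_dim_q * n_head_kv;  // V block follows all K heads
    const int64_t row_size = v_offset + head_dim_v * n_head_kv;  // total fused row width

    std::printf("q at %lld, k at %lld, v at %lld, fused row width %lld elements\n",
                (long long) q_offset, (long long) k_offset,
                (long long) v_offset, (long long) row_size);
}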

package/src/llama.cpp/src/models/smallthinker.cpp
@@ -26,10 +26,16 @@ llm_build_smallthinker<iswa>::llm_build_smallthinker(const llama_model & model,
     ggml_tensor * inp_out_ids = build_inp_out_ids();
 
     for (int il = 0; il < n_layer; ++il) {
+        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
+        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
+
         ggml_tensor * inpSA = inpL;
-        ggml_tensor * probs = nullptr;
 
-
+        // This overlaps with SWA layers in current models, so get_rope_freq_base/scale may be superfluous
+        const bool use_rope = hparams.n_no_rope_layer_step == n_layer ||
+                              il % hparams.n_no_rope_layer_step != 0;
+
+        ggml_tensor * probs = build_lora_mm(model.layers[il].ffn_gate_inp, inpL); // [n_expert, n_tokens]
         cb(probs, "ffn_moe_logits", il);
 
         // norm
@@ -52,11 +58,11 @@ llm_build_smallthinker<iswa>::llm_build_smallthinker(const llama_model & model,
         Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
         Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
 
-        if (
-            Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
+        if (use_rope) {
+            Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                                  ext_factor, attn_factor, beta_fast, beta_slow);
 
-            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
+            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                                  ext_factor, attn_factor, beta_fast, beta_slow);
         }
         cb(Qcur, "Qcur", il);
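
Note: the new use_rope flag reproduces the layer selection implied by hparams.n_no_rope_layer_step: RoPE is applied on every layer unless the layer index is a multiple of the step, and the step being equal to n_layer disables the skip entirely. A hypothetical standalone sketch of that selection, with an assumed step of 4:

#include <cstdio>
#include <cstdint>

int main() {
    const int32_t  n_layer = 8;
    const uint32_t n_no_rope_layer_step = 4; // assumed value for illustration

    for (int il = 0; il < n_layer; ++il) {
        // Same condition as in the diff: skip RoPE on every n-th layer
        // unless the step equals the total layer count.
        const bool use_rope = n_no_rope_layer_step == (uint32_t) n_layer ||
                              il % n_no_rope_layer_step != 0;
        std::printf("layer %d: %s\n", il, use_rope ? "rope" : "no rope");
    }
}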

package/src/llama.cpp/src/unicode.cpp
@@ -964,6 +964,11 @@ std::vector<std::string> unicode_regex_split(const std::string & text, const std
         { "\\p{P}", unicode_cpt_flags::PUNCTUATION },
         { "\\p{M}", unicode_cpt_flags::ACCENT_MARK },
         { "\\p{S}", unicode_cpt_flags::SYMBOL },
+        { "\\p{Lu}", unicode_cpt_flags::LETTER }, // Uppercase letter
+        { "\\p{Ll}", unicode_cpt_flags::LETTER }, // Lowercase letter
+        { "\\p{Lt}", unicode_cpt_flags::LETTER }, // Titlecase letter
+        { "\\p{Lm}", unicode_cpt_flags::LETTER }, // Modifier letter
+        { "\\p{Lo}", unicode_cpt_flags::LETTER }, // Other letter
     };
 
     static const std::map<int, int> k_ucat_cpt = {
@@ -1074,22 +1079,26 @@ std::vector<std::string> unicode_regex_split(const std::string & text, const std
                 continue;
             }
 
-
+            // Match \p{...} Unicode properties of varying lengths
+            if (regex_expr[i + 0] == '\\' && i + 3 < regex_expr.size() &&
                 regex_expr[i + 1] == 'p' &&
-                regex_expr[i + 2] == '{'
-
-
-            if (
-
-
+                regex_expr[i + 2] == '{') {
+                // Find the closing brace
+                size_t closing_brace = regex_expr.find('}', i + 3);
+                if (closing_brace != std::string::npos && closing_brace <= i + 10) { // reasonable limit
+                    const std::string pat = regex_expr.substr(i, closing_brace - i + 1);
+                    if (k_ucat_enum.find(pat) != k_ucat_enum.end()) {
+                        if (!inside) {
+                            regex_expr_collapsed += '[';
+                        }
+                        regex_expr_collapsed += k_ucat_cpt.at(k_ucat_enum.at(pat));
+                        regex_expr_collapsed += k_ucat_map.at(k_ucat_enum.at(pat));
+                        if (!inside) {
+                            regex_expr_collapsed += ']';
+                        }
+                        i = closing_brace;
+                        continue;
                     }
-                regex_expr_collapsed += k_ucat_cpt.at(k_ucat_enum.at(pat));
-                regex_expr_collapsed += k_ucat_map.at(k_ucat_enum.at(pat));
-                if (!inside) {
-                    regex_expr_collapsed += ']';
-                }
-                i += 4;
-                continue;
             }
         }
 
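
Note: the rewritten matcher no longer assumes a fixed-width \p{X} escape; it scans forward for the closing brace, so two-letter properties such as \p{Lu} or \p{Lo} are recognised and collapsed the same way as the one-letter categories. A simplified standalone sketch of that scan, using a toy lookup table rather than the real k_ucat tables:

#include <cstdio>
#include <map>
#include <string>

int main() {
    // Toy table: property escape -> name of the collapsed character class.
    const std::map<std::string, std::string> props = {
        { "\\p{L}",  "letters" },
        { "\\p{Lu}", "letters" }, // sub-categories collapse to the same class
        { "\\p{N}",  "numbers" },
    };

    const std::string regex_expr = "(\\p{Lu}\\p{L}+)\\p{N}*";

    for (size_t i = 0; i < regex_expr.size(); ++i) {
        // Detect the start of a \p{...} escape of any length.
        if (regex_expr[i] == '\\' && i + 3 < regex_expr.size() &&
            regex_expr[i + 1] == 'p' && regex_expr[i + 2] == '{') {
            const size_t closing_brace = regex_expr.find('}', i + 3);
            if (closing_brace != std::string::npos && closing_brace <= i + 10) {
                const std::string pat = regex_expr.substr(i, closing_brace - i + 1);
                const auto it = props.find(pat);
                if (it != props.end()) {
                    std::printf("%-8s -> %s\n", pat.c_str(), it->second.c_str());
                    i = closing_brace; // skip past the whole escape
                }
            }
        }
    }
}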