llama-cpp-capacitor 0.0.6 → 0.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cpp/LICENSE +21 -0
- package/cpp/README.md +4 -0
- package/cpp/anyascii.c +22223 -0
- package/cpp/anyascii.h +42 -0
- package/cpp/chat-parser.cpp +393 -0
- package/cpp/chat-parser.h +120 -0
- package/cpp/chat.cpp +2315 -0
- package/cpp/chat.h +221 -0
- package/cpp/common.cpp +1619 -0
- package/cpp/common.h +744 -0
- package/cpp/ggml-alloc.c +1028 -0
- package/cpp/ggml-alloc.h +76 -0
- package/cpp/ggml-backend-impl.h +255 -0
- package/cpp/ggml-backend-reg.cpp +600 -0
- package/cpp/ggml-backend.cpp +2118 -0
- package/cpp/ggml-backend.h +354 -0
- package/cpp/ggml-common.h +1878 -0
- package/cpp/ggml-cpp.h +39 -0
- package/cpp/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/ggml-cpu/amx/amx.h +8 -0
- package/cpp/ggml-cpu/amx/common.h +91 -0
- package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
- package/cpp/ggml-cpu/amx/mmq.h +10 -0
- package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
- package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
- package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
- package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
- package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
- package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
- package/cpp/ggml-cpu/arch-fallback.h +215 -0
- package/cpp/ggml-cpu/binary-ops.cpp +158 -0
- package/cpp/ggml-cpu/binary-ops.h +16 -0
- package/cpp/ggml-cpu/common.h +73 -0
- package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
- package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
- package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
- package/cpp/ggml-cpu/ops.cpp +10587 -0
- package/cpp/ggml-cpu/ops.h +114 -0
- package/cpp/ggml-cpu/quants.c +1193 -0
- package/cpp/ggml-cpu/quants.h +97 -0
- package/cpp/ggml-cpu/repack.cpp +1982 -0
- package/cpp/ggml-cpu/repack.h +120 -0
- package/cpp/ggml-cpu/simd-mappings.h +1184 -0
- package/cpp/ggml-cpu/traits.cpp +36 -0
- package/cpp/ggml-cpu/traits.h +38 -0
- package/cpp/ggml-cpu/unary-ops.cpp +186 -0
- package/cpp/ggml-cpu/unary-ops.h +28 -0
- package/cpp/ggml-cpu/vec.cpp +348 -0
- package/cpp/ggml-cpu/vec.h +1121 -0
- package/cpp/ggml-cpu.h +145 -0
- package/cpp/ggml-impl.h +622 -0
- package/cpp/ggml-metal-impl.h +688 -0
- package/cpp/ggml-metal.h +66 -0
- package/cpp/ggml-metal.m +6833 -0
- package/cpp/ggml-opt.cpp +1093 -0
- package/cpp/ggml-opt.h +256 -0
- package/cpp/ggml-quants.c +5324 -0
- package/cpp/ggml-quants.h +106 -0
- package/cpp/ggml-threading.cpp +12 -0
- package/cpp/ggml-threading.h +14 -0
- package/cpp/ggml.c +7108 -0
- package/cpp/ggml.h +2492 -0
- package/cpp/gguf.cpp +1358 -0
- package/cpp/gguf.h +202 -0
- package/cpp/json-partial.cpp +256 -0
- package/cpp/json-partial.h +38 -0
- package/cpp/json-schema-to-grammar.cpp +985 -0
- package/cpp/json-schema-to-grammar.h +21 -0
- package/cpp/llama-adapter.cpp +388 -0
- package/cpp/llama-adapter.h +76 -0
- package/cpp/llama-arch.cpp +2355 -0
- package/cpp/llama-arch.h +499 -0
- package/cpp/llama-batch.cpp +875 -0
- package/cpp/llama-batch.h +160 -0
- package/cpp/llama-chat.cpp +783 -0
- package/cpp/llama-chat.h +65 -0
- package/cpp/llama-context.cpp +2748 -0
- package/cpp/llama-context.h +306 -0
- package/cpp/llama-cparams.cpp +5 -0
- package/cpp/llama-cparams.h +41 -0
- package/cpp/llama-cpp.h +30 -0
- package/cpp/llama-grammar.cpp +1229 -0
- package/cpp/llama-grammar.h +173 -0
- package/cpp/llama-graph.cpp +1891 -0
- package/cpp/llama-graph.h +810 -0
- package/cpp/llama-hparams.cpp +180 -0
- package/cpp/llama-hparams.h +233 -0
- package/cpp/llama-impl.cpp +167 -0
- package/cpp/llama-impl.h +61 -0
- package/cpp/llama-io.cpp +15 -0
- package/cpp/llama-io.h +35 -0
- package/cpp/llama-kv-cache-iswa.cpp +318 -0
- package/cpp/llama-kv-cache-iswa.h +135 -0
- package/cpp/llama-kv-cache.cpp +2059 -0
- package/cpp/llama-kv-cache.h +374 -0
- package/cpp/llama-kv-cells.h +491 -0
- package/cpp/llama-memory-hybrid.cpp +258 -0
- package/cpp/llama-memory-hybrid.h +137 -0
- package/cpp/llama-memory-recurrent.cpp +1146 -0
- package/cpp/llama-memory-recurrent.h +179 -0
- package/cpp/llama-memory.cpp +59 -0
- package/cpp/llama-memory.h +119 -0
- package/cpp/llama-mmap.cpp +600 -0
- package/cpp/llama-mmap.h +68 -0
- package/cpp/llama-model-loader.cpp +1164 -0
- package/cpp/llama-model-loader.h +170 -0
- package/cpp/llama-model-saver.cpp +282 -0
- package/cpp/llama-model-saver.h +37 -0
- package/cpp/llama-model.cpp +19042 -0
- package/cpp/llama-model.h +491 -0
- package/cpp/llama-sampling.cpp +2575 -0
- package/cpp/llama-sampling.h +32 -0
- package/cpp/llama-vocab.cpp +3792 -0
- package/cpp/llama-vocab.h +176 -0
- package/cpp/llama.cpp +358 -0
- package/cpp/llama.h +1373 -0
- package/cpp/log.cpp +427 -0
- package/cpp/log.h +103 -0
- package/cpp/minja/chat-template.hpp +550 -0
- package/cpp/minja/minja.hpp +3009 -0
- package/cpp/nlohmann/json.hpp +25526 -0
- package/cpp/nlohmann/json_fwd.hpp +187 -0
- package/cpp/regex-partial.cpp +204 -0
- package/cpp/regex-partial.h +56 -0
- package/cpp/rn-completion.cpp +681 -0
- package/cpp/rn-completion.h +116 -0
- package/cpp/rn-llama.cpp +345 -0
- package/cpp/rn-llama.h +149 -0
- package/cpp/rn-mtmd.hpp +602 -0
- package/cpp/rn-tts.cpp +591 -0
- package/cpp/rn-tts.h +59 -0
- package/cpp/sampling.cpp +579 -0
- package/cpp/sampling.h +107 -0
- package/cpp/tools/mtmd/clip-impl.h +473 -0
- package/cpp/tools/mtmd/clip.cpp +4322 -0
- package/cpp/tools/mtmd/clip.h +106 -0
- package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
- package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
- package/cpp/tools/mtmd/mtmd-audio.h +47 -0
- package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
- package/cpp/tools/mtmd/mtmd-helper.h +91 -0
- package/cpp/tools/mtmd/mtmd.cpp +1066 -0
- package/cpp/tools/mtmd/mtmd.h +298 -0
- package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
- package/cpp/unicode-data.cpp +7034 -0
- package/cpp/unicode-data.h +20 -0
- package/cpp/unicode.cpp +1061 -0
- package/cpp/unicode.h +68 -0
- package/package.json +2 -1
package/cpp/llama-vocab.h
ADDED
@@ -0,0 +1,176 @@
+#pragma once
+
+#include "llama.h"
+
+#include <string>
+#include <vector>
+#include <memory>
+
+// pre-tokenization types
+enum llama_vocab_pre_type {
+    LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0,
+    LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1,
+    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2,
+    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
+    LLAMA_VOCAB_PRE_TYPE_FALCON = 4,
+    LLAMA_VOCAB_PRE_TYPE_MPT = 5,
+    LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
+    LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
+    LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
+    LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
+    LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10,
+    LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11,
+    LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
+    LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
+    LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
+    LLAMA_VOCAB_PRE_TYPE_PORO = 15,
+    LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16,
+    LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17,
+    LLAMA_VOCAB_PRE_TYPE_VIKING = 18,
+    LLAMA_VOCAB_PRE_TYPE_JAIS = 19,
+    LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20,
+    LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21,
+    LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22,
+    LLAMA_VOCAB_PRE_TYPE_BLOOM = 23,
+    LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24,
+    LLAMA_VOCAB_PRE_TYPE_EXAONE = 25,
+    LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26,
+    LLAMA_VOCAB_PRE_TYPE_MINERVA = 27,
+    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28,
+    LLAMA_VOCAB_PRE_TYPE_GPT4O = 29,
+    LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30,
+    LLAMA_VOCAB_PRE_TYPE_TRILLION = 31,
+    LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32,
+    LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33,
+    LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34,
+    LLAMA_VOCAB_PRE_TYPE_SEED_CODER = 35,
+    LLAMA_VOCAB_PRE_TYPE_HUNYUAN = 36,
+    LLAMA_VOCAB_PRE_TYPE_KIMI_K2 = 37,
+    LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE = 38,
+};
+
+struct LLM_KV;
+struct llama_model_loader;
+
+struct llama_vocab {
+    struct token_data {
+        std::string text;
+        float score;
+        llama_token_attr attr;
+    };
+
+    llama_vocab();
+    ~llama_vocab();
+
+    void load(llama_model_loader & ml, const LLM_KV & kv);
+
+    std::string get_tokenizer_model() const;
+    std::string get_tokenizer_pre() const;
+
+    enum llama_vocab_type get_type() const;
+    enum llama_vocab_pre_type get_pre_type() const;
+
+    uint32_t n_tokens() const;
+    uint32_t n_token_types() const;
+
+    std::string type_name() const;
+
+    bool is_normal(llama_token id) const;
+    bool is_unknown(llama_token id) const;
+    bool is_control(llama_token id) const;
+    bool is_byte(llama_token id) const;
+    bool is_user_defined(llama_token id) const;
+    bool is_unused(llama_token id) const;
+    bool is_eog(llama_token id) const;
+
+    uint8_t token_to_byte(llama_token id) const;
+    llama_token byte_to_token(uint8_t ch) const;
+
+    llama_token text_to_token(const std::string & text) const;
+
+    const token_data & get_token_data(llama_token id) const;
+
+    const char * token_get_text(llama_token id) const;
+    float token_get_score(llama_token id) const;
+    llama_token_attr token_get_attr(llama_token id) const;
+
+    llama_token token_bos() const;
+    llama_token token_eos() const;
+    llama_token token_eot() const;
+    llama_token token_eom() const;
+    llama_token token_unk() const;
+    llama_token token_sep() const;
+    llama_token token_nl() const;
+    llama_token token_pad() const;
+    llama_token token_mask() const;
+
+    llama_token token_prefix() const;
+    llama_token token_middle() const;
+    llama_token token_suffix() const;
+
+    llama_token token_fim_pre() const;
+    llama_token token_fim_suf() const;
+    llama_token token_fim_mid() const;
+    llama_token token_fim_pad() const;
+    llama_token token_fim_rep() const;
+    llama_token token_fim_sep() const;
+
+    bool get_add_space_prefix() const;
+    bool get_add_bos() const;
+    bool get_add_eos() const;
+    bool get_add_sep() const;
+    bool get_ignore_merges() const;
+    bool get_clean_spaces() const;
+    bool get_remove_extra_whitespaces() const;
+    bool get_escape_whitespaces() const;
+    bool get_treat_whitespace_as_suffix() const;
+
+    int max_token_len() const;
+
+    int find_bpe_rank(const std::string & token_left, const std::string & token_right) const;
+    std::vector<std::string> get_bpe_merges() const;
+
+    std::vector<char> get_precompiled_charsmap() const;
+
+    int32_t tokenize(
+            const char * text,
+            int32_t text_len,
+            llama_token * tokens,
+            int32_t n_tokens_max,
+            bool add_special,
+            bool parse_special) const;
+
+    std::vector<llama_token> tokenize(
+            const std::string & raw_text,
+            bool add_special,
+            bool parse_special = false) const;
+
+    // does not write null-terminator to buf
+    int32_t token_to_piece(
+            llama_token token,
+            char * buf,
+            int32_t length,
+            int32_t lstrip,
+            bool special) const;
+
+    // use cached data
+    const std::string & token_to_piece(llama_token token) const;
+
+    int32_t detokenize(
+            const llama_token * tokens,
+            int32_t n_tokens,
+            char * text,
+            int32_t text_len_max,
+            bool remove_special,
+            bool unparse_special) const;
+
+    std::string detokenize(
+            const std::vector<llama_token> & tokens,
+            bool special) const;
+
+    void print_info() const;
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
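For orientation, a minimal caller-side sketch of how the std::string overloads declared in this header fit together. It is not part of the package: it assumes the internal header is on the include path and that a llama_vocab has already been populated via the model loader (llama_vocab::load).

#include <string>
#include <vector>
#include "llama-vocab.h"

// Tokenize and immediately detokenize a piece of text with the same vocab.
static std::string vocab_round_trip(const llama_vocab & vocab, const std::string & text) {
    // add_special = true lets the vocab insert BOS/EOS according to its flags
    std::vector<llama_token> toks = vocab.tokenize(text, /*add_special=*/ true, /*parse_special=*/ false);
    // special = false keeps control/special tokens out of the rendered string
    return vocab.detokenize(toks, /*special=*/ false);
}

In upstream llama.cpp, the fixed-buffer overloads (tokenize with n_tokens_max, detokenize with text_len_max) back the C API in llama.h and report the required size as a negative return value when the buffer is too small; the vector/string overloads above are convenience wrappers.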
package/cpp/llama.cpp
ADDED
@@ -0,0 +1,358 @@
+#include "llama-impl.h"
+
+#include "llama-chat.h"
+#include "llama-mmap.h"
+#include "llama-vocab.h"
+#include "llama-model-loader.h"
+#include "llama-model-saver.h"
+#include "llama-model.h"
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <ctime>
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+//
+// interface implementation
+//
+
+struct llama_sampler_chain_params llama_sampler_chain_default_params() {
+    struct llama_sampler_chain_params result = {
+        /*.no_perf =*/ true,
+    };
+
+    return result;
+}
+
+size_t llama_max_devices(void) {
+    return 16;
+}
+
+bool llama_supports_mmap(void) {
+    return llama_mmap::SUPPORTED;
+}
+
+bool llama_supports_mlock(void) {
+    return llama_mlock::SUPPORTED;
+}
+
+bool llama_supports_gpu_offload(void) {
+    return lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_GPU) != nullptr ||
+           llama_supports_rpc();
+}
+
+bool llama_supports_rpc(void) {
+    return lm_ggml_backend_reg_by_name("RPC") != nullptr;
+}
+
+void llama_backend_init(void) {
+    lm_ggml_time_init();
+
+    // needed to initialize f16 tables
+    {
+        struct lm_ggml_init_params params = { 0, NULL, false };
+        struct lm_ggml_context * ctx = lm_ggml_init(params);
+        lm_ggml_free(ctx);
+    }
+}
+
+void llama_numa_init(enum lm_ggml_numa_strategy numa) {
+    if (numa != LM_GGML_NUMA_STRATEGY_DISABLED) {
+        auto * dev = lm_ggml_backend_dev_by_type(LM_GGML_BACKEND_DEVICE_TYPE_CPU);
+        LM_GGML_ASSERT(dev && "CPU backend is not loaded");
+        auto * reg = lm_ggml_backend_dev_backend_reg(dev);
+        auto * numa_init_fn = (decltype(lm_ggml_numa_init) *) lm_ggml_backend_reg_get_proc_address(reg, "lm_ggml_backend_cpu_numa_init");
+        numa_init_fn(numa);
+    }
+}
+
+void llama_backend_free(void) {
+    lm_ggml_quantize_free();
+}
+
+int64_t llama_time_us(void) {
+    return lm_ggml_time_us();
+}
+
+// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
+static int llama_model_load(const std::string & fname, std::vector<std::string> & splits, llama_model & model, llama_model_params & params) {
+    // loading time will be recalculated after the first eval, so
+    // we take page faults deferred by mmap() into consideration
+    model.t_load_us = 0;
+    time_meas tm(model.t_load_us);
+
+    model.t_start_us = tm.t_start_us;
+
+    try {
+        llama_model_loader ml(fname, splits, params.use_mmap, params.check_tensors, params.kv_overrides, params.tensor_buft_overrides);
+
+        ml.print_info();
+
+        model.hparams.vocab_only = params.vocab_only;
+
+        try {
+            model.load_arch(ml);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
+        }
+        try {
+            model.load_hparams(ml);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
+        }
+        try {
+            model.load_vocab(ml);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
+        }
+
+        model.load_stats(ml);
+        model.print_info();
+
+        if (params.vocab_only) {
+            LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
+            return 0;
+        }
+
+        if (!model.load_tensors(ml)) {
+            return -2;
+        }
+    } catch (const std::exception & err) {
+        LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
+        return -1;
+    }
+
+    return 0;
+}
+
+static struct llama_model * llama_model_load_from_file_impl(
+        const std::string & path_model,
+        std::vector<std::string> & splits,
+        struct llama_model_params params) {
+    lm_ggml_time_init();
+
+    if (!params.vocab_only && lm_ggml_backend_reg_count() == 0) {
+        LLAMA_LOG_ERROR("%s: no backends are loaded. hint: use lm_ggml_backend_load() or lm_ggml_backend_load_all() to load a backend before calling this function\n", __func__);
+        return nullptr;
+    }
+
+    unsigned cur_percentage = 0;
+    if (params.progress_callback == NULL) {
+        params.progress_callback_user_data = &cur_percentage;
+        params.progress_callback = [](float progress, void * ctx) {
+            unsigned * cur_percentage_p = (unsigned *) ctx;
+            unsigned percentage = (unsigned) (100 * progress);
+            while (percentage > *cur_percentage_p) {
+                *cur_percentage_p = percentage;
+                LLAMA_LOG_CONT(".");
+                if (percentage >= 100) {
+                    LLAMA_LOG_CONT("\n");
+                }
+            }
+            return true;
+        };
+    }
+
+    llama_model * model = new llama_model(params);
+
+    // create list of devices to use with this model
+    if (params.devices) {
+        for (lm_ggml_backend_dev_t * dev = params.devices; *dev; ++dev) {
+            model->devices.push_back(*dev);
+        }
+    } else {
+        std::vector<lm_ggml_backend_dev_t> rpc_servers;
+        // use all available devices
+        for (size_t i = 0; i < lm_ggml_backend_dev_count(); ++i) {
+            lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_get(i);
+            switch (lm_ggml_backend_dev_type(dev)) {
+                case LM_GGML_BACKEND_DEVICE_TYPE_CPU:
+                case LM_GGML_BACKEND_DEVICE_TYPE_ACCEL:
+                    // skip CPU backends since they are handled separately
+                    break;
+
+                case LM_GGML_BACKEND_DEVICE_TYPE_GPU:
+                    lm_ggml_backend_reg_t reg = lm_ggml_backend_dev_backend_reg(dev);
+                    if (lm_ggml_backend_reg_name(reg) == std::string("RPC")) {
+                        rpc_servers.push_back(dev);
+                    } else {
+                        model->devices.push_back(dev);
+                    }
+                    break;
+            }
+        }
+        // add RPC servers at the front of the list
+        if (!rpc_servers.empty()) {
+            model->devices.insert(model->devices.begin(), rpc_servers.begin(), rpc_servers.end());
+        }
+    }
+
+    // if using single GPU mode, remove all except the main GPU
+    if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
+        if (params.main_gpu < 0) {
+            model->devices.clear();
+        } else {
+            if (params.main_gpu >= (int)model->devices.size()) {
+                LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %zu)\n", __func__, params.main_gpu, model->devices.size());
+                llama_model_free(model);
+                return nullptr;
+            }
+            lm_ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
+            model->devices.clear();
+            model->devices.push_back(main_gpu);
+        }
+    }
+
+    for (auto * dev : model->devices) {
+        size_t free, total; // NOLINT
+        lm_ggml_backend_dev_memory(dev, &free, &total);
+        LLAMA_LOG_INFO("%s: using device %s (%s) - %zu MiB free\n", __func__, lm_ggml_backend_dev_name(dev), lm_ggml_backend_dev_description(dev), free/1024/1024);
+    }
+
+    const int status = llama_model_load(path_model, splits, *model, params);
+    LM_GGML_ASSERT(status <= 0);
+    if (status < 0) {
+        if (status == -1) {
+            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+        } else if (status == -2) {
+            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+        }
+
+        llama_model_free(model);
+        return nullptr;
+    }
+
+    return model;
+}
+
+// deprecated
+struct llama_model * llama_load_model_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
+    return llama_model_load_from_file(path_model, params);
+}
+
+struct llama_model * llama_model_load_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
+    std::vector<std::string> splits = {};
+    return llama_model_load_from_file_impl(path_model, splits, params);
+}
+
+struct llama_model * llama_model_load_from_splits(
+        const char ** paths,
+        size_t n_paths,
+        struct llama_model_params params) {
+    std::vector<std::string> splits;
+    if (n_paths == 0) {
+        LLAMA_LOG_ERROR("%s: list of splits is empty\n", __func__);
+        return nullptr;
+    }
+    for (size_t i = 0; i < n_paths; ++i) {
+        splits.push_back(paths[i]);
+    }
+    return llama_model_load_from_file_impl(splits.front(), splits, params);
+}
+
+void llama_model_save_to_file(const struct llama_model * model, const char * path_model) {
+    llama_model_saver ms(*model);
+    ms.add_kv_from_model();
+    ms.add_tensors_from_model();
+    ms.save(path_model);
+}
+
+//
+// chat templates
+//
+
+int32_t llama_chat_apply_template(
+        const char * tmpl,
+        const struct llama_chat_message * chat,
+        size_t n_msg,
+        bool add_ass,
+        char * buf,
+        int32_t length) {
+    const std::string curr_tmpl(tmpl == nullptr ? "chatml" : tmpl);
+
+    // format the chat to string
+    std::vector<const llama_chat_message *> chat_vec;
+    chat_vec.resize(n_msg);
+    for (size_t i = 0; i < n_msg; i++) {
+        chat_vec[i] = &chat[i];
+    }
+
+    std::string formatted_chat;
+    llm_chat_template detected_tmpl = llm_chat_detect_template(curr_tmpl);
+    if (detected_tmpl == LLM_CHAT_TEMPLATE_UNKNOWN) {
+        return -1;
+    }
+    int32_t res = llm_chat_apply_template(detected_tmpl, chat_vec, formatted_chat, add_ass);
+    if (res < 0) {
+        return res;
+    }
+    if (buf && length > 0) {
+        strncpy(buf, formatted_chat.c_str(), length);
+    }
+    return res;
+}
+
+//
+// model split
+//
+
+int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count) {
+    static const char * const SPLIT_PATH_FORMAT = "%s-%05d-of-%05d.gguf";
+    if (snprintf(split_path, maxlen, SPLIT_PATH_FORMAT, path_prefix, split_no + 1, split_count)) {
+        return strlen(split_path);
+    }
+    return 0;
+}
+
+int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count) {
+    std::string str_split_path(split_path);
+    char postfix[32];
+    snprintf(postfix, 32, "-%05d-of-%05d.gguf", split_no + 1, split_count);
+    std::string str_postfix(postfix);
+
+    // check if split_prefix ends with postfix
+    int size_prefix = str_split_path.size() - str_postfix.size();
+    if (size_prefix > 0 && str_split_path.find(str_postfix, size_prefix) != std::string::npos) {
+        snprintf(split_prefix, std::min((size_t) size_prefix + 1, maxlen), "%s", split_path);
+        return size_prefix;
+    }
+
+    return 0;
+}
+
+const char * llama_print_system_info(void) {
+    static std::string s;
+    s.clear(); // Clear the string, since it's static, otherwise it will accumulate data from previous calls.
+
+    for (size_t i = 0; i < lm_ggml_backend_reg_count(); i++) {
+        auto * reg = lm_ggml_backend_reg_get(i);
+        auto * get_features_fn = (lm_ggml_backend_get_features_t) lm_ggml_backend_reg_get_proc_address(reg, "lm_ggml_backend_get_features");
+        if (get_features_fn) {
+            lm_ggml_backend_feature * features = get_features_fn(reg);
+            s += lm_ggml_backend_reg_name(reg);
+            s += " : ";
+            for (; features->name; features++) {
+                s += features->name;
+                s += " = ";
+                s += features->value;
+                s += " | ";
+            }
+        }
+    }
+
+    return s.c_str();
+}
+
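Taken together, these entry points cover backend setup, model loading, and teardown. A minimal caller-side sketch (not part of the package; it assumes llama_model_default_params() from the same public llama.h API, as in upstream llama.cpp, and reduces error handling to null checks):

#include <cstdio>
#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s model.gguf\n", argv[0]);
        return 1;
    }

    llama_backend_init();                                 // one-time ggml init (f16 tables)
    fprintf(stderr, "%s\n", llama_print_system_info());   // backend feature summary

    llama_model_params mparams = llama_model_default_params();
    // returns nullptr on failure or when the progress callback cancels the load
    llama_model * model = llama_model_load_from_file(argv[1], mparams);
    if (model == nullptr) {
        llama_backend_free();
        return 1;
    }

    // ... create a llama_context, tokenize, decode ...

    llama_model_free(model);
    llama_backend_free();
    return 0;
}

For sharded GGUF files, llama_model_load_from_splits takes an explicit list of paths, and llama_split_path builds the conventional "-%05d-of-%05d.gguf" names from a common prefix.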