cui-llama.rn 1.4.0 → 1.4.1
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/android/src/main/jni.cpp +9 -9
- package/cpp/common.cpp +163 -60
- package/cpp/common.h +43 -12
- package/cpp/ggml-alloc.c +1042 -1037
- package/cpp/ggml-backend-impl.h +255 -256
- package/cpp/ggml-backend-reg.cpp +582 -582
- package/cpp/ggml-backend.cpp +2002 -2002
- package/cpp/ggml-backend.h +354 -352
- package/cpp/ggml-common.h +1853 -1853
- package/cpp/ggml-cpp.h +39 -39
- package/cpp/ggml-cpu-aarch64.cpp +4247 -4247
- package/cpp/ggml-cpu-aarch64.h +8 -8
- package/cpp/ggml-cpu-impl.h +386 -386
- package/cpp/ggml-cpu-quants.c +10920 -10839
- package/cpp/ggml-cpu-traits.cpp +36 -36
- package/cpp/ggml-cpu-traits.h +38 -38
- package/cpp/ggml-cpu.c +329 -60
- package/cpp/ggml-cpu.cpp +10 -2
- package/cpp/ggml-cpu.h +135 -135
- package/cpp/ggml-impl.h +567 -567
- package/cpp/ggml-metal-impl.h +17 -17
- package/cpp/ggml-metal.m +4884 -4884
- package/cpp/ggml-quants.c +5238 -5238
- package/cpp/ggml-threading.h +14 -14
- package/cpp/ggml.c +6514 -6448
- package/cpp/ggml.h +2194 -2163
- package/cpp/gguf.cpp +1329 -1325
- package/cpp/gguf.h +202 -202
- package/cpp/json-schema-to-grammar.cpp +1045 -1045
- package/cpp/json-schema-to-grammar.h +8 -8
- package/cpp/json.hpp +24766 -24766
- package/cpp/llama-adapter.cpp +347 -346
- package/cpp/llama-adapter.h +74 -73
- package/cpp/llama-arch.cpp +1487 -1434
- package/cpp/llama-arch.h +400 -395
- package/cpp/llama-batch.cpp +368 -368
- package/cpp/llama-batch.h +88 -88
- package/cpp/llama-chat.cpp +578 -567
- package/cpp/llama-chat.h +52 -51
- package/cpp/llama-context.cpp +1775 -1771
- package/cpp/llama-context.h +128 -128
- package/cpp/llama-cparams.cpp +1 -1
- package/cpp/llama-cparams.h +37 -37
- package/cpp/llama-cpp.h +30 -30
- package/cpp/llama-grammar.cpp +1139 -1139
- package/cpp/llama-grammar.h +143 -143
- package/cpp/llama-hparams.cpp +71 -71
- package/cpp/llama-hparams.h +139 -140
- package/cpp/llama-impl.cpp +167 -167
- package/cpp/llama-impl.h +61 -61
- package/cpp/llama-kv-cache.cpp +718 -718
- package/cpp/llama-kv-cache.h +218 -218
- package/cpp/llama-mmap.cpp +2 -1
- package/cpp/llama-mmap.h +67 -67
- package/cpp/llama-model-loader.cpp +1124 -1011
- package/cpp/llama-model-loader.h +167 -158
- package/cpp/llama-model.cpp +3997 -2202
- package/cpp/llama-model.h +370 -391
- package/cpp/llama-sampling.cpp +2408 -2406
- package/cpp/llama-sampling.h +32 -48
- package/cpp/llama-vocab.cpp +3247 -1982
- package/cpp/llama-vocab.h +125 -182
- package/cpp/llama.cpp +416 -2886
- package/cpp/llama.h +1323 -1285
- package/cpp/log.cpp +401 -401
- package/cpp/log.h +121 -121
- package/cpp/rn-llama.hpp +18 -12
- package/cpp/sampling.cpp +505 -500
- package/cpp/sgemm.cpp +2597 -2597
- package/cpp/speculative.cpp +277 -274
- package/cpp/speculative.h +28 -28
- package/cpp/unicode.cpp +2 -3
- package/package.json +1 -1
package/cpp/log.h
CHANGED
@@ -1,121 +1,121 @@
(The hunk removes and re-adds the entire file with identical content, so it is shown once.)

#pragma once

#include "ggml.h" // for lm_ggml_log_level

#ifndef __GNUC__
#    define LOG_ATTRIBUTE_FORMAT(...)
#elif defined(__MINGW32__)
#    define LOG_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#    define LOG_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif

#define LOG_DEFAULT_DEBUG 1
#define LOG_DEFAULT_LLAMA 0

// needed by the LOG_TMPL macro to avoid computing log arguments if the verbosity lower
// set via common_log_set_verbosity()
extern int common_log_verbosity_thold;

void common_log_set_verbosity_thold(int verbosity); // not thread-safe

// the common_log uses an internal worker thread to print/write log messages
// when the worker thread is paused, incoming log messages are discarded
struct common_log;

struct common_log * common_log_init();
struct common_log * common_log_main(); // singleton, automatically destroys itself on exit
void common_log_pause (struct common_log * log); // pause  the worker thread, not thread-safe
void common_log_resume(struct common_log * log); // resume the worker thread, not thread-safe
void common_log_free  (struct common_log * log);

LOG_ATTRIBUTE_FORMAT(3, 4)
void common_log_add(struct common_log * log, enum lm_ggml_log_level level, const char * fmt, ...);

// defaults: file = NULL, colors = false, prefix = false, timestamps = false
//
// regular log output:
//
//   lm_ggml_backend_metal_log_allocated_size: allocated buffer, size = 6695.84 MiB, ( 6695.91 / 21845.34)
//   llm_load_tensors: ggml ctx size = 0.27 MiB
//   llm_load_tensors: offloading 32 repeating layers to GPU
//   llm_load_tensors: offloading non-repeating layers to GPU
//
// with prefix = true, timestamps = true, the log output will look like this:
//
//   0.00.035.060 D lm_ggml_backend_metal_log_allocated_size: allocated buffer, size = 6695.84 MiB, ( 6695.91 / 21845.34)
//   0.00.035.064 I llm_load_tensors: ggml ctx size = 0.27 MiB
//   0.00.090.578 I llm_load_tensors: offloading 32 repeating layers to GPU
//   0.00.090.579 I llm_load_tensors: offloading non-repeating layers to GPU
//
// I - info    (stdout, V = 0)
// W - warning (stderr, V = 0)
// E - error   (stderr, V = 0)
// D - debug   (stderr, V = LOG_DEFAULT_DEBUG)
//

void common_log_set_file      (struct common_log * log, const char * file); // not thread-safe
void common_log_set_colors    (struct common_log * log, bool colors);       // not thread-safe
void common_log_set_prefix    (struct common_log * log, bool prefix);       // whether to output prefix to each log
void common_log_set_timestamps(struct common_log * log, bool timestamps);   // whether to output timestamps in the prefix

// helper macros for logging
// use these to avoid computing log arguments if the verbosity of the log is higher than the threshold
//
// for example:
//
//   LOG_DBG("this is a debug message: %d\n", expensive_function());
//
// this will avoid calling expensive_function() if LOG_DEFAULT_DEBUG > common_log_verbosity_thold
//


#if defined(__ANDROID__)
#include <android/log.h>
#define LLAMA_ANDROID_LOG_TAG "RNLLAMA_LOG_ANDROID"

#if defined(RNLLAMA_ANDROID_ENABLE_LOGGING)
#define RNLLAMA_LOG_LEVEL 1
#else
#define RNLLAMA_LOG_LEVEL 0
#endif

#define LOG_TMPL(level, verbosity, ...) \
    do { \
        if ((verbosity) <= RNLLAMA_LOG_LEVEL) { \
            int android_log_level = ANDROID_LOG_DEFAULT; \
            switch (level) { \
                case LM_GGML_LOG_LEVEL_INFO:  android_log_level = ANDROID_LOG_INFO;  break; \
                case LM_GGML_LOG_LEVEL_WARN:  android_log_level = ANDROID_LOG_WARN;  break; \
                case LM_GGML_LOG_LEVEL_ERROR: android_log_level = ANDROID_LOG_ERROR; break; \
                default: android_log_level = ANDROID_LOG_DEFAULT; \
            } \
            __android_log_print(android_log_level, LLAMA_ANDROID_LOG_TAG, __VA_ARGS__); \
        } \
    } while(0)
#else

#define LOG_TMPL(level, verbosity, ...) \
    do { \
        if ((verbosity) <= common_log_verbosity_thold) { \
            common_log_add(common_log_main(), (level), __VA_ARGS__); \
        } \
    } while (0)

#endif

#define LOG(...)             LOG_TMPL(LM_GGML_LOG_LEVEL_NONE, 0,         __VA_ARGS__)
#define LOGV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_NONE, verbosity, __VA_ARGS__)

#define LOG_INF(...) LOG_TMPL(LM_GGML_LOG_LEVEL_INFO,  0,                 __VA_ARGS__)
#define LOG_WRN(...) LOG_TMPL(LM_GGML_LOG_LEVEL_WARN,  0,                 __VA_ARGS__)
#define LOG_ERR(...) LOG_TMPL(LM_GGML_LOG_LEVEL_ERROR, 0,                 __VA_ARGS__)
#define LOG_DBG(...) LOG_TMPL(LM_GGML_LOG_LEVEL_DEBUG, LOG_DEFAULT_DEBUG, __VA_ARGS__)
#define LOG_CNT(...) LOG_TMPL(LM_GGML_LOG_LEVEL_CONT,  0,                 __VA_ARGS__)

#define LOG_INFV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_INFO,  verbosity, __VA_ARGS__)
#define LOG_WRNV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_WARN,  verbosity, __VA_ARGS__)
#define LOG_ERRV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_ERROR, verbosity, __VA_ARGS__)
#define LOG_DBGV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_DEBUG, verbosity, __VA_ARGS__)
#define LOG_CNTV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_CONT,  verbosity, __VA_ARGS__)
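The header's comments above spell out the key property of the LOG_* macros: the argument list sits inside LOG_TMPL's verbosity check, so it is never evaluated for suppressed messages. A minimal sketch (not part of the package) of that behavior on a non-Android build, assuming the default threshold is 0:

#include "log.h"

static int expensive_function() {
    return 42; // stands in for any costly computation done only for logging
}

int main() {
    LOG_INF("loading model\n");                        // emitted (verbosity 0)
    LOG_DBG("value: %d\n", expensive_function());      // suppressed: LOG_DEFAULT_DEBUG (1)
                                                       // exceeds the threshold, so
                                                       // expensive_function() never runs
    common_log_set_verbosity_thold(LOG_DEFAULT_DEBUG); // raise the threshold
    LOG_DBG("value: %d\n", expensive_function());      // now emitted
    return 0;
}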
package/cpp/rn-llama.hpp
CHANGED
@@ -219,7 +219,7 @@ struct llama_rn_context
     std::string stopping_word;
     bool incomplete = false;

-    std::vector<
+    std::vector<common_adapter_lora_info> lora;

     ~llama_rn_context()
     {
@@ -279,7 +279,7 @@ struct llama_rn_context

     bool validateModelChatTemplate() const {
         llama_chat_message chat[] = {{"user", "test"}};
-        int32_t chat_res = llama_chat_apply_template(model,
+        int32_t chat_res = llama_chat_apply_template(llama_model_chat_template(model), chat, 1, true, nullptr, 0);
         return chat_res > 0;
     }
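This hunk tracks the upstream llama.cpp API change in which llama_chat_apply_template no longer takes a model pointer: the template string is obtained separately via llama_model_chat_template. A minimal sketch (not from the package) of the resulting two-step pattern, assuming a null output buffer yields the required length, as validateModelChatTemplate's positive-return check relies on; the variable names are hypothetical:

const char * tmpl = llama_model_chat_template(model); // built-in template, may be nullptr
llama_chat_message chat[] = {{"user", "Hello!"}};

int32_t needed = llama_chat_apply_template(tmpl, chat, 1, /*add_ass=*/true, nullptr, 0);
if (needed > 0) {
    std::vector<char> buf(needed);
    llama_chat_apply_template(tmpl, chat, 1, true, buf.data(), (int32_t) buf.size());
    std::string prompt(buf.data(), needed); // formatted chat prompt
}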
@@ -307,7 +307,7 @@ struct llama_rn_context

     void loadPrompt()
     {
-        std::vector<llama_token> prompt_tokens = ::common_tokenize(model, params.prompt, true, true);
+        std::vector<llama_token> prompt_tokens = ::common_tokenize(llama_model_get_vocab(model), params.prompt, true, true);
         num_prompt_tokens = prompt_tokens.size();

         // LOG tokens
@@ -439,14 +439,14 @@ struct llama_rn_context
         if (params.n_predict == 0)
         {
             has_next_token = false;
-            result.tok =
+            result.tok = llama_vocab_eos(llama_model_get_vocab(model));
             return result;
         }

         {
             // out of user input, sample next token
             std::vector<llama_token_data> candidates;
-            candidates.reserve(
+            candidates.reserve(llama_vocab_n_tokens(llama_model_get_vocab(model)));

             result.tok = common_sampler_sample(ctx_sampling, ctx, -1);

@@ -479,7 +479,7 @@ struct llama_rn_context
         // decrement remaining sampling budget
         --n_remain;

-        if (!embd.empty() && embd.back() ==
+        if (!embd.empty() && embd.back() == llama_vocab_eos(llama_model_get_vocab(model)))
         {
             // stopping_word = llama_token_to_piece(ctx, embd.back());
             has_next_token = false;
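The three hunks above follow the upstream split of vocabulary queries out of the model object: callers first obtain a const llama_vocab * via llama_model_get_vocab and then use the llama_vocab_* helpers. A condensed sketch of the pattern (the identifiers are those used in the diff; the surrounding code is hypothetical):

const llama_vocab * vocab = llama_model_get_vocab(model);

// tokenize the prompt against the vocab handle rather than the model
std::vector<llama_token> tokens = ::common_tokenize(vocab, params.prompt, true, true);

// size the candidate list to the vocabulary size
std::vector<llama_token_data> candidates;
candidates.reserve(llama_vocab_n_tokens(vocab));

// end-of-sequence checks also go through the vocab handle
const llama_token eos = llama_vocab_eos(vocab);
const bool hit_eos = !tokens.empty() && tokens.back() == eos;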
@@ -584,7 +584,7 @@ struct llama_rn_context

     std::vector<float> getEmbedding(common_params &embd_params)
     {
-        static const int n_embd =
+        static const int n_embd = llama_model_n_embd(llama_get_model(ctx));
         if (!embd_params.embedding)
         {
             LOG_WARNING("embedding disabled, embedding: %s", embd_params.embedding);
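Same migration for the embedding width: it is now a property of the model, recovered from the context via llama_get_model. A short sketch (the buffer name is hypothetical):

const int n_embd = llama_model_n_embd(llama_get_model(ctx)); // embedding dimension of the loaded model
std::vector<float> embedding(n_embd, 0.0f);                  // storage for one embedding vector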
@@ -716,25 +716,31 @@ struct llama_rn_context
                 std::string("]");
     }

-    int applyLoraAdapters(std::vector<
+    int applyLoraAdapters(std::vector<common_adapter_lora_info> lora) {
         for (auto &la : lora) {
-            la.ptr =
+            la.ptr = llama_adapter_lora_init(model, la.path.c_str());
             if (la.ptr == nullptr) {
                 LOG_ERROR("failed to apply lora adapter '%s'\n", la.path.c_str());
                 return -1;
             }
         }
         this->lora = lora;
-
+        for (auto &la : lora) {
+            llama_set_adapter_lora(ctx, la.ptr, 1);
+        }
+
         return 0;
     }

     void removeLoraAdapters() {
+        for (auto &la : this->lora) {
+            llama_adapter_lora_free(la.ptr);
+        }
         this->lora.clear();
-
+        llama_clear_adapter_lora(ctx);
     }

-    std::vector<
+    std::vector<common_adapter_lora_info> getLoadedLoraAdapters() {
         return this->lora;
     }
     // Context Shifting from KoboldCpp <https://github.com/LostRuins/koboldcpp>
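The last hunk adopts the renamed upstream adapter API and completes the lifecycle: adapters are loaded with llama_adapter_lora_init, attached to the context with llama_set_adapter_lora, and explicitly released on removal via llama_adapter_lora_free and llama_clear_adapter_lora. A condensed sketch of that cycle (the path is hypothetical; the scale of 1 matches the diff):

common_adapter_lora_info la;
la.path = "/path/to/adapter.gguf";                        // hypothetical adapter file

la.ptr = llama_adapter_lora_init(model, la.path.c_str()); // load adapter weights
if (la.ptr != nullptr) {
    llama_set_adapter_lora(ctx, la.ptr, 1);               // attach to the context at scale 1
    // ... run generation with the adapter active ...
    llama_clear_adapter_lora(ctx);                        // detach all adapters from the context
    llama_adapter_lora_free(la.ptr);                      // release the adapter itself
}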