cui-llama.rn 1.2.4 → 1.3.0
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +3 -4
- package/android/src/main/CMakeLists.txt +21 -5
- package/android/src/main/java/com/rnllama/LlamaContext.java +115 -30
- package/android/src/main/java/com/rnllama/RNLlama.java +40 -7
- package/android/src/main/jni.cpp +222 -36
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +9 -4
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +9 -4
- package/cpp/common.cpp +1682 -2122
- package/cpp/common.h +600 -594
- package/cpp/ggml-aarch64.c +129 -3209
- package/cpp/ggml-aarch64.h +19 -39
- package/cpp/ggml-alloc.c +1040 -1040
- package/cpp/ggml-alloc.h +76 -76
- package/cpp/ggml-backend-impl.h +216 -227
- package/cpp/ggml-backend-reg.cpp +195 -0
- package/cpp/ggml-backend.cpp +1997 -2625
- package/cpp/ggml-backend.h +328 -326
- package/cpp/ggml-common.h +1853 -1853
- package/cpp/ggml-cpp.h +38 -0
- package/cpp/ggml-cpu-aarch64.c +3560 -0
- package/cpp/ggml-cpu-aarch64.h +30 -0
- package/cpp/ggml-cpu-impl.h +371 -614
- package/cpp/ggml-cpu-quants.c +10822 -0
- package/cpp/ggml-cpu-quants.h +63 -0
- package/cpp/ggml-cpu.c +13975 -0
- package/cpp/ggml-cpu.cpp +663 -0
- package/cpp/ggml-cpu.h +177 -0
- package/cpp/ggml-impl.h +550 -209
- package/cpp/ggml-metal.h +66 -66
- package/cpp/ggml-metal.m +4294 -3819
- package/cpp/ggml-quants.c +5247 -15752
- package/cpp/ggml-quants.h +100 -147
- package/cpp/ggml-threading.cpp +12 -0
- package/cpp/ggml-threading.h +12 -0
- package/cpp/ggml.c +8180 -23464
- package/cpp/ggml.h +2411 -2562
- package/cpp/llama-grammar.cpp +1138 -1138
- package/cpp/llama-grammar.h +144 -144
- package/cpp/llama-impl.h +181 -181
- package/cpp/llama-sampling.cpp +2348 -2194
- package/cpp/llama-sampling.h +48 -30
- package/cpp/llama-vocab.cpp +1984 -1968
- package/cpp/llama-vocab.h +170 -165
- package/cpp/llama.cpp +22132 -21969
- package/cpp/llama.h +1253 -1253
- package/cpp/log.cpp +401 -401
- package/cpp/log.h +121 -121
- package/cpp/rn-llama.hpp +83 -19
- package/cpp/sampling.cpp +466 -458
- package/cpp/sgemm.cpp +1884 -1219
- package/ios/RNLlama.mm +43 -20
- package/ios/RNLlamaContext.h +9 -3
- package/ios/RNLlamaContext.mm +133 -33
- package/jest/mock.js +0 -1
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/index.js +52 -15
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/index.js +51 -15
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +29 -6
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +12 -5
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +41 -7
- package/src/index.ts +82 -27
- package/cpp/json-schema-to-grammar.cpp +0 -1045
- package/cpp/json-schema-to-grammar.h +0 -8
- package/cpp/json.hpp +0 -24766
package/cpp/log.h
CHANGED
@@ -1,121 +1,121 @@
(The registry viewer marks all 121 lines as removed and re-added, but the old and new contents are identical as rendered, so the file is shown once below.)

```cpp
#pragma once

#include "ggml.h" // for lm_ggml_log_level

#ifndef __GNUC__
#    define LOG_ATTRIBUTE_FORMAT(...)
#elif defined(__MINGW32__)
#    define LOG_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#    define LOG_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif

#define LOG_DEFAULT_DEBUG 1
#define LOG_DEFAULT_LLAMA 0

// needed by the LOG_TMPL macro to avoid computing log arguments if the verbosity lower
// set via common_log_set_verbosity()
extern int common_log_verbosity_thold;

void common_log_set_verbosity_thold(int verbosity); // not thread-safe

// the common_log uses an internal worker thread to print/write log messages
// when the worker thread is paused, incoming log messages are discarded
struct common_log;

struct common_log * common_log_init();
struct common_log * common_log_main(); // singleton, automatically destroys itself on exit
void common_log_pause (struct common_log * log); // pause the worker thread, not thread-safe
void common_log_resume(struct common_log * log); // resume the worker thread, not thread-safe
void common_log_free  (struct common_log * log);

LOG_ATTRIBUTE_FORMAT(3, 4)
void common_log_add(struct common_log * log, enum lm_ggml_log_level level, const char * fmt, ...);

// defaults: file = NULL, colors = false, prefix = false, timestamps = false
//
// regular log output:
//
//   lm_ggml_backend_metal_log_allocated_size: allocated buffer, size = 6695.84 MiB, ( 6695.91 / 21845.34)
//   llm_load_tensors: ggml ctx size = 0.27 MiB
//   llm_load_tensors: offloading 32 repeating layers to GPU
//   llm_load_tensors: offloading non-repeating layers to GPU
//
// with prefix = true, timestamps = true, the log output will look like this:
//
//   0.00.035.060 D lm_ggml_backend_metal_log_allocated_size: allocated buffer, size = 6695.84 MiB, ( 6695.91 / 21845.34)
//   0.00.035.064 I llm_load_tensors: ggml ctx size = 0.27 MiB
//   0.00.090.578 I llm_load_tensors: offloading 32 repeating layers to GPU
//   0.00.090.579 I llm_load_tensors: offloading non-repeating layers to GPU
//
// I - info    (stdout, V = 0)
// W - warning (stderr, V = 0)
// E - error   (stderr, V = 0)
// D - debug   (stderr, V = LOG_DEFAULT_DEBUG)
//

void common_log_set_file      (struct common_log * log, const char * file); // not thread-safe
void common_log_set_colors    (struct common_log * log, bool colors);       // not thread-safe
void common_log_set_prefix    (struct common_log * log, bool prefix);       // whether to output prefix to each log
void common_log_set_timestamps(struct common_log * log, bool timestamps);   // whether to output timestamps in the prefix

// helper macros for logging
// use these to avoid computing log arguments if the verbosity of the log is higher than the threshold
//
// for example:
//
//   LOG_DBG("this is a debug message: %d\n", expensive_function());
//
// this will avoid calling expensive_function() if LOG_DEFAULT_DEBUG > common_log_verbosity_thold
//


#if defined(__ANDROID__)
#include <android/log.h>
#define LLAMA_ANDROID_LOG_TAG "RNLLAMA_LOG_ANDROID"

#if defined(RNLLAMA_ANDROID_ENABLE_LOGGING)
#define RNLLAMA_LOG_LEVEL 1
#else
#define RNLLAMA_LOG_LEVEL 0
#endif

#define LOG_TMPL(level, verbosity, ...) \
    do { \
        if ((verbosity) <= RNLLAMA_LOG_LEVEL) { \
            int android_log_level = ANDROID_LOG_DEFAULT; \
            switch (level) { \
                case LM_GGML_LOG_LEVEL_INFO:  android_log_level = ANDROID_LOG_INFO;  break; \
                case LM_GGML_LOG_LEVEL_WARN:  android_log_level = ANDROID_LOG_WARN;  break; \
                case LM_GGML_LOG_LEVEL_ERROR: android_log_level = ANDROID_LOG_ERROR; break; \
                default: android_log_level = ANDROID_LOG_DEFAULT; \
            } \
            __android_log_print(android_log_level, LLAMA_ANDROID_LOG_TAG, __VA_ARGS__); \
        } \
    } while(0)
#else

#define LOG_TMPL(level, verbosity, ...) \
    do { \
        if ((verbosity) <= common_log_verbosity_thold) { \
            common_log_add(common_log_main(), (level), __VA_ARGS__); \
        } \
    } while (0)

#endif

#define LOG(...)             LOG_TMPL(LM_GGML_LOG_LEVEL_NONE, 0,         __VA_ARGS__)
#define LOGV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_NONE, verbosity, __VA_ARGS__)

#define LOG_INF(...) LOG_TMPL(LM_GGML_LOG_LEVEL_INFO,  0,                 __VA_ARGS__)
#define LOG_WRN(...) LOG_TMPL(LM_GGML_LOG_LEVEL_WARN,  0,                 __VA_ARGS__)
#define LOG_ERR(...) LOG_TMPL(LM_GGML_LOG_LEVEL_ERROR, 0,                 __VA_ARGS__)
#define LOG_DBG(...) LOG_TMPL(LM_GGML_LOG_LEVEL_DEBUG, LOG_DEFAULT_DEBUG, __VA_ARGS__)
#define LOG_CNT(...) LOG_TMPL(LM_GGML_LOG_LEVEL_CONT,  0,                 __VA_ARGS__)

#define LOG_INFV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_INFO,  verbosity, __VA_ARGS__)
#define LOG_WRNV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_WARN,  verbosity, __VA_ARGS__)
#define LOG_ERRV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_ERROR, verbosity, __VA_ARGS__)
#define LOG_DBGV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_DEBUG, verbosity, __VA_ARGS__)
#define LOG_CNTV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_CONT,  verbosity, __VA_ARGS__)
```
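This header routes all logging through `LOG_TMPL`: on Android it forwards to `__android_log_print` (compiled out unless `RNLLAMA_ANDROID_ENABLE_LOGGING` is defined), elsewhere it forwards to `common_log_add` only when the message's verbosity is at or below `common_log_verbosity_thold`. Because the variadic arguments sit inside the `if`, they are never evaluated for suppressed messages. A minimal sketch of that behavior, assuming this header plus a linked `common_log` implementation (`expensive_summary` is a hypothetical helper):

```cpp
#include "log.h"

// Hypothetical costly computation, used only to illustrate lazy evaluation.
static int expensive_summary() { return 42; }

int main() {
    common_log_set_verbosity_thold(0);  // only V = 0 messages pass

    LOG_INF("model loaded\n");          // printed: verbosity 0 <= threshold 0

    // Skipped entirely: LOG_DEFAULT_DEBUG (1) > threshold (0), so
    // expensive_summary() is never called.
    LOG_DBG("summary: %d\n", expensive_summary());

    common_log_set_verbosity_thold(LOG_DEFAULT_DEBUG);
    LOG_DBG("summary: %d\n", expensive_summary());  // now evaluated and printed
    return 0;
}
```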
package/cpp/rn-llama.hpp
CHANGED
```diff
@@ -4,11 +4,67 @@
 #include <sstream>
 #include <iostream>
 #include "common.h"
+#include "ggml.h"
 #include "llama.h"
+#include "llama-impl.h"
 #include "sampling.h"
 
 namespace rnllama {
 
+static std::string lm_gguf_data_to_str(enum lm_gguf_type type, const void * data, int i) {
+    switch (type) {
+        case LM_GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
+        case LM_GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
+        case LM_GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
+        case LM_GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
+        case LM_GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
+        case LM_GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
+        case LM_GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
+        case LM_GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
+        case LM_GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
+        case LM_GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
+        case LM_GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
+        default:                   return "unknown type: " + std::to_string(type);
+    }
+}
+
+static std::string lm_gguf_kv_to_str(const struct lm_gguf_context * ctx_gguf, int i) {
+    const enum lm_gguf_type type = lm_gguf_get_kv_type(ctx_gguf, i);
+
+    switch (type) {
+        case LM_GGUF_TYPE_STRING:
+            return lm_gguf_get_val_str(ctx_gguf, i);
+        case LM_GGUF_TYPE_ARRAY:
+            {
+                const enum lm_gguf_type arr_type = lm_gguf_get_arr_type(ctx_gguf, i);
+                int arr_n = lm_gguf_get_arr_n(ctx_gguf, i);
+                const void * data = lm_gguf_get_arr_data(ctx_gguf, i);
+                std::stringstream ss;
+                ss << "[";
+                for (int j = 0; j < arr_n; j++) {
+                    if (arr_type == LM_GGUF_TYPE_STRING) {
+                        std::string val = lm_gguf_get_arr_str(ctx_gguf, i, j);
+                        // escape quotes
+                        replace_all(val, "\\", "\\\\");
+                        replace_all(val, "\"", "\\\"");
+                        ss << '"' << val << '"';
+                    } else if (arr_type == LM_GGUF_TYPE_ARRAY) {
+                        ss << "???";
+                    } else {
+                        ss << lm_gguf_data_to_str(arr_type, data, j);
+                    }
+                    if (j < arr_n - 1) {
+                        ss << ", ";
+                    }
+                }
+                ss << "]";
+                return ss.str();
+            }
+        default:
+            return lm_gguf_data_to_str(type, lm_gguf_get_val_data(ctx_gguf, i), 0);
+    }
+}
+
 static void llama_batch_clear(llama_batch *batch) {
     batch->n_tokens = 0;
 }
```
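The two helpers added here stringify GGUF metadata: `lm_gguf_data_to_str` converts one scalar value, and `lm_gguf_kv_to_str` handles strings and arrays on top of it (quoting and escaping string elements, printing `???` for nested arrays). A sketch of dumping a model file's metadata with them; the `lm_`-prefixed gguf calls below are assumed to match this package's ggml headers:

```cpp
#include "rn-llama.hpp"
#include <cstdio>

// Print every metadata key/value pair of a GGUF file.
void dump_gguf_metadata(const char * fname) {
    struct lm_gguf_init_params params = { /* no_alloc = */ true, /* ctx = */ nullptr };
    struct lm_gguf_context * ctx = lm_gguf_init_from_file(fname, params);
    if (!ctx) return;

    const int n_kv = lm_gguf_get_n_kv(ctx);
    for (int i = 0; i < n_kv; i++) {
        printf("%s = %s\n",
               lm_gguf_get_key(ctx, i),
               rnllama::lm_gguf_kv_to_str(ctx, i).c_str());
    }
    lm_gguf_free(ctx);
}
```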
```diff
@@ -160,9 +216,12 @@ struct llama_rn_context
     common_params params;
 
     llama_model *model = nullptr;
+    float loading_progress = 0;
+    bool is_load_interrupted = false;
+
     llama_context *ctx = nullptr;
     common_sampler *ctx_sampling = nullptr;
-
+
     int n_ctx;
 
     bool truncated = false;
```
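The new `loading_progress` and `is_load_interrupted` fields give the bridge something to poll while a model loads. A sketch of how they could be fed from llama.cpp's load-progress callback (illustrative wiring, not necessarily this package's exact code):

```cpp
#include "llama.h"
#include "rn-llama.hpp"

// Load a model while reporting progress into the context fields above.
llama_model * load_with_progress(rnllama::llama_rn_context & rn, const char * path) {
    llama_model_params mparams = llama_model_default_params();
    mparams.progress_callback_user_data = &rn;
    mparams.progress_callback = [](float progress, void * user_data) -> bool {
        auto * ctx = static_cast<rnllama::llama_rn_context *>(user_data);
        ctx->loading_progress = progress;  // 0.0 .. 1.0
        return !ctx->is_load_interrupted;  // returning false aborts the load
    };
    return llama_load_model_from_file(path, mparams);
}
```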
```diff
@@ -235,13 +294,16 @@ struct llama_rn_context
     }
 
     bool validateModelChatTemplate() const {
-        llama_chat_message chat[] = {{"user", "test"}};
-
         std::vector<char> model_template(2048, 0); // longest known template is about 1200 bytes
         std::string template_key = "tokenizer.chat_template";
         int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
-
-
+        if (res >= 0) {
+            llama_chat_message chat[] = {{"user", "test"}};
+            std::string tmpl = std::string(model_template.data(), model_template.size());
+            int32_t chat_res = llama_chat_apply_template(model, tmpl.c_str(), chat, 1, true, nullptr, 0);
+            return chat_res > 0;
+        }
+        return res > 0;
     }
 
     void truncatePrompt(std::vector<llama_token> &prompt_tokens) {
```
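The rewritten `validateModelChatTemplate` no longer assumes the metadata lookup succeeded: it first reads `tokenizer.chat_template` from the model, and only if the key exists does it dry-run the template through `llama_chat_apply_template` with a one-message conversation, passing a null output buffer since only the return code matters. The same pattern as a standalone sketch against the llama.cpp C API of this version:

```cpp
#include "llama.h"
#include <string>
#include <vector>

// True if the model ships a chat template that applies cleanly.
bool model_has_usable_chat_template(const llama_model * model) {
    std::vector<char> buf(2048, 0);
    int32_t res = llama_model_meta_val_str(model, "tokenizer.chat_template",
                                           buf.data(), buf.size());
    if (res < 0) return false;  // key not present

    llama_chat_message chat[] = {{"user", "test"}};
    std::string tmpl(buf.data(), buf.size());
    // nullptr/0 output buffer: we only want to know whether it applies
    return llama_chat_apply_template(model, tmpl.c_str(), chat, 1, true, nullptr, 0) > 0;
}
```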
```diff
@@ -376,7 +438,7 @@ struct llama_rn_context
             n_eval = params.n_batch;
         }
         if (llama_decode(ctx, llama_batch_get_one(&embd[n_past], n_eval)))
-        {
+        {
             LOG_ERROR("failed to eval, n_eval: %d, n_past: %d, n_threads: %d, embd: %s",
                       n_eval,
                       n_past,
@@ -387,7 +449,7 @@ struct llama_rn_context
             return result;
         }
         n_past += n_eval;
-
+
         if(is_interrupted) {
             LOG_INFO("Decoding Interrupted");
             embd.resize(n_past);
@@ -409,11 +471,11 @@ struct llama_rn_context
         candidates.reserve(llama_n_vocab(model));
 
         result.tok = common_sampler_sample(ctx_sampling, ctx, -1);
-
+
         llama_token_data_array cur_p = *common_sampler_get_candidates(ctx_sampling);
 
         const int32_t n_probs = params.sparams.n_probs;
-
+
         // deprecated
         /*if (params.sparams.temp <= 0 && n_probs > 0)
         {
@@ -421,7 +483,7 @@ struct llama_rn_context
             llama_sampler_init_softmax();
 
         }*/
-
+
 
         for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i)
         {
```
```diff
@@ -542,26 +604,28 @@ struct llama_rn_context
         return token_with_probs;
     }
 
-    std::vector<float> getEmbedding()
+    std::vector<float> getEmbedding(common_params &embd_params)
     {
         static const int n_embd = llama_n_embd(llama_get_model(ctx));
-        if (!
+        if (!embd_params.embedding)
         {
-            LOG_WARNING("embedding disabled, embedding: %s",
+            LOG_WARNING("embedding disabled, embedding: %s", embd_params.embedding);
             return std::vector<float>(n_embd, 0.0f);
         }
         float *data;
-
-
+
+        const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
+        printf("pooling_type: %d\n", pooling_type);
+        if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
             data = llama_get_embeddings(ctx);
-        }
-        else {
+        } else {
             data = llama_get_embeddings_seq(ctx, 0);
         }
-
-        if(!data) {
+
+        if (!data) {
             return std::vector<float>(n_embd, 0.0f);
         }
+
         std::vector<float> embedding(data, data + n_embd), out(data, data + n_embd);
         common_embd_normalize(embedding.data(), out.data(), n_embd, params.embd_normalize);
         return out;
```
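`getEmbedding` now selects the accessor by the context's pooling type: with `LLAMA_POOLING_TYPE_NONE` the per-token embeddings come from `llama_get_embeddings`, otherwise the pooled sequence embedding comes from `llama_get_embeddings_seq`, and the result is normalized with `common_embd_normalize`. The same logic as a standalone sketch, assuming the llama.cpp C API and `common.h` of this version (`norm` mirrors `params.embd_normalize`, where 2 means L2 normalization):

```cpp
#include "llama.h"
#include "common.h"
#include <vector>

// Fetch one normalized embedding from a context that has already decoded input.
std::vector<float> fetch_embedding(llama_context * ctx, int norm /* e.g. 2 = L2 */) {
    const int n_embd = llama_n_embd(llama_get_model(ctx));

    const float * data = llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_NONE
        ? llama_get_embeddings(ctx)          // per-token embeddings (first token)
        : llama_get_embeddings_seq(ctx, 0);  // pooled embedding for sequence 0
    if (!data) return std::vector<float>(n_embd, 0.0f);

    std::vector<float> out(n_embd, 0.0f);
    common_embd_normalize(data, out.data(), n_embd, norm);
    return out;
}
```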