cui-llama.rn 1.4.0 → 1.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/jni.cpp +9 -9
- package/cpp/common.cpp +163 -60
- package/cpp/common.h +43 -12
- package/cpp/ggml-alloc.c +1042 -1037
- package/cpp/ggml-backend-impl.h +255 -256
- package/cpp/ggml-backend-reg.cpp +582 -582
- package/cpp/ggml-backend.cpp +2002 -2002
- package/cpp/ggml-backend.h +354 -352
- package/cpp/ggml-common.h +1853 -1853
- package/cpp/ggml-cpp.h +39 -39
- package/cpp/ggml-cpu-aarch64.cpp +4247 -4247
- package/cpp/ggml-cpu-aarch64.h +8 -8
- package/cpp/ggml-cpu-impl.h +386 -386
- package/cpp/ggml-cpu-quants.c +10920 -10839
- package/cpp/ggml-cpu-traits.cpp +36 -36
- package/cpp/ggml-cpu-traits.h +38 -38
- package/cpp/ggml-cpu.c +329 -60
- package/cpp/ggml-cpu.cpp +10 -2
- package/cpp/ggml-cpu.h +135 -135
- package/cpp/ggml-impl.h +567 -567
- package/cpp/ggml-metal-impl.h +17 -17
- package/cpp/ggml-metal.m +4884 -4884
- package/cpp/ggml-quants.c +5238 -5238
- package/cpp/ggml-threading.h +14 -14
- package/cpp/ggml.c +6514 -6448
- package/cpp/ggml.h +2194 -2163
- package/cpp/gguf.cpp +1329 -1325
- package/cpp/gguf.h +202 -202
- package/cpp/json-schema-to-grammar.cpp +1045 -1045
- package/cpp/json-schema-to-grammar.h +8 -8
- package/cpp/json.hpp +24766 -24766
- package/cpp/llama-adapter.cpp +347 -346
- package/cpp/llama-adapter.h +74 -73
- package/cpp/llama-arch.cpp +1487 -1434
- package/cpp/llama-arch.h +400 -395
- package/cpp/llama-batch.cpp +368 -368
- package/cpp/llama-batch.h +88 -88
- package/cpp/llama-chat.cpp +578 -567
- package/cpp/llama-chat.h +52 -51
- package/cpp/llama-context.cpp +1775 -1771
- package/cpp/llama-context.h +128 -128
- package/cpp/llama-cparams.cpp +1 -1
- package/cpp/llama-cparams.h +37 -37
- package/cpp/llama-cpp.h +30 -30
- package/cpp/llama-grammar.cpp +1139 -1139
- package/cpp/llama-grammar.h +143 -143
- package/cpp/llama-hparams.cpp +71 -71
- package/cpp/llama-hparams.h +139 -140
- package/cpp/llama-impl.cpp +167 -167
- package/cpp/llama-impl.h +61 -61
- package/cpp/llama-kv-cache.cpp +718 -718
- package/cpp/llama-kv-cache.h +218 -218
- package/cpp/llama-mmap.cpp +2 -1
- package/cpp/llama-mmap.h +67 -67
- package/cpp/llama-model-loader.cpp +1124 -1011
- package/cpp/llama-model-loader.h +167 -158
- package/cpp/llama-model.cpp +3997 -2202
- package/cpp/llama-model.h +370 -391
- package/cpp/llama-sampling.cpp +2408 -2406
- package/cpp/llama-sampling.h +32 -48
- package/cpp/llama-vocab.cpp +3247 -1982
- package/cpp/llama-vocab.h +125 -182
- package/cpp/llama.cpp +416 -2886
- package/cpp/llama.h +1323 -1285
- package/cpp/log.cpp +401 -401
- package/cpp/log.h +121 -121
- package/cpp/rn-llama.hpp +18 -12
- package/cpp/sampling.cpp +505 -500
- package/cpp/sgemm.cpp +2597 -2597
- package/cpp/speculative.cpp +277 -274
- package/cpp/speculative.h +28 -28
- package/cpp/unicode.cpp +2 -3
- package/package.json +1 -1
package/cpp/llama-impl.h
CHANGED
@@ -1,61 +1,61 @@
-#pragma once
-
-#include "ggml.h" // for lm_ggml_log_level
-
-#include <string>
-#include <vector>
-
-#ifdef __GNUC__
-#ifdef __MINGW32__
-#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
-#else
-#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
-#endif
-#else
-#define LLAMA_ATTRIBUTE_FORMAT(...)
-#endif
-
-//
-// logging
-//
-
-LLAMA_ATTRIBUTE_FORMAT(2, 3)
-void llama_log_internal        (lm_ggml_log_level level, const char * format, ...);
-void llama_log_callback_default(lm_ggml_log_level level, const char * text, void * user_data);
-
-#define LLAMA_LOG(...)       llama_log_internal(LM_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
-#define LLAMA_LOG_INFO(...)  llama_log_internal(LM_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
-#define LLAMA_LOG_WARN(...)  llama_log_internal(LM_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
-#define LLAMA_LOG_ERROR(...) llama_log_internal(LM_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
-#define LLAMA_LOG_DEBUG(...) llama_log_internal(LM_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
-#define LLAMA_LOG_CONT(...)  llama_log_internal(LM_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
-
-//
-// helpers
-//
-
-template <typename T>
-struct no_init {
-    T value;
-    no_init() { /* do nothing */ }
-};
-
-struct time_meas {
-    time_meas(int64_t & t_acc, bool disable = false);
-    ~time_meas();
-
-    const int64_t t_start_us;
-
-    int64_t & t_acc;
-};
-
-void replace_all(std::string & s, const std::string & search, const std::string & replace);
-
-// TODO: rename to llama_format ?
-LLAMA_ATTRIBUTE_FORMAT(1, 2)
-std::string format(const char * fmt, ...);
-
-std::string llama_format_tensor_shape(const std::vector<int64_t> & ne);
-std::string llama_format_tensor_shape(const struct lm_ggml_tensor * t);
-
-std::string lm_gguf_kv_to_str(const struct lm_gguf_context * ctx_gguf, int i);
+#pragma once
+
+#include "ggml.h" // for lm_ggml_log_level
+
+#include <string>
+#include <vector>
+
+#ifdef __GNUC__
+#ifdef __MINGW32__
+#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+#else
+#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
+#endif
+#else
+#define LLAMA_ATTRIBUTE_FORMAT(...)
+#endif
+
+//
+// logging
+//
+
+LLAMA_ATTRIBUTE_FORMAT(2, 3)
+void llama_log_internal        (lm_ggml_log_level level, const char * format, ...);
+void llama_log_callback_default(lm_ggml_log_level level, const char * text, void * user_data);
+
+#define LLAMA_LOG(...)       llama_log_internal(LM_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
+#define LLAMA_LOG_INFO(...)  llama_log_internal(LM_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...)  llama_log_internal(LM_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(LM_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define LLAMA_LOG_DEBUG(...) llama_log_internal(LM_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
+#define LLAMA_LOG_CONT(...)  llama_log_internal(LM_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
+
+//
+// helpers
+//
+
+template <typename T>
+struct no_init {
+    T value;
+    no_init() { /* do nothing */ }
+};
+
+struct time_meas {
+    time_meas(int64_t & t_acc, bool disable = false);
+    ~time_meas();
+
+    const int64_t t_start_us;
+
+    int64_t & t_acc;
+};
+
+void replace_all(std::string & s, const std::string & search, const std::string & replace);
+
+// TODO: rename to llama_format ?
+LLAMA_ATTRIBUTE_FORMAT(1, 2)
+std::string format(const char * fmt, ...);
+
+std::string llama_format_tensor_shape(const std::vector<int64_t> & ne);
+std::string llama_format_tensor_shape(const struct lm_ggml_tensor * t);
+
+std::string lm_gguf_kv_to_str(const struct lm_gguf_context * ctx_gguf, int i);
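For context, the header above only declares these internal helpers; their definitions live in llama-impl.cpp. A minimal usage sketch, assuming it is compiled inside the library where these symbols are visible (the function name and accumulator below are hypothetical, not part of the package):

    #include "llama-impl.h"

    // hypothetical accumulator: time_meas adds the elapsed microseconds
    // to it when the guard goes out of scope
    static int64_t t_load_us = 0;

    static void load_weights() {
        time_meas tm(t_load_us);  // records t_start_us on construction
        LLAMA_LOG_INFO("%s: loading weights\n", __func__);
        // ... do the actual work ...
    }   // ~time_meas(): t_load_us += elapsed time

    static void report() {
        // format() is a printf-style helper returning std::string; its
        // arguments are checked at compile time via LLAMA_ATTRIBUTE_FORMAT(1, 2)
        std::string msg = format("loaded in %lld us", (long long) t_load_us);
        LLAMA_LOG_INFO("%s\n", msg.c_str());
    }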