@novastera-oss/llamarn 0.5.3 → 0.5.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cpp/PureCppImpl.cpp +17 -16
- package/package.json +1 -1
package/cpp/PureCppImpl.cpp
CHANGED
|
@@ -19,19 +19,20 @@
|
|
|
19
19
|
#include "chat.h"
|
|
20
20
|
|
|
21
21
|
#if defined(__ANDROID__) || defined(__linux__)
|
|
22
|
-
#include <unistd.h>
|
|
23
22
|
#include <dlfcn.h>
|
|
24
|
-
#include <android/log.h>
|
|
25
|
-
#ifndef LOG_TAG
|
|
26
|
-
#define LOG_TAG "RNLlamaCpp"
|
|
27
|
-
#endif
|
|
28
|
-
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
|
|
29
|
-
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
|
|
23
|
+
// #include <android/log.h>
|
|
24
|
+
// #ifndef LOG_TAG
|
|
25
|
+
// #define LOG_TAG "RNLlamaCpp"
|
|
26
|
+
// #endif
|
|
27
|
+
// #define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
|
|
28
|
+
// #define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
|
|
29
|
+
// #define LOGW(...) __android_log_print(ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__)
|
|
30
|
+
// #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)
|
|
30
31
|
#else
|
|
31
|
-
#define LOGI(...) fprintf(stderr, __VA_ARGS__)
|
|
32
|
-
#define LOGE(...) fprintf(stderr, __VA_ARGS__)
|
|
33
|
-
#define LOGW(...) fprintf(stderr, __VA_ARGS__)
|
|
34
|
-
#define LOGD(...) fprintf(stderr, __VA_ARGS__)
|
|
32
|
+
// #define LOGI(...) fprintf(stderr, __VA_ARGS__)
|
|
33
|
+
// #define LOGE(...) fprintf(stderr, __VA_ARGS__)
|
|
34
|
+
// #define LOGW(...) fprintf(stderr, __VA_ARGS__)
|
|
35
|
+
// #define LOGD(...) fprintf(stderr, __VA_ARGS__)
|
|
35
36
|
#endif
|
|
36
37
|
|
|
37
38
|
// Include the llama.cpp headers directly
|
|
@@ -88,11 +89,11 @@ jsi::Value PureCppImpl::loadLlamaModelInfo(jsi::Runtime &runtime, jsi::String mo
|
|
|
88
89
|
std::thread([selfPtr, path, resolve, reject, runtimePtr, invoker]() {
|
|
89
90
|
try {
|
|
90
91
|
// Set up logging callback to capture llama.cpp error messages
|
|
91
|
-
llama_log_set([](enum ggml_log_level level, const char * text, void * /* user_data */) {
|
|
92
|
-
if (level >= GGML_LOG_LEVEL_ERROR) {
|
|
93
|
-
LOGE("llama.cpp: %s", text);
|
|
94
|
-
}
|
|
95
|
-
}, nullptr);
|
|
92
|
+
// llama_log_set([](enum ggml_log_level level, const char * text, void * /* user_data */) {
|
|
93
|
+
// if (level >= GGML_LOG_LEVEL_ERROR) {
|
|
94
|
+
// LOGE("llama.cpp: %s", text);
|
|
95
|
+
// }
|
|
96
|
+
// }, nullptr);
|
|
96
97
|
|
|
97
98
|
// Load all available backends (CPU is dynamically loaded when GGML_BACKEND_DL is enabled)
|
|
98
99
|
// With GGML_BACKEND_DL=ON, ALL backends (CPU + GPU) are dynamically loaded
|