cui-llama.rn 1.7.1 → 1.7.3
- package/android/src/main/CMakeLists.txt +2 -2
- package/android/src/main/java/com/rnllama/LlamaContext.java +22 -18
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/cpp/tools/mtmd/clip.cpp +56 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/package.json +1 -1
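Taken together, the substantive change in this release is support for Android content:// URIs as model and mmproj sources: LlamaContext.java now resolves such URIs to raw file descriptors, a new RNLLAMA_USE_FD_FILE compile flag is set for all Android library variants, and the bundled clip.cpp gains a code path that reads tensors from a numeric file descriptor instead of a file path. The binary entries are the rebuilt native libraries; package.json carries the version bump.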
package/android/src/main/CMakeLists.txt CHANGED
@@ -90,7 +90,7 @@ function(build_library target_name cpu_flags)
 
     target_link_libraries(${target_name} ${LOG_LIB} android)
 
-    target_compile_options(${target_name} PRIVATE -DLM_GGML_USE_CPU -DLM_GGML_USE_CPU_AARCH64 -pthread ${cpu_flags})
+    target_compile_options(${target_name} PRIVATE -DLM_GGML_USE_CPU -DLM_GGML_USE_CPU_AARCH64 -DRNLLAMA_USE_FD_FILE -pthread ${cpu_flags})
 
     if (${CMAKE_BUILD_TYPE} STREQUAL "Debug")
         target_compile_options(${target_name} PRIVATE -DRNLLAMA_ANDROID_ENABLE_LOGGING)
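The only change here is the new -DRNLLAMA_USE_FD_FILE define, applied to every CPU-variant build; it presumably gates file-descriptor support elsewhere in the native sources (the clip.cpp addition below is unconditional).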
@@ -99,7 +99,7 @@ function(build_library target_name cpu_flags)
     # NOTE: If you want to debug the native code, you can uncomment if and endif
     # Note that it will be extremely slow
     # if (NOT ${CMAKE_BUILD_TYPE} STREQUAL "Debug")
-        target_compile_options(${target_name} PRIVATE -O3 -DNDEBUG
+        target_compile_options(${target_name} PRIVATE -O3 -DNDEBUG)
         target_compile_options(${target_name} PRIVATE -fvisibility=hidden -fvisibility-inlines-hidden)
         target_compile_options(${target_name} PRIVATE -ffunction-sections -fdata-sections)
 
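This hunk restores the closing parenthesis that was missing from the release-mode target_compile_options call in 1.7.1.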
package/android/src/main/java/com/rnllama/LlamaContext.java CHANGED
@@ -107,13 +107,18 @@ public class LlamaContext {
     }
 
     String modelName = params.getString("model");
-
-    if(
-
+
+    if (modelName.startsWith("content://")) {
+      Uri uri = Uri.parse(modelName);
+      try {
+        ParcelFileDescriptor pfd = reactContext.getApplicationContext().getContentResolver().openFileDescriptor(uri, "r");
+        modelName = "" + pfd.getFd();
+      } catch (Exception e) {
+        Log.e(NAME, "Failed to convert to FD!");
+      }
     }
-
-    modelName = getContentFileDescriptor(modelName);
 
+
     // Check if file has GGUF magic numbers
     this.id = id;
     eventEmitter = reactContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter.class);
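With this change, a content:// URI obtained from Android's Storage Access Framework can be passed directly as the model path: the constructor converts it to a raw file descriptor number (rendered as a decimal string) before native code opens it. A minimal sketch of how a caller might obtain such a URI; the snippet assumes it runs inside an Activity, and REQUEST_PICK_MODEL is a hypothetical request code, not part of this package:

    import android.content.Intent;

    // Launch the system document picker. The content:// URI delivered to
    // onActivityResult can be passed as the "model" init parameter and is
    // converted to an FD string by the constructor code above.
    Intent intent = new Intent(Intent.ACTION_OPEN_DOCUMENT);
    intent.addCategory(Intent.CATEGORY_OPENABLE);
    intent.setType("*/*"); // GGUF has no registered MIME type
    startActivityForResult(intent, REQUEST_PICK_MODEL);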
@@ -438,9 +443,20 @@ public class LlamaContext {
       throw new IllegalArgumentException("mmproj_path is empty");
     }
     File file = new File(mmprojPath);
-    if (!file.exists()) {
+    if (!mmprojPath.startsWith("content") && !file.exists()) {
       throw new IllegalArgumentException("mmproj file does not exist: " + mmprojPath);
     }
+
+    if (mmprojPath.startsWith("content://")) {
+      Uri uri = Uri.parse(mmprojPath);
+      try {
+        ParcelFileDescriptor pfd = this.reactContext.getApplicationContext().getContentResolver().openFileDescriptor(uri, "r");
+        mmprojPath = "" + pfd.getFd();
+      } catch (Exception e) {
+        Log.e(NAME, "Failed to convert to FD!");
+      }
+    }
+
     return initMultimodal(this.context, mmprojPath, mmprojUseGpu);
   }
 
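This mirrors the model-path handling above, duplicated inline for the mmproj path. Note the asymmetric guards: the existence check is skipped for anything starting with "content", while the FD conversion only fires on the full "content://" prefix, so a malformed "content" path without the scheme separator would bypass both the check and the conversion.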
@@ -463,18 +479,6 @@ public class LlamaContext {
     freeContext(context);
   }
 
-  private String getContentFileDescriptor(String modelName) {
-    if (!modelName.startsWith("content://")) return modelName;
-    Uri uri = Uri.parse(modelName);
-    try {
-      ParcelFileDescriptor pfd = reactContext.getApplicationContext().getContentResolver().openFileDescriptor(uri, "r");
-      return "" + pfd.getFd();
-    } catch (Exception e) {
-      Log.e(NAME, "Failed to convert to FD!");
-    }
-    return modelName;
-  }
-
   static {
     Log.d(NAME, "Primary ABI: " + Build.SUPPORTED_ABIS[0]);
 
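With the conversion inlined at both call sites, the private getContentFileDescriptor helper becomes dead code and is removed; behavior is unchanged, including leaving the original string in place when openFileDescriptor fails. In both versions the ParcelFileDescriptor is never explicitly closed, leaving the descriptor's lifetime to the runtime while native code reads from it.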
(8 binary files changed: package/android/src/main/jniLibs/**/librnllama*.so)
package/cpp/tools/mtmd/clip.cpp CHANGED
@@ -31,6 +31,12 @@
 #include <numeric>
 #include <functional>
 
+// rnllama additions
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
 struct clip_logger_state g_logger_state = {LM_GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};
 
 enum ffn_op_type {
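The new POSIX headers bring in the low-level I/O APIs (notably lseek and read from unistd.h) used by the file-descriptor loading path added below.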
@@ -2486,6 +2492,55 @@ struct clip_model_loader {
     {
         std::vector<uint8_t> read_buf;
 
+        // rnllama addition - we support usage of file descriptors
+        // Check if fname is an FD number (no '/' characters)
+        bool is_fd = (fname.find('/') == std::string::npos);
+
+        if (is_fd) {
+            // Routine for handling FD
+            int fd = -1;
+            try {
+                fd = std::stoi(fname); // Convert string to integer FD
+            } catch (const std::invalid_argument& e) {
+                throw std::runtime_error(string_format("%s: invalid FD number provided: %s\n", __func__, fname.c_str()));
+            } catch (const std::out_of_range& e) {
+                throw std::runtime_error(string_format("%s: FD number out of range: %s\n", __func__, fname.c_str()));
+            }
+
+            lm_ggml_backend_buffer_type_t buft = lm_ggml_backend_get_default_buffer_type(ctx_clip.backend);
+            ctx_clip.buf.reset(lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
+            lm_ggml_backend_buffer_set_usage(ctx_clip.buf.get(), LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+
+            for (auto & t : tensors_to_load) {
+                lm_ggml_tensor * cur = lm_ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
+                const size_t offset = tensor_offset[t->name];
+
+                if (lseek(fd, offset, SEEK_SET) == (off_t)-1) {
+                    throw std::runtime_error(string_format("%s: failed to seek for tensor %s (FD %d): %s\n", __func__, t->name, fd, strerror(errno)));
+                }
+
+                size_t num_bytes = lm_ggml_nbytes(cur);
+                if (lm_ggml_backend_buft_is_host(buft)) {
+                    // for the CPU and Metal backend, we can read directly into the tensor
+                    ssize_t bytes_read = read(fd, reinterpret_cast<char *>(cur->data), num_bytes);
+                    if (bytes_read == -1 || static_cast<size_t>(bytes_read) != num_bytes) {
+                        throw std::runtime_error(string_format("%s: failed to read for tensor %s (FD %d): %s\n", __func__, t->name, fd, strerror(errno)));
+                    }
+                } else {
+                    // read into a temporary buffer first, then copy to device memory
+                    read_buf.resize(num_bytes);
+                    ssize_t bytes_read = read(fd, reinterpret_cast<char *>(read_buf.data()), num_bytes);
+                    if (bytes_read == -1 || static_cast<size_t>(bytes_read) != num_bytes) {
+                        throw std::runtime_error(string_format("%s: failed to read for tensor %s (FD %d): %s\n", __func__, t->name, fd, strerror(errno)));
+                    }
+                    lm_ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
+                }
+            }
+            // Assuming the FD is managed externally and shouldn't be closed here.
+            LOG_DBG("%s: loaded %zu tensors from FD %s\n", __func__, tensors_to_load.size(), fname.c_str());
+
+        } else {
+            // The original ifstream routine for file paths
             auto fin = std::ifstream(fname, std::ios::binary);
             if (!fin) {
                 throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
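Two caveats in this path are worth noting. First, FD detection is heuristic: any fname containing no '/' is treated as a numeric descriptor, which relies on genuine file paths always containing a separator. Second, each tensor is transferred with a single read(2) call and any short read is treated as an error rather than retried; since a single Linux read() transfers at most about 2 GiB, an individual tensor larger than that would fail here. As the comment notes, the descriptor is deliberately left open because it is owned by the Java side.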
@@ -2516,6 +2571,7 @@ struct clip_model_loader {
             fin.close();
 
             LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
+        }
     }
 }
 
(4 binary files changed: package/ios/rnllama.xcframework/**/rnllama.framework/rnllama)