cui-llama.rn 1.7.2 → 1.7.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/android/src/main/CMakeLists.txt CHANGED
@@ -90,7 +90,7 @@ function(build_library target_name cpu_flags)
 
     target_link_libraries(${target_name} ${LOG_LIB} android)
 
-    target_compile_options(${target_name} PRIVATE -DLM_GGML_USE_CPU -DLM_GGML_USE_CPU_AARCH64 -pthread ${cpu_flags})
+    target_compile_options(${target_name} PRIVATE -DLM_GGML_USE_CPU -DLM_GGML_USE_CPU_AARCH64 -DRNLLAMA_USE_FD_FILE -pthread ${cpu_flags})
 
     if (${CMAKE_BUILD_TYPE} STREQUAL "Debug")
         target_compile_options(${target_name} PRIVATE -DRNLLAMA_ANDROID_ENABLE_LOGGING)
@@ -99,7 +99,7 @@ function(build_library target_name cpu_flags)
     # NOTE: If you want to debug the native code, you can uncomment if and endif
     # Note that it will be extremely slow
     # if (NOT ${CMAKE_BUILD_TYPE} STREQUAL "Debug")
-    target_compile_options(${target_name} PRIVATE -O3 -DNDEBUG -DRNLLAMA_USE_FD_FILE)
+    target_compile_options(${target_name} PRIVATE -O3 -DNDEBUG)
 
     target_compile_options(${target_name} PRIVATE -fvisibility=hidden -fvisibility-inlines-hidden)
     target_compile_options(${target_name} PRIVATE -ffunction-sections -fdata-sections)
 
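Net effect of the two hunks above: `-DRNLLAMA_USE_FD_FILE` moves out of the release-only optimization block and into the flags applied to every build type, so the file-descriptor loading path added to `clip.cpp` below is compiled into Debug builds as well.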
package/android/src/main/java/com/rnllama/LlamaContext.java CHANGED
@@ -69,7 +69,11 @@ public class LlamaContext {
     try {
       if (filepath.startsWith("content")) {
         Uri uri = Uri.parse(filepath);
-        reactContext.getApplicationContext().getContentResolver().takePersistableUriPermission(uri, Intent.FLAG_GRANT_READ_URI_PERMISSION);
+        try {
+          reactContext.getApplicationContext().getContentResolver().takePersistableUriPermission(uri, Intent.FLAG_GRANT_READ_URI_PERMISSION);
+        } catch (SecurityException e) {
+          Log.w(NAME, "Persistable permission not granted for URI: " + uri);
+        }
         fis = reactContext.getApplicationContext().getContentResolver().openInputStream(uri);
       } else {
         fis = new FileInputStream(filepath);
@@ -107,7 +111,11 @@ public class LlamaContext {
     }
 
     String modelName = params.getString("model");
-
+
+    if(!isGGUF(modelName, reactContext)) {
+      throw new IllegalArgumentException("File is not in GGUF format");
+    }
+
     if (modelName.startsWith("content://")) {
       Uri uri = Uri.parse(modelName);
       try {
@@ -117,7 +125,6 @@ public class LlamaContext {
         Log.e(NAME, "Failed to convert to FD!");
       }
     }
-
 
     // Check if file has GGUF magic numbers
     this.id = id;
@@ -442,10 +449,26 @@ public class LlamaContext {
     if (mmprojPath == null || mmprojPath.isEmpty()) {
       throw new IllegalArgumentException("mmproj_path is empty");
     }
+
+    if(!isGGUF(mmprojPath, this.reactContext)) {
+      throw new IllegalArgumentException("File is not in GGUF format");
+    }
+
     File file = new File(mmprojPath);
-    if (!file.exists()) {
+    if (!mmprojPath.startsWith("content") && !file.exists()) {
       throw new IllegalArgumentException("mmproj file does not exist: " + mmprojPath);
     }
+
+    if (mmprojPath.startsWith("content://")) {
+      Uri uri = Uri.parse(mmprojPath);
+      try {
+        ParcelFileDescriptor pfd = this.reactContext.getApplicationContext().getContentResolver().openFileDescriptor(uri, "r");
+        mmprojPath = "" + pfd.getFd();
+      } catch (Exception e) {
+        Log.e(NAME, "Failed to convert to FD!");
+      }
+    }
+
     return initMultimodal(this.context, mmprojPath, mmprojUseGpu);
   }
 
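The new `isGGUF(...)` guard is referenced in these hunks but its body is not part of this diff. Going by the `// Check if file has GGUF magic numbers` comment and the content-URI handling in the first hunk, a minimal sketch of what such a check typically looks like follows; the actual implementation in the package may differ, so treat the body as an assumption.

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import android.net.Uri;
import com.facebook.react.bridge.ReactApplicationContext;

public class GGUFCheck {
  // Hypothetical sketch of isGGUF(): every GGUF file begins with the four
  // ASCII magic bytes 'G' 'G' 'U' 'F' (0x46554747 as a little-endian uint32).
  public static boolean isGGUF(final String filepath, ReactApplicationContext reactContext) {
    byte[] magic = new byte[4];
    InputStream in = null;
    try {
      if (filepath.startsWith("content")) {
        // content:// URIs must go through the ContentResolver, as in the hunk above
        Uri uri = Uri.parse(filepath);
        in = reactContext.getApplicationContext().getContentResolver().openInputStream(uri);
      } else {
        in = new FileInputStream(filepath);
      }
      // A sketch-level simplification: assume the first read returns all 4 bytes
      if (in == null || in.read(magic, 0, 4) != 4) return false;
      return magic[0] == 'G' && magic[1] == 'G' && magic[2] == 'U' && magic[3] == 'F';
    } catch (IOException e) {
      return false;
    } finally {
      try { if (in != null) in.close(); } catch (IOException ignored) {}
    }
  }
}
```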
package/android/src/main/java/com/rnllama/RNLlama.java CHANGED
@@ -7,6 +7,7 @@ import android.os.Handler;
 import android.os.AsyncTask;
 import android.os.ParcelFileDescriptor;
 import android.net.Uri;
+import android.content.Intent;
 
 import com.facebook.react.bridge.Promise;
 import com.facebook.react.bridge.ReactApplicationContext;
@@ -85,6 +86,15 @@ public class RNLlama implements LifecycleEventListener {
 
   public void modelInfo(final String model, final ReadableArray skip, final Promise promise) {
     final String modelPath = getContentFileDescriptor(model);
+
+    if (model.startsWith("content")) {
+      Uri uri = Uri.parse(model);
+      try {
+        reactContext.getApplicationContext().getContentResolver().takePersistableUriPermission(uri, Intent.FLAG_GRANT_READ_URI_PERMISSION);
+      } catch (SecurityException e) {
+        Log.w(NAME, "Persistable permission not granted for URI: " + uri);
+      }
+    }
 
     new AsyncTask<Void, Void, WritableMap>() {
       private Exception exception;
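`takePersistableUriPermission()` throws `SecurityException` whenever the URI was not obtained through the Storage Access Framework with the persistable flag set, which is why both call sites in this release catch it and log a warning instead of aborting. For context, a hedged caller-side sketch of how an app obtains a URI that these calls can persist; the picker helper is hypothetical and not part of this package:

```java
import android.app.Activity;
import android.content.Intent;

public class ModelPicker {
  // Hypothetical helper: launches the SAF document picker so the resulting
  // content:// URI supports persistable read permission across restarts.
  public static void pickModel(Activity activity, int requestCode) {
    Intent intent = new Intent(Intent.ACTION_OPEN_DOCUMENT);
    intent.addCategory(Intent.CATEGORY_OPENABLE);
    intent.setType("application/octet-stream"); // GGUF has no dedicated MIME type
    intent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION
        | Intent.FLAG_GRANT_PERSISTABLE_URI_PERMISSION);
    activity.startActivityForResult(intent, requestCode);
  }
}
```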
package/cpp/tools/mtmd/clip.cpp CHANGED
@@ -31,6 +31,12 @@
 #include <numeric>
 #include <functional>
 
+// rnllama additions
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
 struct clip_logger_state g_logger_state = {LM_GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};
 
 enum ffn_op_type {
@@ -2486,6 +2492,55 @@ struct clip_model_loader {
         {
             std::vector<uint8_t> read_buf;
 
+            // rnllama addition - we support usage of file descriptors
+            // Check if fname is an FD number (no '/' characters)
+            bool is_fd = (fname.find('/') == std::string::npos);
+
+            if (is_fd) {
+                // Routine for handling FD
+                int fd = -1;
+                try {
+                    fd = std::stoi(fname); // Convert string to integer FD
+                } catch (const std::invalid_argument& e) {
+                    throw std::runtime_error(string_format("%s: invalid FD number provided: %s\n", __func__, fname.c_str()));
+                } catch (const std::out_of_range& e) {
+                    throw std::runtime_error(string_format("%s: FD number out of range: %s\n", __func__, fname.c_str()));
+                }
+
+                lm_ggml_backend_buffer_type_t buft = lm_ggml_backend_get_default_buffer_type(ctx_clip.backend);
+                ctx_clip.buf.reset(lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
+                lm_ggml_backend_buffer_set_usage(ctx_clip.buf.get(), LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+
+                for (auto & t : tensors_to_load) {
+                    lm_ggml_tensor * cur = lm_ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
+                    const size_t offset = tensor_offset[t->name];
+
+                    if (lseek(fd, offset, SEEK_SET) == (off_t)-1) {
+                        throw std::runtime_error(string_format("%s: failed to seek for tensor %s (FD %d): %s\n", __func__, t->name, fd, strerror(errno)));
+                    }
+
+                    size_t num_bytes = lm_ggml_nbytes(cur);
+                    if (lm_ggml_backend_buft_is_host(buft)) {
+                        // for the CPU and Metal backend, we can read directly into the tensor
+                        ssize_t bytes_read = read(fd, reinterpret_cast<char *>(cur->data), num_bytes);
+                        if (bytes_read == -1 || static_cast<size_t>(bytes_read) != num_bytes) {
+                            throw std::runtime_error(string_format("%s: failed to read for tensor %s (FD %d): %s\n", __func__, t->name, fd, strerror(errno)));
+                        }
+                    } else {
+                        // read into a temporary buffer first, then copy to device memory
+                        read_buf.resize(num_bytes);
+                        ssize_t bytes_read = read(fd, reinterpret_cast<char *>(read_buf.data()), num_bytes);
+                        if (bytes_read == -1 || static_cast<size_t>(bytes_read) != num_bytes) {
+                            throw std::runtime_error(string_format("%s: failed to read for tensor %s (FD %d): %s\n", __func__, t->name, fd, strerror(errno)));
+                        }
+                        lm_ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
+                    }
+                }
+                // Assuming the FD is managed externally and shouldn't be closed here.
+                LOG_DBG("%s: loaded %zu tensors from FD %s\n", __func__, tensors_to_load.size(), fname.c_str());
+
+            } else {
+                // The original ifstream routine for file paths
             auto fin = std::ifstream(fname, std::ios::binary);
             if (!fin) {
                 throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
@@ -2516,6 +2571,7 @@ struct clip_model_loader {
             fin.close();
 
             LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
+            }
         }
     }
 
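The loader treats any `fname` without a `/` as a raw file-descriptor number, matching what the Java side produces from a `content://` URI (`mmprojPath = "" + pfd.getFd()` above). A minimal producer-side sketch of that convention, under the lifetime assumption stated in the `// Assuming the FD is managed externally` comment; `toFdPath` is a hypothetical name, not an API of this package:

```java
import java.io.FileNotFoundException;

import android.content.Context;
import android.net.Uri;
import android.os.ParcelFileDescriptor;

public final class FdPaths {
  // Hypothetical sketch of the convention consumed by the native loader above:
  // a plain integer string (no '/') is treated as an already-open FD.
  public static String toFdPath(Context context, String modelPath)
      throws FileNotFoundException {
    if (!modelPath.startsWith("content://")) {
      return modelPath; // ordinary filesystem path, used as-is
    }
    ParcelFileDescriptor pfd = context.getContentResolver()
        .openFileDescriptor(Uri.parse(modelPath), "r");
    // NOTE: pfd must stay open (and strongly referenced) until the native side
    // has finished reading; the C++ code deliberately never closes the FD.
    return String.valueOf(pfd.getFd()); // e.g. "57": no '/', so is_fd == true
  }
}
```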
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "cui-llama.rn",
-  "version": "1.7.2",
+  "version": "1.7.4",
   "description": "Fork of llama.rn for ChatterUI",
   "main": "lib/commonjs/index",
   "module": "lib/module/index",