cui-llama.rn 1.5.0 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +20 -20
- package/README.md +345 -319
- package/android/build.gradle +116 -116
- package/android/gradle.properties +5 -5
- package/android/src/main/AndroidManifest.xml +4 -4
- package/android/src/main/CMakeLists.txt +129 -124
- package/android/src/main/java/com/rnllama/LlamaContext.java +648 -645
- package/android/src/main/java/com/rnllama/RNLlama.java +695 -695
- package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -48
- package/android/src/main/jni-utils.h +100 -100
- package/android/src/main/jni.cpp +1279 -1263
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +135 -135
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +136 -136
- package/cpp/LICENSE +21 -0
- package/cpp/README.md +4 -4
- package/cpp/chat.cpp +1 -1
- package/cpp/common.cpp +17 -2
- package/cpp/common.h +7 -3
- package/cpp/ggml-alloc.c +4 -1
- package/cpp/ggml-cpp.h +1 -1
- package/cpp/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/ggml-cpu/amx/amx.h +8 -0
- package/cpp/ggml-cpu/amx/common.h +91 -0
- package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
- package/cpp/ggml-cpu/amx/mmq.h +10 -0
- package/cpp/{binary-ops.h → ggml-cpu/binary-ops.h} +1 -1
- package/cpp/ggml-cpu/common.h +72 -0
- package/cpp/{ggml-cpu-aarch64.cpp → ggml-cpu/ggml-cpu-aarch64.cpp} +809 -101
- package/cpp/{ggml-cpu.c → ggml-cpu/ggml-cpu.c} +109 -42
- package/cpp/{ggml-cpu.cpp → ggml-cpu/ggml-cpu.cpp} +3 -0
- package/cpp/{ops.cpp → ggml-cpu/ops.cpp} +246 -160
- package/cpp/{ops.h → ggml-cpu/ops.h} +2 -20
- package/cpp/{sgemm.cpp → ggml-cpu/sgemm.cpp} +501 -0
- package/cpp/{simd-mappings.h → ggml-cpu/simd-mappings.h} +7 -3
- package/cpp/{unary-ops.h → ggml-cpu/unary-ops.h} +1 -1
- package/cpp/ggml-cpu.h +5 -0
- package/cpp/ggml-impl.h +16 -9
- package/cpp/ggml-llama-sim.metallib +0 -0
- package/cpp/ggml-llama.metallib +0 -0
- package/cpp/ggml-metal-impl.h +597 -597
- package/cpp/ggml-metal.m +496 -47
- package/cpp/ggml.c +134 -244
- package/cpp/ggml.h +62 -95
- package/cpp/json-schema-to-grammar.cpp +3 -0
- package/cpp/llama-arch.cpp +46 -17
- package/cpp/llama-arch.h +9 -0
- package/cpp/llama-batch.cpp +5 -1
- package/cpp/llama-batch.h +2 -1
- package/cpp/llama-chat.cpp +31 -10
- package/cpp/llama-chat.h +3 -2
- package/cpp/llama-context.cpp +104 -489
- package/cpp/llama-context.h +14 -30
- package/cpp/llama-graph.cpp +69 -62
- package/cpp/llama-graph.h +21 -18
- package/cpp/llama-hparams.h +5 -0
- package/cpp/llama-kv-cache.cpp +1497 -391
- package/cpp/llama-kv-cache.h +272 -80
- package/cpp/llama-memory.h +11 -1
- package/cpp/llama-model.cpp +502 -176
- package/cpp/llama-model.h +13 -3
- package/cpp/llama-sampling.cpp +2 -1
- package/cpp/llama-vocab.cpp +8 -1
- package/cpp/llama.h +14 -11
- package/cpp/rn-llama.cpp +721 -873
- package/cpp/rn-llama.h +134 -138
- package/cpp/sampling.h +107 -107
- package/cpp/unicode-data.cpp +7034 -7034
- package/cpp/unicode-data.h +20 -20
- package/cpp/unicode.cpp +849 -849
- package/cpp/unicode.h +66 -66
- package/ios/CMakeLists.txt +119 -108
- package/ios/RNLlama.h +13 -7
- package/ios/RNLlama.mm +423 -405
- package/ios/RNLlamaContext.h +57 -57
- package/ios/RNLlamaContext.mm +833 -835
- package/ios/rnllama.xcframework/Info.plist +74 -74
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +681 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +601 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +2189 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +437 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +89 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +57 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +249 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +595 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +161 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +405 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +31 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +419 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +1437 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +134 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +681 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +601 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2189 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +437 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +89 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +57 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +249 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +595 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +161 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +405 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +31 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +419 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1437 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +134 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +681 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +601 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +2189 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +437 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +89 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +57 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +249 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +595 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +161 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +405 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +31 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +419 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +1437 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +134 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +681 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +601 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2189 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +437 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +89 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +57 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +249 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +595 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +161 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +405 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +31 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +419 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1437 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +134 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/jest/mock.js +203 -203
- package/lib/commonjs/NativeRNLlama.js +1 -2
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/chat.js.map +1 -1
- package/lib/commonjs/grammar.js +12 -31
- package/lib/commonjs/grammar.js.map +1 -1
- package/lib/commonjs/index.js +47 -47
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/package.json +1 -0
- package/lib/module/NativeRNLlama.js +2 -0
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/chat.js +2 -0
- package/lib/module/chat.js.map +1 -1
- package/lib/module/grammar.js +14 -31
- package/lib/module/grammar.js.map +1 -1
- package/lib/module/index.js +47 -45
- package/lib/module/index.js.map +1 -1
- package/lib/module/package.json +1 -0
- package/lib/typescript/NativeRNLlama.d.ts +10 -4
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts.map +1 -1
- package/llama-rn.podspec +48 -48
- package/package.json +233 -233
- package/src/NativeRNLlama.ts +431 -426
- package/src/chat.ts +44 -44
- package/src/grammar.ts +854 -854
- package/src/index.ts +495 -487
- /package/cpp/{binary-ops.cpp → ggml-cpu/binary-ops.cpp} +0 -0
- /package/cpp/{ggml-cpu-aarch64.h → ggml-cpu/ggml-cpu-aarch64.h} +0 -0
- /package/cpp/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -0
- /package/cpp/{ggml-cpu-quants.c → ggml-cpu/ggml-cpu-quants.c} +0 -0
- /package/cpp/{ggml-cpu-quants.h → ggml-cpu/ggml-cpu-quants.h} +0 -0
- /package/cpp/{ggml-cpu-traits.cpp → ggml-cpu/ggml-cpu-traits.cpp} +0 -0
- /package/cpp/{ggml-cpu-traits.h → ggml-cpu/ggml-cpu-traits.h} +0 -0
- /package/cpp/{sgemm.h → ggml-cpu/sgemm.h} +0 -0
- /package/cpp/{unary-ops.cpp → ggml-cpu/unary-ops.cpp} +0 -0
- /package/cpp/{vec.cpp → ggml-cpu/vec.cpp} +0 -0
- /package/cpp/{vec.h → ggml-cpu/vec.h} +0 -0
package/android/src/newarch/java/com/rnllama/RNLlamaModule.java
CHANGED
@@ -1,135 +1,135 @@
Every line of the file is removed and re-added with identical text, so the change appears to be whitespace or line-ending only. The file content, shown once:

package com.rnllama;

import androidx.annotation.NonNull;

import com.facebook.react.bridge.Promise;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.bridge.ReactMethod;
import com.facebook.react.bridge.ReadableMap;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.bridge.ReadableArray;
import com.facebook.react.module.annotations.ReactModule;

import java.util.HashMap;
import java.util.Random;
import java.io.File;
import java.io.FileInputStream;
import java.io.PushbackInputStream;

@ReactModule(name = RNLlama.NAME)
public class RNLlamaModule extends NativeRNLlamaSpec {
  public static final String NAME = RNLlama.NAME;

  private RNLlama rnllama = null;

  public RNLlamaModule(ReactApplicationContext reactContext) {
    super(reactContext);
    rnllama = new RNLlama(reactContext);
  }

  @Override
  @NonNull
  public String getName() {
    return NAME;
  }

  @ReactMethod
  public void toggleNativeLog(boolean enabled, Promise promise) {
    rnllama.toggleNativeLog(enabled, promise);
  }

  @ReactMethod
  public void setContextLimit(double limit, Promise promise) {
    rnllama.setContextLimit(limit, promise);
  }

  @ReactMethod
  public void modelInfo(final String model, final ReadableArray skip, final Promise promise) {
    rnllama.modelInfo(model, skip, promise);
  }

  @ReactMethod
  public void initContext(double id, final ReadableMap params, final Promise promise) {
    rnllama.initContext(id, params, promise);
  }

  @ReactMethod
  public void getFormattedChat(double id, String messages, String chatTemplate, ReadableMap params, Promise promise) {
    rnllama.getFormattedChat(id, messages, chatTemplate, params, promise);
  }

  @ReactMethod
  public void loadSession(double id, String path, Promise promise) {
    rnllama.loadSession(id, path, promise);
  }

  @ReactMethod
  public void saveSession(double id, String path, double size, Promise promise) {
    rnllama.saveSession(id, path, size, promise);
  }

  @ReactMethod
  public void completion(double id, final ReadableMap params, final Promise promise) {
    rnllama.completion(id, params, promise);
  }

  @ReactMethod
  public void stopCompletion(double id, final Promise promise) {
    rnllama.stopCompletion(id, promise);
  }

  @ReactMethod
  public void tokenizeAsync(double id, final String text, final Promise promise) {
    rnllama.tokenizeAsync(id, text, promise);
  }

  @ReactMethod(isBlockingSynchronousMethod=true)
  public WritableMap tokenizeSync(double id, final String text) {
    return rnllama.tokenizeSync(id, text);
  }

  @ReactMethod
  public void getCpuFeatures(final Promise promise) {
    rnllama.getCpuFeatures(promise);
  }

  @ReactMethod
  public void detokenize(double id, final ReadableArray tokens, final Promise promise) {
    rnllama.detokenize(id, tokens, promise);
  }

  @ReactMethod
  public void embedding(double id, final String text, final ReadableMap params, final Promise promise) {
    rnllama.embedding(id, text, params, promise);
  }

  @ReactMethod
  public void bench(double id, final double pp, final double tg, final double pl, final double nr, final Promise promise) {
    rnllama.bench(id, pp, tg, pl, nr, promise);
  }

  @ReactMethod
  public void applyLoraAdapters(double id, final ReadableArray loraAdapters, final Promise promise) {
    rnllama.applyLoraAdapters(id, loraAdapters, promise);
  }

  @ReactMethod
  public void removeLoraAdapters(double id, final Promise promise) {
    rnllama.removeLoraAdapters(id, promise);
  }

  @ReactMethod
  public void getLoadedLoraAdapters(double id, final Promise promise) {
    rnllama.getLoadedLoraAdapters(id, promise);
  }

  @ReactMethod
  public void releaseContext(double id, Promise promise) {
    rnllama.releaseContext(id, promise);
  }

  @ReactMethod
  public void releaseAllContexts(Promise promise) {
    rnllama.releaseAllContexts(promise);
  }
}
package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java
CHANGED
@@ -1,136 +1,136 @@
As with the new-architecture module above, every line is removed and re-added with identical text, so the change appears to be whitespace or line-ending only. The file matches the new-architecture RNLlamaModule.java shown above, except that it:
- additionally imports com.facebook.react.bridge.ReactContextBaseJavaModule
- declares the class as `public class RNLlamaModule extends ReactContextBaseJavaModule` instead of extending NativeRNLlamaSpec
- declares saveSession with an `int` size parameter: `public void saveSession(double id, String path, int size, Promise promise)`
package/cpp/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023-2024 The ggml authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/cpp/README.md
CHANGED
@@ -1,4 +1,4 @@
All four lines are removed and re-added with identical text, so the change appears to be whitespace or line-ending only. The file content, shown once:

# Note

- Only `rn-llama.h` and `rn-llama.cpp` are the specific files for this folder, others are sync from [llama.cpp](https://github.com/ggerganov/llama.cpp).
- We can update the native source by using the [bootstrap](../scripts/bootstrap.sh) script.
package/cpp/chat.cpp
CHANGED
@@ -1612,7 +1612,7 @@ static common_chat_params common_chat_templates_apply_jinja(
     }
 
     // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools)
-    if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) {
+    if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null() && params.tools.is_array() && params.json_schema.is_null()) {
         return common_chat_params_init_hermes_2_pro(tmpl, params);
     }
 
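The hunk above narrows when the Hermes 2/3 Pro / Qwen 2.5 tool-call handler is selected: a chat template that merely contains `<tool_call>` is no longer enough, the request must also supply a tools array. A minimal standalone sketch of the before/after routing decision (hypothetical names, not the library's real common_chat_* types):

```cpp
// Sketch only: illustrates the effect of the chat.cpp condition change, using
// simplified stand-in types instead of the real common_chat_params machinery.
#include <iostream>
#include <string>

enum class ChatHandler { Hermes2Pro, Generic };

// 1.5.0-style routing (sketch): any "<tool_call>" template without a JSON schema.
static ChatHandler route_old(const std::string & tmpl_src, bool has_json_schema, bool /*has_tools*/) {
    if (tmpl_src.find("<tool_call>") != std::string::npos && !has_json_schema) {
        return ChatHandler::Hermes2Pro;
    }
    return ChatHandler::Generic;
}

// 1.6.1-style routing (sketch): additionally require that tools were actually passed.
static ChatHandler route_new(const std::string & tmpl_src, bool has_json_schema, bool has_tools) {
    if (tmpl_src.find("<tool_call>") != std::string::npos && !has_json_schema && has_tools) {
        return ChatHandler::Hermes2Pro;
    }
    return ChatHandler::Generic;
}

int main() {
    const std::string hermes_like_template = "... <tool_call> ...";
    // Plain chat request, no tools: the old condition picked the Hermes handler, the new one does not.
    std::cout << (route_old(hermes_like_template, false, false) == ChatHandler::Hermes2Pro) << "\n"; // 1
    std::cout << (route_new(hermes_like_template, false, false) == ChatHandler::Hermes2Pro) << "\n"; // 0
    // With tools supplied, both versions take the Hermes 2 Pro path.
    std::cout << (route_new(hermes_like_template, false, true) == ChatHandler::Hermes2Pro) << "\n";  // 1
    return 0;
}
```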
package/cpp/common.cpp
CHANGED
@@ -837,7 +837,7 @@ std::string fs_get_cache_directory() {
     if (getenv("LLAMA_CACHE")) {
         cache_directory = std::getenv("LLAMA_CACHE");
     } else {
-#
+#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
         if (std::getenv("XDG_CACHE_HOME")) {
             cache_directory = std::getenv("XDG_CACHE_HOME");
         } else {
@@ -847,7 +847,9 @@ std::string fs_get_cache_directory() {
         cache_directory = std::getenv("HOME") + std::string("/Library/Caches/");
 #elif defined(_WIN32)
         cache_directory = std::getenv("LOCALAPPDATA");
-#
+#else
+#  error Unknown architecture
+#endif
         cache_directory = ensure_trailing_slash(cache_directory);
         cache_directory += "llama.cpp";
     }
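The two hunks above make the platform selection in fs_get_cache_directory() explicit: the supported targets are enumerated and anything else now fails with `#error Unknown architecture` at compile time instead of silently producing an empty cache path. A minimal standalone sketch of that guard pattern, assuming the usual per-platform cache locations (hypothetical helper, not the library's actual function); the file's diff continues with the next hunk below.

```cpp
// Illustrative sketch of the fail-fast platform guard; the cache locations used
// here are conventional assumptions, not a copy of the library's implementation.
#include <cstdlib>
#include <string>

static std::string platform_cache_root() {
#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
    if (const char * xdg = std::getenv("XDG_CACHE_HOME")) {
        return xdg;                                   // honour $XDG_CACHE_HOME when set
    }
    const char * home = std::getenv("HOME");
    return std::string(home ? home : "") + "/.cache";
#elif defined(__APPLE__)
    const char * home = std::getenv("HOME");
    return std::string(home ? home : "") + "/Library/Caches";
#elif defined(_WIN32)
    const char * local = std::getenv("LOCALAPPDATA");
    return local ? local : "";
#else
#    error Unknown architecture                       // reject unsupported targets at build time
#endif
}
```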
@@ -1034,6 +1036,19 @@ struct common_init_result common_init_from_params(common_params & params) {
     return iparams;
 }
 
+std::string get_model_endpoint() {
+    const char * model_endpoint_env = getenv("MODEL_ENDPOINT");
+    // We still respect the use of environment-variable "HF_ENDPOINT" for backward-compatibility.
+    const char * hf_endpoint_env = getenv("HF_ENDPOINT");
+    const char * endpoint_env = model_endpoint_env ? model_endpoint_env : hf_endpoint_env;
+    std::string model_endpoint = "https://huggingface.co/";
+    if (endpoint_env) {
+        model_endpoint = endpoint_env;
+        if (model_endpoint.back() != '/') model_endpoint += '/';
+    }
+    return model_endpoint;
+}
+
 void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
     llama_clear_adapter_lora(ctx);
     for (auto & la : lora) {
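The new get_model_endpoint() helper added above resolves the base URL for model downloads: MODEL_ENDPOINT takes precedence, HF_ENDPOINT is still honoured for backward compatibility, the default is https://huggingface.co/, and a trailing slash is appended when missing. A small standalone sketch of the same resolution logic; the re-implementation, the mirror URL, and the repo path below are illustrative, not part of the package:

```cpp
// Standalone sketch of the endpoint resolution; not the library's download code.
#include <cstdlib>   // std::getenv; setenv is POSIX
#include <iostream>
#include <string>

static std::string resolve_model_endpoint() {
    const char * model_ep = std::getenv("MODEL_ENDPOINT");
    const char * hf_ep    = std::getenv("HF_ENDPOINT");
    const char * chosen   = model_ep ? model_ep : hf_ep;   // MODEL_ENDPOINT wins over HF_ENDPOINT

    std::string endpoint = "https://huggingface.co/";      // default when neither variable is set
    if (chosen) {
        endpoint = chosen;
        if (!endpoint.empty() && endpoint.back() != '/') endpoint += '/';
    }
    return endpoint;
}

int main() {
    // Default: https://huggingface.co/
    std::cout << resolve_model_endpoint() << "\n";

    // Point downloads at a mirror; the missing trailing slash is added automatically.
    setenv("MODEL_ENDPOINT", "https://example-mirror.local/models", 1);
    std::cout << resolve_model_endpoint() << "\n";
    std::cout << resolve_model_endpoint() + "org/repo/resolve/main/model.gguf" << "\n";
    return 0;
}
```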