cui-llama.rn 1.5.0 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +20 -20
- package/README.md +345 -319
- package/android/build.gradle +116 -116
- package/android/gradle.properties +5 -5
- package/android/src/main/AndroidManifest.xml +4 -4
- package/android/src/main/CMakeLists.txt +129 -124
- package/android/src/main/java/com/rnllama/LlamaContext.java +648 -645
- package/android/src/main/java/com/rnllama/RNLlama.java +695 -695
- package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -48
- package/android/src/main/jni-utils.h +100 -100
- package/android/src/main/jni.cpp +1279 -1263
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +135 -135
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +136 -136
- package/cpp/LICENSE +21 -0
- package/cpp/README.md +4 -4
- package/cpp/chat.cpp +1 -1
- package/cpp/common.cpp +17 -2
- package/cpp/common.h +7 -3
- package/cpp/ggml-alloc.c +4 -1
- package/cpp/ggml-cpp.h +1 -1
- package/cpp/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/ggml-cpu/amx/amx.h +8 -0
- package/cpp/ggml-cpu/amx/common.h +91 -0
- package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
- package/cpp/ggml-cpu/amx/mmq.h +10 -0
- package/cpp/{binary-ops.h → ggml-cpu/binary-ops.h} +1 -1
- package/cpp/ggml-cpu/common.h +72 -0
- package/cpp/{ggml-cpu-aarch64.cpp → ggml-cpu/ggml-cpu-aarch64.cpp} +809 -101
- package/cpp/{ggml-cpu.c → ggml-cpu/ggml-cpu.c} +109 -42
- package/cpp/{ggml-cpu.cpp → ggml-cpu/ggml-cpu.cpp} +3 -0
- package/cpp/{ops.cpp → ggml-cpu/ops.cpp} +246 -160
- package/cpp/{ops.h → ggml-cpu/ops.h} +2 -20
- package/cpp/{sgemm.cpp → ggml-cpu/sgemm.cpp} +501 -0
- package/cpp/{simd-mappings.h → ggml-cpu/simd-mappings.h} +7 -3
- package/cpp/{unary-ops.h → ggml-cpu/unary-ops.h} +1 -1
- package/cpp/ggml-cpu.h +5 -0
- package/cpp/ggml-impl.h +16 -9
- package/cpp/ggml-llama-sim.metallib +0 -0
- package/cpp/ggml-llama.metallib +0 -0
- package/cpp/ggml-metal-impl.h +597 -597
- package/cpp/ggml-metal.m +496 -47
- package/cpp/ggml.c +134 -244
- package/cpp/ggml.h +62 -95
- package/cpp/json-schema-to-grammar.cpp +3 -0
- package/cpp/llama-arch.cpp +46 -17
- package/cpp/llama-arch.h +9 -0
- package/cpp/llama-batch.cpp +5 -1
- package/cpp/llama-batch.h +2 -1
- package/cpp/llama-chat.cpp +31 -10
- package/cpp/llama-chat.h +3 -2
- package/cpp/llama-context.cpp +104 -489
- package/cpp/llama-context.h +14 -30
- package/cpp/llama-graph.cpp +69 -62
- package/cpp/llama-graph.h +21 -18
- package/cpp/llama-hparams.h +5 -0
- package/cpp/llama-kv-cache.cpp +1497 -391
- package/cpp/llama-kv-cache.h +272 -80
- package/cpp/llama-memory.h +11 -1
- package/cpp/llama-model.cpp +502 -176
- package/cpp/llama-model.h +13 -3
- package/cpp/llama-sampling.cpp +2 -1
- package/cpp/llama-vocab.cpp +8 -1
- package/cpp/llama.h +14 -11
- package/cpp/rn-llama.cpp +721 -873
- package/cpp/rn-llama.h +134 -138
- package/cpp/sampling.h +107 -107
- package/cpp/unicode-data.cpp +7034 -7034
- package/cpp/unicode-data.h +20 -20
- package/cpp/unicode.cpp +849 -849
- package/cpp/unicode.h +66 -66
- package/ios/CMakeLists.txt +119 -108
- package/ios/RNLlama.h +13 -7
- package/ios/RNLlama.mm +423 -405
- package/ios/RNLlamaContext.h +57 -57
- package/ios/RNLlamaContext.mm +833 -835
- package/ios/rnllama.xcframework/Info.plist +74 -74
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +681 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +601 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +2189 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +437 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +89 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +57 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +249 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +595 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +161 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +405 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +31 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +419 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +1437 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +134 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +681 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +601 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2189 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +437 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +89 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +57 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +249 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +595 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +161 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +405 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +31 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +419 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1437 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +134 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +681 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +601 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +2189 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +437 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +89 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +57 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +249 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +595 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +161 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +405 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +31 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +419 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +1437 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +134 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +681 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +601 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2189 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +437 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +89 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +57 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +249 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +595 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +161 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +405 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +31 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +419 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1437 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +134 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/jest/mock.js +203 -203
- package/lib/commonjs/NativeRNLlama.js +1 -2
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/chat.js.map +1 -1
- package/lib/commonjs/grammar.js +12 -31
- package/lib/commonjs/grammar.js.map +1 -1
- package/lib/commonjs/index.js +47 -47
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/package.json +1 -0
- package/lib/module/NativeRNLlama.js +2 -0
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/chat.js +2 -0
- package/lib/module/chat.js.map +1 -1
- package/lib/module/grammar.js +14 -31
- package/lib/module/grammar.js.map +1 -1
- package/lib/module/index.js +47 -45
- package/lib/module/index.js.map +1 -1
- package/lib/module/package.json +1 -0
- package/lib/typescript/NativeRNLlama.d.ts +10 -4
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts.map +1 -1
- package/llama-rn.podspec +48 -48
- package/package.json +233 -233
- package/src/NativeRNLlama.ts +431 -426
- package/src/chat.ts +44 -44
- package/src/grammar.ts +854 -854
- package/src/index.ts +495 -487
- /package/cpp/{binary-ops.cpp → ggml-cpu/binary-ops.cpp} +0 -0
- /package/cpp/{ggml-cpu-aarch64.h → ggml-cpu/ggml-cpu-aarch64.h} +0 -0
- /package/cpp/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -0
- /package/cpp/{ggml-cpu-quants.c → ggml-cpu/ggml-cpu-quants.c} +0 -0
- /package/cpp/{ggml-cpu-quants.h → ggml-cpu/ggml-cpu-quants.h} +0 -0
- /package/cpp/{ggml-cpu-traits.cpp → ggml-cpu/ggml-cpu-traits.cpp} +0 -0
- /package/cpp/{ggml-cpu-traits.h → ggml-cpu/ggml-cpu-traits.h} +0 -0
- /package/cpp/{sgemm.h → ggml-cpu/sgemm.h} +0 -0
- /package/cpp/{unary-ops.cpp → ggml-cpu/unary-ops.cpp} +0 -0
- /package/cpp/{vec.cpp → ggml-cpu/vec.cpp} +0 -0
- /package/cpp/{vec.h → ggml-cpu/vec.h} +0 -0
package/ios/RNLlamaContext.mm
CHANGED
@@ -1,835 +1,833 @@
|
|
1
|
-
#import "RNLlamaContext.h"
|
2
|
-
#import <Metal/Metal.h>
|
3
|
-
|
4
|
-
@implementation RNLlamaContext
|
5
|
-
|
6
|
-
+ (void)toggleNativeLog:(BOOL)enabled onEmitLog:(void (^)(NSString *level, NSString *text))onEmitLog {
|
7
|
-
if (enabled) {
|
8
|
-
void (^copiedBlock)(NSString *, NSString *) = [onEmitLog copy];
|
9
|
-
llama_log_set([](lm_ggml_log_level level, const char * text, void * data) {
|
10
|
-
llama_log_callback_default(level, text, data);
|
11
|
-
NSString *levelStr = @"";
|
12
|
-
if (level == LM_GGML_LOG_LEVEL_ERROR) {
|
13
|
-
levelStr = @"error";
|
14
|
-
} else if (level == LM_GGML_LOG_LEVEL_INFO) {
|
15
|
-
levelStr = @"info";
|
16
|
-
} else if (level == LM_GGML_LOG_LEVEL_WARN) {
|
17
|
-
levelStr = @"warn";
|
18
|
-
}
|
19
|
-
|
20
|
-
NSString *textStr = [NSString stringWithUTF8String:text];
|
21
|
-
// NOTE: Convert to UTF-8 string may fail
|
22
|
-
if (!textStr) {
|
23
|
-
return;
|
24
|
-
}
|
25
|
-
void (^block)(NSString *, NSString *) = (__bridge void (^)(NSString *, NSString *))(data);
|
26
|
-
block(levelStr, textStr);
|
27
|
-
}, copiedBlock);
|
28
|
-
} else {
|
29
|
-
llama_log_set(llama_log_callback_default, nullptr);
|
30
|
-
}
|
31
|
-
}
|
32
|
-
|
33
|
-
+ (NSDictionary *)modelInfo:(NSString *)path skip:(NSArray *)skip {
|
34
|
-
struct lm_gguf_init_params params = {
|
35
|
-
/*.no_alloc = */ false,
|
36
|
-
/*.ctx = */ NULL,
|
37
|
-
};
|
38
|
-
|
39
|
-
struct lm_gguf_context * ctx = lm_gguf_init_from_file([path UTF8String], params);
|
40
|
-
|
41
|
-
if (!ctx) {
|
42
|
-
NSLog(@"%s: failed to load '%s'\n", __func__, [path UTF8String]);
|
43
|
-
return @{};
|
44
|
-
}
|
45
|
-
|
46
|
-
NSMutableDictionary *info = [[NSMutableDictionary alloc] init];
|
47
|
-
|
48
|
-
info[@"version"] = @(lm_gguf_get_version(ctx));
|
49
|
-
info[@"alignment"] = @(lm_gguf_get_alignment(ctx));
|
50
|
-
info[@"data_offset"] = @(lm_gguf_get_data_offset(ctx));
|
51
|
-
|
52
|
-
// kv
|
53
|
-
{
|
54
|
-
const int n_kv = lm_gguf_get_n_kv(ctx);
|
55
|
-
|
56
|
-
for (int i = 0; i < n_kv; ++i) {
|
57
|
-
const char * key = lm_gguf_get_key(ctx, i);
|
58
|
-
|
59
|
-
if (skip && [skip containsObject:[NSString stringWithUTF8String:key]]) {
|
60
|
-
continue;
|
61
|
-
}
|
62
|
-
const std::string value = lm_gguf_kv_to_str(ctx, i);
|
63
|
-
info[[NSString stringWithUTF8String:key]] = [NSString stringWithUTF8String:value.c_str()];
|
64
|
-
}
|
65
|
-
}
|
66
|
-
|
67
|
-
lm_gguf_free(ctx);
|
68
|
-
|
69
|
-
return info;
|
70
|
-
}
|
71
|
-
|
72
|
-
+ (instancetype)initWithParams:(NSDictionary *)params onProgress:(void (^)(unsigned int progress))onProgress {
|
73
|
-
// llama_backend_init(false);
|
74
|
-
common_params defaultParams;
|
75
|
-
|
76
|
-
if (params[@"vocab_only"]) {
|
77
|
-
defaultParams.vocab_only = [params[@"vocab_only"] boolValue];
|
78
|
-
defaultParams.warmup = false;
|
79
|
-
}
|
80
|
-
|
81
|
-
NSString *modelPath = params[@"model"];
|
82
|
-
BOOL isAsset = [params[@"is_model_asset"] boolValue];
|
83
|
-
NSString *path = modelPath;
|
84
|
-
if (isAsset) path = [[NSBundle mainBundle] pathForResource:modelPath ofType:nil];
|
85
|
-
defaultParams.model = [path UTF8String];
|
86
|
-
|
87
|
-
NSString *chatTemplate = params[@"chat_template"];
|
88
|
-
if (chatTemplate) {
|
89
|
-
defaultParams.chat_template = [chatTemplate UTF8String];
|
90
|
-
NSLog(@"chatTemplate: %@", chatTemplate);
|
91
|
-
}
|
92
|
-
|
93
|
-
NSString *reasoningFormat = params[@"reasoning_format"];
|
94
|
-
if (reasoningFormat && [reasoningFormat isEqualToString:@"deepseek"]) {
|
95
|
-
defaultParams.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
|
96
|
-
} else {
|
97
|
-
defaultParams.reasoning_format = COMMON_REASONING_FORMAT_NONE;
|
98
|
-
}
|
99
|
-
|
100
|
-
if (params[@"n_ctx"]) defaultParams.n_ctx = [params[@"n_ctx"] intValue];
|
101
|
-
if (params[@"use_mlock"]) defaultParams.use_mlock = [params[@"use_mlock"]boolValue];
|
102
|
-
|
103
|
-
BOOL skipGpuDevices = params[@"no_gpu_devices"] && [params[@"no_gpu_devices"] boolValue];
|
104
|
-
|
105
|
-
BOOL isMetalEnabled = false;
|
106
|
-
NSString *reasonNoMetal = @"";
|
107
|
-
defaultParams.n_gpu_layers = 0;
|
108
|
-
#ifdef LM_GGML_USE_METAL
|
109
|
-
|
110
|
-
|
111
|
-
|
112
|
-
|
113
|
-
|
114
|
-
|
115
|
-
|
116
|
-
|
117
|
-
|
118
|
-
|
119
|
-
|
120
|
-
|
121
|
-
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
|
130
|
-
|
131
|
-
|
132
|
-
|
133
|
-
|
134
|
-
#
|
135
|
-
|
136
|
-
|
137
|
-
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
}
|
163
|
-
|
164
|
-
if (params[@"
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
if (params[@"
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
) {
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
|
250
|
-
|
251
|
-
|
252
|
-
|
253
|
-
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
|
262
|
-
|
263
|
-
|
264
|
-
|
265
|
-
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
|
270
|
-
|
271
|
-
|
272
|
-
|
273
|
-
|
274
|
-
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
|
284
|
-
|
285
|
-
|
286
|
-
|
287
|
-
|
288
|
-
|
289
|
-
|
290
|
-
|
291
|
-
|
292
|
-
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
|
308
|
-
|
309
|
-
|
310
|
-
|
311
|
-
|
312
|
-
|
313
|
-
|
314
|
-
|
315
|
-
|
316
|
-
|
317
|
-
|
318
|
-
|
319
|
-
|
320
|
-
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
|
332
|
-
|
333
|
-
|
334
|
-
|
335
|
-
|
336
|
-
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
|
345
|
-
|
346
|
-
|
347
|
-
|
348
|
-
|
349
|
-
|
350
|
-
|
351
|
-
|
352
|
-
|
353
|
-
|
354
|
-
|
355
|
-
|
356
|
-
|
357
|
-
|
358
|
-
)
|
359
|
-
|
360
|
-
|
361
|
-
|
362
|
-
|
363
|
-
|
364
|
-
|
365
|
-
|
366
|
-
|
367
|
-
|
368
|
-
|
369
|
-
|
370
|
-
|
371
|
-
|
372
|
-
|
373
|
-
|
374
|
-
|
375
|
-
|
376
|
-
|
377
|
-
|
378
|
-
|
379
|
-
|
380
|
-
|
381
|
-
|
382
|
-
|
383
|
-
|
384
|
-
|
385
|
-
|
386
|
-
|
387
|
-
|
388
|
-
|
389
|
-
|
390
|
-
|
391
|
-
|
392
|
-
|
393
|
-
|
394
|
-
|
395
|
-
|
396
|
-
|
397
|
-
|
398
|
-
|
399
|
-
|
400
|
-
|
401
|
-
|
402
|
-
|
403
|
-
|
404
|
-
|
405
|
-
|
406
|
-
|
407
|
-
|
408
|
-
|
409
|
-
|
410
|
-
|
411
|
-
|
412
|
-
|
413
|
-
|
414
|
-
|
415
|
-
|
416
|
-
|
417
|
-
|
418
|
-
|
419
|
-
llama->
|
420
|
-
|
421
|
-
|
422
|
-
|
423
|
-
|
424
|
-
|
425
|
-
|
426
|
-
|
427
|
-
|
428
|
-
|
429
|
-
|
430
|
-
|
431
|
-
|
432
|
-
|
433
|
-
|
434
|
-
|
435
|
-
|
436
|
-
if (params[@"
|
437
|
-
|
438
|
-
|
439
|
-
|
440
|
-
if (params[@"
|
441
|
-
|
442
|
-
|
443
|
-
|
444
|
-
if (params[@"
|
445
|
-
if (params[@"
|
446
|
-
|
447
|
-
if (params[@"
|
448
|
-
|
449
|
-
if (params[@"
|
450
|
-
if (params[@"
|
451
|
-
if (params[@"
|
452
|
-
|
453
|
-
|
454
|
-
if (params[@"
|
455
|
-
if (params[@"
|
456
|
-
if (params[@"
|
457
|
-
if (params[@"
|
458
|
-
|
459
|
-
|
460
|
-
|
461
|
-
|
462
|
-
if (params[@"
|
463
|
-
|
464
|
-
|
465
|
-
|
466
|
-
|
467
|
-
|
468
|
-
|
469
|
-
|
470
|
-
|
471
|
-
|
472
|
-
|
473
|
-
|
474
|
-
|
475
|
-
|
476
|
-
|
477
|
-
|
478
|
-
|
479
|
-
|
480
|
-
|
481
|
-
|
482
|
-
|
483
|
-
|
484
|
-
|
485
|
-
|
486
|
-
|
487
|
-
|
488
|
-
|
489
|
-
|
490
|
-
|
491
|
-
|
492
|
-
|
493
|
-
|
494
|
-
|
495
|
-
|
496
|
-
|
497
|
-
|
498
|
-
|
499
|
-
|
500
|
-
|
501
|
-
|
502
|
-
|
503
|
-
|
504
|
-
|
505
|
-
|
506
|
-
|
507
|
-
|
508
|
-
|
509
|
-
|
510
|
-
|
511
|
-
|
512
|
-
|
513
|
-
|
514
|
-
|
515
|
-
|
516
|
-
|
517
|
-
|
518
|
-
|
519
|
-
|
520
|
-
|
521
|
-
|
522
|
-
trigger
|
523
|
-
|
524
|
-
|
525
|
-
|
526
|
-
|
527
|
-
|
528
|
-
|
529
|
-
|
530
|
-
|
531
|
-
|
532
|
-
|
533
|
-
|
534
|
-
|
535
|
-
|
536
|
-
|
537
|
-
|
538
|
-
|
539
|
-
|
540
|
-
|
541
|
-
|
542
|
-
|
543
|
-
|
544
|
-
|
545
|
-
|
546
|
-
|
547
|
-
|
548
|
-
|
549
|
-
|
550
|
-
|
551
|
-
|
552
|
-
|
553
|
-
|
554
|
-
|
555
|
-
|
556
|
-
|
557
|
-
|
558
|
-
|
559
|
-
|
560
|
-
|
561
|
-
|
562
|
-
|
563
|
-
|
564
|
-
|
565
|
-
|
566
|
-
|
567
|
-
|
568
|
-
|
569
|
-
|
570
|
-
|
571
|
-
|
572
|
-
|
573
|
-
|
574
|
-
|
575
|
-
|
576
|
-
|
577
|
-
|
578
|
-
|
579
|
-
|
580
|
-
|
581
|
-
|
582
|
-
|
583
|
-
|
584
|
-
|
585
|
-
|
586
|
-
|
587
|
-
llama->
|
588
|
-
|
589
|
-
|
590
|
-
llama->generated_text.
|
591
|
-
|
592
|
-
|
593
|
-
|
594
|
-
|
595
|
-
|
596
|
-
|
597
|
-
|
598
|
-
|
599
|
-
|
600
|
-
|
601
|
-
|
602
|
-
|
603
|
-
|
604
|
-
|
605
|
-
|
606
|
-
|
607
|
-
|
608
|
-
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
|
613
|
-
|
614
|
-
|
615
|
-
|
616
|
-
|
617
|
-
|
618
|
-
|
619
|
-
|
620
|
-
|
621
|
-
|
622
|
-
|
623
|
-
|
624
|
-
|
625
|
-
|
626
|
-
|
627
|
-
|
628
|
-
|
629
|
-
|
630
|
-
|
631
|
-
|
632
|
-
|
633
|
-
|
634
|
-
|
635
|
-
|
636
|
-
|
637
|
-
|
638
|
-
|
639
|
-
|
640
|
-
|
641
|
-
|
642
|
-
|
643
|
-
|
644
|
-
|
645
|
-
|
646
|
-
|
647
|
-
|
648
|
-
|
649
|
-
|
650
|
-
|
651
|
-
|
652
|
-
|
653
|
-
|
654
|
-
|
655
|
-
|
656
|
-
|
657
|
-
|
658
|
-
|
659
|
-
|
660
|
-
|
661
|
-
|
662
|
-
|
663
|
-
result[@"
|
664
|
-
|
665
|
-
|
666
|
-
|
667
|
-
result[@"
|
668
|
-
result[@"
|
669
|
-
result[@"
|
670
|
-
result[@"
|
671
|
-
result[@"
|
672
|
-
result[@"
|
673
|
-
result[@"
|
674
|
-
result[@"
|
675
|
-
|
676
|
-
|
677
|
-
@"
|
678
|
-
@"
|
679
|
-
@"
|
680
|
-
@"
|
681
|
-
@"
|
682
|
-
@"
|
683
|
-
@"
|
684
|
-
|
685
|
-
|
686
|
-
|
687
|
-
|
688
|
-
|
689
|
-
|
690
|
-
|
691
|
-
|
692
|
-
|
693
|
-
|
694
|
-
|
695
|
-
|
696
|
-
|
697
|
-
|
698
|
-
|
699
|
-
|
700
|
-
|
701
|
-
|
702
|
-
|
703
|
-
|
704
|
-
|
705
|
-
|
706
|
-
|
707
|
-
|
708
|
-
|
709
|
-
|
710
|
-
|
711
|
-
|
712
|
-
|
713
|
-
|
714
|
-
|
715
|
-
|
716
|
-
|
717
|
-
|
718
|
-
|
719
|
-
|
720
|
-
|
721
|
-
|
722
|
-
|
723
|
-
|
724
|
-
|
725
|
-
llama->
|
726
|
-
|
727
|
-
|
728
|
-
|
729
|
-
llama->params.
|
730
|
-
|
731
|
-
llama->
|
732
|
-
|
733
|
-
|
734
|
-
|
735
|
-
|
736
|
-
llama->
|
737
|
-
|
738
|
-
llama->
|
739
|
-
|
740
|
-
|
741
|
-
|
742
|
-
|
743
|
-
|
744
|
-
|
745
|
-
|
746
|
-
|
747
|
-
|
748
|
-
|
749
|
-
|
750
|
-
|
751
|
-
|
752
|
-
|
753
|
-
|
754
|
-
|
755
|
-
|
756
|
-
|
757
|
-
|
758
|
-
|
759
|
-
|
760
|
-
|
761
|
-
|
762
|
-
|
763
|
-
|
764
|
-
|
765
|
-
|
766
|
-
|
767
|
-
|
768
|
-
|
769
|
-
|
770
|
-
|
771
|
-
|
772
|
-
|
773
|
-
|
774
|
-
|
775
|
-
|
776
|
-
|
777
|
-
|
778
|
-
|
779
|
-
|
780
|
-
|
781
|
-
|
782
|
-
|
783
|
-
|
784
|
-
|
785
|
-
|
786
|
-
|
787
|
-
|
788
|
-
|
789
|
-
|
790
|
-
|
791
|
-
|
792
|
-
|
793
|
-
|
794
|
-
|
795
|
-
|
796
|
-
|
797
|
-
|
798
|
-
|
799
|
-
|
800
|
-
la.
|
801
|
-
la.
|
802
|
-
|
803
|
-
|
804
|
-
|
805
|
-
|
806
|
-
|
807
|
-
|
808
|
-
|
809
|
-
|
810
|
-
|
811
|
-
|
812
|
-
|
813
|
-
|
814
|
-
|
815
|
-
|
816
|
-
|
817
|
-
|
818
|
-
|
819
|
-
|
820
|
-
|
821
|
-
|
822
|
-
|
823
|
-
|
824
|
-
|
825
|
-
|
826
|
-
|
827
|
-
|
828
|
-
|
829
|
-
|
830
|
-
|
831
|
-
|
832
|
-
|
833
|
-
|
834
|
-
|
835
|
-
@end
|
1
|
+
#import "RNLlamaContext.h"
|
2
|
+
#import <Metal/Metal.h>
|
3
|
+
|
4
|
+
@implementation RNLlamaContext
|
5
|
+
|
6
|
+
+ (void)toggleNativeLog:(BOOL)enabled onEmitLog:(void (^)(NSString *level, NSString *text))onEmitLog {
|
7
|
+
if (enabled) {
|
8
|
+
void (^copiedBlock)(NSString *, NSString *) = [onEmitLog copy];
|
9
|
+
llama_log_set([](lm_ggml_log_level level, const char * text, void * data) {
|
10
|
+
llama_log_callback_default(level, text, data);
|
11
|
+
NSString *levelStr = @"";
|
12
|
+
if (level == LM_GGML_LOG_LEVEL_ERROR) {
|
13
|
+
levelStr = @"error";
|
14
|
+
} else if (level == LM_GGML_LOG_LEVEL_INFO) {
|
15
|
+
levelStr = @"info";
|
16
|
+
} else if (level == LM_GGML_LOG_LEVEL_WARN) {
|
17
|
+
levelStr = @"warn";
|
18
|
+
}
|
19
|
+
|
20
|
+
NSString *textStr = [NSString stringWithUTF8String:text];
|
21
|
+
// NOTE: Convert to UTF-8 string may fail
|
22
|
+
if (!textStr) {
|
23
|
+
return;
|
24
|
+
}
|
25
|
+
void (^block)(NSString *, NSString *) = (__bridge void (^)(NSString *, NSString *))(data);
|
26
|
+
block(levelStr, textStr);
|
27
|
+
}, copiedBlock);
|
28
|
+
} else {
|
29
|
+
llama_log_set(llama_log_callback_default, nullptr);
|
30
|
+
}
|
31
|
+
}
|
32
|
+
|
33
|
+
+ (NSDictionary *)modelInfo:(NSString *)path skip:(NSArray *)skip {
|
34
|
+
struct lm_gguf_init_params params = {
|
35
|
+
/*.no_alloc = */ false,
|
36
|
+
/*.ctx = */ NULL,
|
37
|
+
};
|
38
|
+
|
39
|
+
struct lm_gguf_context * ctx = lm_gguf_init_from_file([path UTF8String], params);
|
40
|
+
|
41
|
+
if (!ctx) {
|
42
|
+
NSLog(@"%s: failed to load '%s'\n", __func__, [path UTF8String]);
|
43
|
+
return @{};
|
44
|
+
}
|
45
|
+
|
46
|
+
NSMutableDictionary *info = [[NSMutableDictionary alloc] init];
|
47
|
+
|
48
|
+
info[@"version"] = @(lm_gguf_get_version(ctx));
|
49
|
+
info[@"alignment"] = @(lm_gguf_get_alignment(ctx));
|
50
|
+
info[@"data_offset"] = @(lm_gguf_get_data_offset(ctx));
|
51
|
+
|
52
|
+
// kv
|
53
|
+
{
|
54
|
+
const int n_kv = lm_gguf_get_n_kv(ctx);
|
55
|
+
|
56
|
+
for (int i = 0; i < n_kv; ++i) {
|
57
|
+
const char * key = lm_gguf_get_key(ctx, i);
|
58
|
+
|
59
|
+
if (skip && [skip containsObject:[NSString stringWithUTF8String:key]]) {
|
60
|
+
continue;
|
61
|
+
}
|
62
|
+
const std::string value = lm_gguf_kv_to_str(ctx, i);
|
63
|
+
info[[NSString stringWithUTF8String:key]] = [NSString stringWithUTF8String:value.c_str()];
|
64
|
+
}
|
65
|
+
}
|
66
|
+
|
67
|
+
lm_gguf_free(ctx);
|
68
|
+
|
69
|
+
return info;
|
70
|
+
}
|
71
|
+
|
72
|
+
+ (instancetype)initWithParams:(NSDictionary *)params onProgress:(void (^)(unsigned int progress))onProgress {
|
73
|
+
// llama_backend_init(false);
|
74
|
+
common_params defaultParams;
|
75
|
+
|
76
|
+
if (params[@"vocab_only"]) {
|
77
|
+
defaultParams.vocab_only = [params[@"vocab_only"] boolValue];
|
78
|
+
defaultParams.warmup = false;
|
79
|
+
}
|
80
|
+
|
81
|
+
NSString *modelPath = params[@"model"];
|
82
|
+
BOOL isAsset = [params[@"is_model_asset"] boolValue];
|
83
|
+
NSString *path = modelPath;
|
84
|
+
if (isAsset) path = [[NSBundle mainBundle] pathForResource:modelPath ofType:nil];
|
85
|
+
defaultParams.model.path = [path UTF8String];
|
86
|
+
|
87
|
+
NSString *chatTemplate = params[@"chat_template"];
|
88
|
+
if (chatTemplate) {
|
89
|
+
defaultParams.chat_template = [chatTemplate UTF8String];
|
90
|
+
NSLog(@"chatTemplate: %@", chatTemplate);
|
91
|
+
}
|
92
|
+
|
93
|
+
NSString *reasoningFormat = params[@"reasoning_format"];
|
94
|
+
if (reasoningFormat && [reasoningFormat isEqualToString:@"deepseek"]) {
|
95
|
+
defaultParams.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
|
96
|
+
} else {
|
97
|
+
defaultParams.reasoning_format = COMMON_REASONING_FORMAT_NONE;
|
98
|
+
}
|
99
|
+
|
100
|
+
if (params[@"n_ctx"]) defaultParams.n_ctx = [params[@"n_ctx"] intValue];
|
101
|
+
if (params[@"use_mlock"]) defaultParams.use_mlock = [params[@"use_mlock"]boolValue];
|
102
|
+
|
103
|
+
BOOL skipGpuDevices = params[@"no_gpu_devices"] && [params[@"no_gpu_devices"] boolValue];
|
104
|
+
|
105
|
+
BOOL isMetalEnabled = false;
|
106
|
+
NSString *reasonNoMetal = @"";
|
107
|
+
defaultParams.n_gpu_layers = 0;
|
108
|
+
#ifdef LM_GGML_USE_METAL
|
109
|
+
id<MTLDevice> device = MTLCreateSystemDefaultDevice();
|
110
|
+
|
111
|
+
// Check ggml-metal availability
|
112
|
+
BOOL supportsGgmlMetal = [device supportsFamily:MTLGPUFamilyApple7];
|
113
|
+
if (@available(iOS 16.0, tvOS 16.0, *)) {
|
114
|
+
supportsGgmlMetal = supportsGgmlMetal && [device supportsFamily:MTLGPUFamilyMetal3];
|
115
|
+
}
|
116
|
+
if (!supportsGgmlMetal) {
|
117
|
+
reasonNoMetal = @"Metal is not supported in this device";
|
118
|
+
skipGpuDevices = true;
|
119
|
+
}
|
120
|
+
|
121
|
+
#if TARGET_OS_SIMULATOR
|
122
|
+
// Use the backend, but no layers because not supported fully on simulator
|
123
|
+
defaultParams.n_gpu_layers = 0;
|
124
|
+
isMetalEnabled = true;
|
125
|
+
#else
|
126
|
+
defaultParams.n_gpu_layers = [params[@"n_gpu_layers"] intValue];
|
127
|
+
isMetalEnabled = true;
|
128
|
+
#endif
|
129
|
+
|
130
|
+
device = nil;
|
131
|
+
#else
|
132
|
+
reasonNoMetal = @"Metal is not enabled in this build";
|
133
|
+
isMetalEnabled = false;
|
134
|
+
#endif
|
135
|
+
|
136
|
+
if (skipGpuDevices) {
|
137
|
+
std::vector<lm_ggml_backend_dev_t> cpu_devs;
|
138
|
+
for (size_t i = 0; i < lm_ggml_backend_dev_count(); ++i) {
|
139
|
+
lm_ggml_backend_dev_t dev = lm_ggml_backend_dev_get(i);
|
140
|
+
switch (lm_ggml_backend_dev_type(dev)) {
|
141
|
+
case LM_GGML_BACKEND_DEVICE_TYPE_CPU:
|
142
|
+
case LM_GGML_BACKEND_DEVICE_TYPE_ACCEL:
|
143
|
+
cpu_devs.push_back(dev);
|
144
|
+
break;
|
145
|
+
case LM_GGML_BACKEND_DEVICE_TYPE_GPU:
|
146
|
+
break;
|
147
|
+
}
|
148
|
+
}
|
149
|
+
if (cpu_devs.size() > 0) {
|
150
|
+
defaultParams.devices = cpu_devs;
|
151
|
+
defaultParams.n_gpu_layers = 0;
|
152
|
+
isMetalEnabled = false;
|
153
|
+
}
|
154
|
+
}
|
155
|
+
|
156
|
+
if (params[@"n_batch"]) defaultParams.n_batch = [params[@"n_batch"] intValue];
|
157
|
+
if (params[@"n_ubatch"]) defaultParams.n_ubatch = [params[@"n_ubatch"] intValue];
|
158
|
+
if (params[@"use_mmap"]) defaultParams.use_mmap = [params[@"use_mmap"] boolValue];
|
159
|
+
|
160
|
+
if (params[@"pooling_type"] && [params[@"pooling_type"] isKindOfClass:[NSNumber class]]) {
|
161
|
+
defaultParams.pooling_type = static_cast<enum llama_pooling_type>([params[@"pooling_type"] intValue]);
|
162
|
+
}
|
163
|
+
|
164
|
+
if (params[@"embedding"] && [params[@"embedding"] boolValue]) {
|
165
|
+
defaultParams.embedding = true;
|
166
|
+
// For non-causal models, batch size must be equal to ubatch size
|
167
|
+
defaultParams.n_ubatch = defaultParams.n_batch;
|
168
|
+
|
169
|
+
if (params[@"embd_normalize"] && [params[@"embd_normalize"] isKindOfClass:[NSNumber class]]) {
|
170
|
+
defaultParams.embd_normalize = [params[@"embd_normalize"] intValue];
|
171
|
+
}
|
172
|
+
}
|
173
|
+
|
174
|
+
if (params[@"rope_freq_base"]) defaultParams.rope_freq_base = [params[@"rope_freq_base"] floatValue];
|
175
|
+
if (params[@"rope_freq_scale"]) defaultParams.rope_freq_scale = [params[@"rope_freq_scale"] floatValue];
|
176
|
+
|
177
|
+
if (params[@"flash_attn"] && [params[@"flash_attn"] boolValue]) defaultParams.flash_attn = true;
|
178
|
+
|
179
|
+
if (params[@"ctx_shift"]) defaultParams.ctx_shift = [params[@"ctx_shift"] boolValue];
|
180
|
+
|
181
|
+
if (params[@"cache_type_k"]) defaultParams.cache_type_k = rnllama::kv_cache_type_from_str([params[@"cache_type_k"] UTF8String]);
|
182
|
+
if (params[@"cache_type_v"]) defaultParams.cache_type_v = rnllama::kv_cache_type_from_str([params[@"cache_type_v"] UTF8String]);
|
183
|
+
|
184
|
+
int nThreads = params[@"n_threads"] ? [params[@"n_threads"] intValue] : 0;
|
185
|
+
const int maxThreads = (int) [[NSProcessInfo processInfo] processorCount];
|
186
|
+
// Use 2 threads by default on 4-core devices, 4 threads on more cores
|
187
|
+
const int defaultNThreads = nThreads == 4 ? 2 : MIN(4, maxThreads);
|
188
|
+
defaultParams.cpuparams.n_threads = nThreads > 0 ? nThreads : defaultNThreads;
|
189
|
+
|
190
|
+
RNLlamaContext *context = [[RNLlamaContext alloc] init];
|
191
|
+
context->llama = new rnllama::llama_rn_context();
|
192
|
+
context->llama->is_load_interrupted = false;
|
193
|
+
context->llama->loading_progress = 0;
|
194
|
+
context->onProgress = onProgress;
|
195
|
+
|
196
|
+
if (params[@"use_progress_callback"] && [params[@"use_progress_callback"] boolValue]) {
|
197
|
+
defaultParams.progress_callback = [](float progress, void * user_data) {
|
198
|
+
RNLlamaContext *context = (__bridge RNLlamaContext *)(user_data);
|
199
|
+
unsigned percentage = (unsigned) (100 * progress);
|
200
|
+
if (percentage > context->llama->loading_progress) {
|
201
|
+
context->llama->loading_progress = percentage;
|
202
|
+
context->onProgress(percentage);
|
203
|
+
}
|
204
|
+
return !context->llama->is_load_interrupted;
|
205
|
+
};
|
206
|
+
defaultParams.progress_callback_user_data = context;
|
207
|
+
}
|
208
|
+
|
209
|
+
context->is_model_loaded = context->llama->loadModel(defaultParams);
|
210
|
+
|
211
|
+
if (
|
212
|
+
params[@"embedding"] && [params[@"embedding"] boolValue] &&
|
213
|
+
llama_model_has_encoder(context->llama->model) && llama_model_has_decoder(context->llama->model)
|
214
|
+
) {
|
215
|
+
delete context->llama;
|
216
|
+
@throw [NSException exceptionWithName:@"LlamaException" reason:@"Embedding is not supported in encoder-decoder models" userInfo:nil];
|
217
|
+
}
|
218
|
+
|
219
|
+
std::vector<common_adapter_lora_info> lora;
|
220
|
+
if (params[@"lora"]) {
|
221
|
+
common_adapter_lora_info la;
|
222
|
+
la.path = [params[@"lora"] UTF8String];
|
223
|
+
la.scale = 1.0f;
|
224
|
+
if (params[@"lora_scaled"]) la.scale = [params[@"lora_scaled"] floatValue];
|
225
|
+
lora.push_back(la);
|
226
|
+
}
|
227
|
+
if (params[@"lora_list"] && [params[@"lora_list"] isKindOfClass:[NSArray class]]) {
|
228
|
+
NSArray *lora_list = params[@"lora_list"];
|
229
|
+
for (NSDictionary *lora_adapter in lora_list) {
|
230
|
+
NSString *path = lora_adapter[@"path"];
|
231
|
+
if (!path) continue;
|
232
|
+
float scale = [lora_adapter[@"scaled"] floatValue];
|
233
|
+
common_adapter_lora_info la;
|
234
|
+
la.path = [path UTF8String];
|
235
|
+
la.scale = scale;
|
236
|
+
lora.push_back(la);
|
237
|
+
}
|
238
|
+
}
|
239
|
+
if (lora.size() > 0) {
|
240
|
+
int result = context->llama->applyLoraAdapters(lora);
|
241
|
+
if (result != 0) {
|
242
|
+
delete context->llama;
|
243
|
+
@throw [NSException exceptionWithName:@"LlamaException" reason:@"Failed to apply lora adapters" userInfo:nil];
|
244
|
+
}
|
245
|
+
}
|
246
|
+
|
247
|
+
context->is_metal_enabled = isMetalEnabled;
|
248
|
+
context->reason_no_metal = reasonNoMetal;
|
249
|
+
|
250
|
+
return context;
|
251
|
+
}
|
252
|
+
|
253
|
+
- (void)interruptLoad {
|
254
|
+
llama->is_load_interrupted = true;
|
255
|
+
}
|
256
|
+
|
257
|
+
- (bool)isMetalEnabled {
|
258
|
+
return is_metal_enabled;
|
259
|
+
}
|
260
|
+
|
261
|
+
- (NSString *)reasonNoMetal {
|
262
|
+
return reason_no_metal;
|
263
|
+
}
|
264
|
+
|
265
|
+
- (NSDictionary *)modelInfo {
|
266
|
+
char desc[1024];
|
267
|
+
llama_model_desc(llama->model, desc, sizeof(desc));
|
268
|
+
|
269
|
+
int count = llama_model_meta_count(llama->model);
|
270
|
+
NSDictionary *meta = [[NSMutableDictionary alloc] init];
|
271
|
+
for (int i = 0; i < count; i++) {
|
272
|
+
char key[256];
|
273
|
+
llama_model_meta_key_by_index(llama->model, i, key, sizeof(key));
|
274
|
+
char val[4096];
|
275
|
+
llama_model_meta_val_str_by_index(llama->model, i, val, sizeof(val));
|
276
|
+
|
277
|
+
NSString *keyStr = [NSString stringWithUTF8String:key];
|
278
|
+
NSString *valStr = [NSString stringWithUTF8String:val];
|
279
|
+
[meta setValue:valStr forKey:keyStr];
|
280
|
+
}
|
281
|
+
|
282
|
+
auto template_tool_use = llama->templates.get()->template_tool_use.get();
|
283
|
+
NSDictionary *tool_use_caps_dir = nil;
|
284
|
+
if (template_tool_use) {
|
285
|
+
auto tool_use_caps = template_tool_use->original_caps();
|
286
|
+
tool_use_caps_dir = @{
|
287
|
+
@"tools": @(tool_use_caps.supports_tools),
|
288
|
+
@"toolCalls": @(tool_use_caps.supports_tool_calls),
|
289
|
+
@"toolResponses": @(tool_use_caps.supports_tool_responses),
|
290
|
+
@"systemRole": @(tool_use_caps.supports_system_role),
|
291
|
+
@"parallelToolCalls": @(tool_use_caps.supports_parallel_tool_calls),
|
292
|
+
@"toolCallId": @(tool_use_caps.supports_tool_call_id)
|
293
|
+
};
|
294
|
+
}
|
295
|
+
|
296
|
+
auto default_tmpl = llama->templates.get()->template_default.get();
|
297
|
+
auto default_tmpl_caps = default_tmpl->original_caps();
|
298
|
+
|
299
|
+
return @{
|
300
|
+
@"desc": [NSString stringWithUTF8String:desc],
|
301
|
+
@"size": @(llama_model_size(llama->model)),
|
302
|
+
@"nEmbd": @(llama_model_n_embd(llama->model)),
|
303
|
+
@"nParams": @(llama_model_n_params(llama->model)),
|
304
|
+
@"chatTemplates": @{
|
305
|
+
@"llamaChat": @(llama->validateModelChatTemplate(false, nullptr)),
|
306
|
+
@"minja": @{
|
307
|
+
@"default": @(llama->validateModelChatTemplate(true, nullptr)),
|
308
|
+
@"defaultCaps": @{
|
309
|
+
@"tools": @(default_tmpl_caps.supports_tools),
|
310
|
+
@"toolCalls": @(default_tmpl_caps.supports_tool_calls),
|
311
|
+
@"toolResponses": @(default_tmpl_caps.supports_tool_responses),
|
312
|
+
@"systemRole": @(default_tmpl_caps.supports_system_role),
|
313
|
+
@"parallelToolCalls": @(default_tmpl_caps.supports_parallel_tool_calls),
|
314
|
+
@"toolCallId": @(default_tmpl_caps.supports_tool_call_id)
|
315
|
+
},
|
316
|
+
@"toolUse": @(llama->validateModelChatTemplate(true, "tool_use")),
|
317
|
+
@"toolUseCaps": tool_use_caps_dir ?: @{}
|
318
|
+
}
|
319
|
+
},
|
320
|
+
@"metadata": meta,
|
321
|
+
|
322
|
+
// deprecated
|
323
|
+
@"isChatTemplateSupported": @(llama->validateModelChatTemplate(false, nullptr))
|
324
|
+
};
|
325
|
+
}
|
326
|
+
|
327
|
+
- (bool)isModelLoaded {
|
328
|
+
return is_model_loaded;
|
329
|
+
}
|
330
|
+
|
331
|
+
- (bool)isPredicting {
|
332
|
+
return llama->is_predicting;
|
333
|
+
}
|
334
|
+
|
335
|
+
- (NSDictionary *)getFormattedChatWithJinja:(NSString *)messages
|
336
|
+
withChatTemplate:(NSString *)chatTemplate
|
337
|
+
withJsonSchema:(NSString *)jsonSchema
|
338
|
+
withTools:(NSString *)tools
|
339
|
+
withParallelToolCalls:(BOOL)parallelToolCalls
|
340
|
+
withToolChoice:(NSString *)toolChoice
|
341
|
+
{
|
342
|
+
auto tmpl_str = chatTemplate == nil ? "" : [chatTemplate UTF8String];
|
343
|
+
|
344
|
+
NSMutableDictionary *result = [[NSMutableDictionary alloc] init];
|
345
|
+
auto chatParams = llama->getFormattedChatWithJinja(
|
346
|
+
[messages UTF8String],
|
347
|
+
tmpl_str,
|
348
|
+
jsonSchema == nil ? "" : [jsonSchema UTF8String],
|
349
|
+
tools == nil ? "" : [tools UTF8String],
|
350
|
+
parallelToolCalls,
|
351
|
+
toolChoice == nil ? "" : [toolChoice UTF8String]
|
352
|
+
);
|
353
|
+
result[@"prompt"] = [NSString stringWithUTF8String:chatParams.prompt.c_str()];
|
354
|
+
result[@"chat_format"] = @(static_cast<int>(chatParams.format));
|
355
|
+
result[@"grammar"] = [NSString stringWithUTF8String:chatParams.grammar.c_str()];
|
356
|
+
result[@"grammar_lazy"] = @(chatParams.grammar_lazy);
|
357
|
+
NSMutableArray *grammar_triggers = [[NSMutableArray alloc] init];
|
358
|
+
for (const auto & trigger : chatParams.grammar_triggers) {
|
359
|
+
[grammar_triggers addObject:@{
|
360
|
+
@"type": @(trigger.type),
|
361
|
+
@"value": [NSString stringWithUTF8String:trigger.value.c_str()],
|
362
|
+
@"token": @(trigger.token),
|
363
|
+
}];
|
364
|
+
}
|
365
|
+
result[@"grammar_triggers"] = grammar_triggers;
|
366
|
+
NSMutableArray *preserved_tokens = [[NSMutableArray alloc] init];
|
367
|
+
for (const auto & token : chatParams.preserved_tokens) {
|
368
|
+
[preserved_tokens addObject:[NSString stringWithUTF8String:token.c_str()]];
|
369
|
+
}
|
370
|
+
result[@"preserved_tokens"] = preserved_tokens;
|
371
|
+
NSMutableArray *additional_stops = [[NSMutableArray alloc] init];
|
372
|
+
for (const auto & stop : chatParams.additional_stops) {
|
373
|
+
[additional_stops addObject:[NSString stringWithUTF8String:stop.c_str()]];
|
374
|
+
}
|
375
|
+
result[@"additional_stops"] = additional_stops;
|
376
|
+
|
377
|
+
return result;
|
378
|
+
}
|
379
|
+
|
380
|
+
- (NSString *)getFormattedChat:(NSString *)messages withChatTemplate:(NSString *)chatTemplate {
|
381
|
+
auto tmpl_str = chatTemplate == nil ? "" : [chatTemplate UTF8String];
|
382
|
+
return [NSString stringWithUTF8String:llama->getFormattedChat(
|
383
|
+
[messages UTF8String],
|
384
|
+
tmpl_str
|
385
|
+
).c_str()];;
|
386
|
+
}
|
387
|
+
|
388
|
+
- (NSArray *)tokenProbsToDict:(std::vector<rnllama::completion_token_output>)probs {
|
389
|
+
NSMutableArray *out = [[NSMutableArray alloc] init];
|
390
|
+
for (const auto &prob : probs)
|
391
|
+
{
|
392
|
+
NSMutableArray *probsForToken = [[NSMutableArray alloc] init];
|
393
|
+
for (const auto &p : prob.probs)
|
394
|
+
{
|
395
|
+
std::string tokStr = rnllama::tokens_to_output_formatted_string(llama->ctx, p.tok);
|
396
|
+
[probsForToken addObject:@{
|
397
|
+
@"tok_str": [NSString stringWithUTF8String:tokStr.c_str()],
|
398
|
+
@"prob": [NSNumber numberWithDouble:p.prob]
|
399
|
+
}];
|
400
|
+
}
|
401
|
+
std::string tokStr = rnllama::tokens_to_output_formatted_string(llama->ctx, prob.tok);
|
402
|
+
[out addObject:@{
|
403
|
+
@"content": [NSString stringWithUTF8String:tokStr.c_str()],
|
404
|
+
@"probs": probsForToken
|
405
|
+
}];
|
406
|
+
}
|
407
|
+
return out;
|
408
|
+
}
|
409
|
+
|
410
|
+
- (NSDictionary *)completion:(NSDictionary *)params
|
411
|
+
onToken:(void (^)(NSMutableDictionary * tokenResult))onToken
|
412
|
+
{
|
413
|
+
llama->rewind();
|
414
|
+
|
415
|
+
//llama_reset_timings(llama->ctx);
|
416
|
+
|
417
|
+
NSString *prompt = [params objectForKey:@"prompt"];
|
418
|
+
|
419
|
+
llama->params.prompt = [prompt UTF8String];
|
420
|
+
llama->params.sampling.seed = params[@"seed"] ? [params[@"seed"] intValue] : -1;
|
421
|
+
|
422
|
+
if (params[@"n_threads"]) {
|
423
|
+
int nThreads = params[@"n_threads"] ? [params[@"n_threads"] intValue] : llama->params.cpuparams.n_threads;
|
424
|
+
const int maxThreads = (int) [[NSProcessInfo processInfo] processorCount];
|
425
|
+
// Use 2 threads by default on 4-core devices, 4 threads on more cores
|
426
|
+
const int defaultNThreads = nThreads == 4 ? 2 : MIN(4, maxThreads);
|
427
|
+
llama->params.cpuparams.n_threads = nThreads > 0 ? nThreads : defaultNThreads;
|
428
|
+
}
|
429
|
+
if (params[@"n_predict"]) llama->params.n_predict = [params[@"n_predict"] intValue];
|
430
|
+
if (params[@"ignore_eos"]) llama->params.sampling.ignore_eos = [params[@"ignore_eos"] boolValue];
|
431
|
+
|
432
|
+
auto & sparams = llama->params.sampling;
|
433
|
+
|
434
|
+
if (params[@"temperature"]) sparams.temp = [params[@"temperature"] doubleValue];
|
435
|
+
|
436
|
+
if (params[@"n_probs"]) sparams.n_probs = [params[@"n_probs"] intValue];
|
437
|
+
|
438
|
+
if (params[@"penalty_last_n"]) sparams.penalty_last_n = [params[@"penalty_last_n"] intValue];
|
439
|
+
if (params[@"penalty_repeat"]) sparams.penalty_repeat = [params[@"penalty_repeat"] doubleValue];
|
440
|
+
if (params[@"penalty_freq"]) sparams.penalty_freq = [params[@"penalty_freq"] doubleValue];
|
441
|
+
if (params[@"penalty_present"]) sparams.penalty_present = [params[@"penalty_present"] doubleValue];
|
442
|
+
|
443
|
+
if (params[@"mirostat"]) sparams.mirostat = [params[@"mirostat"] intValue];
|
444
|
+
if (params[@"mirostat_tau"]) sparams.mirostat_tau = [params[@"mirostat_tau"] doubleValue];
|
445
|
+
if (params[@"mirostat_eta"]) sparams.mirostat_eta = [params[@"mirostat_eta"] doubleValue];
|
446
|
+
|
447
|
+
if (params[@"top_k"]) sparams.top_k = [params[@"top_k"] intValue];
|
448
|
+
if (params[@"top_p"]) sparams.top_p = [params[@"top_p"] doubleValue];
|
449
|
+
if (params[@"min_p"]) sparams.min_p = [params[@"min_p"] doubleValue];
|
450
|
+
if (params[@"xtc_threshold"]) sparams.xtc_threshold = [params[@"xtc_threshold"] doubleValue];
|
451
|
+
if (params[@"xtc_probability"]) sparams.xtc_probability = [params[@"xtc_probability"] doubleValue];
|
452
|
+
if (params[@"typical_p"]) sparams.typ_p = [params[@"typical_p"] doubleValue];
|
453
|
+
|
454
|
+
if (params[@"dry_multiplier"]) sparams.dry_multiplier = [params[@"dry_multiplier"] doubleValue];
|
455
|
+
if (params[@"dry_base"]) sparams.dry_base = [params[@"dry_base"] doubleValue];
|
456
|
+
if (params[@"dry_allowed_length"]) sparams.dry_allowed_length = [params[@"dry_allowed_length"] intValue];
|
457
|
+
if (params[@"dry_penalty_last_n"]) sparams.dry_penalty_last_n = [params[@"dry_penalty_last_n"] intValue];
|
458
|
+
|
459
|
+
if (params[@"top_n_sigma"]) sparams.top_n_sigma = [params[@"top_n_sigma"] doubleValue];
|
460
|
+
|
461
|
+
// dry break seq
|
462
|
+
if (params[@"dry_sequence_breakers"] && [params[@"dry_sequence_breakers"] isKindOfClass:[NSArray class]]) {
|
463
|
+
NSArray *dry_sequence_breakers = params[@"dry_sequence_breakers"];
|
464
|
+
for (NSString *s in dry_sequence_breakers) {
|
465
|
+
sparams.dry_sequence_breakers.push_back([s UTF8String]);
|
466
|
+
}
|
467
|
+
}
|
468
|
+
|
469
|
+
if (params[@"grammar"]) {
|
470
|
+
sparams.grammar = [params[@"grammar"] UTF8String];
|
471
|
+
}
|
472
|
+
|
473
|
+
if (params[@"json_schema"] && !params[@"grammar"]) {
|
474
|
+
sparams.grammar = json_schema_to_grammar(json::parse([params[@"json_schema"] UTF8String]));
|
475
|
+
}
|
476
|
+
|
477
|
+
if (params[@"grammar_lazy"]) {
|
478
|
+
sparams.grammar_lazy = [params[@"grammar_lazy"] boolValue];
|
479
|
+
}
|
480
|
+
|
481
|
+
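+    // A preserved token must tokenize to exactly one id; multi-token strings
+    // are silently skipped (see the disabled warning below).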
if (params[@"preserved_tokens"] && [params[@"preserved_tokens"] isKindOfClass:[NSArray class]]) {
|
482
|
+
NSArray *preserved_tokens = params[@"preserved_tokens"];
|
483
|
+
for (NSString *token in preserved_tokens) {
|
484
|
+
auto ids = common_tokenize(llama->ctx, [token UTF8String], /* add_special= */ false, /* parse_special= */ true);
|
485
|
+
if (ids.size() == 1) {
|
486
|
+
sparams.preserved_tokens.insert(ids[0]);
|
487
|
+
} else {
|
488
|
+
// LOG_WRN("Not preserved because more than 1 token (wrong chat template override?): %s\n", [token UTF8String]);
|
489
|
+
}
|
490
|
+
}
|
491
|
+
}
|
492
|
+
|
493
|
+
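+    // Grammar triggers: a WORD trigger that tokenizes to a single id is
+    // upgraded to a TOKEN trigger, and that token must already be listed in
+    // preserved_tokens, otherwise a runtime_error is thrown.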
if (params[@"grammar_triggers"] && [params[@"grammar_triggers"] isKindOfClass:[NSArray class]]) {
|
494
|
+
NSArray *grammar_triggers = params[@"grammar_triggers"];
|
495
|
+
for (NSDictionary *grammar_trigger in grammar_triggers) {
|
496
|
+
const auto type = static_cast<common_grammar_trigger_type>([grammar_trigger[@"type"] intValue]);
|
497
|
+
const auto & word = [grammar_trigger[@"value"] UTF8String];
|
498
|
+
|
499
|
+
if (type == COMMON_GRAMMAR_TRIGGER_TYPE_WORD) {
|
500
|
+
auto ids = common_tokenize(llama->ctx, word, /* add_special= */ false, /* parse_special= */ true);
|
501
|
+
if (ids.size() == 1) {
|
502
|
+
auto token = ids[0];
|
503
|
+
if (std::find(sparams.preserved_tokens.begin(), sparams.preserved_tokens.end(), (llama_token) token) == sparams.preserved_tokens.end()) {
|
504
|
+
throw std::runtime_error("Grammar trigger word should be marked as preserved token");
|
505
|
+
}
|
506
|
+
common_grammar_trigger trigger;
|
507
|
+
trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN;
|
508
|
+
trigger.value = word;
|
509
|
+
trigger.token = token;
|
510
|
+
sparams.grammar_triggers.push_back(std::move(trigger));
|
511
|
+
} else {
|
512
|
+
sparams.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, word});
|
513
|
+
}
|
514
|
+
} else {
|
515
|
+
common_grammar_trigger trigger;
|
516
|
+
trigger.type = type;
|
517
|
+
trigger.value = word;
|
518
|
+
if (type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) {
|
519
|
+
const auto token = (llama_token) [grammar_trigger[@"token"] intValue];
|
520
|
+
trigger.token = token;
|
521
|
+
}
|
522
|
+
sparams.grammar_triggers.push_back(std::move(trigger));
|
523
|
+
}
|
524
|
+
}
|
525
|
+
}
|
526
|
+
|
527
|
+
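+    // Stop sequences arrive as plain strings and are matched against the
+    // generated text as antiprompts during the streaming loop below.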
+    llama->params.antiprompt.clear();
+    if (params[@"stop"]) {
+        NSArray *stop = params[@"stop"];
+        for (NSString *s in stop) {
+            llama->params.antiprompt.push_back([s UTF8String]);
+        }
+    }
+
+    const llama_model * model = llama_get_model(llama->ctx);
+    const llama_vocab * vocab = llama_model_get_vocab(model);
+
+    sparams.logit_bias.clear();
+    if (params[@"ignore_eos"] && [params[@"ignore_eos"] boolValue]) {
+        sparams.logit_bias[llama_vocab_eos(vocab)].bias = -INFINITY;
+    }
+
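+    // logit_bias is expected as an array of [token_id, bias] pairs; a bias of
+    // `false` bans the token outright by pushing its logit to -INFINITY.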
if (params[@"logit_bias"] && [params[@"logit_bias"] isKindOfClass:[NSArray class]]) {
|
544
|
+
const int n_vocab = llama_vocab_n_tokens(vocab);
|
545
|
+
NSArray *logit_bias = params[@"logit_bias"];
|
546
|
+
for (NSArray *el in logit_bias) {
|
547
|
+
if ([el isKindOfClass:[NSArray class]] && [el count] == 2) {
|
548
|
+
llama_token tok = [el[0] intValue];
|
549
|
+
if (tok >= 0 && tok < n_vocab) {
|
550
|
+
if ([el[1] isKindOfClass:[NSNumber class]]) {
|
551
|
+
sparams.logit_bias[tok].bias = [el[1] doubleValue];
|
552
|
+
} else if ([el[1] isKindOfClass:[NSNumber class]] && ![el[1] boolValue]) {
|
553
|
+
sparams.logit_bias[tok].bias = -INFINITY;
|
554
|
+
}
|
555
|
+
}
|
556
|
+
}
|
557
|
+
}
|
558
|
+
}
|
559
|
+
|
560
|
+
+    if (!llama->initSampling()) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Failed to initialize sampling" userInfo:nil];
+    }
+    llama->beginCompletion();
+    llama->loadPrompt();
+    if (llama->context_full) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Context is full" userInfo:nil];
+    }
+
+    size_t sent_count = 0;
+    size_t sent_token_probs_index = 0;
+
+    while (llama->has_next_token && !llama->is_interrupted) {
+        const rnllama::completion_token_output token_with_probs = llama->doCompletion();
+        if (token_with_probs.tok == -1 || llama->incomplete) {
+            continue;
+        }
+        const std::string token_text = common_token_to_piece(llama->ctx, token_with_probs.tok);
+
+        size_t pos = std::min(sent_count, llama->generated_text.size());
+
+        const std::string str_test = llama->generated_text.substr(pos);
+        bool is_stop_full = false;
+        size_t stop_pos =
+            llama->findStoppingStrings(str_test, token_text.size(), rnllama::STOP_FULL);
+        if (stop_pos != std::string::npos) {
+            is_stop_full = true;
+            llama->generated_text.erase(
+                llama->generated_text.begin() + pos + stop_pos,
+                llama->generated_text.end());
+            pos = std::min(sent_count, llama->generated_text.size());
+        } else {
+            is_stop_full = false;
+            stop_pos = llama->findStoppingStrings(str_test, token_text.size(),
+                rnllama::STOP_PARTIAL);
+        }
+
+        if (
+            stop_pos == std::string::npos ||
+            // Send rest of the text if we are at the end of the generation
+            (!llama->has_next_token && !is_stop_full && stop_pos > 0)
+        ) {
+            const std::string to_send = llama->generated_text.substr(pos, std::string::npos);
+
+            sent_count += to_send.size();
+
+            std::vector<rnllama::completion_token_output> probs_output = {};
+
+            NSMutableDictionary *tokenResult = [[NSMutableDictionary alloc] init];
+            tokenResult[@"token"] = [NSString stringWithUTF8String:to_send.c_str()];
+
+            if (llama->params.sampling.n_probs > 0) {
+                const std::vector<llama_token> to_send_toks = common_tokenize(llama->ctx, to_send, false);
+                size_t probs_pos = std::min(sent_token_probs_index, llama->generated_token_probs.size());
+                size_t probs_stop_pos = std::min(sent_token_probs_index + to_send_toks.size(), llama->generated_token_probs.size());
+                if (probs_pos < probs_stop_pos) {
+                    probs_output = std::vector<rnllama::completion_token_output>(llama->generated_token_probs.begin() + probs_pos, llama->generated_token_probs.begin() + probs_stop_pos);
+                }
+                sent_token_probs_index = probs_stop_pos;
+
+                tokenResult[@"completion_probabilities"] = [self tokenProbsToDict:probs_output];
+            }
+
+            onToken(tokenResult);
+        }
+    }
+
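+    // Once the loop ends, the accumulated text is parsed a final time so the
+    // chat format can split out reasoning content and any tool calls.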
+    llama_perf_context_print(llama->ctx);
+    llama->is_predicting = false;
+
+    const auto timings = llama_perf_context(llama->ctx);
+
+    NSMutableArray *toolCalls = nil;
+    NSString *reasoningContent = nil;
+    NSString *content = nil;
+    if (!llama->is_interrupted) {
+        try {
+            auto chat_format = params[@"chat_format"] ? [params[@"chat_format"] intValue] : COMMON_CHAT_FORMAT_CONTENT_ONLY;
+            common_chat_msg message = common_chat_parse(llama->generated_text, static_cast<common_chat_format>(chat_format));
+            if (!message.reasoning_content.empty()) {
+                reasoningContent = [NSString stringWithUTF8String:message.reasoning_content.c_str()];
+            }
+            content = [NSString stringWithUTF8String:message.content.c_str()];
+            toolCalls = [[NSMutableArray alloc] init];
+            for (const auto &tc : message.tool_calls) {
+                [toolCalls addObject:@{
+                    @"type": @"function",
+                    @"function": @{
+                        @"name": [NSString stringWithUTF8String:tc.name.c_str()],
+                        @"arguments": [NSString stringWithUTF8String:tc.arguments.c_str()],
+                    },
+                    @"id": tc.id.empty() ? [NSNull null] : [NSString stringWithUTF8String:tc.id.c_str()],
+                }];
+            }
+        } catch (const std::exception &e) {
+        } catch (...) {
+        }
+    }
+
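+    // Final payload: raw text plus parsed fields, stop diagnostics, token
+    // counts, and llama_perf timings (milliseconds and derived rates).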
+    NSMutableDictionary *result = [[NSMutableDictionary alloc] init];
+    result[@"text"] = [NSString stringWithUTF8String:llama->generated_text.c_str()]; // Original text
+    if (content) result[@"content"] = content;
+    if (reasoningContent) result[@"reasoning_content"] = reasoningContent;
+    if (toolCalls && toolCalls.count > 0) result[@"tool_calls"] = toolCalls;
+    result[@"completion_probabilities"] = [self tokenProbsToDict:llama->generated_token_probs];
+    result[@"tokens_predicted"] = @(llama->num_tokens_predicted);
+    result[@"tokens_evaluated"] = @(llama->num_prompt_tokens);
+    result[@"truncated"] = @(llama->truncated);
+    result[@"context_full"] = @(llama->context_full);
+    result[@"stopped_eos"] = @(llama->stopped_eos);
+    result[@"stopped_word"] = @(llama->stopped_word);
+    result[@"stopped_limit"] = @(llama->stopped_limit);
+    result[@"stopping_word"] = [NSString stringWithUTF8String:llama->stopping_word.c_str()];
+    result[@"tokens_cached"] = @(llama->n_past);
+    result[@"timings"] = @{
+        @"prompt_n": @(timings.n_p_eval),
+        @"prompt_ms": @(timings.t_p_eval_ms),
+        @"prompt_per_token_ms": @(timings.t_p_eval_ms / timings.n_p_eval),
+        @"prompt_per_second": @(1e3 / timings.t_p_eval_ms * timings.n_p_eval),
+        @"predicted_n": @(timings.n_eval),
+        @"predicted_ms": @(timings.t_eval_ms),
+        @"predicted_per_token_ms": @(timings.t_eval_ms / timings.n_eval),
+        @"predicted_per_second": @(1e3 / timings.t_eval_ms * timings.n_eval),
+    };
+    return result;
+}
+
+- (void)stopCompletion {
+    llama->is_interrupted = true;
+}
+
+- (NSArray *)tokenize:(NSString *)text {
+    const std::vector<llama_token> toks = common_tokenize(llama->ctx, [text UTF8String], false);
+    NSMutableArray *result = [[NSMutableArray alloc] init];
+    for (llama_token tok : toks) {
+        [result addObject:@(tok)];
+    }
+    return result;
+}
+
+- (NSString *)detokenize:(NSArray *)tokens {
+    std::vector<llama_token> toks;
+    for (NSNumber *tok in tokens) {
+        toks.push_back([tok intValue]);
+    }
+    const std::string text = rnllama::tokens_to_str(llama->ctx, toks.cbegin(), toks.cend());
+    return [NSString stringWithUTF8String:text.c_str()];
+}
+
+- (NSDictionary *)embedding:(NSString *)text params:(NSDictionary *)params {
+    if (!llama->params.embedding) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Embedding is not enabled" userInfo:nil];
+    }
+
+    common_params embdParams;
+    embdParams.embedding = true;
+    embdParams.embd_normalize = llama->params.embd_normalize;
+
+    if (params[@"embd_normalize"] && [params[@"embd_normalize"] isKindOfClass:[NSNumber class]]) {
+        embdParams.embd_normalize = [params[@"embd_normalize"] intValue];
+    }
+
+    llama->rewind();
+
+    llama_perf_context_reset(llama->ctx);
+
+    llama->params.prompt = [text UTF8String];
+
+    llama->params.n_predict = 0;
+
+    if (!llama->initSampling()) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Failed to initialize sampling" userInfo:nil];
+    }
+    llama->beginCompletion();
+    llama->loadPrompt();
+    llama->doCompletion();
+
+    std::vector<float> result = llama->getEmbedding(embdParams);
+
+    NSMutableDictionary *resultDict = [[NSMutableDictionary alloc] init];
+    NSMutableArray *embeddingResult = [[NSMutableArray alloc] init];
+    for (float f : result) {
+        [embeddingResult addObject:@(f)];
+    }
+    resultDict[@"embedding"] = embeddingResult;
+    NSMutableArray *promptTokens = [[NSMutableArray alloc] init];
+    for (llama_token tok : llama->embd) {
+        [promptTokens addObject:[NSString stringWithUTF8String:common_token_to_piece(llama->ctx, tok).c_str()]];
+    }
+    resultDict[@"prompt_tokens"] = promptTokens;
+
+    llama->is_predicting = false;
+    return resultDict;
+}
+
+- (NSDictionary *)loadSession:(NSString *)path {
+    if (!path || [path length] == 0) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Session path is empty" userInfo:nil];
+    }
+    if (![[NSFileManager defaultManager] fileExistsAtPath:path]) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Session file does not exist" userInfo:nil];
+    }
+
+    size_t n_token_count_out = 0;
+    llama->embd.resize(llama->params.n_ctx);
+    // Pass the buffer's element count (not its capacity) so the loader can
+    // never write past the tokens the vector actually holds.
+    if (!llama_state_load_file(llama->ctx, [path UTF8String], llama->embd.data(), llama->embd.size(), &n_token_count_out)) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Failed to load session" userInfo:nil];
+    }
+    llama->embd.resize(n_token_count_out);
+    const std::string text = rnllama::tokens_to_str(llama->ctx, llama->embd.cbegin(), llama->embd.cend());
+    return @{
+        @"tokens_loaded": @(n_token_count_out),
+        @"prompt": [NSString stringWithUTF8String:text.c_str()]
+    };
+}
+
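+// Writes at most `size` tokens of the in-memory history (the full history when
+// size is 0 or out of range) and returns the total token count held in memory.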
+- (int)saveSession:(NSString *)path size:(int)size {
+    if (!path || [path length] == 0) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Session path is empty" userInfo:nil];
+    }
+    std::vector<llama_token> session_tokens = llama->embd;
+    int default_size = session_tokens.size();
+    int save_size = size > 0 && size <= default_size ? size : default_size;
+    if (!llama_state_save_file(llama->ctx, [path UTF8String], session_tokens.data(), save_size)) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Failed to save session" userInfo:nil];
+    }
+    return session_tokens.size();
+}
+
+- (NSString *)bench:(int)pp tg:(int)tg pl:(int)pl nr:(int)nr {
+    return [NSString stringWithUTF8String:llama->bench(pp, tg, pl, nr).c_str()];
+}
+
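+// Expects an array of { path, scaled } dictionaries; each adapter is loaded
+// from disk before the whole batch is applied to the context.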
+- (void)applyLoraAdapters:(NSArray *)loraAdapters {
+    std::vector<common_adapter_lora_info> lora_adapters;
+    for (NSDictionary *loraAdapter in loraAdapters) {
+        common_adapter_lora_info la;
+        la.path = [loraAdapter[@"path"] UTF8String];
+        la.scale = [loraAdapter[@"scaled"] doubleValue];
+        la.ptr = llama_adapter_lora_init(llama->model, la.path.c_str());
+        if (la.ptr == nullptr) {
+            @throw [NSException exceptionWithName:@"LlamaException" reason:@"Failed to apply lora adapter" userInfo:nil];
+        }
+        lora_adapters.push_back(la);
+    }
+    int result = llama->applyLoraAdapters(lora_adapters);
+    if (result != 0) {
+        @throw [NSException exceptionWithName:@"LlamaException" reason:@"Failed to apply lora adapters" userInfo:nil];
+    }
+}
+
+- (void)removeLoraAdapters {
+    llama->removeLoraAdapters();
+}
+
+- (NSArray *)getLoadedLoraAdapters {
+    std::vector<common_adapter_lora_info> loaded_lora_adapters = llama->getLoadedLoraAdapters();
+    NSMutableArray *result = [[NSMutableArray alloc] init];
+    for (common_adapter_lora_info &la : loaded_lora_adapters) {
+        [result addObject:@{
+            @"path": [NSString stringWithUTF8String:la.path.c_str()],
+            @"scale": @(la.scale)
+        }];
+    }
+    return result;
+}
+
+- (void)invalidate {
+    delete llama;
+    // llama_backend_free();
+}
+
+@end