cui-llama.rn 1.5.0 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +20 -20
- package/README.md +317 -319
- package/android/build.gradle +116 -116
- package/android/gradle.properties +5 -5
- package/android/src/main/AndroidManifest.xml +4 -4
- package/android/src/main/CMakeLists.txt +124 -124
- package/android/src/main/java/com/rnllama/LlamaContext.java +645 -645
- package/android/src/main/java/com/rnllama/RNLlama.java +695 -695
- package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -48
- package/android/src/main/jni-utils.h +100 -100
- package/android/src/main/jni.cpp +1263 -1263
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +135 -135
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +136 -136
- package/cpp/README.md +4 -4
- package/cpp/ggml-llama-sim.metallib +0 -0
- package/cpp/ggml-llama.metallib +0 -0
- package/cpp/ggml-metal-impl.h +597 -597
- package/cpp/ggml-metal.m +4 -0
- package/cpp/ggml.h +1 -1
- package/cpp/rn-llama.cpp +873 -873
- package/cpp/rn-llama.h +138 -138
- package/cpp/sampling.h +107 -107
- package/cpp/unicode-data.cpp +7034 -7034
- package/cpp/unicode-data.h +20 -20
- package/cpp/unicode.cpp +849 -849
- package/cpp/unicode.h +66 -66
- package/ios/CMakeLists.txt +116 -108
- package/ios/RNLlama.h +7 -7
- package/ios/RNLlama.mm +418 -405
- package/ios/RNLlamaContext.h +57 -57
- package/ios/RNLlamaContext.mm +835 -835
- package/ios/rnllama.xcframework/Info.plist +74 -74
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +16 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +677 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +138 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +594 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +2222 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +428 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +88 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +56 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +265 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +592 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +156 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +213 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +21 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +409 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +1434 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +128 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +138 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +14 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +888 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unary-ops.h +28 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +802 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +16 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +677 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +138 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +594 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2222 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +428 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +88 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +56 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +265 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +592 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +156 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +213 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +21 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +409 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1434 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +128 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +138 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +14 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +888 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +28 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +802 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +16 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +677 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +138 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +594 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +2222 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +428 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +88 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +56 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +265 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +592 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +156 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +213 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +21 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +409 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +1434 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +128 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +138 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +14 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +888 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +28 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/vec.h +802 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +16 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +677 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +138 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +594 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2222 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +428 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +88 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +56 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +265 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +592 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +156 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +213 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +21 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +409 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1434 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +128 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +138 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +14 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +888 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +28 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +802 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/jest/mock.js +203 -203
- package/lib/commonjs/NativeRNLlama.js +1 -2
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/chat.js.map +1 -1
- package/lib/commonjs/grammar.js +12 -31
- package/lib/commonjs/grammar.js.map +1 -1
- package/lib/commonjs/index.js +47 -47
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/package.json +1 -0
- package/lib/module/NativeRNLlama.js +2 -0
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/chat.js +2 -0
- package/lib/module/chat.js.map +1 -1
- package/lib/module/grammar.js +14 -31
- package/lib/module/grammar.js.map +1 -1
- package/lib/module/index.js +47 -45
- package/lib/module/index.js.map +1 -1
- package/lib/module/package.json +1 -0
- package/lib/typescript/NativeRNLlama.d.ts +6 -4
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts.map +1 -1
- package/llama-rn.podspec +48 -48
- package/package.json +233 -233
- package/src/NativeRNLlama.ts +426 -426
- package/src/chat.ts +44 -44
- package/src/grammar.ts +854 -854
- package/src/index.ts +495 -487
@@ -0,0 +1,132 @@
|
|
1
|
+
#pragma once
|
2
|
+
|
3
|
+
#include "ggml.h" // for lm_ggml_log_level
|
4
|
+
|
5
|
+
#define LOG_CLR_TO_EOL "\033[K\r"
|
6
|
+
#define LOG_COL_DEFAULT "\033[0m"
|
7
|
+
#define LOG_COL_BOLD "\033[1m"
|
8
|
+
#define LOG_COL_RED "\033[31m"
|
9
|
+
#define LOG_COL_GREEN "\033[32m"
|
10
|
+
#define LOG_COL_YELLOW "\033[33m"
|
11
|
+
#define LOG_COL_BLUE "\033[34m"
|
12
|
+
#define LOG_COL_MAGENTA "\033[35m"
|
13
|
+
#define LOG_COL_CYAN "\033[36m"
|
14
|
+
#define LOG_COL_WHITE "\033[37m"
|
15
|
+
|
16
|
+
#ifndef __GNUC__
|
17
|
+
# define LOG_ATTRIBUTE_FORMAT(...)
|
18
|
+
#elif defined(__MINGW32__) && !defined(__clang__)
|
19
|
+
# define LOG_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
|
20
|
+
#else
|
21
|
+
# define LOG_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
|
22
|
+
#endif
|
23
|
+
|
24
|
+
#define LOG_DEFAULT_DEBUG 1
|
25
|
+
#define LOG_DEFAULT_LLAMA 0
|
26
|
+
|
27
|
+
// needed by the LOG_TMPL macro to avoid computing log arguments if the verbosity lower
|
28
|
+
// set via common_log_set_verbosity()
|
29
|
+
extern int common_log_verbosity_thold;
|
30
|
+
|
31
|
+
void common_log_set_verbosity_thold(int verbosity); // not thread-safe
|
32
|
+
|
33
|
+
// the common_log uses an internal worker thread to print/write log messages
|
34
|
+
// when the worker thread is paused, incoming log messages are discarded
|
35
|
+
struct common_log;
|
36
|
+
|
37
|
+
struct common_log * common_log_init();
|
38
|
+
struct common_log * common_log_main(); // singleton, automatically destroys itself on exit
|
39
|
+
void common_log_pause (struct common_log * log); // pause the worker thread, not thread-safe
|
40
|
+
void common_log_resume(struct common_log * log); // resume the worker thread, not thread-safe
|
41
|
+
void common_log_free (struct common_log * log);
|
42
|
+
|
43
|
+
LOG_ATTRIBUTE_FORMAT(3, 4)
|
44
|
+
void common_log_add(struct common_log * log, enum lm_ggml_log_level level, const char * fmt, ...);
|
45
|
+
|
46
|
+
// defaults: file = NULL, colors = false, prefix = false, timestamps = false
|
47
|
+
//
|
48
|
+
// regular log output:
|
49
|
+
//
|
50
|
+
// lm_ggml_backend_metal_log_allocated_size: allocated buffer, size = 6695.84 MiB, ( 6695.91 / 21845.34)
|
51
|
+
// llm_load_tensors: ggml ctx size = 0.27 MiB
|
52
|
+
// llm_load_tensors: offloading 32 repeating layers to GPU
|
53
|
+
// llm_load_tensors: offloading non-repeating layers to GPU
|
54
|
+
//
|
55
|
+
// with prefix = true, timestamps = true, the log output will look like this:
|
56
|
+
//
|
57
|
+
// 0.00.035.060 D lm_ggml_backend_metal_log_allocated_size: allocated buffer, size = 6695.84 MiB, ( 6695.91 / 21845.34)
|
58
|
+
// 0.00.035.064 I llm_load_tensors: ggml ctx size = 0.27 MiB
|
59
|
+
// 0.00.090.578 I llm_load_tensors: offloading 32 repeating layers to GPU
|
60
|
+
// 0.00.090.579 I llm_load_tensors: offloading non-repeating layers to GPU
|
61
|
+
//
|
62
|
+
// I - info (stdout, V = 0)
|
63
|
+
// W - warning (stderr, V = 0)
|
64
|
+
// E - error (stderr, V = 0)
|
65
|
+
// D - debug (stderr, V = LOG_DEFAULT_DEBUG)
|
66
|
+
//
|
67
|
+
|
68
|
+
void common_log_set_file (struct common_log * log, const char * file); // not thread-safe
|
69
|
+
void common_log_set_colors (struct common_log * log, bool colors); // not thread-safe
|
70
|
+
void common_log_set_prefix (struct common_log * log, bool prefix); // whether to output prefix to each log
|
71
|
+
void common_log_set_timestamps(struct common_log * log, bool timestamps); // whether to output timestamps in the prefix
|
72
|
+
|
73
|
+
// helper macros for logging
|
74
|
+
// use these to avoid computing log arguments if the verbosity of the log is higher than the threshold
|
75
|
+
//
|
76
|
+
// for example:
|
77
|
+
//
|
78
|
+
// LOG_DBG("this is a debug message: %d\n", expensive_function());
|
79
|
+
//
|
80
|
+
// this will avoid calling expensive_function() if LOG_DEFAULT_DEBUG > common_log_verbosity_thold
|
81
|
+
//
|
82
|
+
|
83
|
+
|
84
|
+
|
85
|
+
#if defined(__ANDROID__)
#include <android/log.h>
#define LLAMA_ANDROID_LOG_TAG "RNLLAMA_LOG_ANDROID"

// On Android, logging is compiled out entirely unless
// RNLLAMA_ANDROID_ENABLE_LOGGING is defined at build time.
#if defined(RNLLAMA_ANDROID_ENABLE_LOGGING)
#define RNLLAMA_LOG_LEVEL 1
#else
#define RNLLAMA_LOG_LEVEL 0
#endif

// Android variant: map the lm_ggml log level to a logcat priority and forward
// the message to __android_log_print(). Messages whose verbosity exceeds
// RNLLAMA_LOG_LEVEL are dropped.
// Fix: LM_GGML_LOG_LEVEL_DEBUG previously fell through to the default branch
// (ANDROID_LOG_DEFAULT); it now maps to ANDROID_LOG_DEBUG so logcat filtering
// by priority works for LOG_DBG/LOG_DBGV output.
#define LOG_TMPL(level, verbosity, ...) \
    do { \
        if ((verbosity) <= RNLLAMA_LOG_LEVEL) { \
            int android_log_level = ANDROID_LOG_DEFAULT; \
            switch (level) { \
                case LM_GGML_LOG_LEVEL_INFO:  android_log_level = ANDROID_LOG_INFO;  break; \
                case LM_GGML_LOG_LEVEL_WARN:  android_log_level = ANDROID_LOG_WARN;  break; \
                case LM_GGML_LOG_LEVEL_ERROR: android_log_level = ANDROID_LOG_ERROR; break; \
                case LM_GGML_LOG_LEVEL_DEBUG: android_log_level = ANDROID_LOG_DEBUG; break; \
                default:                      android_log_level = ANDROID_LOG_DEFAULT; \
            } \
            __android_log_print(android_log_level, LLAMA_ANDROID_LOG_TAG, __VA_ARGS__); \
        } \
    } while(0)
#else

// Non-Android variant: forward to the common_log machinery declared above,
// filtering against the global verbosity threshold.
#define LOG_TMPL(level, verbosity, ...) \
    do { \
        if ((verbosity) <= common_log_verbosity_thold) { \
            common_log_add(common_log_main(), (level), __VA_ARGS__); \
        } \
    } while (0)

#endif

// Plain output (no level), at verbosity 0 or an explicit verbosity.
#define LOG(...)             LOG_TMPL(LM_GGML_LOG_LEVEL_NONE, 0,         __VA_ARGS__)
#define LOGV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_NONE, verbosity, __VA_ARGS__)

// Leveled logging at verbosity 0 (see the I/W/E/D legend in the comments above).
#define LOG_INF(...) LOG_TMPL(LM_GGML_LOG_LEVEL_INFO,  0,                 __VA_ARGS__)
#define LOG_WRN(...) LOG_TMPL(LM_GGML_LOG_LEVEL_WARN,  0,                 __VA_ARGS__)
#define LOG_ERR(...) LOG_TMPL(LM_GGML_LOG_LEVEL_ERROR, 0,                 __VA_ARGS__)
#define LOG_DBG(...) LOG_TMPL(LM_GGML_LOG_LEVEL_DEBUG, LOG_DEFAULT_DEBUG, __VA_ARGS__)
#define LOG_CNT(...) LOG_TMPL(LM_GGML_LOG_LEVEL_CONT,  0,                 __VA_ARGS__)

// Leveled logging at an explicit verbosity: arguments are not evaluated when
// the verbosity is above the active threshold.
#define LOG_INFV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_INFO,  verbosity, __VA_ARGS__)
#define LOG_WRNV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_WARN,  verbosity, __VA_ARGS__)
#define LOG_ERRV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_ERROR, verbosity, __VA_ARGS__)
#define LOG_DBGV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_DEBUG, verbosity, __VA_ARGS__)
#define LOG_CNTV(verbosity, ...) LOG_TMPL(LM_GGML_LOG_LEVEL_CONT,  verbosity, __VA_ARGS__)
|
@@ -0,0 +1,537 @@
|
|
1
|
+
/*
|
2
|
+
Copyright 2024 Google LLC
|
3
|
+
|
4
|
+
Use of this source code is governed by an MIT-style
|
5
|
+
license that can be found in the LICENSE file or at
|
6
|
+
https://opensource.org/licenses/MIT.
|
7
|
+
*/
|
8
|
+
// SPDX-License-Identifier: MIT
|
9
|
+
#pragma once
|
10
|
+
|
11
|
+
#include "minja.hpp"
|
12
|
+
|
13
|
+
#include <chrono>
|
14
|
+
#include <cstddef>
|
15
|
+
#include <cstdio>
|
16
|
+
#include <exception>
|
17
|
+
#include <iomanip>
|
18
|
+
#include <memory>
|
19
|
+
#include <sstream>
|
20
|
+
#include <string>
|
21
|
+
#include <vector>
|
22
|
+
|
23
|
+
#include "../json.hpp"
|
24
|
+
|
25
|
+
using json = nlohmann::ordered_json;
|
26
|
+
|
27
|
+
namespace minja {
|
28
|
+
|
29
|
+
// Capabilities detected from a chat template by rendering dummy conversations
// (see the chat_template constructor). "supports_*" flags record what the
// template renders natively; "requires_*" flags record input shapes the
// template insists on. chat_template::apply() uses these to decide which
// polyfills to activate.
struct chat_template_caps {
    bool supports_tools = false;               // renders a tools array passed alongside messages
    bool supports_tool_calls = false;          // renders assistant messages carrying "tool_calls"
    bool supports_tool_responses = false;      // renders the content of role == "tool" messages
    bool supports_system_role = false;         // renders the content of system-role messages
    bool supports_parallel_tool_calls = false; // renders more than one tool call in a single message
    bool supports_tool_call_id = false;        // renders the "tool_call_id" field of tool responses
    // meta-llama/Llama-3.1-8B-Instruct expects arguments to be an object.
    // Most other templates (and OpenAI's API) expect the arguments object to be stringified.
    bool requires_object_arguments = false;
    // CohereForAI/c4ai-command-r-plus simple variant
    bool requires_non_null_content = false;
    // MiniMaxAI/MiniMax-Text-01 special
    bool requires_typed_content = false;
};
|
44
|
+
|
45
|
+
// Inputs for chat_template::apply(): the conversation plus rendering context.
struct chat_template_inputs {
    nlohmann::ordered_json messages;        // OpenAI-style array of {role, content, ...} messages
    nlohmann::ordered_json tools;           // optional array of tool definitions (null/empty = no tools)
    bool add_generation_prompt = true;      // exposed to the template as "add_generation_prompt"
    nlohmann::ordered_json extra_context;   // extra key/value pairs injected into the template context
    // Reference time used by the template's strftime_now() helper.
    std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
};
|
52
|
+
|
53
|
+
// Feature toggles for chat_template::apply(). Each polyfill_* flag only takes
// effect when the template itself lacks the corresponding capability
// (see chat_template_caps).
struct chat_template_options {
    bool apply_polyfills = true;     // master switch: when false, messages are rendered as-is
    bool use_bos_token = true;       // expose the real bos_token to the template (else "")
    bool use_eos_token = true;       // expose the real eos_token to the template (else "")
    bool define_strftime_now = true; // define the strftime_now(format) template function

    bool polyfill_tools = true;              // describe tools in a system message
    bool polyfill_tool_call_examples = true; // append an inferred tool-call example to that system message
    bool polyfill_tool_calls = true;         // serialize "tool_calls" into the message content
    bool polyfill_tool_responses = true;     // rewrite role "tool" messages as user messages
    bool polyfill_system_role = true;        // fold system content into adjacent user messages
    bool polyfill_object_arguments = true;   // parse stringified tool-call arguments back into objects
    bool polyfill_typed_content = true;      // wrap string content as [{"type": "text", "text": ...}]
};
|
67
|
+
|
68
|
+
// Wraps a Jinja-style chat template (parsed with minja) together with the
// capabilities probed from it, and renders OpenAI-style message arrays into a
// prompt string, polyfilling features the template does not support natively.
class chat_template {

  private:
    chat_template_caps caps_;
    std::string source_;     // raw template source text
    std::string bos_token_;
    std::string eos_token_;
    std::shared_ptr<minja::TemplateNode> template_root_; // parsed template AST
    std::string tool_call_example_;                      // example inferred in the ctor; may stay empty

    // Render the given messages/tools with all polyfills disabled and a fixed
    // clock; returns "" if the template throws. Used by the constructor to
    // probe what the raw template can and cannot render.
    std::string try_raw_render(
        const nlohmann::ordered_json & messages,
        const nlohmann::ordered_json & tools,
        bool add_generation_prompt,
        const nlohmann::ordered_json & extra_context = nlohmann::ordered_json()) const
    {
        try {
            chat_template_inputs inputs;
            inputs.messages = messages;
            inputs.tools = tools;
            inputs.add_generation_prompt = add_generation_prompt;
            inputs.extra_context = extra_context;
            // Use fixed date for tests
            inputs.now = std::chrono::system_clock::from_time_t(0);

            chat_template_options opts;
            opts.apply_polyfills = false;

            auto prompt = apply(inputs, opts);
            // fprintf(stderr, "try_raw_render: %s\n", prompt.c_str());
            return prompt;
        } catch (const std::exception & e) {
            // Probes intentionally swallow template errors: an empty render
            // simply means "this template can't handle that input shape".
            // fprintf(stderr, "try_raw_render error: %s\n", e.what());
            return "";
        }
    }

  public:

    // Parses `source` and probes the template's capabilities by rendering a
    // series of dummy conversations and checking whether needle strings
    // survive into the output.
    chat_template(const std::string & source, const std::string & bos_token, const std::string & eos_token)
        : source_(source), bos_token_(bos_token), eos_token_(eos_token)
    {
        template_root_ = minja::Parser::parse(source_, {
            /* .trim_blocks = */ true,
            /* .lstrip_blocks = */ true,
            /* .keep_trailing_newline = */ false,
        });

        auto contains = [](const std::string & haystack, const std::string & needle) {
            return haystack.find(needle) != std::string::npos;
        };

        const std::string user_needle = "<User Needle>";
        const std::string sys_needle = "<System Needle>";
        const json dummy_str_user_msg = {{"role", "user"}, {"content", user_needle}};
        const json dummy_typed_user_msg = {{"role", "user"}, {"content", json::array({{{"type", "text"}, {"text", user_needle}}})}};

        // Typed content is "required" when plain string content is dropped
        // but the [{type, text}] form renders.
        caps_.requires_typed_content =
            !contains(try_raw_render(json::array({dummy_str_user_msg}), {}, false), user_needle)
            && contains(try_raw_render(json::array({dummy_typed_user_msg}), {}, false), user_needle);

        // All subsequent probes use whichever content shape the template accepts.
        const auto dummy_user_msg = caps_.requires_typed_content
            ? dummy_typed_user_msg
            : dummy_str_user_msg;
        const json needle_system_msg = {
            {"role", "system"},
            {"content", caps_.requires_typed_content ? json::array({{{"type", "text"}, {"text", sys_needle}}}) : json(sys_needle)},
        };

        caps_.supports_system_role = contains(try_raw_render({needle_system_msg, dummy_user_msg,}, {}, false), sys_needle);

        // Probe native tools support with a minimal OpenAI-style tool schema.
        auto out = try_raw_render(json::array({
            dummy_user_msg
        }), json::array({
            {
                {"name", "some_tool"},
                {"type", "function"},
                {"function", {
                    {"name", "some_tool"},
                    {"description", "Some tool."},
                    {"parameters", {
                        {"type", "object"},
                        {"properties", {
                            {"arg", {
                                {"type", "string"},
                                {"description", "Some argument."},
                            }},
                        }},
                        {"required", json::array({ "arg" })},
                    }},
                }},
            },
        }), false);
        caps_.supports_tools = contains(out, "some_tool");

        auto make_tool_calls_msg = [&](const json & tool_calls) {
            return json {
                {"role", "assistant"},
                {"content", nullptr},
                {"tool_calls", tool_calls},
            };
        };
        auto make_tool_call = [](const std::string & tool_name, const json & arguments) {
            return json {
                {"id", "call_1___"},
                {"type", "function"},
                {"function", {
                    {"arguments", arguments},
                    {"name", tool_name},
                }},
            };
        };
        const json dummy_args_obj {{"argument_needle", "print('Hello, World!')"}};

        // Note: the arguments are rendered in both cases, but may be double-escaped, which we don't want.
        out = try_raw_render(json::array({
            dummy_user_msg,
            make_tool_calls_msg(json::array({make_tool_call("ipython", dummy_args_obj.dump())})),
        }), {}, false);
        auto tool_call_renders_str_arguments = contains(out, "\"argument_needle\":") || contains(out, "'argument_needle':");
        out = try_raw_render(json::array({
            dummy_user_msg,
            make_tool_calls_msg(json::array({make_tool_call("ipython", dummy_args_obj)})),
        }), {}, false);
        auto tool_call_renders_obj_arguments = contains(out, "\"argument_needle\":") || contains(out, "'argument_needle':");

        caps_.supports_tool_calls = tool_call_renders_str_arguments || tool_call_renders_obj_arguments;
        caps_.requires_object_arguments = !tool_call_renders_str_arguments && tool_call_renders_obj_arguments;
        // Detect templates that render "" content but drop null content.
        auto out_empty = try_raw_render(json::array({dummy_user_msg, {{"role", "assistant"}, {"content", ""}}}), {}, false);
        auto out_null = try_raw_render(json::array({dummy_user_msg, {{"role", "assistant"}, {"content", nullptr}}}), {}, false);
        caps_.requires_non_null_content = contains(out_empty, user_needle) && !contains(out_null, user_needle);

        if (caps_.supports_tool_calls) {
            // Probe parallel tool calls and tool-response rendering.
            auto dummy_args = caps_.requires_object_arguments ? dummy_args_obj : json(dummy_args_obj.dump());
            auto tc1 = make_tool_call("test_tool1", dummy_args);
            auto tc2 = make_tool_call("test_tool2", dummy_args);
            auto out = try_raw_render(json::array({
                dummy_user_msg,
                make_tool_calls_msg(json::array({tc1, tc2})),
            }), {}, false);
            caps_.supports_parallel_tool_calls = contains(out, "test_tool1") && contains(out, "test_tool2");

            out = try_raw_render(json::array({
                dummy_user_msg,
                make_tool_calls_msg(json::array({tc1})),
                {
                    {"role", "tool"},
                    {"name", "test_tool1"},
                    {"content", "Some response!"},
                    {"tool_call_id", "call_911_"},
                }
            }), {}, false);
            caps_.supports_tool_responses = contains(out, "Some response!");
            caps_.supports_tool_call_id = contains(out, "call_911_");
        }

        try {
            if (!caps_.supports_tools) {
                // The template can't describe tools itself, so infer an example
                // tool-call rendering to show in the polyfilled system prompt:
                // render the conversation with and without a tool-call message
                // and take the suffix where the two renders diverge.
                const json user_msg {
                    {"role", "user"},
                    {"content", "Hey"},
                };
                const json args {
                    {"arg1", "some_value"},
                };
                const json tool_call_msg {
                    {"role", "assistant"},
                    {"content", nullptr},
                    {"tool_calls", json::array({
                        {
                            // TODO: detect if requires numerical id or fixed length == 6 like Nemo
                            {"id", "call_1___"},
                            {"type", "function"},
                            {"function", {
                                {"name", "tool_name"},
                                {"arguments", (caps_.requires_object_arguments ? args : json(minja::Value(args).dump(-1, /* to_json= */ true)))},
                            }},
                        },
                    })},
                };
                std::string prefix, full;
                {
                    chat_template_inputs inputs;
                    inputs.messages = json::array({user_msg});
                    inputs.add_generation_prompt = true;
                    prefix = apply(inputs);
                }
                {
                    chat_template_inputs inputs;
                    inputs.messages = json::array({user_msg, tool_call_msg});
                    inputs.add_generation_prompt = false;
                    full = apply(inputs);
                }
                // Strip a trailing EOS token (optionally followed by a newline)
                // so it doesn't end up inside the example.
                auto eos_pos_last = full.rfind(eos_token_);
                if (eos_pos_last == prefix.size() - eos_token_.size() ||
                    (full[full.size() - 1] == '\n' && (eos_pos_last == full.size() - eos_token_.size() - 1))) {
                    full = full.substr(0, eos_pos_last);
                }
                size_t common_prefix_length = 0;
                for (size_t i = 0; i < prefix.size() && i < full.size(); ++i) {
                    if (prefix[i] != full[i]) {
                        break;
                    }
                    if (prefix[i] == '<') {
                        // DeepSeek R1's template (as of 20250209) adds a trailing <think> if add_generation_prompt,
                        // but it removes thinking tags for past messages.
                        // The prefix and full strings diverge at <think> vs. <|tool▁calls▁begin|>, we avoid consuming the leading <.
                        continue;
                    }
                    common_prefix_length = i + 1;
                }
                auto example = full.substr(common_prefix_length);
                if (example.find("tool_name") == std::string::npos && example.find("some_value") == std::string::npos) {
                    fprintf(stderr, "Failed to infer a tool call example (possible template bug)\n");
                } else {
                    tool_call_example_ = example;
                }
            }
        } catch (const std::exception & e) {
            // Example inference is best-effort: failure only means the
            // polyfilled system prompt won't include an example.
            fprintf(stderr, "Failed to generate tool call example: %s\n", e.what());
        }
    }

    const std::string & source() const { return source_; }
    const std::string & bos_token() const { return bos_token_; }
    const std::string & eos_token() const { return eos_token_; }
    // Capabilities probed from the raw template (before any polyfills).
    const chat_template_caps & original_caps() const { return caps_; }

    // Deprecated, please use the form with chat_template_inputs and chat_template_options
    std::string apply(
        const nlohmann::ordered_json & messages,
        const nlohmann::ordered_json & tools,
        bool add_generation_prompt,
        const nlohmann::ordered_json & extra_context = nlohmann::ordered_json(),
        bool apply_polyfills = true)
    {
        fprintf(stderr, "[%s] Deprecated!\n", __func__);
        chat_template_inputs inputs;
        inputs.messages = messages;
        inputs.tools = tools;
        inputs.add_generation_prompt = add_generation_prompt;
        inputs.extra_context = extra_context;
        inputs.now = std::chrono::system_clock::now();

        chat_template_options opts;
        opts.apply_polyfills = apply_polyfills;

        return apply(inputs, opts);
    }

    // Renders the conversation into a prompt string. When opts.apply_polyfills
    // is set, messages are first rewritten to work around any capability the
    // template lacks (see chat_template_caps); otherwise they are passed to
    // the template verbatim.
    // Throws std::runtime_error if a message is missing "role" or "content"
    // while polyfills are active; template rendering errors also propagate.
    std::string apply(
        const chat_template_inputs & inputs,
        const chat_template_options & opts = chat_template_options()) const
    {
        json actual_messages;

        // Scan the conversation once to see which polyfills could be relevant.
        auto has_tools = inputs.tools.is_array() && !inputs.tools.empty();
        auto has_tool_calls = false;
        auto has_tool_responses = false;
        auto has_string_content = false;
        for (const auto & message : inputs.messages) {
            if (message.contains("tool_calls") && !message["tool_calls"].is_null()) {
                has_tool_calls = true;
            }
            if (message.contains("role") && message["role"] == "tool") {
                has_tool_responses = true;
            }
            if (message.contains("content") && message["content"].is_string()) {
                has_string_content = true;
            }
        }

        // A polyfill activates only if it is enabled, the input needs it, and
        // the template can't handle it natively.
        auto polyfill_system_role = opts.polyfill_system_role && !caps_.supports_system_role;
        auto polyfill_tools = opts.polyfill_tools && has_tools && !caps_.supports_tools;
        auto polyfill_tool_call_example = polyfill_tools && opts.polyfill_tool_call_examples;
        auto polyfill_tool_calls = opts.polyfill_tool_calls && has_tool_calls && !caps_.supports_tool_calls;
        auto polyfill_tool_responses = opts.polyfill_tool_responses && has_tool_responses && !caps_.supports_tool_responses;
        auto polyfill_object_arguments = opts.polyfill_object_arguments && has_tool_calls && caps_.requires_object_arguments;
        auto polyfill_typed_content = opts.polyfill_typed_content && has_string_content && caps_.requires_typed_content;

        auto needs_polyfills = opts.apply_polyfills && (false
            || polyfill_system_role
            || polyfill_tools
            || polyfill_tool_calls
            || polyfill_tool_responses
            || polyfill_object_arguments
            || polyfill_typed_content
        );

        if (needs_polyfills) {
            actual_messages = json::array();

            // Appends a message, wrapping string content into the
            // [{type: "text", ...}] form when the template requires it.
            auto add_message = [&](const json & msg) {
                if (polyfill_typed_content && msg.contains("content") && !msg.at("content").is_null() && msg.at("content").is_string()) {
                    actual_messages.push_back({
                        {"role", msg.at("role")},
                        {"content", {{
                            {"type", "text"},
                            {"text", msg.at("content")},
                        }}},
                    });
                } else {
                    actual_messages.push_back(msg);
                }
            };

            // System content accumulated here is later merged into the next
            // user message (or flushed as a standalone user message).
            std::string pending_system;
            auto flush_sys = [&]() {
                if (!pending_system.empty()) {
                    add_message({
                        {"role", "user"},
                        {"content", pending_system},
                    });
                    pending_system.clear();
                }
            };

            json adjusted_messages;
            if (polyfill_tools) {
                // Describe the available tools (and optionally an example
                // call) in a system message, since the template has no native
                // tools support.
                adjusted_messages = add_system(inputs.messages,
                    "You can call any of the following tools to satisfy the user's requests: " + minja::Value(inputs.tools).dump(2, /* to_json= */ true) +
                    (!polyfill_tool_call_example || tool_call_example_.empty() ? "" : "\n\nExample tool call syntax:\n\n" + tool_call_example_ + "\n\n"));
            } else {
                adjusted_messages = inputs.messages;
            }

            for (const auto & message_ : adjusted_messages) {
                auto message = message_;
                if (!message.contains("role") || !message.contains("content")) {
                    throw std::runtime_error("message must have 'role' and 'content' fields: " + message.dump());
                }
                std::string role = message.at("role");

                if (message.contains("tool_calls")) {
                    if (polyfill_object_arguments || polyfill_tool_calls) {
                        // Normalize stringified arguments back into json
                        // objects (best effort: parse failures keep the string).
                        for (auto & tool_call : message.at("tool_calls")) {
                            if (tool_call["type"] == "function") {
                                auto & function = tool_call.at("function");
                                auto & arguments = function.at("arguments");
                                if (arguments.is_string()) {
                                    try {
                                        arguments = json::parse(arguments.get<std::string>());
                                    } catch (const std::exception & ecvt) {
                                        fprintf(stderr, "Failed to parse arguments: %s\n", ecvt.what());
                                    }
                                }
                            }
                        }
                    }
                    if (polyfill_tool_calls) {
                        // Template can't render tool_calls: serialize them
                        // (plus any content) into the message content instead.
                        auto content = message.at("content");
                        auto tool_calls = json::array();
                        for (const auto & tool_call : message.at("tool_calls")) {
                            if (tool_call.at("type") != "function") {
                                continue;
                            }
                            const auto & function = tool_call.at("function");
                            auto tc = json {
                                {"name", function.at("name")},
                                {"arguments", function.at("arguments")},
                            };
                            if (tool_call.contains("id")) {
                                tc["id"] = tool_call["id"];
                            }
                            tool_calls.push_back(tc);
                        }
                        auto obj = json {
                            {"tool_calls", tool_calls},
                        };
                        if (!content.is_null() && !content.empty()) {
                            obj["content"] = content;
                        }
                        message["content"] = obj.dump(2);
                        message.erase("tool_calls");
                    }
                }
                if (polyfill_tool_responses && role == "tool") {
                    // Template can't render tool messages: re-emit them as
                    // user messages carrying a serialized tool_response object.
                    message["role"] = "user";
                    auto obj = json {
                        {"tool_response", json::object()},
                    };
                    if (message.contains("name")) {
                        obj["tool_response"]["tool"] = message.at("name");
                    }
                    obj["tool_response"]["content"] = message.at("content");
                    if (message.contains("tool_call_id")) {
                        obj["tool_response"]["tool_call_id"] = message.at("tool_call_id");
                    }
                    message["content"] = obj.dump(2);
                    message.erase("name");
                }

                if (!message["content"].is_null() && polyfill_system_role) {
                    // Template can't render system messages: buffer their
                    // content and prepend it to the next user message.
                    std::string content = message.at("content");
                    if (role == "system") {
                        if (!pending_system.empty()) pending_system += "\n";
                        pending_system += content;
                        continue;
                    } else {
                        if (role == "user") {
                            if (!pending_system.empty()) {
                                message["content"] = pending_system + (content.empty() ? "" : "\n" + content);
                                pending_system.clear();
                            }
                        } else {
                            flush_sys();
                        }
                    }
                }
                add_message(message);
            }
            flush_sys();
        } else {
            actual_messages = inputs.messages;
        }

        // Build the template context and render.
        auto context = minja::Context::make(json({
            {"messages", actual_messages},
            {"add_generation_prompt", inputs.add_generation_prompt},
        }));
        context->set("bos_token", opts.use_bos_token ? bos_token_ : "");
        context->set("eos_token", opts.use_eos_token ? eos_token_ : "");
        if (opts.define_strftime_now) {
            // strftime_now(format): formats inputs.now in local time.
            auto now = inputs.now;
            context->set("strftime_now", Value::callable([now](const std::shared_ptr<minja::Context> &, minja::ArgumentsValue & args) {
                args.expectArgs("strftime_now", {1, 1}, {0, 0});
                auto format = args.args[0].get<std::string>();

                auto time = std::chrono::system_clock::to_time_t(now);
                auto local_time = *std::localtime(&time);
                std::ostringstream ss;
                ss << std::put_time(&local_time, format.c_str());
                return ss.str();
            }));
        }
        if (!inputs.tools.is_null()) {
            context->set("tools", minja::Value(inputs.tools));
        }
        if (!inputs.extra_context.is_null()) {
            for (auto & kv : inputs.extra_context.items()) {
                context->set(kv.key(), minja::Value(kv.value()));
            }
        }

        auto ret = template_root_->render(context);
        // fprintf(stderr, "actual_messages: %s\n", actual_messages.dump(2).c_str());
        // fprintf(stderr, "apply: %s\n\n", ret.c_str());
        return ret;
    }

    // Returns a copy of `messages` with `system_prompt` merged into the
    // leading system message, or inserted as a new one if there is none.
    static nlohmann::ordered_json add_system(const nlohmann::ordered_json & messages, const std::string & system_prompt) {
        json messages_with_system = messages;

        if (!messages_with_system.empty() && messages_with_system[0].at("role") == "system") {
            std::string existing_system = messages_with_system.at(0).at("content");
            messages_with_system[0] = json {
                {"role", "system"},
                {"content", existing_system + "\n\n" + system_prompt},
            };
        } else {
            messages_with_system.insert(messages_with_system.begin(), json {
                {"role", "system"},
                {"content", system_prompt},
            });
        }
        return messages_with_system;
    }
};
|
536
|
+
|
537
|
+
} // namespace minja
|