cactus-react-native 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +230 -0
- package/android/build.gradle +104 -0
- package/android/gradle.properties +5 -0
- package/android/src/main/AndroidManifest.xml +4 -0
- package/android/src/main/CMakeLists.txt +104 -0
- package/android/src/main/java/com/cactus/Cactus.java +646 -0
- package/android/src/main/java/com/cactus/CactusPackage.java +48 -0
- package/android/src/main/java/com/cactus/LlamaContext.java +579 -0
- package/android/src/main/jni-utils.h +100 -0
- package/android/src/main/jni.cpp +1254 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/libcactus.so +0 -0
- package/android/src/main/jniLibs/x86_64/libcactus_x86_64.so +0 -0
- package/android/src/newarch/java/com/cactus/CactusModule.java +124 -0
- package/android/src/oldarch/java/com/cactus/CactusModule.java +125 -0
- package/cactus-react-native.podspec +45 -0
- package/ios/CMakeLists.txt +109 -0
- package/ios/Cactus.h +6 -0
- package/ios/Cactus.mm +405 -0
- package/ios/CactusContext.h +57 -0
- package/ios/CactusContext.mm +835 -0
- package/ios/cactus.xcframework/info.plist +74 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/cactus.h +133 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/chat.h +143 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/common.h +683 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-alloc.h +76 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-backend.h +354 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-common.h +1851 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpp.h +39 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-aarch64.h +8 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-impl.h +531 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-quants.h +63 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-traits.h +38 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu.h +138 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-impl.h +567 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-metal-impl.h +530 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-metal.h +66 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-opt.h +216 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-quants.h +100 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-threading.h +14 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml.h +2221 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/gguf.h +202 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/json.hpp +24766 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-adapter.h +76 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-arch.h +421 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-batch.h +88 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-chat.h +53 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-context.h +265 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-cparams.h +38 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-cpp.h +30 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-grammar.h +173 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-graph.h +574 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-hparams.h +148 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-impl.h +61 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-io.h +35 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-kv-cache.h +287 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-memory.h +21 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-mmap.h +68 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model-loader.h +167 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model.h +403 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-sampling.h +32 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-vocab.h +125 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama.h +1416 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/log.h +103 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/chat-template.hpp +529 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/minja.hpp +2915 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/sampling.h +107 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/sgemm.h +14 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/unicode-data.h +20 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/unicode.h +66 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/ggml-llama.metallib +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +133 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/chat.h +143 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/common.h +683 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-alloc.h +76 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h +354 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h +1851 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h +39 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-aarch64.h +8 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-impl.h +531 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-quants.h +63 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-traits.h +38 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h +138 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h +567 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h +530 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal.h +66 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h +216 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-quants.h +100 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-threading.h +14 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml.h +2221 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/gguf.h +202 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/json.hpp +24766 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-adapter.h +76 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-arch.h +421 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-batch.h +88 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-chat.h +53 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-context.h +265 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-cparams.h +38 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-cpp.h +30 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-grammar.h +173 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-graph.h +574 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h +148 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-impl.h +61 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-io.h +35 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h +287 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h +21 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-mmap.h +68 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-loader.h +167 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model.h +403 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-sampling.h +32 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-vocab.h +125 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama.h +1416 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/log.h +103 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +529 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/minja.hpp +2915 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/sampling.h +107 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/sgemm.h +14 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/unicode-data.h +20 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/unicode.h +66 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +101 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/ggml-llama-sim.metallib +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/cactus.h +133 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/chat.h +143 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/common.h +683 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-alloc.h +76 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-backend.h +354 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-common.h +1851 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpp.h +39 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-aarch64.h +8 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-impl.h +531 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-quants.h +63 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-traits.h +38 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu.h +138 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-impl.h +567 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-metal-impl.h +530 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-metal.h +66 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-opt.h +216 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-quants.h +100 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-threading.h +14 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml.h +2221 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/gguf.h +202 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/json.hpp +24766 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-adapter.h +76 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-arch.h +421 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-batch.h +88 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-chat.h +53 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-context.h +265 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-cparams.h +38 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-cpp.h +30 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-grammar.h +173 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-graph.h +574 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-hparams.h +148 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-impl.h +61 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-io.h +35 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-kv-cache.h +287 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-memory.h +21 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-mmap.h +68 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model-loader.h +167 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model.h +403 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-sampling.h +32 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-vocab.h +125 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama.h +1416 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/log.h +103 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/chat-template.hpp +529 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/minja.hpp +2915 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/sampling.h +107 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/sgemm.h +14 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/unicode-data.h +20 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/unicode.h +66 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/ggml-llama.metallib +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +133 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/chat.h +143 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/common.h +683 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-alloc.h +76 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h +354 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h +1851 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h +39 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-aarch64.h +8 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-impl.h +531 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-quants.h +63 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-traits.h +38 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h +138 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h +567 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h +530 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal.h +66 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h +216 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-quants.h +100 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-threading.h +14 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml.h +2221 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/gguf.h +202 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/json-schema-to-grammar.h +21 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/json.hpp +24766 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-adapter.h +76 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-arch.h +421 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-batch.h +88 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-chat.h +53 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-context.h +265 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-cparams.h +38 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-cpp.h +30 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-grammar.h +173 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-graph.h +574 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h +148 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-impl.h +61 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-io.h +35 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h +287 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h +21 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-mmap.h +68 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-loader.h +167 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model.h +403 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-sampling.h +32 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-vocab.h +125 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama.h +1416 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/log.h +103 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +529 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/minja.hpp +2915 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/sampling.h +107 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/sgemm.h +14 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/unicode-data.h +20 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/unicode.h +66 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +101 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/ggml-llama-sim.metallib +0 -0
- package/lib/commonjs/NativeCactus.js +9 -0
- package/lib/commonjs/NativeCactus.js.map +1 -0
- package/lib/commonjs/chat.js +37 -0
- package/lib/commonjs/chat.js.map +1 -0
- package/lib/commonjs/grammar.js +560 -0
- package/lib/commonjs/grammar.js.map +1 -0
- package/lib/commonjs/index.js +300 -0
- package/lib/commonjs/index.js.map +1 -0
- package/lib/commonjs/package.json +1 -0
- package/lib/module/NativeCactus.js +5 -0
- package/lib/module/NativeCactus.js.map +1 -0
- package/lib/module/chat.js +33 -0
- package/lib/module/chat.js.map +1 -0
- package/lib/module/grammar.js +553 -0
- package/lib/module/grammar.js.map +1 -0
- package/lib/module/index.js +277 -0
- package/lib/module/index.js.map +1 -0
- package/lib/module/package.json +1 -0
- package/lib/typescript/NativeCactus.d.ts +357 -0
- package/lib/typescript/NativeCactus.d.ts.map +1 -0
- package/lib/typescript/chat.d.ts +10 -0
- package/lib/typescript/chat.d.ts.map +1 -0
- package/lib/typescript/grammar.d.ts +37 -0
- package/lib/typescript/grammar.d.ts.map +1 -0
- package/lib/typescript/index.d.ts +96 -0
- package/lib/typescript/index.d.ts.map +1 -0
- package/package.json +223 -0
- package/src/NativeCactus.ts +418 -0
- package/src/chat.ts +44 -0
- package/src/grammar.ts +854 -0
- package/src/index.ts +482 -0
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h
ADDED
@@ -0,0 +1,148 @@
+#pragma once
+
+#include "llama.h"
+
+#include <array>
+
+// bump if necessary
+#define LLAMA_MAX_LAYERS 512
+#define LLAMA_MAX_EXPERTS 256 // DeepSeekV3
+
+enum llama_expert_gating_func_type {
+    LLAMA_EXPERT_GATING_FUNC_TYPE_NONE    = 0,
+    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1,
+    LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2,
+};
+
+struct llama_hparams_posnet {
+    uint32_t n_embd;
+    uint32_t n_layer;
+};
+
+struct llama_hparams_convnext {
+    uint32_t n_embd;
+    uint32_t n_layer;
+};
+
+struct llama_hparams {
+    bool vocab_only;
+    bool rope_finetuned;
+    bool use_par_res;
+    bool swin_norm;
+
+    uint32_t n_ctx_train; // context size the model was trained on
+    uint32_t n_embd;
+    uint32_t n_embd_features = 0;
+    uint32_t n_layer;
+    uint32_t n_rot;
+    uint32_t n_swa = 0;         // sliding window attention (SWA)
+    uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention
+    uint32_t n_embd_head_k;     // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
+    uint32_t n_embd_head_v;     // dimension of values (d_v) aka n_embd_head
+    uint32_t n_expert = 0;
+    uint32_t n_expert_used = 0;
+    uint32_t n_rel_attn_bkts = 0;
+
+    // for WavTokenizer
+    struct llama_hparams_posnet   posnet;
+    struct llama_hparams_convnext convnext;
+
+    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
+    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
+    std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
+
+    uint32_t n_layer_dense_lead = 0;
+    uint32_t n_lora_q = 0;
+    uint32_t n_lora_kv = 0;
+    uint32_t n_ff_exp = 0;
+    uint32_t n_ff_shexp = 0;
+    uint32_t n_expert_shared = 0;
+    uint32_t n_norm_groups = 0;
+
+    float    expert_weights_scale = 0.0;
+    bool     expert_weights_norm  = false;
+    uint32_t expert_gating_func   = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
+
+    float f_norm_eps;
+    float f_norm_rms_eps;
+    float f_norm_group_eps;
+
+    float f_attn_logit_softcapping  = 50.0f;
+    float f_final_logit_softcapping = 30.0f;
+
+    // for RWKV
+    uint32_t rescale_every_n_layers = 0;
+    uint32_t time_mix_extra_dim = 0;
+    uint32_t time_decay_extra_dim = 0;
+    uint32_t wkv_head_size = 0;
+    uint32_t token_shift_count = 2;
+    uint32_t n_lora_decay = 0;
+    uint32_t n_lora_iclr = 0;
+    uint32_t n_lora_value_res_mix = 0;
+    uint32_t n_lora_gate = 0;
+
+    float    rope_attn_factor = 1.0f;
+    float    rope_freq_base_train;
+    float    rope_freq_base_train_swa;
+    float    rope_freq_scale_train;
+    float    rope_freq_scale_train_swa;
+    uint32_t n_ctx_orig_yarn;
+    float    rope_yarn_log_mul;
+
+    std::array<int, 4> rope_sections;
+
+    // for State Space Models
+    uint32_t ssm_d_conv  = 0;
+    uint32_t ssm_d_inner = 0;
+    uint32_t ssm_d_state = 0;
+    uint32_t ssm_dt_rank = 0;
+
+    bool ssm_dt_b_c_rms = false;
+
+    float f_clamp_kqv      = 0.0f;
+    float f_max_alibi_bias = 0.0f;
+    float f_logit_scale    = 0.0f;
+
+    // Additional scale factors (Granite/Granite MoE)
+    float f_residual_scale  = 0.0f;
+    float f_embedding_scale = 0.0f;
+    float f_attention_scale = 0.0f;
+
+    bool causal_attn   = true;
+    bool use_alibi     = false;
+    bool attn_soft_cap = false;
+
+    // needed by encoder-decoder models (e.g. T5, FLAN-T5)
+    // ref: https://github.com/ggerganov/llama.cpp/pull/8141
+    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
+
+    enum llama_pooling_type      pooling_type            = LLAMA_POOLING_TYPE_NONE;
+    enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
+    enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
+
+    uint32_t n_head(uint32_t il = 0) const;
+
+    uint32_t n_head_kv(uint32_t il = 0) const;
+
+    uint32_t n_ff(uint32_t il = 0) const;
+
+    uint32_t n_gqa(uint32_t il = 0) const;
+
+    // dimension of key embeddings across all k-v heads
+    uint32_t n_embd_k_gqa(uint32_t il = 0) const;
+
+    // dimension of value embeddings across all k-v heads
+    uint32_t n_embd_v_gqa(uint32_t il = 0) const;
+
+    // dimension of the rolling state embeddings
+    // corresponds to Mamba's conv_states size or RWKV's token_shift states size
+    uint32_t n_embd_k_s() const;
+
+    // dimension of the recurrent state embeddings
+    uint32_t n_embd_v_s() const;
+
+    bool is_swa(uint32_t il) const;
+};
+
+static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
+
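
The per-layer arrays (n_head_arr, n_head_kv_arr) together with the n_gqa and n_embd_k_gqa accessors encode the grouped-query-attention bookkeeping: several query heads share one key-value head, and the KV cache only needs rows as wide as the k-v heads. The accessor bodies live in the compiled library, so the arithmetic below is a minimal standalone sketch of the presumed relationships (the gqa_hparams type is hypothetical):

    // Hypothetical illustration of the GQA arithmetic behind n_gqa / n_embd_k_gqa.
    #include <cassert>
    #include <cstdint>

    struct gqa_hparams {
        uint32_t n_head;        // query heads in a layer
        uint32_t n_head_kv;     // key/value heads in that layer (n_head_kv <= n_head)
        uint32_t n_embd_head_k; // dimension of each key head (d_k)

        // number of query heads sharing one k-v head
        uint32_t n_gqa() const { return n_head_kv == 0 ? 0 : n_head / n_head_kv; }

        // total key embedding width across all k-v heads,
        // i.e. the per-token row size of the K cache
        uint32_t n_embd_k_gqa() const { return n_embd_head_k * n_head_kv; }
    };

    int main() {
        // e.g. a Llama-3-8B-like layer: 32 q heads, 8 kv heads, 128-dim heads
        gqa_hparams hp = {32, 8, 128};
        assert(hp.n_gqa() == 4);           // 4 query heads per k-v head
        assert(hp.n_embd_k_gqa() == 1024); // K cache stores 1024 values per token
    }
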
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-impl.h
ADDED
@@ -0,0 +1,61 @@
+#pragma once
+
+#include "ggml.h" // for lm_ggml_log_level
+
+#include <string>
+#include <vector>
+
+#ifdef __GNUC__
+#  if defined(__MINGW32__) && !defined(__clang__)
+#    define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+#  else
+#    define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
+#  endif
+#else
+#  define LLAMA_ATTRIBUTE_FORMAT(...)
+#endif
+
+//
+// logging
+//
+
+LLAMA_ATTRIBUTE_FORMAT(2, 3)
+void llama_log_internal        (lm_ggml_log_level level, const char * format, ...);
+void llama_log_callback_default(lm_ggml_log_level level, const char * text, void * user_data);
+
+#define LLAMA_LOG(...)       llama_log_internal(LM_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
+#define LLAMA_LOG_INFO(...)  llama_log_internal(LM_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...)  llama_log_internal(LM_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(LM_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define LLAMA_LOG_DEBUG(...) llama_log_internal(LM_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
+#define LLAMA_LOG_CONT(...)  llama_log_internal(LM_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
+
+//
+// helpers
+//
+
+template <typename T>
+struct no_init {
+    T value;
+    no_init() { /* do nothing */ }
+};
+
+struct time_meas {
+    time_meas(int64_t & t_acc, bool disable = false);
+    ~time_meas();
+
+    const int64_t t_start_us;
+
+    int64_t & t_acc;
+};
+
+void replace_all(std::string & s, const std::string & search, const std::string & replace);
+
+// TODO: rename to llama_format ?
+LLAMA_ATTRIBUTE_FORMAT(1, 2)
+std::string format(const char * fmt, ...);
+
+std::string llama_format_tensor_shape(const std::vector<int64_t> & ne);
+std::string llama_format_tensor_shape(const struct lm_ggml_tensor * t);
+
+std::string lm_gguf_kv_to_str(const struct lm_gguf_context * ctx_gguf, int i);
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-io.h
ADDED
@@ -0,0 +1,35 @@
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
+struct lm_ggml_tensor;
+
+class llama_io_write_i {
+public:
+    llama_io_write_i() = default;
+    virtual ~llama_io_write_i() = default;
+
+    virtual void write(const void * src, size_t size) = 0;
+    virtual void write_tensor(const lm_ggml_tensor * tensor, size_t offset, size_t size) = 0;
+
+    // bytes written so far
+    virtual size_t n_bytes() = 0;
+
+    void write_string(const std::string & str);
+};
+
+class llama_io_read_i {
+public:
+    llama_io_read_i() = default;
+    virtual ~llama_io_read_i() = default;
+
+    virtual const uint8_t * read(size_t size) = 0;
+    virtual void read_to(void * dst, size_t size) = 0;
+
+    // bytes read so far
+    virtual size_t n_bytes() = 0;
+
+    void read_string(std::string & str);
+};
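
write_string and read_string are non-virtual helpers layered on the virtual write/read primitives, so every backend (file, in-memory buffer, ...) gets string support for free. The header does not show the wire format; a sketch assuming a length-prefixed encoding, with a hypothetical buffer_writer standing in for a llama_io_write_i implementation:

    // Plausible length-prefixed string helper built on the write() primitive
    // (the uint32_t length prefix is an assumption, not the confirmed format).
    #include <cstdint>
    #include <string>
    #include <vector>

    class buffer_writer { // hypothetical in-memory backend
    public:
        void write(const void * src, size_t size) {
            const uint8_t * p = static_cast<const uint8_t *>(src);
            buf.insert(buf.end(), p, p + size);
        }
        size_t n_bytes() const { return buf.size(); }

        void write_string(const std::string & str) {
            const uint32_t len = (uint32_t) str.size();
            write(&len, sizeof(len)); // length prefix ...
            write(str.data(), len);   // ... then the raw bytes
        }

        std::vector<uint8_t> buf;
    };

    int main() {
        buffer_writer w;
        w.write_string("gguf");
        // 4-byte length + 4 payload bytes
        return w.n_bytes() == 8 ? 0 : 1;
    }
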
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h
ADDED
@@ -0,0 +1,287 @@
+#pragma once
+
+#include "llama.h"
+#include "llama-io.h"
+#include "llama-memory.h"
+
+#include "ggml-cpp.h"
+
+#include <functional>
+#include <set>
+#include <vector>
+
+struct llama_cparams;
+struct llama_hparams;
+struct llama_ubatch;
+
+struct llama_kv_cache : public llama_memory_i {
+    using llama_memory_i::llama_memory_i;
+
+    virtual int32_t  get_n_tokens()   const = 0;
+    virtual uint32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
+
+    virtual bool get_can_shift() const = 0;
+
+    bool get_can_edit() const override { return get_can_shift(); }
+};
+
+struct llama_kv_cell {
+    llama_pos pos   = -1;
+    llama_pos delta =  0;
+    int32_t   src   = -1; // used by recurrent state models to copy states
+    int32_t   tail  = -1;
+
+    std::set<llama_seq_id> seq_id;
+
+    bool has_seq_id(const llama_seq_id & id) const {
+        return seq_id.find(id) != seq_id.end();
+    }
+
+    bool is_empty() const {
+        return seq_id.empty();
+    }
+
+    bool is_same_seq(const llama_kv_cell & other) const {
+        return seq_id == other.seq_id;
+    }
+};
+
+// a structure holds information about the slot found in llama_kv_cache_find_slot
+struct llama_kv_cache_slot_info {
+    std::pair<uint32_t, uint32_t> boundaries; // slot boundaries [begin, end)
+    bool found = false;                       // the slot was found
+
+    explicit llama_kv_cache_slot_info(bool found_) : found{found_} {}
+    llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {}
+
+    operator bool() const { return found; }
+};
+
+// ring-buffer of cached KV data
+// TODO: pimpl
+// TODO: add notion of max sequences
+class llama_kv_cache_unified : public llama_kv_cache {
+public:
+    // can be used to query data from the model if needed
+    struct callbacks {
+        std::function<lm_ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
+    };
+
+    llama_kv_cache_unified(
+            const llama_hparams & hparams,
+            callbacks cbs);
+
+    virtual ~llama_kv_cache_unified() = default;
+
+    // TODO: become constructor
+    bool init(
+            const llama_model & model, // TODO: do not reference the model
+            const llama_cparams & cparams,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            uint32_t kv_size,
+            bool offload);
+
+    int32_t  get_n_tokens()   const override;
+    uint32_t get_used_cells() const override;
+
+    size_t total_size() const;
+
+    // TODO: better data structures to reduce the cost of this operation
+    llama_pos pos_max() const;
+
+    void clear() override;
+    void defrag() override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_max(llama_seq_id seq_id) override;
+
+    bool get_can_shift() const override;
+
+    // find an empty slot of size "n_tokens" in the cache
+    // updates the cache head
+    // returns a structure holding information about the slot found
+    // Note: On success, it's important that cache.head points
+    // to the first cell of the slot.
+    llama_kv_cache_slot_info find_slot(const llama_ubatch & batch);
+
+    // TODO: maybe not needed
+    uint32_t get_padding(const llama_cparams & cparams) const;
+
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    // defrag
+
+    struct {
+        std::vector<uint32_t> ids;
+    } defrag_info;
+
+    // return true if cells have been moved
+    bool defrag_prepare(int32_t n_max_nodes);
+
+    // state save/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1);
+
+    // members
+
+    const llama_hparams & hparams;
+
+    callbacks cbs;
+
+    bool has_shift = false;
+    bool do_defrag = false;
+
+    // TODO: remove this and implement llama_kv_cache_recurrent instead
+    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
+
+    bool v_trans   = true;  // the value tensor is transposed
+    bool can_shift = false;
+
+    // Note: The value of head isn't only used to optimize searching
+    // for a free KV slot. llama_decode_impl also uses it, so it
+    // cannot be freely changed after a slot has been allocated.
+    uint32_t head = 0;
+    uint32_t size = 0;
+    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    std::vector<llama_kv_cell> cells;
+
+    std::vector<lm_ggml_tensor *> k_l; // per layer
+    std::vector<lm_ggml_tensor *> v_l;
+
+private:
+    lm_ggml_type type_k = LM_GGML_TYPE_F16;
+    lm_ggml_type type_v = LM_GGML_TYPE_F16;
+
+    std::vector<lm_ggml_context_ptr>        ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
+
+// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
+//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
+//public:
+//    using llama_kv_cache_unified::llama_kv_cache_unified;
+//};
+
+//
+// kv cache restore
+//
+
+// saves the kv_cache state for future recovery.
+// used to rollback llama_kv_cache_find_slot changes.
+struct llama_kv_slot_restorer {
+    struct llama_kv_cache_state {
+        uint32_t head = 0;
+        uint32_t n    = 0;
+    } old_state;
+
+    // for non-recurrent models only
+    // list of slots to restore
+    std::vector<std::pair<uint32_t, uint32_t>> slot_boundaries;
+
+    bool do_restore = false;
+
+    llama_kv_cache_unified & cache;
+
+    explicit llama_kv_slot_restorer(llama_kv_cache_unified & cache) : cache(cache) {
+        old_state.head = cache.head;
+        old_state.n    = cache.n;
+    }
+
+    // saves a slot information for future restoration
+    void save(const llama_kv_cache_slot_info & slot) {
+        if (slot) {
+            do_restore = true;
+            if (slot.boundaries.first != slot.boundaries.second) {
+                slot_boundaries.push_back(slot.boundaries);
+            }
+        }
+    }
+
+    // must be explicitly called to restore the kv_cache state
+    // and rollback changes from all llama_kv_cache_find_slot calls
+    void restore() {
+        if (do_restore) {
+            cache.head = old_state.head;
+            cache.n    = old_state.n;
+
+            if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased
+                cache.seq_rm(-1, -1, -1);
+            } else {
+                for (auto & slot : slot_boundaries) {
+                    cache.seq_rm(-1, slot.first, slot.second);
+                }
+            }
+        }
+    }
+};
+
+// TODO: maybe become part of the public llama_kv_cache in the future
+int32_t llama_kv_cache_n_tokens(const llama_kv_cache * kv);
+
+int32_t llama_kv_cache_used_cells(const llama_kv_cache * kv);
+
+void llama_kv_cache_clear(llama_kv_cache * kv);
+
+bool llama_kv_cache_seq_rm(
+        llama_kv_cache * kv,
+        llama_seq_id seq_id,
+        llama_pos p0,
+        llama_pos p1);
+
+void llama_kv_cache_seq_cp(
+        llama_kv_cache * kv,
+        llama_seq_id seq_id_src,
+        llama_seq_id seq_id_dst,
+        llama_pos p0,
+        llama_pos p1);
+
+void llama_kv_cache_seq_keep(llama_kv_cache * kv, llama_seq_id seq_id);
+
+void llama_kv_cache_seq_add(
+        llama_kv_cache * kv,
+        llama_seq_id seq_id,
+        llama_pos p0,
+        llama_pos p1,
+        llama_pos delta);
+
+void llama_kv_cache_seq_div(
+        llama_kv_cache * kv,
+        llama_seq_id seq_id,
+        llama_pos p0,
+        llama_pos p1,
+        int d);
+
+llama_pos llama_kv_cache_seq_pos_max(llama_kv_cache * kv, llama_seq_id seq_id);
+
+void llama_kv_cache_defrag(llama_kv_cache * kv);
+
+bool llama_kv_cache_can_shift(const llama_kv_cache * kv);
+
+//
+// kv cache view
+//
+
+llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
+
+void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
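
The comments on llama_kv_slot_restorer spell out a save/rollback protocol: record head/n at construction, save() the result of each find_slot while a batch is being placed, and call restore() if decoding fails so partially-claimed cells are released via seq_rm. A self-contained analog of that pattern, using hypothetical tiny_cache/tiny_restorer types instead of the real cache classes:

    // Self-contained analog of the find_slot + restorer pattern above
    // (tiny_cache/tiny_restorer are illustrative stand-ins).
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct tiny_cache {
        uint32_t head = 0;
        uint32_t n    = 0;
        // claim cells [head, head + n_tokens) and advance the head
        std::pair<uint32_t, uint32_t> find_slot(uint32_t n_tokens) {
            auto slot = std::make_pair(head, head + n_tokens);
            head += n_tokens;
            n = n_tokens;
            return slot;
        }
        void seq_rm(uint32_t /*begin*/, uint32_t /*end*/) { /* release cells */ }
    };

    struct tiny_restorer {
        explicit tiny_restorer(tiny_cache & c) : cache(c) {
            old_state.head = c.head;
            old_state.n    = c.n;
        }
        void save(std::pair<uint32_t, uint32_t> slot) { slots.push_back(slot); }
        void restore() { // roll back every find_slot since construction
            cache.head = old_state.head;
            cache.n    = old_state.n;
            for (auto & s : slots) cache.seq_rm(s.first, s.second);
        }
        tiny_cache & cache;
        struct { uint32_t head = 0, n = 0; } old_state;
        std::vector<std::pair<uint32_t, uint32_t>> slots;
    };

    int main() {
        tiny_cache kv;
        tiny_restorer restorer(kv);
        restorer.save(kv.find_slot(16)); // place a 16-token micro-batch
        bool decode_ok = false;          // suppose graph compute failed
        if (!decode_ok) restorer.restore(); // head/n return to their old values
        return kv.head == 0 ? 0 : 1;
    }
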
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+#include "llama.h"
+
+// general concept of LLM memory
+// the KV cache is a type of LLM memory, but there can be other types
+class llama_memory_i {
+public:
+    virtual void clear() = 0;
+    virtual void defrag() = 0;
+
+    virtual bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_keep(llama_seq_id seq_id) = 0;
+    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
+    virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
+
+    virtual llama_pos seq_pos_max(llama_seq_id seq_id) = 0;
+
+    virtual bool get_can_edit() const = 0;
+};
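
Each seq_* operation acts on a position range of one sequence; seq_add in particular shifts token positions, which is what context shifting relies on (drop the oldest tokens, then slide the survivors left so positions stay contiguous). A hypothetical in-memory analog makes the semantics concrete; the [p0, p1) range convention is an assumption here:

    // Illustrative analog of seq_add on a flat cell array (hypothetical types).
    #include <cstdint>
    #include <vector>

    using pos_t = int32_t; // stand-in for llama_pos

    struct cell { pos_t pos; int seq; };

    // shift every position of `seq_id` in [p0, p1) by `delta`
    static void seq_add(std::vector<cell> & cells, int seq_id,
                        pos_t p0, pos_t p1, pos_t delta) {
        for (auto & c : cells) {
            if (c.seq == seq_id && c.pos >= p0 && c.pos < p1) {
                c.pos += delta;
            }
        }
    }

    int main() {
        std::vector<cell> cells = {{0, 0}, {1, 0}, {2, 0}, {3, 0}};
        seq_add(cells, 0, 2, 4, -2); // move positions 2..3 to 0..1
        return cells[2].pos == 0 && cells[3].pos == 1 ? 0 : 1;
    }
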
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-mmap.h
ADDED
@@ -0,0 +1,68 @@
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+struct llama_file;
+struct llama_mmap;
+struct llama_mlock;
+
+using llama_files  = std::vector<std::unique_ptr<llama_file>>;
+using llama_mmaps  = std::vector<std::unique_ptr<llama_mmap>>;
+using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;
+
+struct llama_file {
+    llama_file(const char * fname, const char * mode);
+    ~llama_file();
+
+    size_t tell() const;
+    size_t size() const;
+
+    int file_id() const; // fileno overload
+
+    void seek(size_t offset, int whence) const;
+
+    void read_raw(void * ptr, size_t len) const;
+    uint32_t read_u32() const;
+
+    void write_raw(const void * ptr, size_t len) const;
+    void write_u32(uint32_t val) const;
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+struct llama_mmap {
+    llama_mmap(const llama_mmap &) = delete;
+    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false);
+    ~llama_mmap();
+
+    size_t size() const;
+    void * addr() const;
+
+    void unmap_fragment(size_t first, size_t last);
+
+    static const bool SUPPORTED;
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+struct llama_mlock {
+    llama_mlock();
+    ~llama_mlock();
+
+    void init(void * ptr);
+    void grow_to(size_t target_size);
+
+    static const bool SUPPORTED;
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+size_t llama_path_max();
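
llama_file and llama_mmap hide their platform code behind pimpl so model weights can be memory-mapped rather than copied into RAM. On POSIX systems the mapping step presumably reduces to open/fstat/mmap, with the prefetch parameter approximated by read-ahead advice; the standalone sketch below shows that pattern (an illustration of what the pimpl likely wraps, not the packaged implementation):

    // Standalone POSIX sketch of mapping a model file read-only.
    #include <cstdio>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(int argc, char ** argv) {
        if (argc < 2) { std::fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]); return 1; }

        const int fd = open(argv[1], O_RDONLY);
        if (fd < 0) { std::perror("open"); return 1; }

        struct stat st;
        if (fstat(fd, &st) != 0) { std::perror("fstat"); close(fd); return 1; }

        // map the whole file read-only; pages fault in lazily on access
        void * addr = mmap(nullptr, (size_t) st.st_size, PROT_READ, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) { std::perror("mmap"); close(fd); return 1; }

        // "prefetch" roughly corresponds to advising the kernel to read ahead
        madvise(addr, (size_t) st.st_size, MADV_WILLNEED);

        std::printf("mapped %lld bytes at %p\n", (long long) st.st_size, addr);

        munmap(addr, (size_t) st.st_size);
        close(fd);
        return 0;
    }
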