cui-llama.rn 1.6.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +35 -7
- package/android/src/main/CMakeLists.txt +22 -11
- package/android/src/main/java/com/rnllama/LlamaContext.java +42 -6
- package/android/src/main/java/com/rnllama/RNLlama.java +139 -4
- package/android/src/main/jni.cpp +173 -18
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
- package/cpp/LICENSE +21 -0
- package/cpp/chat.cpp +129 -107
- package/cpp/chat.h +2 -0
- package/cpp/common.cpp +58 -78
- package/cpp/common.h +29 -21
- package/cpp/ggml-alloc.c +4 -1
- package/cpp/ggml-backend.cpp +9 -5
- package/cpp/ggml-backend.h +4 -4
- package/cpp/ggml-cpp.h +1 -1
- package/cpp/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/ggml-cpu/amx/amx.h +8 -0
- package/cpp/ggml-cpu/amx/common.h +91 -0
- package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
- package/cpp/ggml-cpu/amx/mmq.h +10 -0
- package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/binary-ops.h +1 -1
- package/cpp/ggml-cpu/common.h +72 -0
- package/cpp/{ggml-cpu-aarch64.cpp → ggml-cpu/ggml-cpu-aarch64.cpp} +809 -103
- package/cpp/{ggml-cpu-quants.c → ggml-cpu/ggml-cpu-quants.c} +306 -6
- package/cpp/{ggml-cpu.c → ggml-cpu/ggml-cpu.c} +114 -55
- package/cpp/{ggml-cpu.cpp → ggml-cpu/ggml-cpu.cpp} +32 -16
- package/cpp/{ops.cpp → ggml-cpu/ops.cpp} +353 -173
- package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/ops.h +2 -20
- package/cpp/{sgemm.cpp → ggml-cpu/sgemm.cpp} +501 -0
- package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/simd-mappings.h +7 -3
- package/{ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers → cpp/ggml-cpu}/unary-ops.h +1 -1
- package/cpp/{vec.cpp → ggml-cpu/vec.cpp} +0 -6
- package/{ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers → cpp/ggml-cpu}/vec.h +16 -0
- package/cpp/ggml-cpu.h +5 -0
- package/cpp/ggml-impl.h +16 -9
- package/cpp/ggml-llama-sim.metallib +0 -0
- package/cpp/ggml-llama.metallib +0 -0
- package/cpp/ggml-metal-impl.h +36 -11
- package/cpp/ggml-metal.m +810 -176
- package/cpp/ggml-opt.cpp +373 -190
- package/cpp/ggml-opt.h +49 -28
- package/cpp/ggml-quants.c +0 -6
- package/cpp/ggml.c +227 -282
- package/cpp/ggml.h +82 -101
- package/cpp/gguf.cpp +33 -33
- package/cpp/json-schema-to-grammar.cpp +3 -0
- package/cpp/llama-adapter.cpp +6 -0
- package/cpp/llama-arch.cpp +49 -17
- package/cpp/llama-arch.h +9 -0
- package/cpp/llama-batch.cpp +8 -2
- package/cpp/llama-batch.h +2 -1
- package/cpp/llama-chat.cpp +39 -16
- package/cpp/llama-chat.h +4 -2
- package/cpp/llama-context.cpp +440 -611
- package/cpp/llama-context.h +44 -33
- package/cpp/llama-cparams.h +1 -0
- package/cpp/llama-graph.cpp +214 -291
- package/cpp/llama-graph.h +69 -21
- package/cpp/llama-hparams.cpp +17 -1
- package/cpp/llama-hparams.h +39 -5
- package/cpp/llama-kv-cache.cpp +2067 -620
- package/cpp/llama-kv-cache.h +410 -108
- package/cpp/llama-memory.h +12 -1
- package/cpp/llama-model-loader.cpp +24 -15
- package/cpp/llama-model-saver.cpp +281 -0
- package/cpp/llama-model-saver.h +37 -0
- package/cpp/llama-model.cpp +1089 -359
- package/cpp/llama-model.h +19 -3
- package/cpp/llama-sampling.cpp +20 -7
- package/cpp/llama-vocab.cpp +54 -9
- package/cpp/llama-vocab.h +6 -0
- package/cpp/llama.cpp +14 -0
- package/cpp/llama.h +86 -142
- package/cpp/minja/chat-template.hpp +9 -5
- package/cpp/minja/minja.hpp +69 -36
- package/cpp/rn-llama.cpp +602 -190
- package/cpp/rn-llama.h +34 -8
- package/cpp/sampling.cpp +57 -50
- package/cpp/tools/mtmd/clip-impl.h +462 -0
- package/cpp/tools/mtmd/clip.cpp +4024 -0
- package/cpp/tools/mtmd/clip.h +101 -0
- package/cpp/tools/mtmd/miniaudio.h +93468 -0
- package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
- package/cpp/tools/mtmd/mtmd-audio.h +62 -0
- package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
- package/cpp/tools/mtmd/mtmd.cpp +942 -0
- package/cpp/tools/mtmd/mtmd.h +362 -0
- package/cpp/tools/mtmd/stb_image.h +7988 -0
- package/ios/CMakeLists.txt +20 -10
- package/ios/RNLlama.h +6 -0
- package/ios/RNLlama.mm +82 -3
- package/ios/RNLlamaContext.h +5 -1
- package/ios/RNLlamaContext.mm +131 -38
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +29 -21
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +82 -101
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +4 -2
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +44 -33
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +69 -21
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +39 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +410 -108
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +12 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +19 -3
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +86 -142
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +34 -8
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +29 -21
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +82 -101
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +4 -2
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +44 -33
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +69 -21
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +39 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +410 -108
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +12 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +19 -3
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +86 -142
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +34 -8
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +29 -21
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +82 -101
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +4 -2
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +44 -33
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +69 -21
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +39 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +410 -108
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +12 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +19 -3
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +86 -142
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +34 -8
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +29 -21
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +82 -101
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +4 -2
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +44 -33
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +69 -21
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +39 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +410 -108
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +12 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +19 -3
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +86 -142
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +34 -8
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/jest/mock.js +33 -7
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/index.js +153 -21
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/index.js +152 -20
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +54 -4
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +72 -6
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +72 -4
- package/src/index.ts +212 -38
- package/cpp/binary-ops.h +0 -16
- package/cpp/ops.h +0 -128
- package/cpp/simd-mappings.h +0 -888
- package/cpp/unary-ops.h +0 -28
- package/cpp/vec.h +0 -802
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +0 -128
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +0 -14
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +0 -802
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +0 -128
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +0 -14
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +0 -28
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +0 -16
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +0 -128
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +0 -888
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
- package/lib/commonjs/chat.js +0 -37
- package/lib/commonjs/chat.js.map +0 -1
- package/lib/module/chat.js +0 -33
- package/lib/module/chat.js.map +0 -1
- package/lib/typescript/chat.d.ts +0 -10
- package/lib/typescript/chat.d.ts.map +0 -1
- package/src/chat.ts +0 -44
- /package/cpp/{binary-ops.cpp → ggml-cpu/binary-ops.cpp} +0 -0
- /package/cpp/{ggml-cpu-aarch64.h → ggml-cpu/ggml-cpu-aarch64.h} +0 -0
- /package/cpp/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -0
- /package/cpp/{ggml-cpu-quants.h → ggml-cpu/ggml-cpu-quants.h} +0 -0
- /package/cpp/{ggml-cpu-traits.cpp → ggml-cpu/ggml-cpu-traits.cpp} +0 -0
- /package/cpp/{ggml-cpu-traits.h → ggml-cpu/ggml-cpu-traits.h} +0 -0
- /package/cpp/{sgemm.h → ggml-cpu/sgemm.h} +0 -0
- /package/cpp/{unary-ops.cpp → ggml-cpu/unary-ops.cpp} +0 -0
package/cpp/common.h
CHANGED
@@ -6,6 +6,7 @@
 
 #include <set>
 #include <string>
+#include <string_view>
 #include <vector>
 #include <sstream>
 
@@ -77,7 +78,6 @@ enum llama_example {
     LLAMA_EXAMPLE_COMMON,
     LLAMA_EXAMPLE_SPECULATIVE,
     LLAMA_EXAMPLE_MAIN,
-    LLAMA_EXAMPLE_INFILL,
     LLAMA_EXAMPLE_EMBEDDING,
     LLAMA_EXAMPLE_PERPLEXITY,
     LLAMA_EXAMPLE_RETRIEVAL,
@@ -87,7 +87,7 @@ enum llama_example {
     LLAMA_EXAMPLE_SERVER,
     LLAMA_EXAMPLE_CVECTOR_GENERATOR,
     LLAMA_EXAMPLE_EXPORT_LORA,
-    LLAMA_EXAMPLE_LLAVA,
+    LLAMA_EXAMPLE_MTMD,
     LLAMA_EXAMPLE_LOOKUP,
     LLAMA_EXAMPLE_PARALLEL,
     LLAMA_EXAMPLE_TTS,
@@ -107,6 +107,7 @@ enum common_sampler_type {
    COMMON_SAMPLER_TYPE_XTC = 8,
    COMMON_SAMPLER_TYPE_INFILL = 9,
    COMMON_SAMPLER_TYPE_PENALTIES = 10,
+    COMMON_SAMPLER_TYPE_TOP_N_SIGMA = 11,
 };
 
 // dimensionality reduction methods, used by cvector-generator
@@ -172,6 +173,7 @@ struct common_params_sampling {
     std::vector<enum common_sampler_type> samplers = {
         COMMON_SAMPLER_TYPE_PENALTIES,
         COMMON_SAMPLER_TYPE_DRY,
+        COMMON_SAMPLER_TYPE_TOP_N_SIGMA,
         COMMON_SAMPLER_TYPE_TOP_K,
         COMMON_SAMPLER_TYPE_TYPICAL_P,
         COMMON_SAMPLER_TYPE_TOP_P,
@@ -336,17 +338,17 @@ struct common_params {
     bool flash_attn = false; // flash attention
     bool no_perf = false; // disable performance metrics
     bool ctx_shift = true; // context shift on inifinite text generation
+    bool swa_full = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
 
     bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
-    bool logits_all = false; // return logits for all tokens in the batch
     bool use_mmap = true; // use mmap for faster loads
     bool use_mlock = false; // use mlock to keep model in memory
     bool verbose_prompt = false; // print prompt tokens before generation
     bool display_prompt = true; // print prompt before generation
-    bool dump_kv_cache = false; // dump the KV cache contents for debugging purposes
     bool no_kv_offload = false; // disable KV offloading
     bool warmup = true; // warmup run
     bool check_tensors = false; // validate tensor data
+    bool no_op_offload = false; // globally disable offload host tensor operations to device
 
     bool single_turn = false; // single turn chat conversation
 
@@ -355,8 +357,10 @@ struct common_params {
 
     common_conversation_mode conversation_mode = COMMON_CONVERSATION_MODE_AUTO;
 
-    // multimodal models (see examples/llava)
+    // multimodal models (see tools/mtmd)
     struct common_params_model mmproj;
+    bool mmproj_use_gpu = true; // use GPU for multimodal model
+    bool no_mmproj = false; // explicitly disable multimodal model
     std::vector<std::string> image; // path to image file(s)
 
     // embedding
@@ -379,6 +383,7 @@ struct common_params {
     bool use_jinja = false; // NOLINT
     bool enable_chat_template = true;
     common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
+    bool prefill_assistant = true; // if true, any trailing assistant message will be prefilled into the response
 
     std::vector<std::string> api_keys;
 
@@ -422,13 +427,14 @@ struct common_params {
 
     bool process_output = false; // collect data for the output tensor
     bool compute_ppl = true; // whether to compute perplexity
+    bool parse_special = false; // whether to parse special tokens during imatrix tokenization
 
     // cvector-generator params
     int n_pca_batch = 100;
     int n_pca_iterations = 1000;
     dimre_method cvector_dimre_method = DIMRE_METHOD_PCA;
-    std::string cvector_positive_file = "examples/cvector-generator/positive.txt";
-    std::string cvector_negative_file = "examples/cvector-generator/negative.txt";
+    std::string cvector_positive_file = "tools/cvector-generator/positive.txt";
+    std::string cvector_negative_file = "tools/cvector-generator/negative.txt";
 
     bool spm_infill = false; // suffix/prefix/middle pattern for infill
 
@@ -437,6 +443,11 @@ struct common_params {
 
     // common params
     std::string out_file; // output filename for all example programs
+    // optional callback for model loading progress and cancellation:
+    // called with a progress value between 0.0 and 1.0.
+    // return false from callback to abort model loading or true to continue
+    llama_progress_callback load_progress_callback = NULL;
+    void * load_progress_callback_user_data = NULL;
 };
 
 // call once at the start of a program if it uses libcommon
@@ -514,10 +525,9 @@ static bool string_starts_with(const std::string & str,
     return str.rfind(prefix, 0) == 0;
 }
 
-// While we wait for C++20's std::string::ends_with...
-static bool string_ends_with(const std::string & str, const std::string & suffix) {
-    return str.size() >= suffix.size() && str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
-}
+// While we wait for C++20's std::string::ends_with...
+bool string_ends_with(const std::string_view & str, const std::string_view & suffix);
+size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop);
 
 bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
 void string_process_escapes(std::string & input);
@@ -558,6 +568,8 @@ struct lm_ggml_threadpool_params lm_ggml_threadpool_params_from_cpu_params(const
 // clear LoRA adapters from context, then apply new list of adapters
 void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);
 
+std::string get_model_endpoint();
+
 //
 // Batch utils
 //
@@ -624,16 +636,6 @@ std::string common_detokenize(
     const std::vector<llama_token> & tokens,
     bool special = true);
 
-//
-// KV cache utils
-//
-
-// Dump the KV cache view with the number of sequences per cell.
-void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);
-
-// Dump the KV cache view showing individual sequences in each cell (long output).
-void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
-
 //
 // Embedding utils
 //
@@ -675,3 +677,9 @@ const char * const LLM_KV_SPLIT_COUNT = "split.count";
 const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
 
 }
+
+//
+// training utils
+//
+
+lm_ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride);
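The new `load_progress_callback` field reuses the `llama_progress_callback` signature from llama.h (`bool (*)(float progress, void * user_data)`). A minimal sketch of wiring it up against the 1.7.0 `common_params`; the callback and cancel flag below are illustrative, not part of the package:

```cpp
#include <cstdio>
#include "common.h" // common_params, llama_progress_callback

// illustrative callback: reports progress and honours an app-side cancel flag
static bool on_load_progress(float progress, void * user_data) {
    const bool * cancel_requested = static_cast<const bool *>(user_data);
    fprintf(stderr, "\rloading model: %3.0f%%", progress * 100.0f);
    return !*cancel_requested; // returning false aborts the load
}

void configure_load_progress(common_params & params, bool * cancel_flag) {
    params.load_progress_callback           = on_load_progress;
    params.load_progress_callback_user_data = cancel_flag;
}
```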
package/cpp/ggml-alloc.c
CHANGED
@@ -816,7 +816,10 @@ static void lm_ggml_gallocr_init_tensor(lm_ggml_gallocr_t galloc, struct lm_ggml
 static bool lm_ggml_gallocr_node_needs_realloc(lm_ggml_gallocr_t galloc, struct lm_ggml_tensor * node, struct tensor_alloc * talloc) {
     size_t node_size = 0;
     if (!node->data && !node->view_src) {
-        LM_GGML_ASSERT(talloc->buffer_id >= 0); // prevent segfault when misusing the API
+        // If we previously had data but don't now then reallocate
+        if (talloc->buffer_id < 0) {
+            return false;
+        }
         node_size = lm_ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
     }
     return talloc->size_max >= node_size;
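The replaced assertion used to abort when a tensor's data pointer disappeared between graph runs; answering "needs realloc" instead lets the caller rebuild its allocation plan. A hedged sketch of the caller-side contract, assuming the usual ggml-alloc entry point:

```cpp
#include "ggml-alloc.h" // lm_ggml_gallocr_alloc_graph

// assumption: lm_ggml_gallocr_alloc_graph() consults the needs-realloc check
// and re-reserves (or reports failure) instead of asserting when a node's
// data was freed since the last reserve
bool alloc_or_realloc(lm_ggml_gallocr_t galloc, struct lm_ggml_cgraph * graph) {
    return lm_ggml_gallocr_alloc_graph(galloc, graph); // false => allocation failed
}
```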
package/cpp/ggml-backend.cpp
CHANGED
@@ -56,7 +56,7 @@ size_t lm_ggml_backend_buft_get_max_size(lm_ggml_backend_buffer_type_t buft) {
     return SIZE_MAX;
 }
 
-size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, struct lm_ggml_tensor * tensor) {
+size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor) {
     // get_alloc_size is optional, defaults to lm_ggml_nbytes
     if (buft->iface.get_alloc_size) {
         size_t size = buft->iface.get_alloc_size(buft, tensor);
@@ -152,7 +152,7 @@ size_t lm_ggml_backend_buffer_get_max_size(lm_ggml_backend_buffer_t buffer) {
     return lm_ggml_backend_buft_get_max_size(lm_ggml_backend_buffer_get_type(buffer));
 }
 
-size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor) {
+size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor) {
     return lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_get_type(buffer), tensor);
 }
 
@@ -674,6 +674,8 @@ struct lm_ggml_backend_sched {
     char * context_buffer;
     size_t context_buffer_size;
 
+    bool op_offload;
+
     int debug;
 };
 
@@ -766,7 +768,7 @@ static int lm_ggml_backend_sched_backend_id_from_cur(lm_ggml_backend_sched_t sch
     if (tensor->op != LM_GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
         int src_backend_id = lm_ggml_backend_sched_backend_from_buffer(sched, src, tensor);
         // check if a backend with higher prio wants to offload the op
-        if (src_backend_id == sched->n_backends - 1 && lm_ggml_backend_buffer_is_host(src->buffer)) {
+        if (sched->op_offload && src_backend_id == sched->n_backends - 1 && lm_ggml_backend_buffer_is_host(src->buffer)) {
             for (int b = 0; b < src_backend_id; b++) {
                 if (lm_ggml_backend_supports_op(sched->backends[b], tensor) && lm_ggml_backend_offload_op(sched->backends[b], tensor)) {
                     SET_CAUSE(tensor, "1.off");
@@ -1109,7 +1111,7 @@ static void lm_ggml_backend_sched_split_graph(lm_ggml_backend_sched_t sched, str
 
         const int node_backend_id = tensor_backend_id(node);
 
-        assert(node_backend_id != -1); // all nodes should be assigned by now
+        assert(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback
 
         // check if we should start a new split based on the sources of the current node
         bool need_new_split = false;
@@ -1452,7 +1454,8 @@ lm_ggml_backend_sched_t lm_ggml_backend_sched_new(
         lm_ggml_backend_buffer_type_t * bufts,
         int n_backends,
         size_t graph_size,
-        bool parallel) {
+        bool parallel,
+        bool op_offload) {
     LM_GGML_ASSERT(n_backends > 0);
     LM_GGML_ASSERT(n_backends <= LM_GGML_SCHED_MAX_BACKENDS);
     LM_GGML_ASSERT(lm_ggml_backend_dev_type(lm_ggml_backend_get_device(backends[n_backends - 1])) == LM_GGML_BACKEND_DEVICE_TYPE_CPU);
@@ -1497,6 +1500,7 @@ lm_ggml_backend_sched_t lm_ggml_backend_sched_new(
     }
 
     sched->galloc = lm_ggml_gallocr_new_n(sched->bufts, n_backends);
+    sched->op_offload = op_offload;
 
     lm_ggml_backend_sched_reset(sched);
 
package/cpp/ggml-backend.h
CHANGED
@@ -38,7 +38,7 @@ extern "C" {
     LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_buft_alloc_buffer (lm_ggml_backend_buffer_type_t buft, size_t size);
     LM_GGML_API size_t lm_ggml_backend_buft_get_alignment (lm_ggml_backend_buffer_type_t buft);
     LM_GGML_API size_t lm_ggml_backend_buft_get_max_size (lm_ggml_backend_buffer_type_t buft);
-    LM_GGML_API size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, struct lm_ggml_tensor * tensor);
+    LM_GGML_API size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
     LM_GGML_API bool lm_ggml_backend_buft_is_host (lm_ggml_backend_buffer_type_t buft);
     LM_GGML_API lm_ggml_backend_dev_t lm_ggml_backend_buft_get_device (lm_ggml_backend_buffer_type_t buft);
 
@@ -59,7 +59,7 @@ extern "C" {
     LM_GGML_API enum lm_ggml_status lm_ggml_backend_buffer_init_tensor (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
     LM_GGML_API size_t lm_ggml_backend_buffer_get_alignment (lm_ggml_backend_buffer_t buffer);
     LM_GGML_API size_t lm_ggml_backend_buffer_get_max_size (lm_ggml_backend_buffer_t buffer);
-    LM_GGML_API size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
+    LM_GGML_API size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor);
     LM_GGML_API void lm_ggml_backend_buffer_clear (lm_ggml_backend_buffer_t buffer, uint8_t value);
     LM_GGML_API bool lm_ggml_backend_buffer_is_host (lm_ggml_backend_buffer_t buffer);
     LM_GGML_API void lm_ggml_backend_buffer_set_usage (lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
@@ -248,7 +248,7 @@ extern "C" {
         // preferrably to run on the same backend as the buffer
         lm_ggml_backend_buffer_set_usage(buf_weights, LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
 
-        sched = lm_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, LM_GGML_DEFAULT_GRAPH_SIZE, false);
+        sched = lm_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, LM_GGML_DEFAULT_GRAPH_SIZE, false, true);
 
         // initialize buffers from a max size graph (optional)
         reserve_graph = build_graph(sched, max_batch_size);
@@ -289,7 +289,7 @@ extern "C" {
     typedef bool (*lm_ggml_backend_sched_eval_callback)(struct lm_ggml_tensor * t, bool ask, void * user_data);
 
     // Initialize a backend scheduler, backends with low index are given priority over backends with high index
-    LM_GGML_API lm_ggml_backend_sched_t lm_ggml_backend_sched_new(lm_ggml_backend_t * backends, lm_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+    LM_GGML_API lm_ggml_backend_sched_t lm_ggml_backend_sched_new(lm_ggml_backend_t * backends, lm_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload);
     LM_GGML_API void lm_ggml_backend_sched_free(lm_ggml_backend_sched_t sched);
 
     // Initialize backend buffers from a measure graph
package/cpp/ggml-cpp.h
CHANGED
@@ -24,7 +24,7 @@ typedef std::unique_ptr<lm_gguf_context, lm_gguf_context_deleter> lm_gguf_contex
 
 struct lm_ggml_gallocr_deleter { void operator()(lm_ggml_gallocr_t galloc) { lm_ggml_gallocr_free(galloc); } };
 
-typedef std::unique_ptr<lm_ggml_gallocr_t, lm_ggml_gallocr_deleter> lm_ggml_gallocr_ptr;
+typedef std::unique_ptr<lm_ggml_gallocr, lm_ggml_gallocr_deleter> lm_ggml_gallocr_ptr;
 
 // ggml-backend
 
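This one-line fix matters because `std::unique_ptr<T, D>::pointer` defaults to `T*`: with the handle typedef (itself a pointer) as `T`, the smart pointer would hold a double pointer that the deleter cannot accept. A reduced illustration with stand-in names:

```cpp
#include <memory>

struct gallocr { };                          // stand-in for the opaque allocator struct
typedef gallocr * gallocr_t;                 // C-style handle typedef
void gallocr_free(gallocr_t g) { delete g; } // stand-in for lm_ggml_gallocr_free

struct gallocr_deleter { void operator()(gallocr_t g) { gallocr_free(g); } };

// pre-fix shape: element type is the handle, so unique_ptr's pointer type is
// gallocr_t* (a gallocr**), which gallocr_deleter cannot be invoked with:
// typedef std::unique_ptr<gallocr_t, gallocr_deleter> gallocr_ptr;

// the fix: element type is the struct, so ::pointer is gallocr* == gallocr_t
typedef std::unique_ptr<gallocr, gallocr_deleter> gallocr_ptr;

int main() { gallocr_ptr p(new gallocr()); } // freed via gallocr_free at scope exit
```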
package/cpp/ggml-cpu/amx/amx.cpp
ADDED
@@ -0,0 +1,221 @@
+#include "amx.h"
+#include "common.h"
+#include "mmq.h"
+#include "ggml-backend-impl.h"
+#include "ggml-backend.h"
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-cpu-traits.h"
+
+#if defined(__gnu_linux__)
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif
+
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+
+// AMX type_trais
+namespace ggml::cpu::amx {
+class tensor_traits : public ggml::cpu::tensor_traits {
+    bool work_size(int /* n_threads */, const struct lm_ggml_tensor * op, size_t & size) override {
+        size = lm_ggml_backend_amx_desired_wsize(op);
+        return true;
+    }
+
+    bool compute_forward(struct lm_ggml_compute_params * params, struct lm_ggml_tensor * op) override {
+        if (op->op == LM_GGML_OP_MUL_MAT) {
+            lm_ggml_backend_amx_mul_mat(params, op);
+            return true;
+        }
+        return false;
+    }
+};
+
+static ggml::cpu::tensor_traits * get_tensor_traits(lm_ggml_backend_buffer_t, struct lm_ggml_tensor *) {
+    static tensor_traits traits;
+    return &traits;
+}
+}  // namespace ggml::cpu::amx
+
+// AMX buffer interface
+static void lm_ggml_backend_amx_buffer_free_buffer(lm_ggml_backend_buffer_t buffer) {
+    free(buffer->context);
+}
+
+static void * lm_ggml_backend_amx_buffer_get_base(lm_ggml_backend_buffer_t buffer) {
+    return (void *) (buffer->context);
+}
+
+static enum lm_ggml_status lm_ggml_backend_amx_buffer_init_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor) {
+    tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor);
+
+    LM_GGML_UNUSED(buffer);
+    return LM_GGML_STATUS_SUCCESS;
+}
+
+static void lm_ggml_backend_amx_buffer_memset_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor,
+                                                     uint8_t value, size_t offset, size_t size) {
+    memset((char *) tensor->data + offset, value, size);
+
+    LM_GGML_UNUSED(buffer);
+}
+
+static void lm_ggml_backend_amx_buffer_set_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor,
+                                                  const void * data, size_t offset, size_t size) {
+    if (qtype_has_amx_kernels(tensor->type)) {
+        LM_GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, lm_ggml_type_name(tensor->type));
+        lm_ggml_backend_amx_convert_weight(tensor, data, offset, size);
+    } else {
+        memcpy((char *) tensor->data + offset, data, size);
+    }
+
+    LM_GGML_UNUSED(buffer);
+}
+
+/*
+// need to figure what we need to do with buffer->extra.
+static void lm_ggml_backend_amx_buffer_get_tensor(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    LM_GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
+    memcpy(data, (const char *)tensor->data + offset, size);
+
+    LM_GGML_UNUSED(buffer);
+}
+
+static bool lm_ggml_backend_amx_buffer_cpy_tensor(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst) {
+    if (lm_ggml_backend_buffer_is_host(src->buffer)) {
+        if (qtype_has_amx_kernels(src->type)) {
+            lm_ggml_backend_amx_convert_weight(dst, src->data, 0, lm_ggml_nbytes(dst));
+        } else {
+            memcpy(dst->data, src->data, lm_ggml_nbytes(src));
+        }
+        return true;
+    }
+    return false;
+
+    LM_GGML_UNUSED(buffer);
+}
+*/
+
+static void lm_ggml_backend_amx_buffer_clear(lm_ggml_backend_buffer_t buffer, uint8_t value) {
+    memset(buffer->context, value, buffer->size);
+}
+
+static lm_ggml_backend_buffer_i lm_ggml_backend_amx_buffer_interface = {
+    /* .free_buffer   = */ lm_ggml_backend_amx_buffer_free_buffer,
+    /* .get_base      = */ lm_ggml_backend_amx_buffer_get_base,
+    /* .init_tensor   = */ lm_ggml_backend_amx_buffer_init_tensor,
+    /* .memset_tensor = */ lm_ggml_backend_amx_buffer_memset_tensor,
+    /* .set_tensor    = */ lm_ggml_backend_amx_buffer_set_tensor,
+    /* .get_tensor    = */ nullptr,
+    /* .cpy_tensor    = */ nullptr,
+    /* .clear         = */ lm_ggml_backend_amx_buffer_clear,
+    /* .reset         = */ nullptr,
+};
+
+static const char * lm_ggml_backend_amx_buffer_type_get_name(lm_ggml_backend_buffer_type_t buft) {
+    return "AMX";
+
+    LM_GGML_UNUSED(buft);
+}
+
+static lm_ggml_backend_buffer_t lm_ggml_backend_amx_buffer_type_alloc_buffer(lm_ggml_backend_buffer_type_t buft, size_t size) {
+    void * data = lm_ggml_aligned_malloc(size);
+    if (data == NULL) {
+        fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
+        return NULL;
+    }
+
+    return lm_ggml_backend_buffer_init(buft, lm_ggml_backend_amx_buffer_interface, data, size);
+}
+
+static size_t lm_ggml_backend_amx_buffer_type_get_alignment(lm_ggml_backend_buffer_type_t buft) {
+    return TENSOR_ALIGNMENT;
+
+    LM_GGML_UNUSED(buft);
+}
+
+namespace ggml::cpu::amx {
+class extra_buffer_type : ggml::cpu::extra_buffer_type {
+    bool supports_op(lm_ggml_backend_dev_t, const struct lm_ggml_tensor * op) override {
+        // handle only 2d gemm for now
+        auto is_contiguous_2d = [](const struct lm_ggml_tensor * t) {
+            return lm_ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
+        };
+
+        if (op->op == LM_GGML_OP_MUL_MAT && is_contiguous_2d(op->src[0]) &&  // src0 must be contiguous
+            is_contiguous_2d(op->src[1]) &&                                  // src1 must be contiguous
+            op->src[0]->buffer && op->src[0]->buffer->buft == lm_ggml_backend_amx_buffer_type() &&
+            op->ne[0] % (TILE_N * 2) == 0 &&                                 // out_features is 32x
+            (qtype_has_amx_kernels(op->src[0]->type) || (op->src[0]->type == LM_GGML_TYPE_F16))) {
+            // src1 must be host buffer
+            if (op->src[1]->buffer && !lm_ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
+                return false;
+            }
+            // src1 must be float32
+            if (op->src[1]->type == LM_GGML_TYPE_F32) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    ggml::cpu::tensor_traits * get_tensor_traits(const struct lm_ggml_tensor * op) override {
+        if (op->op == LM_GGML_OP_MUL_MAT && op->src[0]->buffer &&
+            op->src[0]->buffer->buft == lm_ggml_backend_amx_buffer_type()) {
+            return (ggml::cpu::tensor_traits *) op->src[0]->extra;
+        }
+
+        return nullptr;
+    }
+};
+}  // namespace ggml::cpu::amx
+
+static size_t lm_ggml_backend_amx_buffer_type_get_alloc_size(lm_ggml_backend_buffer_type_t buft, const lm_ggml_tensor * tensor) {
+    return lm_ggml_backend_amx_get_alloc_size(tensor);
+
+    LM_GGML_UNUSED(buft);
+}
+
+#define ARCH_GET_XCOMP_PERM 0x1022
+#define ARCH_REQ_XCOMP_PERM 0x1023
+#define XFEATURE_XTILECFG 17
+#define XFEATURE_XTILEDATA 18
+
+static bool lm_ggml_amx_init() {
+#if defined(__gnu_linux__)
+    if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
+        fprintf(stderr, "AMX is not ready to be used!\n");
+        return false;
+    }
+    return true;
+#elif defined(_WIN32)
+    return true;
+#endif
+}
+
+lm_ggml_backend_buffer_type_t lm_ggml_backend_amx_buffer_type() {
+    static struct lm_ggml_backend_buffer_type lm_ggml_backend_buffer_type_amx = {
+        /* .iface = */ {
+            /* .get_name       = */ lm_ggml_backend_amx_buffer_type_get_name,
+            /* .alloc_buffer   = */ lm_ggml_backend_amx_buffer_type_alloc_buffer,
+            /* .get_alignment  = */ lm_ggml_backend_amx_buffer_type_get_alignment,
+            /* .get_max_size   = */ nullptr,  // defaults to SIZE_MAX
+            /* .get_alloc_size = */ lm_ggml_backend_amx_buffer_type_get_alloc_size,
+            /* .is_host        = */ nullptr,
+        },
+        /* .device  = */ lm_ggml_backend_reg_dev_get(lm_ggml_backend_cpu_reg(), 0),
+        /* .context = */ new ggml::cpu::amx::extra_buffer_type(),
+    };
+
+    if (!lm_ggml_amx_init()) {
+        return nullptr;
+    }
+
+    return &lm_ggml_backend_buffer_type_amx;
+}
+
+#endif  // defined(__AMX_INT8__) && defined(__AVX512VNNI__)
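Tensors only take the AMX path when they live in this buffer type, whose `init_tensor` installs the AMX `tensor_traits` into `tensor->extra`. A hedged usage sketch built on the generic ggml-alloc helper; that `lm_ggml_backend_amx_buffer_type` is declared in this package's amx.h is an assumption based on the file list:

```cpp
#include "ggml.h"
#include "ggml-alloc.h" // lm_ggml_backend_alloc_ctx_tensors_from_buft
#include "amx.h"        // assumed to declare lm_ggml_backend_amx_buffer_type()

// illustrative: allocate all tensors of `ctx` from the AMX buffer type so
// that supported 2-D matmul weights get repacked for the AMX kernels
lm_ggml_backend_buffer_t alloc_amx_weights(struct lm_ggml_context * ctx) {
    lm_ggml_backend_buffer_type_t buft = lm_ggml_backend_amx_buffer_type();
    if (buft == nullptr) {
        return nullptr; // AMX not compiled in, or lm_ggml_amx_init() failed
    }
    return lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
}
```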
package/cpp/ggml-cpu/amx/common.h
ADDED
@@ -0,0 +1,91 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-cpu-impl.h"
+
+#include <algorithm>
+#include <memory>
+#include <type_traits>
+
+#if defined(LM_GGML_USE_OPENMP)
+#include <omp.h>
+#endif
+
+#define TILE_M 16
+#define TILE_N 16
+#define TILE_K 32
+#define VNNI_BLK 4
+
+#define AMX_BLK_SIZE 32
+
+#define TMM0 0
+#define TMM1 1
+#define TMM2 2
+#define TMM3 3
+#define TMM4 4
+#define TMM5 5
+#define TMM6 6
+#define TMM7 7
+
+// parallel routines
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+inline T div_up(T x, T y) { return (x + y - 1) / y; }
+
+template <typename T>
+inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
+#if 0
+    // onednn partition pattern
+    T& n_my = n_end;
+    if (nth <= 1 || n == 0) {
+        n_start = 0;
+        n_my = n;
+    } else {
+        T n1 = div_up(n, nth);
+        T n2 = n1 - 1;
+        T T1 = n - n2 * nth;
+        n_my = ith < T1 ? n1 : n2;
+        n_start = ith <= T1 ? ith*n1 : T1 * n1 + (ith - T1) * n2;
+    }
+    n_end += n_start;
+#else
+    // pytorch aten partition pattern
+    T n_my = div_up(n, nth);
+    n_start = ith * n_my;
+    n_end = std::min(n_start + n_my, n);
+#endif
+}
+
+template <typename func_t>
+inline void parallel_for(int n, const func_t& f) {
+#if defined(LM_GGML_USE_OPENMP)
+#pragma omp parallel
+{
+    int nth = omp_get_num_threads();
+    int ith = omp_get_thread_num();
+    int tbegin, tend;
+    balance211(n, nth, ith, tbegin, tend);
+    f(tbegin, tend);
+}
+#else
+    f(0, n);
+#endif
+}
+
+template <typename func_t>
+inline void parallel_for_ggml(const lm_ggml_compute_params * params, int n, const func_t & f) {
+    int tbegin, tend;
+    balance211(n, params->nth, params->ith, tbegin, tend);
+    f(tbegin, tend);
+}
+
+// quantized types that have AMX support
+inline bool qtype_has_amx_kernels(const enum lm_ggml_type type) {
+    // TODO: fix padding for vnni format
+    return (type == LM_GGML_TYPE_Q4_0) ||
+        (type == LM_GGML_TYPE_Q4_1) ||
+        (type == LM_GGML_TYPE_Q8_0) ||
+        (type == LM_GGML_TYPE_Q4_K) ||
+        (type == LM_GGML_TYPE_Q5_K) ||
+        (type == LM_GGML_TYPE_Q6_K) ||
+        (type == LM_GGML_TYPE_IQ4_XS);
+}