@fugood/llama.node 0.6.3 → 1.0.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +40 -30
- package/README.md +4 -1
- package/lib/binding.js +41 -29
- package/lib/binding.ts +26 -25
- package/package.json +40 -7
- package/scripts/build.js +47 -0
- package/scripts/llama.cpp.patch +109 -0
- package/src/anyascii.c +22223 -0
- package/src/anyascii.h +42 -0
- package/src/tts_utils.cpp +20 -7
- package/src/tts_utils.h +2 -0
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
- package/src/llama.cpp/.github/workflows/build.yml +0 -1078
- package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
- package/src/llama.cpp/.github/workflows/docker.yml +0 -178
- package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
- package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
- package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
- package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
- package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
- package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
- package/src/llama.cpp/.github/workflows/release.yml +0 -739
- package/src/llama.cpp/.github/workflows/server.yml +0 -237
- package/src/llama.cpp/.github/workflows/winget.yml +0 -42
- package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
- package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
- package/src/llama.cpp/cmake/build-info.cmake +0 -64
- package/src/llama.cpp/cmake/common.cmake +0 -35
- package/src/llama.cpp/cmake/git-vars.cmake +0 -22
- package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
- package/src/llama.cpp/common/build-info.cpp.in +0 -4
- package/src/llama.cpp/docs/build.md +0 -561
- package/src/llama.cpp/examples/CMakeLists.txt +0 -43
- package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/batched/batched.cpp +0 -246
- package/src/llama.cpp/examples/chat-13B.bat +0 -57
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
- package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
- package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
- package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
- package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
- package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
- package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
- package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
- package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
- package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
- package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
- package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
- package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
- package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
- package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
- package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
- package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
- package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
- package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
- package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
- package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
- package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
- package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
- package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
- package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
- package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
- package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
- package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
- package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
- package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/simple/simple.cpp +0 -206
- package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
- package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
- package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
- package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
- package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
- package/src/llama.cpp/examples/sycl/build.sh +0 -23
- package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
- package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
- package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
- package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
- package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
- package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
- package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/training/finetune.cpp +0 -96
- package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
- package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
- package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
- package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
- package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
- package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
- package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
- package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
- package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
- package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
- package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
- package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
- package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
- package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
- package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
- package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
- package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
- package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
- package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
- package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
- package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
- package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
- package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
- package/src/llama.cpp/ggml/src/ggml.c +0 -6550
- package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
- package/src/llama.cpp/models/.editorconfig +0 -1
- package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
- package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
- package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
- package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
- package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
- package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
- package/src/llama.cpp/prompts/alpaca.txt +0 -1
- package/src/llama.cpp/prompts/assistant.txt +0 -31
- package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
- package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
- package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
- package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
- package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
- package/src/llama.cpp/prompts/chat.txt +0 -28
- package/src/llama.cpp/prompts/dan-modified.txt +0 -1
- package/src/llama.cpp/prompts/dan.txt +0 -1
- package/src/llama.cpp/prompts/mnemonics.txt +0 -93
- package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
- package/src/llama.cpp/prompts/reason-act.txt +0 -18
- package/src/llama.cpp/requirements/requirements-all.txt +0 -15
- package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
- package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
- package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
- package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
- package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
- package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
- package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
- package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
- package/src/llama.cpp/requirements.txt +0 -13
- package/src/llama.cpp/scripts/build-info.sh +0 -30
- package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
- package/src/llama.cpp/scripts/xxd.cmake +0 -16
- package/src/llama.cpp/tests/CMakeLists.txt +0 -177
- package/src/llama.cpp/tests/get-model.cpp +0 -21
- package/src/llama.cpp/tests/get-model.h +0 -2
- package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
- package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
- package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
- package/src/llama.cpp/tests/test-barrier.cpp +0 -94
- package/src/llama.cpp/tests/test-c.c +0 -7
- package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
- package/src/llama.cpp/tests/test-chat.cpp +0 -985
- package/src/llama.cpp/tests/test-double-float.cpp +0 -57
- package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
- package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
- package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
- package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
- package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
- package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
- package/src/llama.cpp/tests/test-log.cpp +0 -39
- package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
- package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
- package/src/llama.cpp/tests/test-opt.cpp +0 -904
- package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
- package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
- package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
- package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
- package/src/llama.cpp/tests/test-rope.cpp +0 -262
- package/src/llama.cpp/tests/test-sampling.cpp +0 -399
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
- package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
- package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
- package/src/llama.cpp/tools/CMakeLists.txt +0 -39
- package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
- package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
- package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
- package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
- package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
- package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
- package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
- package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
- package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
- package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
- package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
- package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/main/main.cpp +0 -977
- package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
- package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
- package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
- package/src/llama.cpp/tools/mtmd/clip.h +0 -101
- package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
- package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
- package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
- package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
- package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
- package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
- package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
- package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
- package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
- package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
- package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
- package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
- package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
- package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
- package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
- package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
- package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
- package/src/llama.cpp/tools/run/run.cpp +0 -1261
- package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
- package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
- package/src/llama.cpp/tools/server/httplib.h +0 -10506
- package/src/llama.cpp/tools/server/server.cpp +0 -4966
- package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
- package/src/llama.cpp/tools/server/utils.hpp +0 -1337
- package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
- package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp
@@ -1,264 +0,0 @@
-#include "softmax.hpp"
-
-template <bool vals_smem, int ncols_template, int block_size_template, typename T>
-static void soft_max_f32(const float * x, const T * mask, float * dst, const int ncols_par,
-                         const int nrows_y, const float scale, const float max_bias, const float m0,
-                         const float m1, uint32_t n_head_log2, const sycl::nd_item<3> &item_ct1, float *buf) {
-    const int ncols = ncols_template == 0 ? ncols_par : ncols_template;
-
-    const int tid = item_ct1.get_local_id(2);
-    const int rowx = item_ct1.get_group(2);
-    const int rowy = rowx % nrows_y; // broadcast the mask (y) in the row dimension
-
-    const int block_size = block_size_template == 0 ? item_ct1.get_local_range(2) : block_size_template;
-
-    const int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
-    const int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
-    const int nthreads = block_size;
-    const int nwarps = nthreads / WARP_SIZE;
-    size_t nreduce = nwarps / WARP_SIZE;
-    float slope = 1.0f;
-
-    // ALiBi
-    if (max_bias > 0.0f) {
-        const uint32_t h = rowx/nrows_y; // head index
-
-        const float base = h < n_head_log2 ? m0 : m1;
-        const int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
-
-        slope = sycl::pow(base, float(exp));
-    }
-
-    float *vals = vals_smem ? buf + sycl::max(nwarps, WARP_SIZE) : dst + rowx * ncols;
-    float max_val = -INFINITY;
-
-    for (int col0 = 0; col0 < ncols; col0 += block_size) {
-        const int col = col0 + tid;
-
-        if (ncols_template == 0 && col >= ncols) {
-            break;
-        }
-
-        const int ix = rowx*ncols + col;
-        const int iy = rowy*ncols + col;
-
-        const float val = x[ix]*scale + (mask ? slope*static_cast<float>(mask[iy]) : 0.0f);
-
-        vals[col] = val;
-        max_val = sycl::max(max_val, val);
-    }
-
-    // find the max value in the block
-    max_val = warp_reduce_max(max_val, item_ct1);
-    if (block_size > WARP_SIZE) {
-        if (warp_id == 0) {
-            buf[lane_id] = -INFINITY;
-            for (size_t i = 1; i < nreduce; i += 1) {
-                buf[lane_id + i * WARP_SIZE] = -INFINITY;
-            }
-        }
-        item_ct1.barrier(sycl::access::fence_space::local_space);
-
-        if (lane_id == 0) {
-            buf[warp_id] = max_val;
-        }
-        item_ct1.barrier(sycl::access::fence_space::local_space);
-        max_val = buf[lane_id];
-        for (size_t i = 1; i < nreduce; i += 1) {
-            max_val = sycl::max(max_val, buf[lane_id + i * WARP_SIZE]);
-        }
-        max_val = warp_reduce_max(max_val, item_ct1);
-    }
-
-    float tmp = 0.f;
-#pragma unroll
-    for (int col0 = 0; col0 < ncols; col0 += block_size) {
-        const int col = col0 + tid;
-        if (ncols_template == 0 && col >= ncols) {
-            break;
-        }
-
-        const float val = sycl::native::exp(vals[col] - max_val);
-        tmp += val;
-        vals[col] = val;
-    }
-
-    // find the sum of exps in the block
-    tmp = warp_reduce_sum(tmp, item_ct1);
-    if (block_size > WARP_SIZE) {
-        item_ct1.barrier(sycl::access::fence_space::local_space);
-        if (warp_id == 0) {
-            buf[lane_id] = 0.f;
-            for (size_t i = 1; i < nreduce; i += 1) {
-                buf[lane_id + i * WARP_SIZE] = 0.f;
-            }
-        }
-        item_ct1.barrier(sycl::access::fence_space::local_space);
-
-        if (lane_id == 0) {
-            buf[warp_id] = tmp;
-        }
-        item_ct1.barrier(sycl::access::fence_space::local_space);
-
-        tmp = buf[lane_id];
-        for (size_t i = 1; i < nreduce; i += 1) {
-            tmp += buf[lane_id + i * WARP_SIZE];
-        }
-        tmp = warp_reduce_sum(tmp, item_ct1);
-    }
-
-    const float inv_sum = 1.f / tmp;
-
-#pragma unroll
-    for (int col0 = 0; col0 < ncols; col0 += block_size) {
-        const int col = col0 + tid;
-
-        if (ncols_template == 0 && col >= ncols) {
-            return;
-        }
-
-        const int idst = rowx*ncols + col;
-        dst[idst] = vals[col] * inv_sum;
-    }
-}
-
-template <bool vals_smem, int ncols_template, int block_size_template, typename T>
-static void soft_max_f32_submitter(const float * x, const T * mask, float * dst, const int ncols_par,
-                                   const int nrows_y, const float scale, const float max_bias, const float m0,
-                                   const float m1, uint32_t n_head_log2, sycl::range<3> block_nums, sycl::range<3> block_dims,
-                                   const size_t n_local_scratch, queue_ptr stream) {
-    stream->submit([&](sycl::handler &cgh) {
-        sycl::local_accessor<float, 1> local_buf_acc(n_local_scratch, cgh);
-
-        cgh.parallel_for(
-            sycl::nd_range<3>(block_nums * block_dims, block_dims),
-            [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
-                soft_max_f32<vals_smem, ncols_template, block_size_template>(x, mask, dst, ncols_par,
-                                                                             nrows_y, scale, max_bias, m0,
-                                                                             m1, n_head_log2, item_ct1,
-                                                                             get_pointer(local_buf_acc));
-            });
-    });
-}
-
-template<typename T>
-static void soft_max_f32_sycl(const float * x, const T * mask,
-                              float * dst, const int ncols_x, const int nrows_x,
-                              const int nrows_y, const float scale, const float max_bias,
-                              queue_ptr stream, int device) {
-    int nth = WARP_SIZE;
-    int max_block_size = ggml_sycl_info().max_work_group_sizes[device];
-    while (nth < ncols_x && nth < max_block_size) nth *= 2;
-    if (nth>max_block_size) nth = max_block_size;
-
-    const sycl::range<3> block_dims(1, 1, nth);
-    const sycl::range<3> block_nums(1, 1, nrows_x);
-    const size_t n_val_tmp = nth / WARP_SIZE;
-    const size_t n_local_scratch = (GGML_PAD(ncols_x, WARP_SIZE) + n_val_tmp);
-
-    const uint32_t n_head_kv = nrows_x/nrows_y;
-    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
-
-    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
-    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
-
-    const size_t local_mem_size = stream->get_device().get_info<sycl::info::device::local_mem_size>();
-    if (n_local_scratch*sizeof(float) < local_mem_size) {
-        if (ncols_x > max_block_size) {
-            soft_max_f32_submitter<true, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
-                                               max_bias, m0, m1, n_head_log2, block_nums,
-                                               block_dims, n_local_scratch, stream);
-            return;
-        }
-        switch (ncols_x) {
-            case 32:
-                soft_max_f32_submitter<true, 32, 32>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                     max_bias, m0, m1, n_head_log2, block_nums,
-                                                     block_dims, n_local_scratch, stream);
-                break;
-            case 64:
-                soft_max_f32_submitter<true, 64, 64>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                     max_bias, m0, m1, n_head_log2, block_nums,
-                                                     block_dims, n_local_scratch, stream);
-                break;
-            case 128:
-                soft_max_f32_submitter<true, 128, 128>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                       max_bias, m0, m1, n_head_log2, block_nums,
-                                                       block_dims, n_local_scratch, stream);
-                break;
-            case 256:
-                soft_max_f32_submitter<true, 256, 256>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                       max_bias, m0, m1, n_head_log2, block_nums,
-                                                       block_dims, n_local_scratch, stream);
-                break;
-            case 512:
-                soft_max_f32_submitter<true, 512, 512>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                       max_bias, m0, m1, n_head_log2, block_nums,
-                                                       block_dims, n_local_scratch, stream);
-                break;
-            case 1024:
-                soft_max_f32_submitter<true, 1024, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                         max_bias, m0, m1, n_head_log2, block_nums,
-                                                         block_dims, n_local_scratch, stream);
-                break;
-            case 2048:
-                soft_max_f32_submitter<true, 2048, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                         max_bias, m0, m1, n_head_log2, block_nums,
-                                                         block_dims, n_local_scratch, stream);
-                break;
-            case 4096:
-                soft_max_f32_submitter<true, 4096, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                         max_bias, m0, m1, n_head_log2, block_nums,
-                                                         block_dims, n_local_scratch, stream);
-                break;
-            default:
-                soft_max_f32_submitter<true, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
-                                                   max_bias, m0, m1, n_head_log2, block_nums,
-                                                   block_dims, n_local_scratch, stream);
-                break;
-        }
-    } else {
-        soft_max_f32_submitter<false, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
-                                            max_bias, m0, m1, n_head_log2, block_nums,
-                                            block_dims, WARP_SIZE, stream);
-    }
-}
-
-void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-
-    GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
-    GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
-    GGML_ASSERT(!dst->src[1] || dst->src[1]->type == GGML_TYPE_F16 || dst->src[1]->type == GGML_TYPE_F32); // src1 contains mask and it is optional
-
-    const int64_t ne00 = dst->src[0]->ne[0];
-    const int64_t nrows_x = ggml_nrows(dst->src[0]);
-    const int64_t nrows_y = dst->src[0]->ne[1];
-
-    float scale = 1.0f;
-    float max_bias = 0.0f;
-
-    memcpy(&scale, dst->op_params + 0, sizeof(float));
-    memcpy(&max_bias, dst->op_params + 1, sizeof(float));
-
-    const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
-    float * dst_dd = static_cast<float *>(dst->data);
-
-    ggml_sycl_set_device(ctx.device);
-    dpct::queue_ptr main_stream = ctx.stream();
-
-    if (dst->src[1] && dst->src[1]->type == GGML_TYPE_F16) {
-        const sycl::half * src1_dd = static_cast<sycl::half *>(dst->src[1]->data);
-        GGML_SYCL_DEBUG("%s: F16 mask\n", __func__);
-        soft_max_f32_sycl<sycl::half>(src0_dd, src1_dd, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias,
-                                      main_stream, ctx.device);
-    } else if (dst->src[1] && dst->src[1]->type == GGML_TYPE_F32) {
-        const float * src1_dd = static_cast<const float *>(dst->src[1]->data);
-        GGML_SYCL_DEBUG("%s: F32 mask\n", __func__);
-        soft_max_f32_sycl<float>(src0_dd, src1_dd, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device);
-    } else {
-        /* mask unavailable */
-        GGML_SYCL_DEBUG("%s: No mask\n", __func__);
-        soft_max_f32_sycl<float>(src0_dd, nullptr, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device);
-    }
-}
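For context (not part of the package diff itself): the deleted kernel computes the standard numerically stable soft-max over each row, with an optional additive mask that may be scaled by an ALiBi slope. A minimal scalar sketch of that per-row computation follows; it is illustrative only, not the SYCL kernel, and the helper name soft_max_row is hypothetical.

// Reference sketch: numerically stable softmax over one row, with the optional
// mask + slope handling used by the deleted SYCL kernel. Illustrative only.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

static void soft_max_row(const float * x, const float * mask, float * dst,
                         size_t ncols, float scale, float slope) {
    // 1) scaled input plus optional mask contribution, tracking the row maximum
    std::vector<float> vals(ncols);
    float max_val = -INFINITY;
    for (size_t c = 0; c < ncols; ++c) {
        vals[c] = x[c] * scale + (mask ? slope * mask[c] : 0.0f);
        max_val = std::max(max_val, vals[c]);
    }
    // 2) exponentiate relative to the maximum (avoids overflow) and accumulate the sum
    float sum = 0.0f;
    for (size_t c = 0; c < ncols; ++c) {
        vals[c] = std::exp(vals[c] - max_val);
        sum += vals[c];
    }
    // 3) normalize so the row sums to 1
    const float inv_sum = 1.0f / sum;
    for (size_t c = 0; c < ncols; ++c) {
        dst[c] = vals[c] * inv_sum;
    }
}

The SYCL version parallelizes exactly these three passes across a work-group per row, using warp and local-memory reductions for the max and the sum.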
package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp
@@ -1,20 +0,0 @@
-//
-// MIT license
-// Copyright (C) 2024 Intel Corporation
-// SPDX-License-Identifier: MIT
-//
-
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-
-#ifndef GGML_SYCL_SOFTMAX_HPP
-#define GGML_SYCL_SOFTMAX_HPP
-
-#include "common.hpp"
-
-void ggml_sycl_op_soft_max(ggml_backend_sycl_context &ctx, ggml_tensor *dst);
-
-#endif // GGML_SYCL_SOFTMAX_HPP
package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp
@@ -1,13 +0,0 @@
-#include "sycl_hw.hpp"
-
-
-sycl_hw_info get_device_hw_info(sycl::device *device_ptr) {
-  sycl_hw_info res;
-  int32_t id = device_ptr->get_info<sycl::ext::intel::info::device::device_id>();
-  res.device_id = id;
-
-  syclex::architecture arch = device_ptr->get_info<syclex::info::device::architecture>();
-  res.arch = arch;
-
-  return res;
-}
package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp
@@ -1,23 +0,0 @@
-#ifndef SYCL_HW_HPP
-#define SYCL_HW_HPP
-
-#include <algorithm>
-#include <stdio.h>
-#include <vector>
-#include <map>
-
-#include <sycl/sycl.hpp>
-
-namespace syclex = sycl::ext::oneapi::experimental;
-
-struct sycl_hw_info {
-  syclex::architecture arch;
-  int32_t device_id;
-};
-
-bool is_in_vector(std::vector<int> &vec, int item);
-
-sycl_hw_info get_device_hw_info(sycl::device *device_ptr);
-
-
-#endif // SYCL_HW_HPP
package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp
@@ -1,73 +0,0 @@
-//
-// MIT license
-// Copyright (C) 2024 Intel Corporation
-// SPDX-License-Identifier: MIT
-//
-
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-
-#include "tsembd.hpp"
-
-static void timestep_embedding_f32(
-        const float * timesteps, float * dst, const int nb1,
-        const int dim, const int max_period, const sycl::nd_item<3> &item_ct1) {
-    // item_ct1.get_group(1)(blockIDx.y): idx of timesteps->ne[0]
-    // item_ct1.get_group(2) (blockIDx.x): idx of ((dim + 1) / 2) / BLOCK_SIZE
-    int i = item_ct1.get_group(1);
-    int j = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2);
-    float * embed_data = (float *)((char *)dst + i*nb1);
-
-    if (dim % 2 != 0 && j == ((dim + 1) / 2)) {
-        embed_data[dim] = 0.f;
-    }
-
-    int half = dim / 2;
-    if (j >= half) {
-        return;
-    }
-
-    float timestep = timesteps[i];
-    float freq = (float)sycl::native::exp(-(sycl::log((float)max_period)) * j / half);
-    float arg = timestep * freq;
-    embed_data[j] = sycl::cos(arg);
-    embed_data[j + half] = sycl::sin(arg);
-}
-
-static void timestep_embedding_f32_sycl(
-        const float * x, float * dst, const int ne00, const int nb1,
-        const int dim, const int max_period, const queue_ptr& stream) {
-    // As the kernel returns when thread.idx is larger than dim/2, the half_ceil does not need to pad
-    int half_ceil = dim / 2;
-    int num_blocks = (half_ceil + SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE - 1) / SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE;
-    sycl::range<3> block_dims(1, 1, SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE);
-    sycl::range<3> gridDim(1, ne00, num_blocks);
-    stream->parallel_for(
-        sycl::nd_range<3>(
-            gridDim * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) {
-            timestep_embedding_f32(
-                x, dst, nb1, dim, max_period, item_ct1
-            );
-        });
-}
-
-void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    const ggml_tensor *src0 = dst->src[0];
-    const ggml_tensor *src1 = dst->src[1];
-    const float * src0_d = (const float *)src0->data;
-    float * dst_d = (float *)dst->data;
-    dpct::queue_ptr stream = ctx.stream();
-
-    GGML_ASSERT(src0->type == GGML_TYPE_F32);
-    GGML_ASSERT(dst->type == GGML_TYPE_F32);
-
-    const int dim = dst->op_params[0];
-    const int max_period = dst->op_params[1];
-
-    timestep_embedding_f32_sycl(src0_d, dst_d, src0->ne[0], dst->nb[1], dim, max_period, stream);
-    GGML_UNUSED(src1);
-}
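For context (not part of the package diff itself): the deleted kernel fills each output row with a sinusoidal timestep embedding, cos(t * freq_j) in the first half and sin(t * freq_j) in the second half, where freq_j = exp(-ln(max_period) * j / half). A minimal scalar sketch of one row follows; it is illustrative only, and the helper name timestep_embedding_row is hypothetical.

// Reference sketch: scalar equivalent of the deleted SYCL timestep-embedding kernel.
#include <cmath>

static void timestep_embedding_row(float timestep, float * embed_data,
                                   int dim, int max_period) {
    const int half = dim / 2;
    for (int j = 0; j < half; ++j) {
        const float freq = std::exp(-std::log((float) max_period) * j / half);
        const float arg  = timestep * freq;
        embed_data[j]        = std::cos(arg);  // first half: cosine terms
        embed_data[j + half] = std::sin(arg);  // second half: sine terms
    }
    if (dim % 2 != 0) {
        embed_data[dim] = 0.0f;  // mirrors the kernel's zero padding for odd dims
    }
}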
package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp
@@ -1,20 +0,0 @@
-//
-// MIT license
-// Copyright (C) 2024 Intel Corporation
-// SPDX-License-Identifier: MIT
-//
-
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-
-#ifndef GGML_SYCL_TSEMBD_HPP
-#define GGML_SYCL_TSEMBD_HPP
-
-#include "common.hpp"
-
-void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
-
-#endif // GGML_SYCL_TSEMBD_HPP