@fugood/llama.node 0.6.3 → 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (377)
  1. package/CMakeLists.txt +40 -30
  2. package/README.md +4 -1
  3. package/lib/binding.js +41 -29
  4. package/lib/binding.ts +26 -25
  5. package/package.json +45 -7
  6. package/scripts/build.js +47 -0
  7. package/scripts/llama.cpp.patch +109 -0
  8. package/src/anyascii.c +22223 -0
  9. package/src/anyascii.h +42 -0
  10. package/src/tts_utils.cpp +20 -7
  11. package/src/tts_utils.h +2 -0
  12. package/bin/darwin/arm64/llama-node.node +0 -0
  13. package/bin/darwin/x64/llama-node.node +0 -0
  14. package/bin/linux/arm64/llama-node.node +0 -0
  15. package/bin/linux/x64/llama-node.node +0 -0
  16. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  17. package/bin/linux-cuda/x64/llama-node.node +0 -0
  18. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  19. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  20. package/bin/win32/x64/llama-node.node +0 -0
  21. package/bin/win32/x64/node.lib +0 -0
  22. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  23. package/bin/win32-vulkan/arm64/node.lib +0 -0
  24. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  25. package/bin/win32-vulkan/x64/node.lib +0 -0
  26. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
  27. package/src/llama.cpp/.github/workflows/build.yml +0 -1078
  28. package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
  29. package/src/llama.cpp/.github/workflows/docker.yml +0 -178
  30. package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
  31. package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
  32. package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
  33. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
  34. package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
  35. package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
  36. package/src/llama.cpp/.github/workflows/release.yml +0 -739
  37. package/src/llama.cpp/.github/workflows/server.yml +0 -237
  38. package/src/llama.cpp/.github/workflows/winget.yml +0 -42
  39. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
  40. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
  41. package/src/llama.cpp/cmake/build-info.cmake +0 -64
  42. package/src/llama.cpp/cmake/common.cmake +0 -35
  43. package/src/llama.cpp/cmake/git-vars.cmake +0 -22
  44. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
  45. package/src/llama.cpp/common/build-info.cpp.in +0 -4
  46. package/src/llama.cpp/docs/build.md +0 -561
  47. package/src/llama.cpp/examples/CMakeLists.txt +0 -43
  48. package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
  49. package/src/llama.cpp/examples/batched/batched.cpp +0 -246
  50. package/src/llama.cpp/examples/chat-13B.bat +0 -57
  51. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
  53. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
  54. package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
  55. package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
  56. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
  57. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
  58. package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
  59. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
  60. package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
  61. package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
  62. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
  63. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
  64. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
  65. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
  66. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
  67. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
  68. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
  69. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
  70. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
  71. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
  72. package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
  73. package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
  74. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
  75. package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
  76. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
  77. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
  78. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
  79. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
  80. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
  81. package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
  82. package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
  83. package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
  84. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
  85. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
  86. package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
  87. package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
  88. package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
  89. package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
  90. package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
  91. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
  92. package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
  93. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
  94. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
  95. package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
  96. package/src/llama.cpp/examples/simple/simple.cpp +0 -206
  97. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
  98. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
  99. package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
  100. package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
  101. package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
  102. package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
  103. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
  104. package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
  105. package/src/llama.cpp/examples/sycl/build.sh +0 -23
  106. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
  107. package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
  108. package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
  109. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
  110. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
  111. package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
  112. package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
  113. package/src/llama.cpp/examples/training/finetune.cpp +0 -96
  114. package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
  115. package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
  116. package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
  117. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
  118. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
  119. package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
  120. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
  121. package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
  122. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
  123. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
  124. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
  125. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
  126. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
  127. package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
  128. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
  129. package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
  130. package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
  131. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
  132. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
  133. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
  134. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
  135. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
  136. package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
  137. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  138. package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  139. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
  140. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
  141. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
  142. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
  143. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
  144. package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
  145. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
  146. package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
  147. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
  148. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
  149. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
  150. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
  151. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
  152. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
  153. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
  154. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
  155. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
  156. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
  157. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
  158. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
  159. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
  160. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
  161. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
  162. package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
  163. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
  164. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
  165. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
  166. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
  167. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
  168. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
  169. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
  170. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
  171. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
  172. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
  173. package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
  174. package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
  175. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
  176. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
  177. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
  178. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
  179. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
  180. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
  181. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
  182. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
  183. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
  184. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
  185. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
  186. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
  187. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
  188. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
  189. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
  190. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
  191. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
  192. package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
  193. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
  194. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
  195. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
  196. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
  197. package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
  198. package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
  199. package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
  200. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
  201. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
  202. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
  203. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
  204. package/src/llama.cpp/ggml/src/ggml.c +0 -6550
  205. package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
  206. package/src/llama.cpp/models/.editorconfig +0 -1
  207. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  208. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  209. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  210. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
  211. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
  212. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  213. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  214. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  215. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
  216. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
  217. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  218. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
  219. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
  220. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  221. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
  222. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
  223. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  225. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  226. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
  227. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
  228. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  229. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
  230. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
  231. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  232. package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  233. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  234. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  235. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
  236. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
  237. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  238. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
  239. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
  240. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  241. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  242. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
  245. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
  248. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  249. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  250. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
  252. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
  253. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
  255. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
  256. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  257. package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
  258. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  259. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
  260. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
  261. package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
  262. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
  263. package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
  264. package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
  265. package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
  266. package/src/llama.cpp/prompts/alpaca.txt +0 -1
  267. package/src/llama.cpp/prompts/assistant.txt +0 -31
  268. package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
  269. package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
  270. package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
  271. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
  272. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
  273. package/src/llama.cpp/prompts/chat.txt +0 -28
  274. package/src/llama.cpp/prompts/dan-modified.txt +0 -1
  275. package/src/llama.cpp/prompts/dan.txt +0 -1
  276. package/src/llama.cpp/prompts/mnemonics.txt +0 -93
  277. package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
  278. package/src/llama.cpp/prompts/reason-act.txt +0 -18
  279. package/src/llama.cpp/requirements/requirements-all.txt +0 -15
  280. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
  281. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
  282. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
  283. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
  284. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
  285. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
  286. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
  287. package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
  288. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
  289. package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
  290. package/src/llama.cpp/requirements.txt +0 -13
  291. package/src/llama.cpp/scripts/build-info.sh +0 -30
  292. package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
  293. package/src/llama.cpp/scripts/xxd.cmake +0 -16
  294. package/src/llama.cpp/tests/CMakeLists.txt +0 -177
  295. package/src/llama.cpp/tests/get-model.cpp +0 -21
  296. package/src/llama.cpp/tests/get-model.h +0 -2
  297. package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
  298. package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
  299. package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
  300. package/src/llama.cpp/tests/test-barrier.cpp +0 -94
  301. package/src/llama.cpp/tests/test-c.c +0 -7
  302. package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
  303. package/src/llama.cpp/tests/test-chat.cpp +0 -985
  304. package/src/llama.cpp/tests/test-double-float.cpp +0 -57
  305. package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
  306. package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
  307. package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
  308. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
  309. package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
  310. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
  311. package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
  312. package/src/llama.cpp/tests/test-log.cpp +0 -39
  313. package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
  314. package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
  315. package/src/llama.cpp/tests/test-opt.cpp +0 -904
  316. package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
  317. package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
  318. package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
  319. package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
  320. package/src/llama.cpp/tests/test-rope.cpp +0 -262
  321. package/src/llama.cpp/tests/test-sampling.cpp +0 -399
  322. package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
  323. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
  324. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
  325. package/src/llama.cpp/tools/CMakeLists.txt +0 -39
  326. package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
  327. package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
  328. package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
  329. package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
  330. package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
  331. package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
  332. package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
  333. package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
  334. package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
  335. package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
  336. package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
  337. package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
  338. package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
  339. package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
  340. package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
  341. package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
  342. package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
  343. package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
  344. package/src/llama.cpp/tools/main/main.cpp +0 -977
  345. package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
  346. package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
  347. package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
  348. package/src/llama.cpp/tools/mtmd/clip.h +0 -101
  349. package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
  350. package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
  351. package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
  352. package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
  353. package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
  354. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
  355. package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
  356. package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
  357. package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
  358. package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
  359. package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
  360. package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
  361. package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
  362. package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
  363. package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
  364. package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
  365. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
  366. package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
  367. package/src/llama.cpp/tools/run/run.cpp +0 -1261
  368. package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
  369. package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
  370. package/src/llama.cpp/tools/server/httplib.h +0 -10506
  371. package/src/llama.cpp/tools/server/server.cpp +0 -4966
  372. package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
  373. package/src/llama.cpp/tools/server/utils.hpp +0 -1337
  374. package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
  375. package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
  376. package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
  377. package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp
@@ -1,1536 +0,0 @@
- #include "common.hpp"
- #include "ggml.h"
- #include "element_wise.hpp"
-
- static void acc_f32(const float * x, const float * y, float * dst, const int ne,
- const int ne10, const int ne11, const int ne12,
- const int nb1, const int nb2, int offset, const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
- if (i >= ne) {
- return;
- }
- int src1_idx = i - offset;
- int oz = src1_idx / nb2;
- int oy = (src1_idx - (oz * nb2)) / nb1;
- int ox = src1_idx % nb1;
- if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) {
- dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11];
- } else {
- dst[i] = x[i];
- }
- }
-
- template<typename T>
- static void sgn(const T * x, T * dst, const int k, const sycl::nd_item<3> &item_ct1) {
- for(auto i = item_ct1.get_global_id(2); i < (const size_t)k; i += item_ct1.get_global_range(2)) {
- dst[i] = x[i] > static_cast<T>(0.f) ? static_cast<T>(1.f) : ((x[i] < static_cast<T>(0.f) ? static_cast<T>(-1.f) : static_cast<T>(0.f)));
- }
- }
-
- template<typename T>
- static void abs_op(const T * x, T * dst, const int k, const sycl::nd_item<3> &item_ct1) {
- for(auto i = item_ct1.get_global_id(2); i < (const size_t)k; i += item_ct1.get_global_range(2)) {
- dst[i] = sycl::fabs(x[i]);
- }
- }
-
- template<typename T>
- static void elu_op(const T * x, T * dst, const int k, const sycl::nd_item<3> &item_ct1) {
- for(auto i = item_ct1.get_global_id(2); i < (const size_t)k; i += item_ct1.get_global_range(2)) {
- dst[i] = (x[i] > static_cast<T>(0.f)) ? x[i] : sycl::expm1(x[i]);
- }
- }
-
- template<typename T>
- static void gelu(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const T GELU_COEF_A = static_cast<T>(0.044715f);
- const T SQRT_2_OVER_PI = static_cast<T>(0.79788456080286535587989211986876f);
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
-
- float xi = x[i];
- dst[i] = static_cast<T>(0.5f) * xi *
- (static_cast<T>(1.0f) +
- sycl::tanh(SQRT_2_OVER_PI * xi * (static_cast<T>(1.0f) + GELU_COEF_A * xi * xi)));
- }
-
- template<typename T>
- static void silu(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = x[i] / (static_cast<T>(1.0f) + sycl::native::exp(-x[i]));
- }
-
- template<typename T>
- static void gelu_quick(const T *x, T *dst, int k,
- const sycl::nd_item<3> &item_ct1) {
- const float GELU_QUICK_COEF = -1.702f;
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
- if (i >= k) {
- return;
- }
- dst[i] = x[i] * (static_cast<T>(1.0f) / (static_cast<T>(1.0f) + sycl::native::exp(GELU_QUICK_COEF * x[i])));
- }
-
- template<typename T>
- static void tanh(const T *x, T *dst, int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
- if (i >= k) {
- return;
- }
- dst[i] = sycl::tanh((x[i]));
- }
-
- template<typename T>
- static void relu(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = sycl::fmax((x[i]), static_cast<T>(0));
- }
-
- template<typename T>
- static void sigmoid(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = 1.0f / (static_cast<T>(1.0f) + sycl::native::exp(-x[i]));
- }
-
- template<typename T>
- static void sqrt(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = sycl::sqrt(x[i]);
- }
-
- template<typename T>
- static void sin(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = sycl::sin(x[i]);
- }
-
- template<typename T>
- static void cos(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = sycl::cos(x[i]);
- }
-
- template<typename T>
- static void hardsigmoid(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = sycl::fmin(static_cast<T>(1.0f), sycl::fmax(static_cast<T>(0.0f), (x[i] + static_cast<T>(3.0f)) / static_cast<T>(6.0f)));
- }
-
- template<typename T>
- static void hardswish(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = x[i] * sycl::fmin(static_cast<T>(1.0f), sycl::fmax(static_cast<T>(0.0f), (x[i] + static_cast<T>(3.0f)) / static_cast<T>(6.0f)));
- }
-
- template<typename T>
- static void exp(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = sycl::exp(x[i]);
- }
-
- template<typename T>
- static void log(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- T xi = x[i];
- if (xi <= 0) {
- dst[i] = neg_infinity<T>();
- } else {
- dst[i] = sycl::log(xi);
- }
- }
-
- template<typename T>
- static void neg(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = -x[i];
- }
-
- template<typename T>
- static void step(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = x[i] > static_cast<T>(0.0f);
- }
-
- template<typename T>
- static void leaky_relu(const T *x, T *dst, const int k, const float negative_slope,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
- if (i >= k) {
- return;
- }
- dst[i] = sycl::fmax((x[i]), static_cast<T>(0)) +
- sycl::fmin((x[i]), static_cast<T>(0.0f)) * negative_slope;
- }
-
- template<typename T>
- static void sqr(const T * x, T * dst, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
- dst[i] = x[i] * x[i];
- }
-
- template<typename T>
- static void upscale(const T *x, T *dst, const int nb00, const int nb01,
- const int nb02, const int nb03, const int ne10, const int ne11,
- const int ne12, const int ne13, const float sf0, const float sf1,
- const float sf2, const float sf3, const sycl::nd_item<1> &item_ct1) {
- int index = item_ct1.get_local_id(0) +
- item_ct1.get_group(0) * item_ct1.get_local_range(0);
- if (index >= ne10 * ne11 * ne12 * ne13) {
- return;
- }
- // operation
- int i10 = index % ne10;
- int i11 = (index / ne10) % ne11;
- int i12 = (index / (ne10 * ne11)) % ne12;
- int i13 = (index / (ne10 * ne11 * ne12)) % ne13;
-
- int i00 = i10 / sf0;
- int i01 = i11 / sf1;
- int i02 = i12 / sf2;
- int i03 = i13 / sf3;
-
- dst[index] = *(const T *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00);
- }
-
- template <typename T>
- static void pad(const T *x, T *dst, const int ne0, const int ne00, const int ne01, const int ne02,
- const sycl::nd_item<3> &item_ct1) {
- int nidx = item_ct1.get_local_id(2) +
- item_ct1.get_group(2) * item_ct1.get_local_range(2);
- if (nidx >= ne0) {
- return;
- }
-
- // operation
- int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
- item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
- if (nidx < ne00 && item_ct1.get_group(1) < (size_t) ne01 && item_ct1.get_group(0) < (size_t) ne02) {
- int offset_src = nidx + item_ct1.get_group(1) * ne00 +
- item_ct1.get_group(0) * ne00 * ne01;
- dst[offset_dst] = x[offset_src];
- } else {
- dst[offset_dst] = static_cast<T>(0.0f);
- }
- }
-
-
- template<typename T>
- static void clamp(const T * x, T * dst, const float min, const float max, const int k,
- const sycl::nd_item<3> &item_ct1) {
- const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
-
- if (i >= k) {
- return;
- }
-
- dst[i] = x[i] < static_cast<T>(min) ? static_cast<T>(min) : (x[i] > static_cast<T>(max) ? static_cast<T>(max) : x[i]);
- }
-
- static void acc_f32_sycl(const float *x, const float *y, float *dst,
- const int n_elements, const int ne10, const int ne11,
- const int ne12, const int nb1, const int nb2,
- const int offset, queue_ptr stream) {
- int num_blocks = (n_elements + SYCL_ACC_BLOCK_SIZE - 1) / SYCL_ACC_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset,
- item_ct1);
- });
- }
-
- template<typename T>
- static void gelu_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- gelu(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void silu_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_SILU_BLOCK_SIZE - 1) / SYCL_SILU_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- silu(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void sgn_sycl(const T * x, T * dst, const int k, queue_ptr stream) {
- // hard code for now
- const int num_blocks = ceil_div(k, 256);
- stream->parallel_for(
- sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range(1, 1, 256)), sycl::range(1, 1, 256)), [=](sycl::nd_item<3> item_ct1) {
- sgn(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void abs_sycl(const T * x, T * dst, const int k, queue_ptr stream) {
- // hard code for now
- const int num_blocks = ceil_div(k, 256);
- stream->parallel_for(
- sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, 256)), sycl::range<3>(1, 1, 256)), [=](sycl::nd_item<3> item_ct1) {
- abs_op(x, dst, k, item_ct1);
- });
- }
-
-
- template<typename T>
- static void elu_sycl(const T * x, T * dst, const int k, queue_ptr stream) {
- // hard code for now
- const int num_blocks = ceil_div(k, 256);
- stream->parallel_for(
- sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, 256)), sycl::range<3>(1, 1, 256)), [=](sycl::nd_item<3> item_ct1) {
- elu_op(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void gelu_quick_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- gelu_quick(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void tanh_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_TANH_BLOCK_SIZE - 1) / SYCL_TANH_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- tanh(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void relu_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- relu(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void hardsigmoid_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_HARDSIGMOID_BLOCK_SIZE - 1) / SYCL_HARDSIGMOID_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- hardsigmoid(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void hardswish_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_HARDSWISH_BLOCK_SIZE - 1) / SYCL_HARDSWISH_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- hardswish(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void exp_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- exp(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void log_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- log(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void neg_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- neg(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void step_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- step(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void sigmoid_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_SIGMOID_BLOCK_SIZE - 1) / SYCL_SIGMOID_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- sigmoid(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void sqrt_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_SQRT_BLOCK_SIZE - 1) / SYCL_SQRT_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- sqrt(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void sin_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- sin(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void cos_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- cos(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void leaky_relu_sycl(const T *x, T *dst, const int k,
- const float negative_slope,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- leaky_relu(x, dst, k, negative_slope, item_ct1);
- });
- }
-
- template<typename T>
- static void sqr_sycl(const T *x, T *dst, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_SQR_BLOCK_SIZE - 1) / SYCL_SQR_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- sqr(x, dst, k, item_ct1);
- });
- }
-
- template<typename T>
- static void upscale_sycl(const T *x, T *dst, const int nb00, const int nb01,
- const int nb02, const int nb03, const int ne10, const int ne11,
- const int ne12, const int ne13, const float sf0, const float sf1,
- const float sf2, const float sf3, queue_ptr stream) {
- int dst_size = ne10 * ne11 * ne12 * ne13;
- int num_blocks = (dst_size + SYCL_UPSCALE_BLOCK_SIZE - 1) / SYCL_UPSCALE_BLOCK_SIZE;
- sycl::range<1> gridDim(num_blocks * SYCL_UPSCALE_BLOCK_SIZE);
- stream->parallel_for(
- sycl::nd_range<1>(gridDim, sycl::range<1>(SYCL_UPSCALE_BLOCK_SIZE)),
- [=](sycl::nd_item<1> item_ct1) {
- upscale(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, item_ct1);
- });
- }
-
- template<typename T>
- static void pad_sycl(const T *x, T *dst, const int ne00,
- const int ne01, const int ne02, const int ne0,
- const int ne1, const int ne2, queue_ptr stream) {
- int num_blocks = (ne0 + SYCL_PAD_BLOCK_SIZE - 1) / SYCL_PAD_BLOCK_SIZE;
- sycl::range<3> gridDim(ne2, ne1, num_blocks);
- stream->parallel_for(
- sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- pad(x, dst, ne0, ne00, ne01, ne02, item_ct1);
- });
- }
-
- template<typename T>
- static void clamp_sycl(const T *x, T *dst, const float min,
- const float max, const int k,
- queue_ptr stream) {
- const int num_blocks = (k + SYCL_CLAMP_BLOCK_SIZE - 1) / SYCL_CLAMP_BLOCK_SIZE;
- stream->parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
- sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE),
- sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE)),
- [=](sycl::nd_item<3> item_ct1) {
- clamp(x, dst, min, max, k, item_ct1);
- });
- }
-
- inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
-
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- sgn_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- sgn_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
-
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- abs_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- abs_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
-
- inline void ggml_sycl_op_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
-
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- elu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- elu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- silu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- silu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- gelu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- gelu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- gelu_quick_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- gelu_quick_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- tanh_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- tanh_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
-
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
-
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
-
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- hardsigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- hardsigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- hardswish_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- hardswish_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
- #else
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
- #endif
- GGML_ASSERT(dst->src[0]->type == dst->type);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- switch (dst->type) {
- #if defined (GGML_SYCL_F16)
- case GGML_TYPE_F16:
- {
- auto data_pts = cast_data<sycl::half>(dst);
- exp_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- #endif
- case GGML_TYPE_F32:
- {
- auto data_pts = cast_data<float>(dst);
- exp_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
- break;
- }
- default:
- GGML_ABORT("GGML tensor type not supported!\n");
- }
- }
-
- inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- #if defined (GGML_SYCL_F16)
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
980
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
981
- #else
982
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
983
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
984
- #endif
985
- GGML_ASSERT(dst->src[0]->type == dst->type);
986
- dpct::queue_ptr main_stream = ctx.stream();
987
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
988
- switch (dst->type) {
989
- #if defined (GGML_SYCL_F16)
990
- case GGML_TYPE_F16:
991
- {
992
- auto data_pts = cast_data<sycl::half>(dst);
993
- log_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
994
- break;
995
- }
996
- #endif
997
- case GGML_TYPE_F32:
998
- {
999
- auto data_pts = cast_data<float>(dst);
1000
- log_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1001
- break;
1002
- }
1003
- default:
1004
- GGML_ABORT("GGML tensor type not supported!\n");
1005
- }
1006
- }
1007
-
1008
- inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1009
- #if defined (GGML_SYCL_F16)
1010
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1011
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1012
- #else
1013
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1014
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1015
- #endif
1016
- GGML_ASSERT(dst->src[0]->type == dst->type);
1017
- dpct::queue_ptr main_stream = ctx.stream();
1018
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1019
- switch (dst->type) {
1020
- #if defined (GGML_SYCL_F16)
1021
- case GGML_TYPE_F16:
1022
- {
1023
- auto data_pts = cast_data<sycl::half>(dst);
1024
- sigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1025
- break;
1026
- }
1027
- #endif
1028
- case GGML_TYPE_F32:
1029
- {
1030
- auto data_pts = cast_data<float>(dst);
1031
- sigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1032
- break;
1033
- }
1034
- default:
1035
- GGML_ABORT("GGML tensor type not supported!\n");
1036
- }
1037
- }
1038
-
1039
- inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1040
- #if defined (GGML_SYCL_F16)
1041
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1042
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1043
- #else
1044
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1045
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1046
- #endif
1047
- GGML_ASSERT(dst->src[0]->type == dst->type);
1048
-
1049
- dpct::queue_ptr main_stream = ctx.stream();
1050
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1051
- switch (dst->type) {
1052
- #if defined (GGML_SYCL_F16)
1053
- case GGML_TYPE_F16:
1054
- {
1055
- auto data_pts = cast_data<sycl::half>(dst);
1056
- sqrt_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1057
- break;
1058
- }
1059
- #endif
1060
- case GGML_TYPE_F32:
1061
- {
1062
- auto data_pts = cast_data<float>(dst);
1063
- sqrt_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1064
- break;
1065
- }
1066
- default:
1067
- GGML_ABORT("GGML tensor type not supported!\n");
1068
- }
1069
- }
1070
-
1071
- inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1072
- #if defined (GGML_SYCL_F16)
1073
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1074
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1075
- #else
1076
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1077
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1078
- #endif
1079
- GGML_ASSERT(dst->src[0]->type == dst->type);
1080
- dpct::queue_ptr main_stream = ctx.stream();
1081
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1082
- switch (dst->type) {
1083
- #if defined (GGML_SYCL_F16)
1084
- case GGML_TYPE_F16:
1085
- {
1086
- auto data_pts = cast_data<sycl::half>(dst);
1087
- sin_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1088
- break;
1089
- }
1090
- #endif
1091
- case GGML_TYPE_F32:
1092
- {
1093
- auto data_pts = cast_data<float>(dst);
1094
- sin_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1095
- break;
1096
- }
1097
- default:
1098
- GGML_ABORT("GGML tensor type not supported!\n");
1099
- }
1100
- }
1101
-
1102
- inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1103
- #if defined (GGML_SYCL_F16)
1104
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1105
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1106
- #else
1107
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1108
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1109
- #endif
1110
- GGML_ASSERT(dst->src[0]->type == dst->type);
1111
- dpct::queue_ptr main_stream = ctx.stream();
1112
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1113
- switch (dst->type) {
1114
- #if defined (GGML_SYCL_F16)
1115
- case GGML_TYPE_F16:
1116
- {
1117
- auto data_pts = cast_data<sycl::half>(dst);
1118
- cos_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1119
- break;
1120
- }
1121
- #endif
1122
- case GGML_TYPE_F32:
1123
- {
1124
- auto data_pts = cast_data<float>(dst);
1125
- cos_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1126
- break;
1127
- }
1128
- default:
1129
- GGML_ABORT("GGML tensor type not supported!\n");
1130
- }
1131
- }
1132
-
1133
- inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1134
- #if defined (GGML_SYCL_F16)
1135
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1136
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1137
- #else
1138
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1139
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1140
- #endif
1141
- GGML_ASSERT(dst->src[0]->type == dst->type);
1142
- dpct::queue_ptr main_stream = ctx.stream();
1143
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1144
- switch (dst->type) {
1145
- #if defined (GGML_SYCL_F16)
1146
- case GGML_TYPE_F16:
1147
- {
1148
- auto data_pts = cast_data<sycl::half>(dst);
1149
- step_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1150
- break;
1151
- }
1152
- #endif
1153
- case GGML_TYPE_F32:
1154
- {
1155
- auto data_pts = cast_data<float>(dst);
1156
- step_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1157
- break;
1158
- }
1159
- default:
1160
- GGML_ABORT("GGML tensor type not supported!\n");
1161
- }
1162
- }
1163
-
1164
- inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1165
- #if defined (GGML_SYCL_F16)
1166
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1167
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1168
- #else
1169
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1170
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1171
- #endif
1172
- GGML_ASSERT(dst->src[0]->type == dst->type);
1173
- dpct::queue_ptr main_stream = ctx.stream();
1174
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1175
- switch (dst->type) {
1176
- #if defined (GGML_SYCL_F16)
1177
- case GGML_TYPE_F16:
1178
- {
1179
- auto data_pts = cast_data<sycl::half>(dst);
1180
- neg_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1181
- break;
1182
- }
1183
- #endif
1184
- case GGML_TYPE_F32:
1185
- {
1186
- auto data_pts = cast_data<float>(dst);
1187
- neg_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1188
- break;
1189
- }
1190
- default:
1191
- GGML_ABORT("GGML tensor type not supported!\n");
1192
- }
1193
- }
1194
-
1195
- inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1196
- #if defined (GGML_SYCL_F16)
1197
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1198
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1199
- #else
1200
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1201
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1202
- #endif
1203
-
1204
- GGML_ASSERT(dst->src[0]->type == dst->type);
1205
- float negative_slope;
1206
- memcpy(&negative_slope, dst->op_params, sizeof(float));
1207
- dpct::queue_ptr main_stream = ctx.stream();
1208
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1209
- switch (dst->type) {
1210
- #if defined (GGML_SYCL_F16)
1211
- case GGML_TYPE_F16:
1212
- {
1213
- auto data_pts = cast_data<sycl::half>(dst);
1214
- leaky_relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), negative_slope, main_stream);
1215
- break;
1216
- }
1217
- #endif
1218
- case GGML_TYPE_F32:
1219
- {
1220
- auto data_pts = cast_data<float>(dst);
1221
- leaky_relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), negative_slope, main_stream);
1222
- break;
1223
- }
1224
- default:
1225
- GGML_ABORT("GGML tensor type not supported!\n");
1226
- }
1227
- }
1228
-
1229
- inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1230
- #if defined (GGML_SYCL_F16)
1231
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1232
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1233
- #else
1234
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1235
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1236
- #endif
1237
- GGML_ASSERT(dst->src[0]->type == dst->type);
1238
- dpct::queue_ptr main_stream = ctx.stream();
1239
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1240
- switch (dst->type) {
1241
- #if defined (GGML_SYCL_F16)
1242
- case GGML_TYPE_F16:
1243
- {
1244
- auto data_pts = cast_data<sycl::half>(dst);
1245
- sqr_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1246
- break;
1247
- }
1248
- #endif
1249
- case GGML_TYPE_F32:
1250
- {
1251
- auto data_pts = cast_data<float>(dst);
1252
- sqr_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
1253
- break;
1254
- }
1255
- default:
1256
- GGML_ABORT("GGML tensor type not supported!\n");
1257
- }
1258
- }
1259
-
1260
- inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1261
- #if defined (GGML_SYCL_F16)
1262
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1263
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1264
- #else
1265
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1266
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1267
- #endif
1268
- GGML_ASSERT(dst->src[0]->type == dst->type);
1269
-
1270
- dpct::queue_ptr main_stream = ctx.stream();
1271
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1272
-
1273
- const float sf0 = (float) dst->ne[0] / dst->src[0]->ne[0];
1274
- const float sf1 = (float) dst->ne[1] / dst->src[0]->ne[1];
1275
- const float sf2 = (float) dst->ne[2] / dst->src[0]->ne[2];
1276
- const float sf3 = (float) dst->ne[3] / dst->src[0]->ne[3];
1277
- switch (dst->type) {
1278
- #if defined (GGML_SYCL_F16)
1279
- case GGML_TYPE_F16:
1280
- {
1281
- auto data_pts = cast_data<sycl::half>(dst);
1282
- upscale_sycl(data_pts.src, data_pts.dst, dst->src[0]->nb[0], dst->src[0]->nb[1], dst->src[0]->nb[2],
1283
- dst->src[0]->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3,
1284
- main_stream);
1285
- break;
1286
- }
1287
- #endif
1288
- case GGML_TYPE_F32:
1289
- {
1290
- auto data_pts = cast_data<float>(dst);
1291
- upscale_sycl(data_pts.src, data_pts.dst, dst->src[0]->nb[0], dst->src[0]->nb[1], dst->src[0]->nb[2],
1292
- dst->src[0]->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3,
1293
- main_stream);
1294
- break;
1295
- }
1296
- default:
1297
- GGML_ABORT("GGML tensor type not supported!\n");
1298
- }
1299
- }
1300
-
1301
- inline void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1302
- #if defined (GGML_SYCL_F16)
1303
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1304
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1305
- #else
1306
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1307
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1308
- #endif
1309
- GGML_ASSERT(dst->src[0]->type == dst->type);
1310
- GGML_ASSERT(dst->src[0]->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors
1311
- dpct::queue_ptr main_stream = ctx.stream();
1312
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1313
- switch (dst->type) {
1314
- #if defined (GGML_SYCL_F16)
1315
- case GGML_TYPE_F16:
1316
- {
1317
- auto data_pts = cast_data<sycl::half>(dst);
1318
- pad_sycl(data_pts.src, data_pts.dst, dst->src[0]->ne[0], dst->src[0]->ne[1], dst->src[0]->ne[2], dst->ne[0],
1319
- dst->ne[1], dst->ne[2], main_stream);
1320
- break;
1321
- }
1322
- #endif
1323
- case GGML_TYPE_F32:
1324
- {
1325
- auto data_pts = cast_data<float>(dst);
1326
- pad_sycl(data_pts.src, data_pts.dst, dst->src[0]->ne[0], dst->src[0]->ne[1], dst->src[0]->ne[2], dst->ne[0],
1327
- dst->ne[1], dst->ne[2], main_stream);
1328
- break;
1329
- }
1330
- default:
1331
- GGML_ABORT("GGML tensor type not supported!\n");
1332
- }
1333
- }
1334
-
1335
- inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
1336
- #if defined(GGML_SYCL_F16)
1337
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
1338
- GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
1339
- #else
1340
-
1341
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1342
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
1343
- #endif
1344
- GGML_ASSERT(dst->src[0]->type == dst->type);
1345
- dpct::queue_ptr main_stream = ctx.stream();
1346
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1347
- float min;
1348
- float max;
1349
- memcpy(&min, dst->op_params, sizeof(float));
1350
- memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
1351
-
1352
- switch (dst->type) {
1353
- #if defined(GGML_SYCL_F16)
1354
- case GGML_TYPE_F16:
1355
- {
1356
- auto data_pts = cast_data<sycl::half>(dst);
1357
- clamp_sycl(data_pts.src, data_pts.dst, min, max, ggml_nelements(dst->src[0]), main_stream);
1358
- break;
1359
- }
1360
- #endif
1361
- case GGML_TYPE_F32:
1362
- {
1363
- auto data_pts = cast_data<float>(dst);
1364
- clamp_sycl(data_pts.src, data_pts.dst, min, max, ggml_nelements(dst->src[0]), main_stream);
1365
- break;
1366
- }
1367
- default:
1368
- GGML_ABORT("GGML tensor type not supported!\n");
1369
- }
1370
- }
1371
-
1372
- inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
1373
-
1374
- GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
1375
- GGML_ASSERT(dst->src[1]->type == GGML_TYPE_F32);
1376
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
1377
- GGML_ASSERT(dst->ne[3] == 1); // just 3D tensors supported
1378
- dpct::queue_ptr main_stream = ctx.stream();
1379
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
1380
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
1381
- const float * src1_dd = static_cast<const float*>(dst->src[1]->data);
1382
- float * dst_dd = static_cast<float *>(dst->data);
1383
-
1384
- int nb1 = dst->op_params[0] / 4; // 4 bytes of float32
1385
- int nb2 = dst->op_params[1] / 4; // 4 bytes of float32
1386
- // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused
1387
- int offset = dst->op_params[3] / 4; // offset in bytes
1388
-
1389
- acc_f32_sycl(src0_dd, src1_dd, dst_dd, ggml_nelements(dst), dst->src[1]->ne[0], dst->src[1]->ne[1], dst->src[1]->ne[2], nb1, nb2, offset, main_stream);
1390
- }
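
Editor's note: the removed ggml_sycl_op_leaky_relu, ggml_sycl_op_clamp, and ggml_sycl_op_acc bodies all recover their scalar arguments from dst->op_params, a raw int32 array, using memcpy to reinterpret the stored bits as float without strict-aliasing problems. (In ggml_sycl_op_acc the values are divided by 4, so despite the removed "// offset in bytes" comment the resulting offset is in float elements, not bytes.) Below is a minimal, self-contained C++ illustration of the idiom; the op_params layout shown is only an assumption for the demo, not a documented ggml contract.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        // Stand-in for ggml's dst->op_params: raw int32 slots that each
        // op overlays with its own scalar parameters.
        std::int32_t op_params[4] = {0, 0, 0, 0};

        // Writing side: store two floats bit-for-bit into the int32 slots.
        const float min = -1.0f, max = 1.0f;
        std::memcpy(&op_params[0], &min, sizeof(float)); // slot 0: clamp min
        std::memcpy(&op_params[1], &max, sizeof(float)); // slot 1: clamp max

        // Reading side, mirroring the removed ggml_sycl_op_clamp:
        float min_out, max_out;
        std::memcpy(&min_out, op_params, sizeof(float));
        std::memcpy(&max_out, (const float *) op_params + 1, sizeof(float));
        std::printf("min=%g max=%g\n", min_out, max_out); // prints min=-1 max=1
        return 0;
    }
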
-
-
- void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_sqrt(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_sin(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_cos(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_acc(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_gelu(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_silu(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_gelu_quick(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_tanh(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_relu(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_sigmoid(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_hardsigmoid(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_hardswish(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
-
- void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_exp(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_log(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_neg(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_step(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_leaky_relu(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_sqr(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_upscale(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_pad(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_clamp(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_sgn(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_abs(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
-
- void ggml_sycl_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
-    ggml_sycl_op_elu(ctx, dst);
-    GGML_SYCL_DEBUG("call %s done\n", __func__);
- }
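
Editor's note: every removed ggml_sycl_op_* implementation above follows the same boilerplate shape: assert that src and dst are F32 (or F16 when GGML_SYCL_F16 is defined), fetch the context's SYCL queue, then switch on dst->type to call the matching typed kernel. A minimal standalone sketch of how that pattern could be collapsed into one templated dispatcher follows; the tensor/dtype types and the dispatch_unary helper are illustrative stand-ins, not ggml's actual API, and the host loop stands in for a SYCL kernel that would be enqueued on ctx.stream().

    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum class dtype { f32 }; // the real backend also dispatches f16 under GGML_SYCL_F16

    struct tensor {
        dtype type = dtype::f32;
        std::vector<float> data;
    };

    // Validate the type contract once, then apply the element-wise op;
    // mirrors GGML_ASSERT(dst->src[0]->type == dst->type) plus the switch.
    template <typename Op>
    void dispatch_unary(const tensor & src, tensor & dst, Op op) {
        if (src.type != dst.type) {
            std::fprintf(stderr, "GGML tensor type not supported!\n");
            return;
        }
        dst.data.resize(src.data.size());
        for (std::size_t i = 0; i < src.data.size(); ++i) {
            dst.data[i] = op(src.data[i]);
        }
    }

    int main() {
        tensor src{dtype::f32, {-1.5f, 0.0f, 2.0f}};
        tensor dst;
        dispatch_unary(src, dst, [](float x) { return x > 0.0f ? x : 0.0f; }); // relu
        dispatch_unary(src, dst, [](float x) { return std::tanh(x); });        // tanh
        for (float v : dst.data) std::printf("%g ", v);
        std::printf("\n");
        return 0;
    }

One generic dispatch path keeps the type checks and error handling in a single place, so adding a new element-wise op only means supplying a new kernel rather than repeating the assert/queue/switch scaffolding seen in each removed function.
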