@novastera-oss/llamarn 0.2.9 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (314)
  1. package/android/build.gradle +2 -1
  2. package/android/proguard-rules.pro +12 -0
  3. package/android/src/main/cpp/include/llama.h +15 -47
  4. package/android/src/main/jniLibs/arm64-v8a/libggml-base.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/libggml-cpu.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/libggml.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/libllama.so +0 -0
  8. package/android/src/main/jniLibs/armeabi-v7a/libggml-base.so +0 -0
  9. package/android/src/main/jniLibs/armeabi-v7a/libggml-cpu.so +0 -0
  10. package/android/src/main/jniLibs/armeabi-v7a/libggml.so +0 -0
  11. package/android/src/main/jniLibs/armeabi-v7a/libllama.so +0 -0
  12. package/android/src/main/jniLibs/x86/libggml-base.so +0 -0
  13. package/android/src/main/jniLibs/x86/libggml-cpu.so +0 -0
  14. package/android/src/main/jniLibs/x86/libggml.so +0 -0
  15. package/android/src/main/jniLibs/x86/libllama.so +0 -0
  16. package/android/src/main/jniLibs/x86_64/libggml-base.so +0 -0
  17. package/android/src/main/jniLibs/x86_64/libggml-cpu.so +0 -0
  18. package/android/src/main/jniLibs/x86_64/libggml.so +0 -0
  19. package/android/src/main/jniLibs/x86_64/libllama.so +0 -0
  20. package/cpp/build-info.cpp +2 -2
  21. package/cpp/llama.cpp/CMakeLists.txt +0 -1
  22. package/cpp/llama.cpp/CMakePresets.json +11 -0
  23. package/cpp/llama.cpp/CODEOWNERS +1 -0
  24. package/cpp/llama.cpp/README.md +8 -8
  25. package/cpp/llama.cpp/build-xcframework.sh +1 -1
  26. package/cpp/llama.cpp/common/CMakeLists.txt +4 -5
  27. package/cpp/llama.cpp/common/arg.cpp +62 -1
  28. package/cpp/llama.cpp/common/chat.cpp +37 -20
  29. package/cpp/llama.cpp/common/chat.h +2 -0
  30. package/cpp/llama.cpp/common/common.cpp +22 -6
  31. package/cpp/llama.cpp/common/common.h +22 -4
  32. package/cpp/llama.cpp/convert_hf_to_gguf.py +1250 -43
  33. package/cpp/llama.cpp/convert_hf_to_gguf_update.py +21 -13
  34. package/cpp/llama.cpp/ggml/CMakeLists.txt +13 -3
  35. package/cpp/llama.cpp/ggml/cmake/ggml-config.cmake.in +85 -47
  36. package/cpp/llama.cpp/ggml/include/ggml-backend.h +1 -1
  37. package/cpp/llama.cpp/ggml/include/ggml-webgpu.h +19 -0
  38. package/cpp/llama.cpp/ggml/include/ggml.h +173 -10
  39. package/cpp/llama.cpp/ggml/src/CMakeLists.txt +1 -1
  40. package/cpp/llama.cpp/ggml/src/ggml-alloc.c +0 -15
  41. package/cpp/llama.cpp/ggml/src/ggml-backend-reg.cpp +7 -8
  42. package/cpp/llama.cpp/ggml/src/ggml-backend.cpp +44 -38
  43. package/cpp/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +3 -1
  44. package/cpp/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +126 -8
  45. package/cpp/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +130 -22
  46. package/cpp/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +138 -18
  47. package/cpp/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +11 -3
  48. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/loongarch/quants.c +1 -1
  49. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +28 -1
  50. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +109 -12
  51. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +3 -0
  52. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +88 -10
  53. package/cpp/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +343 -1094
  54. package/cpp/llama.cpp/ggml/src/ggml-cpu/ops.cpp +1206 -163
  55. package/cpp/llama.cpp/ggml/src/ggml-cpu/ops.h +6 -0
  56. package/cpp/llama.cpp/ggml/src/ggml-cpu/repack.cpp +0 -1
  57. package/cpp/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +1 -1
  58. package/cpp/llama.cpp/ggml/src/ggml-cpu/vec.cpp +36 -9
  59. package/cpp/llama.cpp/ggml/src/ggml-cpu/vec.h +142 -9
  60. package/cpp/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +3 -3
  61. package/cpp/llama.cpp/ggml/src/ggml-cuda/common.cuh +31 -4
  62. package/cpp/llama.cpp/ggml/src/ggml-cuda/convert.cu +86 -17
  63. package/cpp/llama.cpp/ggml/src/ggml-cuda/convert.cuh +5 -0
  64. package/cpp/llama.cpp/ggml/src/ggml-cuda/cpy-utils.cuh +225 -0
  65. package/cpp/llama.cpp/ggml/src/ggml-cuda/cpy.cu +41 -301
  66. package/cpp/llama.cpp/ggml/src/ggml-cuda/cross-entropy-loss.cu +2 -14
  67. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-common.cuh +85 -64
  68. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-mma-f16.cuh +47 -60
  69. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f16.cu +29 -42
  70. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f32.cu +46 -59
  71. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-vec-f16.cuh +36 -45
  72. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-vec-f32.cuh +38 -45
  73. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-wmma-f16.cu +23 -36
  74. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn.cu +3 -13
  75. package/cpp/llama.cpp/ggml/src/ggml-cuda/getrows.cu +8 -0
  76. package/cpp/llama.cpp/ggml/src/ggml-cuda/ggml-cuda.cu +255 -99
  77. package/cpp/llama.cpp/ggml/src/ggml-cuda/im2col.cu +1 -1
  78. package/cpp/llama.cpp/ggml/src/ggml-cuda/mma.cuh +111 -3
  79. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmq.cu +6 -4
  80. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmq.cuh +1152 -695
  81. package/cpp/llama.cpp/ggml/src/ggml-cuda/norm.cu +92 -5
  82. package/cpp/llama.cpp/ggml/src/ggml-cuda/norm.cuh +2 -0
  83. package/cpp/llama.cpp/ggml/src/ggml-cuda/rope.cu +21 -27
  84. package/cpp/llama.cpp/ggml/src/ggml-cuda/scale.cu +8 -6
  85. package/cpp/llama.cpp/ggml/src/ggml-cuda/set-rows.cu +275 -0
  86. package/cpp/llama.cpp/ggml/src/ggml-cuda/set-rows.cuh +7 -0
  87. package/cpp/llama.cpp/ggml/src/ggml-cuda/softmax.cu +119 -58
  88. package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu +10 -2
  89. package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-scan.cu +192 -52
  90. package/cpp/llama.cpp/ggml/src/ggml-cuda/unary.cu +104 -0
  91. package/cpp/llama.cpp/ggml/src/ggml-cuda/unary.cuh +13 -0
  92. package/cpp/llama.cpp/ggml/src/ggml-cuda/upscale.cu +92 -6
  93. package/cpp/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +27 -6
  94. package/cpp/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +2 -2
  95. package/cpp/llama.cpp/ggml/src/ggml-impl.h +80 -0
  96. package/cpp/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +4 -2
  97. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +48 -12
  98. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.m +572 -106
  99. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.metal +599 -105
  100. package/cpp/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +18 -4
  101. package/cpp/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +5 -0
  102. package/cpp/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +800 -42
  103. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/conv2d.cl +185 -0
  104. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/conv2d_f16_f32.cl +176 -0
  105. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/gelu.cl +27 -0
  106. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/glu.cl +337 -0
  107. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/im2col_f16.cl +1 -1
  108. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/im2col_f32.cl +1 -1
  109. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mat_f16_f32.cl +130 -0
  110. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/rms_norm.cl +79 -0
  111. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/scale.cl +3 -2
  112. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/set_rows.cl +95 -0
  113. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +24 -11
  114. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +24 -11
  115. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_f16.cl +24 -11
  116. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_f32.cl +24 -11
  117. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/upscale.cl +2 -3
  118. package/cpp/llama.cpp/ggml/src/ggml-quants.c +6 -6
  119. package/cpp/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +4 -4
  120. package/cpp/llama.cpp/ggml/src/ggml-sycl/backend.hpp +1 -0
  121. package/cpp/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +693 -1034
  122. package/cpp/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +18 -9
  123. package/cpp/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +14 -26
  124. package/cpp/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +191 -55
  125. package/cpp/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +1 -1
  126. package/cpp/llama.cpp/ggml/src/ggml-sycl/quants.hpp +8 -9
  127. package/cpp/llama.cpp/ggml/src/ggml-sycl/rope.cpp +15 -18
  128. package/cpp/llama.cpp/ggml/src/ggml-sycl/set_rows.cpp +131 -0
  129. package/cpp/llama.cpp/ggml/src/ggml-sycl/set_rows.hpp +8 -0
  130. package/cpp/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +2 -6
  131. package/cpp/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +991 -307
  132. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +265 -0
  133. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +59 -12
  134. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +1 -1
  135. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +1 -1
  136. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +1 -1
  137. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +1 -1
  138. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +1 -1
  139. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +28 -23
  140. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +14 -9
  141. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +38 -32
  142. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +32 -27
  143. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +44 -12
  144. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp +13 -0
  145. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/geglu_erf.comp +27 -0
  146. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/geglu_quick.comp +11 -0
  147. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/gelu_erf.comp +39 -0
  148. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +2 -0
  149. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp +17 -0
  150. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp +29 -0
  151. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +3 -8
  152. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +128 -72
  153. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +38 -9
  154. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp +9 -0
  155. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +18 -3
  156. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/roll.comp +46 -0
  157. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +1 -4
  158. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +7 -9
  159. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +7 -9
  160. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +7 -9
  161. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rte.comp +5 -0
  162. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +1 -1
  163. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +20 -4
  164. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp +9 -0
  165. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +69 -5
  166. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +84 -9
  167. package/cpp/llama.cpp/ggml/src/ggml-webgpu/CMakeLists.txt +54 -0
  168. package/cpp/llama.cpp/ggml/src/ggml-webgpu/ggml-webgpu.cpp +907 -0
  169. package/cpp/llama.cpp/ggml/src/ggml-webgpu/wgsl-shaders/cpy.wgsl +60 -0
  170. package/cpp/llama.cpp/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +35 -0
  171. package/cpp/llama.cpp/ggml/src/ggml-webgpu/wgsl-shaders/memset.wgsl +40 -0
  172. package/cpp/llama.cpp/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat.wgsl +56 -0
  173. package/cpp/llama.cpp/ggml/src/ggml.c +386 -67
  174. package/cpp/llama.cpp/ggml/src/gguf.cpp +8 -1
  175. package/cpp/llama.cpp/gguf-py/gguf/constants.py +307 -0
  176. package/cpp/llama.cpp/gguf-py/gguf/gguf_writer.py +8 -2
  177. package/cpp/llama.cpp/gguf-py/gguf/metadata.py +4 -0
  178. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_dump.py +24 -1
  179. package/cpp/llama.cpp/gguf-py/gguf/tensor_mapping.py +122 -47
  180. package/cpp/llama.cpp/gguf-py/gguf/vocab.py +12 -3
  181. package/cpp/llama.cpp/include/llama.h +15 -47
  182. package/cpp/llama.cpp/models/templates/llama-cpp-rwkv-world.jinja +34 -0
  183. package/cpp/llama.cpp/models/templates/moonshotai-Kimi-K2.jinja +43 -0
  184. package/cpp/llama.cpp/requirements/requirements-all.txt +1 -0
  185. package/cpp/llama.cpp/requirements/requirements-server-bench.txt +5 -0
  186. package/cpp/llama.cpp/src/llama-arch.cpp +316 -3
  187. package/cpp/llama.cpp/src/llama-arch.h +23 -1
  188. package/cpp/llama.cpp/src/llama-batch.cpp +103 -71
  189. package/cpp/llama.cpp/src/llama-batch.h +31 -18
  190. package/cpp/llama.cpp/src/llama-chat.cpp +58 -1
  191. package/cpp/llama.cpp/src/llama-chat.h +3 -0
  192. package/cpp/llama.cpp/src/llama-context.cpp +180 -106
  193. package/cpp/llama.cpp/src/llama-context.h +26 -16
  194. package/cpp/llama.cpp/src/llama-cparams.h +3 -2
  195. package/cpp/llama.cpp/src/llama-graph.cpp +310 -211
  196. package/cpp/llama.cpp/src/llama-graph.h +184 -122
  197. package/cpp/llama.cpp/src/llama-hparams.cpp +47 -1
  198. package/cpp/llama.cpp/src/llama-hparams.h +13 -2
  199. package/cpp/llama.cpp/src/llama-kv-cache-unified-iswa.cpp +38 -22
  200. package/cpp/llama.cpp/src/llama-kv-cache-unified-iswa.h +7 -2
  201. package/cpp/llama.cpp/src/llama-kv-cache-unified.cpp +849 -304
  202. package/cpp/llama.cpp/src/llama-kv-cache-unified.h +143 -47
  203. package/cpp/llama.cpp/src/llama-kv-cells.h +62 -10
  204. package/cpp/llama.cpp/src/llama-memory-hybrid.cpp +10 -4
  205. package/cpp/llama.cpp/src/llama-memory-hybrid.h +3 -1
  206. package/cpp/llama.cpp/src/llama-memory-recurrent.cpp +36 -11
  207. package/cpp/llama.cpp/src/llama-memory.cpp +17 -0
  208. package/cpp/llama.cpp/src/llama-memory.h +3 -0
  209. package/cpp/llama.cpp/src/llama-model.cpp +3545 -719
  210. package/cpp/llama.cpp/src/llama-model.h +21 -4
  211. package/cpp/llama.cpp/src/llama-quant.cpp +2 -2
  212. package/cpp/llama.cpp/src/llama-vocab.cpp +376 -10
  213. package/cpp/llama.cpp/src/llama-vocab.h +43 -0
  214. package/cpp/llama.cpp/src/unicode.cpp +207 -0
  215. package/cpp/llama.cpp/src/unicode.h +2 -0
  216. package/ios/include/chat.h +2 -0
  217. package/ios/include/common.h +22 -4
  218. package/ios/include/llama.h +15 -47
  219. package/ios/libs/llama.xcframework/Info.plist +13 -13
  220. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  221. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +5267 -4890
  222. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-backend.h +1 -1
  223. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml.h +173 -10
  224. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/llama.h +15 -47
  225. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/llama +0 -0
  226. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  227. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +5238 -4861
  228. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +4014 -3764
  229. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +1 -1
  230. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +173 -10
  231. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/llama.h +15 -47
  232. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/llama +0 -0
  233. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  234. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +5238 -4861
  235. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +4016 -3766
  236. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-backend.h +1 -1
  237. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml.h +173 -10
  238. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/llama.h +15 -47
  239. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-backend.h +1 -1
  240. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml.h +173 -10
  241. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/llama.h +15 -47
  242. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/llama +0 -0
  243. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-backend.h +1 -1
  244. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml.h +173 -10
  245. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/llama.h +15 -47
  246. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/llama +0 -0
  247. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/llama +0 -0
  248. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  249. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +5267 -4890
  250. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-backend.h +1 -1
  251. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml.h +173 -10
  252. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/llama.h +15 -47
  253. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/llama +0 -0
  254. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  255. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +5238 -4861
  256. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +4014 -3764
  257. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +1 -1
  258. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +173 -10
  259. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/llama.h +15 -47
  260. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/llama +0 -0
  261. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  262. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +5303 -4926
  263. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-backend.h +1 -1
  264. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml.h +173 -10
  265. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/llama.h +15 -47
  266. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/llama +0 -0
  267. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  268. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +5274 -4897
  269. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +4044 -3794
  270. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +1 -1
  271. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +173 -10
  272. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/llama.h +15 -47
  273. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/llama +0 -0
  274. package/package.json +4 -4
  275. package/cpp/llama.cpp/ggml/include/ggml-kompute.h +0 -50
  276. package/cpp/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  277. package/cpp/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  278. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/common.comp +0 -112
  279. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +0 -58
  280. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +0 -25
  281. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +0 -52
  282. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +0 -52
  283. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +0 -52
  284. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +0 -52
  285. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +0 -30
  286. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +0 -22
  287. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +0 -17
  288. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +0 -31
  289. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +0 -31
  290. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +0 -38
  291. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +0 -39
  292. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +0 -44
  293. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +0 -52
  294. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +0 -69
  295. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +0 -51
  296. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +0 -33
  297. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +0 -35
  298. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +0 -140
  299. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +0 -106
  300. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +0 -73
  301. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +0 -52
  302. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +0 -28
  303. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +0 -84
  304. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +0 -21
  305. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +0 -53
  306. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +0 -52
  307. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +0 -52
  308. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +0 -52
  309. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +0 -52
  310. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +0 -19
  311. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +0 -23
  312. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +0 -22
  313. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +0 -72
  314. package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +0 -71
@@ -83,6 +83,10 @@ static __device__ __forceinline__ float op_log(float x) {
83
83
  return logf(x);
84
84
  }
85
85
 
86
+ static __device__ __forceinline__ float op_elu(float x) {
87
+ return (x > 0.f) ? x : expm1f(x);
88
+ }
89
+
86
90
  template <float (*op)(float), typename T>
87
91
  static __global__ void unary_op_kernel(const T * x, T * dst, const int k) {
88
92
  const int i = blockDim.x*blockIdx.x + threadIdx.x;
@@ -196,6 +200,106 @@ void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
196
200
  ggml_cuda_op_unary<op_log>(ctx, dst);
197
201
  }
198
202
 
203
+ void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
204
+ ggml_cuda_op_unary<op_elu>(ctx, dst);
205
+ }
206
+ /* gated ops */
207
+
208
+ template <float (*op)(float), typename T>
209
+ static __global__ void unary_gated_op_kernel(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1) {
210
+ const int64_t i = int64_t(blockDim.x)*blockIdx.x + threadIdx.x;
211
+
212
+ if (i >= k) {
213
+ return;
214
+ }
215
+
216
+ // perform base op and multiply with gate (either offset in same tensor or a separate one)
217
+ const int64_t j0 = (i / n) * o0 + (i % n);
218
+ const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n);
219
+
220
+ dst[i] = (T)(op((float)x[j0]) * (float)g[j1]);
221
+ }
222
+
223
+ template <float (*op)(float), typename T>
224
+ static void unary_gated_cuda(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, cudaStream_t stream) {
225
+ const int64_t num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE;
226
+ unary_gated_op_kernel<op><<<num_blocks, CUDA_GLU_BLOCK_SIZE, 0, stream>>>(x, g, dst, k, n, o0, o1);
227
+ }
228
+
229
+ template <float (*op)(float)>
230
+ void ggml_cuda_op_unary_gated(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
231
+ const ggml_tensor * src0 = dst->src[0];
232
+ const ggml_tensor * src1 = dst->src[1];
233
+ void * src0_d = src0->data;
234
+ void * src1_d = src1 ? src1->data : src0->data;
235
+ const int64_t src0_o = src0->nb[1];
236
+ const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1];
237
+ void * dst_d = dst->data;
238
+ const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2;
239
+ cudaStream_t stream = ctx.stream();
240
+
241
+ GGML_ASSERT(ggml_is_contiguous_1(src0));
242
+ GGML_ASSERT(src0->nb[0] == ggml_element_size(src0));
243
+ GGML_ASSERT(ggml_is_contiguous(dst));
244
+
245
+ GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
246
+ GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
247
+ GGML_ASSERT(src0->type == dst->type);
248
+ GGML_ASSERT(dst->ne[0] == nc);
249
+ GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0));
250
+
251
+ if (src1) {
252
+ GGML_ASSERT(ggml_is_contiguous_1(src1));
253
+ GGML_ASSERT(src1->nb[0] == ggml_element_size(src1));
254
+ GGML_ASSERT(src1->ne[0] == nc);
255
+ GGML_ASSERT(src0->type == src1->type);
256
+ }
257
+
258
+ const int32_t swapped = ((const int32_t *) dst->op_params)[1];
259
+
260
+ if (src0->type == GGML_TYPE_F16) {
261
+ half * src0_p = (half *) src0_d;
262
+ half * src1_p = (half *) src1_d;
263
+
264
+ if (!src1) {
265
+ src0_p += swapped ? nc : 0;
266
+ src1_p += swapped ? 0 : nc;
267
+ }
268
+
269
+ unary_gated_cuda<op>(src0_p, src1_p, (half *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(half), src1_o / sizeof(half), stream);
270
+ } else {
271
+ float * src0_p = (float *) src0_d;
272
+ float * src1_p = (float *) src1_d;
273
+
274
+ if (!src1) {
275
+ src0_p += swapped ? nc : 0;
276
+ src1_p += swapped ? 0 : nc;
277
+ }
278
+
279
+ unary_gated_cuda<op>(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), stream);
280
+ }
281
+ }
282
+
283
+ void ggml_cuda_op_reglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
284
+ ggml_cuda_op_unary_gated<op_relu>(ctx, dst);
285
+ }
286
+
287
+ void ggml_cuda_op_geglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
288
+ ggml_cuda_op_unary_gated<op_gelu>(ctx, dst);
289
+ }
290
+
291
+ void ggml_cuda_op_swiglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
292
+ ggml_cuda_op_unary_gated<op_silu>(ctx, dst);
293
+ }
294
+
295
+ void ggml_cuda_op_geglu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
296
+ ggml_cuda_op_unary_gated<op_gelu_erf>(ctx, dst);
297
+ }
298
+
299
+ void ggml_cuda_op_geglu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
300
+ ggml_cuda_op_unary_gated<op_gelu_quick>(ctx, dst);
301
+ }
302
+
199
303
  /* silu_back */
200
304
 
201
305
  static __device__ __forceinline__ float op_silu_back(float grad, float x) {
@@ -15,6 +15,7 @@
15
15
  #define CUDA_SQRT_BLOCK_SIZE 256
16
16
  #define CUDA_SIN_BLOCK_SIZE 256
17
17
  #define CUDA_COS_BLOCK_SIZE 256
18
+ #define CUDA_GLU_BLOCK_SIZE 256
18
19
 
19
20
  void ggml_cuda_op_abs(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
20
21
 
@@ -57,3 +58,15 @@ void ggml_cuda_op_sin(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
57
58
  void ggml_cuda_op_cos(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
58
59
 
59
60
  void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
61
+
62
+ void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
63
+
64
+ void ggml_cuda_op_reglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
65
+
66
+ void ggml_cuda_op_geglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
67
+
68
+ void ggml_cuda_op_swiglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
69
+
70
+ void ggml_cuda_op_geglu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
71
+
72
+ void ggml_cuda_op_geglu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -22,17 +22,88 @@ static __global__ void upscale_f32(const float * x, float * dst,
22
22
  dst[index] = *( (const float *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00) );
23
23
  }
24
24
 
25
+ static __global__ void upscale_f32_bilinear(const float * x, float * dst,
26
+ const int nb00, const int nb01, const int nb02, const int nb03,
27
+ const int ne00_src, const int ne01_src,
28
+ const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst,
29
+ const float sf0, const float sf1, const float sf2, const float sf3,
30
+ const float pixel_offset) {
31
+ const int64_t index = threadIdx.x + blockIdx.x * blockDim.x;
32
+ const int64_t dst_total_elements = ne10_dst * ne11_dst * ne12_dst * ne13_dst;
33
+
34
+ if (index >= dst_total_elements) {
35
+ return;
36
+ }
37
+
38
+ const int i10_dst = index % ne10_dst;
39
+ const int i11_dst = (index / ne10_dst) % ne11_dst;
40
+ const int i12_dst = (index / (ne10_dst * ne11_dst)) % ne12_dst;
41
+ const int i13_dst = index / (ne10_dst * ne11_dst * ne12_dst);
42
+
43
+ const int i02_src = (int)(i12_dst / sf2);
44
+ const int i03_src = (int)(i13_dst / sf3);
45
+
46
+ const float y_src_f = ((float)i11_dst + pixel_offset) / sf1 - pixel_offset;
47
+ int y0_src = (int)floorf(y_src_f);
48
+ int y1_src = y0_src + 1;
49
+
50
+ y0_src = max(0, min(y0_src, ne01_src - 1));
51
+ y1_src = max(0, min(y1_src, ne01_src - 1));
52
+
53
+ float dy = y_src_f - (float)y0_src;
54
+ dy = max(0.0f, min(dy, 1.0f));
55
+
56
+ float x_src_f = ((float)i10_dst + pixel_offset) / sf0 - pixel_offset;
57
+ int x0_src = (int)floorf(x_src_f);
58
+ int x1_src = x0_src + 1;
59
+
60
+ x0_src = max(0, min(x0_src, ne00_src - 1));
61
+ x1_src = max(0, min(x1_src, ne00_src - 1));
62
+
63
+ float dx = x_src_f - (float)x0_src;
64
+ dx = max(0.0f, min(dx, 1.0f));
65
+
66
+ const float * p_a = (const float *)((const char *)x + (int64_t)x0_src * nb00 + (int64_t)y0_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03);
67
+ const float * p_b = (const float *)((const char *)x + (int64_t)x1_src * nb00 + (int64_t)y0_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03);
68
+ const float * p_c = (const float *)((const char *)x + (int64_t)x0_src * nb00 + (int64_t)y1_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03);
69
+ const float * p_d = (const float *)((const char *)x + (int64_t)x1_src * nb00 + (int64_t)y1_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03);
70
+
71
+ const float val_a = *p_a;
72
+ const float val_b = *p_b;
73
+ const float val_c = *p_c;
74
+ const float val_d = *p_d;
75
+
76
+ float result = val_a * (1.0f - dx) * (1.0f - dy) +
77
+ val_b * dx * (1.0f - dy) +
78
+ val_c * (1.0f - dx) * dy +
79
+ val_d * dx * dy;
80
+
81
+ dst[index] = result;
82
+ }
83
+
25
84
  static void upscale_f32_cuda(const float * x, float * dst,
26
85
  const int nb00, const int nb01, const int nb02, const int nb03,
27
86
  const int ne10, const int ne11, const int ne12, const int ne13,
28
87
  const float sf0, const float sf1, const float sf2, const float sf3,
29
88
  cudaStream_t stream) {
30
- int dst_size = ne10 * ne11 * ne12 * ne13;
31
- int num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE;
89
+ const int64_t dst_size = ne10 * ne11 * ne12 * ne13;
90
+ const int64_t num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE;
32
91
 
33
92
  upscale_f32<<<num_blocks, CUDA_UPSCALE_BLOCK_SIZE,0,stream>>>(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3);
34
93
  }
35
94
 
95
+ static void upscale_f32_bilinear_cuda(const float * x, float * dst,
96
+ const int nb00, const int nb01, const int nb02, const int nb03,
97
+ const int ne00_src, const int ne01_src,
98
+ const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst,
99
+ const float sf0, const float sf1, const float sf2, const float sf3,
100
+ const float pixel_offset, cudaStream_t stream) {
101
+ const int64_t dst_size = ne10_dst * ne11_dst * ne12_dst * ne13_dst;
102
+ const int64_t num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE;
103
+
104
+ upscale_f32_bilinear<<<num_blocks, CUDA_UPSCALE_BLOCK_SIZE,0,stream>>>(x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst, ne12_dst, ne13_dst, sf0, sf1, sf2, sf3, pixel_offset);
105
+ }
106
+
36
107
  void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
37
108
  const ggml_tensor * src0 = dst->src[0];
38
109
  const float * src0_d = (const float *)src0->data;
@@ -42,10 +113,25 @@ void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
42
113
  GGML_ASSERT(src0->type == GGML_TYPE_F32);
43
114
  GGML_ASSERT( dst->type == GGML_TYPE_F32);
44
115
 
45
- const float sf0 = (float)dst->ne[0]/src0->ne[0];
46
- const float sf1 = (float)dst->ne[1]/src0->ne[1];
47
- const float sf2 = (float)dst->ne[2]/src0->ne[2];
116
+ const int mode_flags = dst->op_params[0];
117
+ const ggml_scale_mode mode = (ggml_scale_mode)(mode_flags & 0xFF);
118
+
119
+ float sf0 = (float)dst->ne[0]/src0->ne[0];
120
+ float sf1 = (float)dst->ne[1]/src0->ne[1];
121
+ float sf2 = (float)dst->ne[2]/src0->ne[2];
48
122
  const float sf3 = (float)dst->ne[3]/src0->ne[3];
49
123
 
50
- upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream);
124
+ if (mode == GGML_SCALE_MODE_NEAREST) {
125
+ upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream);
126
+ } else if (mode == GGML_SCALE_MODE_BILINEAR) {
127
+ float pixel_offset = 0.5f;
128
+ if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) {
129
+ sf0 = (float)(dst->ne[0] - 1) / (src0->ne[0] - 1);
130
+ sf1 = (float)(dst->ne[1] - 1) / (src0->ne[1] - 1);
131
+ pixel_offset = 0.0f;
132
+ }
133
+ upscale_f32_bilinear_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3],
134
+ src0->ne[0], src0->ne[1], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3],
135
+ sf0, sf1, sf2, sf3, pixel_offset, stream);
136
+ }
51
137
  }
@@ -10,9 +10,6 @@
10
10
  #include "rocblas/rocblas.h"
11
11
  #endif // __HIP_PLATFORM_AMD__
12
12
 
13
- #define CUBLAS_COMPUTE_16F HIPBLAS_R_16F
14
- #define CUBLAS_COMPUTE_32F HIPBLAS_R_32F
15
- #define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F
16
13
  #define CUBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT
17
14
  #define CUBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT
18
15
  #define CUBLAS_OP_N HIPBLAS_OP_N
@@ -30,7 +27,6 @@
30
27
  #define CU_CHECK(fn) {hipError_t err = fn; if(err != hipSuccess) { GGML_ABORT("HipVMM Failure: %s\n", hipGetErrorString(err)); }}
31
28
  #define __shfl_sync(mask, var, laneMask, width) __shfl(var, laneMask, width)
32
29
  #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width)
33
- #define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6
34
30
  #define cublasCreate hipblasCreate
35
31
  #define cublasDestroy hipblasDestroy
36
32
  #define cublasGemmEx hipblasGemmEx
@@ -42,7 +38,6 @@
42
38
  #define cublasSgemm hipblasSgemm
43
39
  #define cublasStatus_t hipblasStatus_t
44
40
  #define cublasOperation_t hipblasOperation_t
45
- #define cudaDataType_t hipblasDatatype_t //deprecated, new hipblasDatatype not in 5.6
46
41
  #define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer
47
42
  #define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess
48
43
  #define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess
@@ -144,6 +139,20 @@
144
139
  #define CUBLAS_STATUS_INTERNAL_ERROR HIPBLAS_STATUS_INTERNAL_ERROR
145
140
  #define CUBLAS_STATUS_NOT_SUPPORTED HIPBLAS_STATUS_NOT_SUPPORTED
146
141
 
142
+ #if defined(__HIP_PLATFORM_AMD__) && HIP_VERSION >= 70000000
143
+ #define CUBLAS_COMPUTE_16F HIPBLAS_COMPUTE_16F
144
+ #define CUBLAS_COMPUTE_32F HIPBLAS_COMPUTE_32F
145
+ #define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_COMPUTE_32F_FAST_16F
146
+ #define cublasComputeType_t hipblasComputeType_t
147
+ #define cudaDataType_t hipDataType
148
+ #else
149
+ #define CUBLAS_COMPUTE_16F HIPBLAS_R_16F
150
+ #define CUBLAS_COMPUTE_32F HIPBLAS_R_32F
151
+ #define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F
152
+ #define cublasComputeType_t hipblasDatatype_t
153
+ #define cudaDataType_t hipblasDatatype_t
154
+ #endif
155
+
147
156
  #define __CUDA_ARCH__ 1300
148
157
 
149
158
  #if defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__)
@@ -151,7 +160,19 @@
151
160
  #endif
152
161
 
153
162
  #if defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx942__)
154
- #define CDNA
163
+ #define CDNA // For the entire family
164
+ #endif
165
+
166
+ #if defined(__gfx942__)
167
+ #define CDNA3
168
+ #endif
169
+
170
+ #if defined(__gfx90a__)
171
+ #define CDNA2
172
+ #endif
173
+
174
+ #if defined(__gfx908__)
175
+ #define CDNA1
155
176
  #endif
156
177
 
157
178
  #if defined(__GFX12__)
@@ -13,7 +13,7 @@
13
13
  #define CUBLAS_OP_N MUBLAS_OP_N
14
14
  #define CUBLAS_OP_T MUBLAS_OP_T
15
15
  #define CUBLAS_STATUS_SUCCESS MUBLAS_STATUS_SUCCESS
16
- #define CUBLAS_TF32_TENSOR_OP_MATH MUBLAS_MATH_MODE_DEFAULT
16
+ #define CUBLAS_TF32_TENSOR_OP_MATH MUBLAS_TENSOR_OP_MATH
17
17
  #define CUDA_R_16F MUSA_R_16F
18
18
  #define CUDA_R_16BF MUSA_R_16BF
19
19
  #define CUDA_R_32F MUSA_R_32F
@@ -29,7 +29,7 @@
29
29
  #define cublasSgemm mublasSgemm
30
30
  #define cublasStatus_t mublasStatus_t
31
31
  #define cublasOperation_t mublasOperation_t
32
- #define cublasGetStatusString mublasStatus_to_string
32
+ #define cublasGetStatusString mublasGetStatusString
33
33
  #define cudaDataType_t musaDataType_t
34
34
  #define cudaDeviceCanAccessPeer musaDeviceCanAccessPeer
35
35
  #define cudaDeviceDisablePeerAccess musaDeviceDisablePeerAccess
@@ -73,6 +73,22 @@ static inline int ggml_up(int n, int m) {
73
73
  return (n + m - 1) & ~(m - 1);
74
74
  }
75
75
 
76
+ // TODO: move to ggml.h?
77
+ static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
78
+ if (a->type != b->type) {
79
+ return false;
80
+ }
81
+ for (int i = 0; i < GGML_MAX_DIMS; i++) {
82
+ if (a->ne[i] != b->ne[i]) {
83
+ return false;
84
+ }
85
+ if (a->nb[i] != b->nb[i]) {
86
+ return false;
87
+ }
88
+ }
89
+ return true;
90
+ }
91
+
76
92
  //
77
93
  // logging
78
94
  //
@@ -301,6 +317,7 @@ struct ggml_cgraph {
301
317
  struct ggml_tensor ** grads; // the outputs of these tensors are the gradients of the nodes
302
318
  struct ggml_tensor ** grad_accs; // accumulators for node gradients
303
319
  struct ggml_tensor ** leafs; // tensors with constant data
320
+ int32_t * use_counts;// number of uses of each tensor, indexed by hash table slot
304
321
 
305
322
  struct ggml_hash_set visited_hash_set;
306
323
 
@@ -467,13 +484,76 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
467
484
  #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
468
485
  #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
469
486
 
487
+ // return true if the node's results are only used by N other nodes
488
+ // and can be fused into their calculations.
489
+ static inline bool ggml_node_has_n_uses(const struct ggml_cgraph * cgraph, int node_idx, int32_t n_uses) {
490
+ const struct ggml_tensor * node = cgraph->nodes[node_idx];
491
+
492
+ // check the use count against how many we're replacing
493
+ size_t hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node);
494
+ if (!ggml_bitset_get(cgraph->visited_hash_set.used, hash_pos) || cgraph->use_counts[hash_pos] != n_uses) {
495
+ return false;
496
+ }
497
+
498
+ // if node is a view, some other node might be using the intermediate result
499
+ // via the view source.
500
+ if (node->view_src) {
501
+ return false;
502
+ }
503
+
504
+ // If the user requested output for the node, can't fuse
505
+ if (node->flags & GGML_TENSOR_FLAG_OUTPUT) {
506
+ return false;
507
+ }
508
+
509
+ return true;
510
+ }
511
+
512
+ // Returns true if nodes [i, i+ops.size()) are the sequence of ggml_ops in ops[]
513
+ // and are fusable. Nodes are considered fusable according to this function if:
514
+ // - all nodes except the last have only one use and are not views/outputs (see ggml_node_has_N_uses).
515
+ // - all nodes except the last are a src of the following node.
516
+ // - all nodes are the same shape.
517
+ // TODO: Consider allowing GGML_OP_NONE nodes in between
518
+ static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, const enum ggml_op * ops, int num_ops) {
519
+ if (node_idx + num_ops > cgraph->n_nodes) {
520
+ return false;
521
+ }
522
+
523
+ for (int i = 0; i < num_ops; ++i) {
524
+ struct ggml_tensor * node = cgraph->nodes[node_idx + i];
525
+ if (node->op != ops[i]) {
526
+ return false;
527
+ }
528
+ if (i < num_ops - 1 && !ggml_node_has_n_uses(cgraph, node_idx + i, 1)) {
529
+ return false;
530
+ }
531
+ if (i > 0) {
532
+ struct ggml_tensor * prev = cgraph->nodes[node_idx + i - 1];
533
+ if (node->src[0] != prev && node->src[1] != prev) {
534
+ return false;
535
+ }
536
+ if (!ggml_are_same_shape(node, prev)) {
537
+ return false;
538
+ }
539
+ }
540
+ }
541
+ return true;
542
+ }
543
+
470
544
  #ifdef __cplusplus
471
545
  }
472
546
  #endif
473
547
 
474
548
  #ifdef __cplusplus
549
+ #include <initializer_list>
475
550
  #include <vector>
476
551
 
552
+ // nicer C++ syntax for ggml_can_fuse
553
+ inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list<enum ggml_op> ops) {
554
+ return ggml_can_fuse(cgraph, node_idx, ops.begin(), (int)ops.size());
555
+ }
556
+
477
557
  // expose GGUF internals for test code
478
558
  GGML_API size_t gguf_type_size(enum gguf_type type);
479
559
  GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
@@ -71,7 +71,9 @@ else()
71
71
  # note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1
72
72
  # note: unfortunately, we have to call it default.metallib instead of ggml.metallib
73
73
  # ref: https://github.com/ggerganov/whisper.cpp/issues/1720
74
- set(XC_FLAGS -fno-fast-math -fno-inline -g)
74
+ # note: adding -g causes segmentation fault during compile
75
+ #set(XC_FLAGS -fno-fast-math -fno-inline -g)
76
+ set(XC_FLAGS -fno-fast-math -fno-inline)
75
77
  else()
76
78
  set(XC_FLAGS -O3)
77
79
  endif()
@@ -90,7 +92,7 @@ else()
90
92
  add_custom_command(
91
93
  OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
92
94
  COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o - |
93
- xcrun -sdk macosx metallib - -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
95
+ xcrun -sdk macosx metallib - -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
94
96
  COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h
95
97
  COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal
96
98
  DEPENDS ggml-metal.metal ${METALLIB_COMMON}
@@ -126,6 +126,7 @@ typedef struct {
126
126
  uint64_t nb2;
127
127
  uint64_t nb3;
128
128
  uint64_t offs;
129
+ uint64_t o1[8];
129
130
  } ggml_metal_kargs_bin;
130
131
 
131
132
  typedef struct {
@@ -229,14 +230,18 @@ typedef struct {
229
230
  uint64_t nb21;
230
231
  uint64_t nb22;
231
232
  uint64_t nb23;
233
+ int32_t ne32;
234
+ int32_t ne33;
232
235
  uint64_t nb31;
236
+ uint64_t nb32;
237
+ uint64_t nb33;
233
238
  int32_t ne1;
234
239
  int32_t ne2;
235
240
  float scale;
236
241
  float max_bias;
237
242
  float m0;
238
243
  float m1;
239
- uint16_t n_head_log2;
244
+ int32_t n_head_log2;
240
245
  float logit_softcap;
241
246
  } ggml_metal_kargs_flash_attn_ext;
242
247
 
@@ -373,8 +378,16 @@ typedef struct {
373
378
  typedef struct {
374
379
  int32_t ne00;
375
380
  int32_t ne00_4;
376
- uint64_t nb01;
381
+ uint64_t nb1;
382
+ uint64_t nb2;
383
+ uint64_t nb3;
377
384
  float eps;
385
+ int32_t nef1[3];
386
+ int32_t nef2[3];
387
+ int32_t nef3[3];
388
+ uint64_t nbf1[3];
389
+ uint64_t nbf2[3];
390
+ uint64_t nbf3[3];
378
391
  } ggml_metal_kargs_rms_norm;
379
392
 
380
393
  typedef struct {
@@ -422,6 +435,17 @@ typedef struct {
422
435
  int32_t KHW; // KH * KW, pre-computed on CPU to save GPU resources
423
436
  } ggml_metal_kargs_im2col;
424
437
 
438
+ typedef struct{
439
+ int32_t ne00;
440
+ uint64_t nb01;
441
+ int32_t ne10;
442
+ uint64_t nb11;
443
+ int32_t ne0;
444
+ uint64_t nb1;
445
+ int32_t i00;
446
+ int32_t i10;
447
+ } ggml_metal_kargs_glu;
448
+
425
449
  typedef struct {
426
450
  int64_t ne00;
427
451
  int64_t ne01;
@@ -450,14 +474,26 @@ typedef struct {
450
474
  } ggml_metal_kargs_sum_rows;
451
475
 
452
476
  typedef struct {
453
- int64_t ne00;
454
- int64_t ne01;
455
- int64_t ne02;
477
+ int32_t ne00;
478
+ int32_t ne01;
479
+ int32_t ne02;
480
+ uint64_t nb01;
481
+ uint64_t nb02;
482
+ uint64_t nb03;
483
+ int32_t ne11;
484
+ int32_t ne12;
485
+ int32_t ne13;
486
+ uint64_t nb11;
487
+ uint64_t nb12;
488
+ uint64_t nb13;
489
+ uint64_t nb1;
490
+ uint64_t nb2;
491
+ uint64_t nb3;
456
492
  float scale;
457
493
  float max_bias;
458
494
  float m0;
459
495
  float m1;
460
- uint32_t n_head_log2;
496
+ int32_t n_head_log2;
461
497
  } ggml_metal_kargs_soft_max;
462
498
 
463
499
  typedef struct {
@@ -488,26 +524,26 @@ typedef struct {
488
524
  typedef struct {
489
525
  int64_t d_state;
490
526
  int64_t d_inner;
527
+ int64_t n_head;
528
+ int64_t n_group;
491
529
  int64_t n_seq_tokens;
492
530
  int64_t n_seqs;
493
- uint64_t nb00;
531
+ int64_t s_off;
494
532
  uint64_t nb01;
495
533
  uint64_t nb02;
496
- uint64_t nb10;
534
+ uint64_t nb03;
497
535
  uint64_t nb11;
498
536
  uint64_t nb12;
499
537
  uint64_t nb13;
500
- uint64_t nb20;
501
538
  uint64_t nb21;
502
539
  uint64_t nb22;
503
- uint64_t nb30;
504
540
  uint64_t nb31;
505
- uint64_t nb40;
506
541
  uint64_t nb41;
507
542
  uint64_t nb42;
508
- uint64_t nb50;
543
+ uint64_t nb43;
509
544
  uint64_t nb51;
510
545
  uint64_t nb52;
546
+ uint64_t nb53;
511
547
  } ggml_metal_kargs_ssm_scan;
512
548
 
513
549
  typedef struct {