@fugood/llama.node 0.3.16 → 0.3.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (202)
  1. package/CMakeLists.txt +3 -0
  2. package/bin/darwin/arm64/llama-node.node +0 -0
  3. package/bin/darwin/x64/llama-node.node +0 -0
  4. package/bin/linux/arm64/llama-node.node +0 -0
  5. package/bin/linux/x64/llama-node.node +0 -0
  6. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  7. package/bin/linux-cuda/x64/llama-node.node +0 -0
  8. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  9. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  10. package/bin/win32/arm64/llama-node.node +0 -0
  11. package/bin/win32/arm64/node.lib +0 -0
  12. package/bin/win32/x64/llama-node.node +0 -0
  13. package/bin/win32/x64/node.lib +0 -0
  14. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  15. package/bin/win32-vulkan/arm64/node.lib +0 -0
  16. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  17. package/bin/win32-vulkan/x64/node.lib +0 -0
  18. package/lib/binding.ts +5 -0
  19. package/package.json +1 -1
  20. package/src/LlamaCompletionWorker.cpp +8 -0
  21. package/src/LlamaCompletionWorker.h +1 -0
  22. package/src/LlamaContext.cpp +3 -2
  23. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +124 -0
  24. package/src/llama.cpp/.github/workflows/build.yml +70 -27
  25. package/src/llama.cpp/.github/workflows/docker.yml +6 -6
  26. package/src/llama.cpp/.github/workflows/server.yml +7 -11
  27. package/src/llama.cpp/CMakeLists.txt +23 -1
  28. package/src/llama.cpp/common/CMakeLists.txt +6 -3
  29. package/src/llama.cpp/common/arg.cpp +809 -105
  30. package/src/llama.cpp/common/arg.h +9 -0
  31. package/src/llama.cpp/common/chat.cpp +1 -1
  32. package/src/llama.cpp/common/common.cpp +31 -521
  33. package/src/llama.cpp/common/common.h +17 -36
  34. package/src/llama.cpp/common/json-schema-to-grammar.cpp +3 -0
  35. package/src/llama.cpp/common/llguidance.cpp +30 -47
  36. package/src/llama.cpp/common/minja/chat-template.hpp +15 -7
  37. package/src/llama.cpp/common/minja/minja.hpp +119 -93
  38. package/src/llama.cpp/common/sampling.cpp +3 -0
  39. package/src/llama.cpp/docs/build.md +122 -7
  40. package/src/llama.cpp/examples/CMakeLists.txt +0 -9
  41. package/src/llama.cpp/examples/batched/batched.cpp +1 -1
  42. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +1 -1
  43. package/src/llama.cpp/examples/embedding/embedding.cpp +7 -1
  44. package/src/llama.cpp/examples/export-lora/export-lora.cpp +1 -1
  45. package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +15 -16
  46. package/src/llama.cpp/examples/gritlm/gritlm.cpp +1 -1
  47. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +210 -8
  48. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +1 -0
  49. package/src/llama.cpp/examples/llava/CMakeLists.txt +39 -24
  50. package/src/llama.cpp/examples/llava/clip-impl.h +345 -0
  51. package/src/llama.cpp/examples/llava/clip.cpp +2152 -1803
  52. package/src/llama.cpp/examples/llava/clip.h +39 -22
  53. package/src/llama.cpp/examples/llava/deprecation-warning.cpp +22 -0
  54. package/src/llama.cpp/examples/llava/llava.cpp +64 -52
  55. package/src/llama.cpp/examples/llava/mtmd-cli.cpp +344 -0
  56. package/src/llama.cpp/examples/llava/mtmd.cpp +708 -0
  57. package/src/llama.cpp/examples/llava/mtmd.h +168 -0
  58. package/src/llama.cpp/examples/llava/{qwen2vl-cli.cpp → qwen2vl-test.cpp} +83 -31
  59. package/src/llama.cpp/examples/main/main.cpp +16 -5
  60. package/src/llama.cpp/examples/parallel/parallel.cpp +3 -1
  61. package/src/llama.cpp/examples/passkey/passkey.cpp +1 -1
  62. package/src/llama.cpp/examples/perplexity/perplexity.cpp +17 -3
  63. package/src/llama.cpp/examples/quantize/quantize.cpp +115 -2
  64. package/src/llama.cpp/examples/rpc/CMakeLists.txt +4 -2
  65. package/src/llama.cpp/examples/rpc/rpc-server.cpp +163 -8
  66. package/src/llama.cpp/examples/run/CMakeLists.txt +12 -1
  67. package/src/llama.cpp/examples/run/run.cpp +14 -28
  68. package/src/llama.cpp/examples/server/httplib.h +313 -247
  69. package/src/llama.cpp/examples/server/server.cpp +238 -139
  70. package/src/llama.cpp/examples/server/utils.hpp +51 -2
  71. package/src/llama.cpp/examples/speculative/speculative.cpp +1 -1
  72. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +1 -1
  73. package/src/llama.cpp/examples/sycl/build.sh +2 -2
  74. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +2 -2
  75. package/src/llama.cpp/examples/tts/tts.cpp +6 -9
  76. package/src/llama.cpp/ggml/CMakeLists.txt +8 -2
  77. package/src/llama.cpp/ggml/cmake/GitVars.cmake +22 -0
  78. package/src/llama.cpp/ggml/include/ggml-cpu.h +5 -0
  79. package/src/llama.cpp/ggml/include/ggml-rpc.h +6 -1
  80. package/src/llama.cpp/ggml/include/ggml.h +66 -99
  81. package/src/llama.cpp/ggml/src/CMakeLists.txt +10 -7
  82. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -2
  83. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +8 -4
  84. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +5 -5
  85. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +692 -1534
  86. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +613 -122
  87. package/src/llama.cpp/ggml/src/ggml-cann/common.h +135 -1
  88. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +507 -137
  89. package/src/llama.cpp/ggml/src/ggml-common.h +12 -6
  90. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +48 -22
  91. package/src/llama.cpp/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
  92. package/src/llama.cpp/ggml/src/ggml-cpu/binary-ops.h +16 -0
  93. package/src/llama.cpp/ggml/src/ggml-cpu/common.h +72 -0
  94. package/src/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +1 -1
  95. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +896 -192
  96. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +2 -21
  97. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +754 -404
  98. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +1003 -13519
  99. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +2 -0
  100. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +2 -7
  101. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +0 -1
  102. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +3 -4
  103. package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +533 -88
  104. package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +8809 -0
  105. package/src/llama.cpp/ggml/src/ggml-cpu/ops.h +110 -0
  106. package/src/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +892 -0
  107. package/src/llama.cpp/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
  108. package/src/llama.cpp/ggml/src/ggml-cpu/unary-ops.h +28 -0
  109. package/src/llama.cpp/ggml/src/ggml-cpu/vec.cpp +258 -0
  110. package/src/llama.cpp/ggml/src/ggml-cpu/vec.h +802 -0
  111. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +7 -0
  112. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +1 -0
  113. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -4
  114. package/src/llama.cpp/ggml/src/ggml-impl.h +52 -18
  115. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +70 -3
  116. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +67 -119
  117. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +1023 -260
  118. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +293 -40
  119. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +96 -22
  120. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +1 -0
  121. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +350 -0
  122. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +39 -0
  123. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -35
  124. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +2 -292
  125. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +79 -90
  126. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +967 -438
  127. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +22 -23
  128. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +24 -20
  129. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +1 -4
  130. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +204 -280
  131. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +84 -74
  132. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +1 -3
  133. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +37 -49
  134. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +7 -22
  135. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +4 -14
  136. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +204 -118
  137. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +1 -3
  138. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +23 -0
  139. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +646 -114
  140. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +12 -0
  141. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +17 -8
  142. package/src/llama.cpp/ggml/src/ggml.c +141 -245
  143. package/src/llama.cpp/ggml/src/gguf.cpp +1 -0
  144. package/src/llama.cpp/include/llama.h +30 -11
  145. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +112 -0
  146. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +46 -0
  147. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +112 -0
  148. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +46 -0
  149. package/src/llama.cpp/requirements/requirements-all.txt +2 -0
  150. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +3 -0
  151. package/src/llama.cpp/src/CMakeLists.txt +3 -2
  152. package/src/llama.cpp/src/llama-adapter.cpp +37 -1
  153. package/src/llama.cpp/src/llama-arch.cpp +160 -17
  154. package/src/llama.cpp/src/llama-arch.h +16 -0
  155. package/src/llama.cpp/src/llama-chat.cpp +82 -17
  156. package/src/llama.cpp/src/llama-chat.h +6 -2
  157. package/src/llama.cpp/src/llama-context.cpp +108 -92
  158. package/src/llama.cpp/src/llama-context.h +1 -2
  159. package/src/llama.cpp/src/llama-graph.cpp +189 -119
  160. package/src/llama.cpp/src/llama-graph.h +26 -6
  161. package/src/llama.cpp/src/llama-hparams.h +13 -0
  162. package/src/llama.cpp/src/llama-kv-cache.cpp +70 -123
  163. package/src/llama.cpp/src/llama-kv-cache.h +41 -115
  164. package/src/llama.cpp/src/llama-memory.h +1 -1
  165. package/src/llama.cpp/src/llama-mmap.cpp +1 -1
  166. package/src/llama.cpp/src/llama-model-loader.cpp +10 -5
  167. package/src/llama.cpp/src/llama-model-loader.h +5 -3
  168. package/src/llama.cpp/src/llama-model.cpp +1760 -534
  169. package/src/llama.cpp/src/llama-model.h +13 -1
  170. package/src/llama.cpp/src/llama-quant.cpp +29 -8
  171. package/src/llama.cpp/src/llama-sampling.cpp +7 -1
  172. package/src/llama.cpp/src/llama-vocab.cpp +44 -6
  173. package/src/llama.cpp/src/llama.cpp +1 -1
  174. package/src/llama.cpp/tests/CMakeLists.txt +43 -30
  175. package/src/llama.cpp/tests/test-arg-parser.cpp +51 -4
  176. package/src/llama.cpp/tests/test-backend-ops.cpp +82 -43
  177. package/src/llama.cpp/tests/test-chat-template.cpp +34 -13
  178. package/src/llama.cpp/tests/test-chat.cpp +12 -2
  179. package/src/llama.cpp/{examples/gbnf-validator/gbnf-validator.cpp → tests/test-gbnf-validator.cpp} +2 -2
  180. package/src/llama.cpp/tests/test-grammar-integration.cpp +3 -2
  181. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +63 -2
  182. package/src/llama.cpp/tests/test-grammar-parser.cpp +3 -1
  183. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +17 -1
  184. package/src/llama.cpp/tests/test-llama-grammar.cpp +2 -1
  185. package/src/llama.cpp/{examples/quantize-stats/quantize-stats.cpp → tests/test-quantize-stats.cpp} +3 -1
  186. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +2 -1
  187. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +2 -1
  188. package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +0 -5
  189. package/src/llama.cpp/examples/llava/gemma3-cli.cpp +0 -341
  190. package/src/llama.cpp/examples/llava/llava-cli.cpp +0 -332
  191. package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +0 -354
  192. package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +0 -6
  193. package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +0 -30
  194. package/src/llama.cpp/ggml/src/ggml-cann/kernels/ascendc_kernels.h +0 -19
  195. package/src/llama.cpp/ggml/src/ggml-cann/kernels/dup.cpp +0 -234
  196. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f16.cpp +0 -197
  197. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f32.cpp +0 -190
  198. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -204
  199. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -191
  200. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -218
  201. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -216
  202. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -295
package/src/llama.cpp/ggml/src/ggml-cpu/unary-ops.cpp (new file)
@@ -0,0 +1,186 @@
+ #include "unary-ops.h"
+
+ static inline float op_abs(float x) {
+     return fabsf(x);
+ }
+
+ static inline float op_sgn(float x) {
+     return (x > 0.f) ? 1.f : ((x < 0.f) ? -1.f : 0.f);
+ }
+
+ static inline float op_neg(float x) {
+     return -x;
+ }
+
+ static inline float op_step(float x) {
+     return (x > 0.f) ? 1.f : 0.f;
+ }
+
+ static inline float op_tanh(float x) {
+     return tanhf(x);
+ }
+
+ static inline float op_elu(float x) {
+     return (x > 0.f) ? x : expm1f(x);
+ }
+
+ static inline float op_relu(float x) {
+     return (x > 0.f) ? x : 0.f;
+ }
+
+ static inline float op_sigmoid(float x) {
+     return 1.f / (1.f + expf(-x));
+ }
+
+ static inline float op_hardsigmoid(float x) {
+     return fminf(1.0f, fmaxf(0.0f, (x + 3.0f) / 6.0f));
+ }
+
+ static inline float op_exp(float x) {
+     return expf(x);
+ }
+
+ static inline float op_hardswish(float x) {
+     return x * fminf(1.0f, fmaxf(0.0f, (x + 3.0f) / 6.0f));
+ }
+
+ static inline float op_sqr(float x) {
+     return x * x;
+ }
+
+ static inline float op_sqrt(float x) {
+     return sqrtf(x);
+ }
+
+ static inline float op_sin(float x) {
+     return sinf(x);
+ }
+
+ static inline float op_cos(float x) {
+     return cosf(x);
+ }
+
+ static inline float op_log(float x) {
+     return logf(x);
+ }
+
+ template <float (*op)(float), typename src0_t, typename dst_t>
+ static inline void vec_unary_op(int64_t n, dst_t * y, const src0_t * x) {
+     constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
+     constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;
+
+     for (int i = 0; i < n; i++) {
+         y[i] = f32_to_dst(op(src0_to_f32(x[i])));
+     }
+ }
+
+ template <float (*op)(float), typename src0_t, typename dst_t>
+ static void apply_unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
+     const ggml_tensor * src0 = dst->src[0];
+
+     GGML_ASSERT(ggml_is_contiguous_1(src0) && ggml_is_contiguous_1(dst) && ggml_are_same_shape(src0, dst));
+
+     GGML_TENSOR_UNARY_OP_LOCALS
+
+     GGML_ASSERT( nb0 == sizeof(dst_t));
+     GGML_ASSERT(nb00 == sizeof(src0_t));
+
+     const auto [ir0, ir1] = get_thread_range(params, src0);
+
+     for (int64_t ir = ir0; ir < ir1; ++ir) {
+         const int64_t i03 = ir/(ne02*ne01);
+         const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
+         const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
+
+         dst_t *        dst_ptr  = (dst_t *)        ((char *)       dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
+         const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
+
+         vec_unary_op<op>(ne0, dst_ptr, src0_ptr);
+     }
+ }
+
+ // TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates
+ template <float (*op)(float)>
+ static void unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
+     const ggml_tensor * src0 = dst->src[0];
+
+     /* */ if (src0->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F32)  { // all f32
+         apply_unary_op<op, float, float>(params, dst);
+     } else if (src0->type == GGML_TYPE_F16  && dst->type == GGML_TYPE_F16)  { // all f16
+         apply_unary_op<op, ggml_fp16_t, ggml_fp16_t>(params, dst);
+     } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16
+         apply_unary_op<op, ggml_bf16_t, ggml_bf16_t>(params, dst);
+     } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32)  {
+         apply_unary_op<op, ggml_bf16_t, float>(params, dst);
+     } else if (src0->type == GGML_TYPE_F16  && dst->type == GGML_TYPE_F32)  {
+         apply_unary_op<op, ggml_fp16_t, float>(params, dst);
+     } else {
+         fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__,
+             ggml_type_name(dst->type), ggml_type_name(src0->type));
+         GGML_ABORT("fatal error");
+     }
+ }
+
+ void ggml_compute_forward_abs(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_abs>(params, dst);
+ }
+
+ void ggml_compute_forward_sgn(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_sgn>(params, dst);
+ }
+
+ void ggml_compute_forward_neg(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_neg>(params, dst);
+ }
+
+ void ggml_compute_forward_step(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_step>(params, dst);
+ }
+
+ void ggml_compute_forward_tanh(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_tanh>(params, dst);
+ }
+
+ void ggml_compute_forward_elu(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_elu>(params, dst);
+ }
+
+ void ggml_compute_forward_relu(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_relu>(params, dst);
+ }
+
+ void ggml_compute_forward_sigmoid(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_sigmoid>(params, dst);
+ }
+
+ void ggml_compute_forward_hardsigmoid(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_hardsigmoid>(params, dst);
+ }
+
+ void ggml_compute_forward_exp(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_exp>(params, dst);
+ }
+
+ void ggml_compute_forward_hardswish(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_hardswish>(params, dst);
+ }
+
+ void ggml_compute_forward_sqr(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_sqr>(params, dst);
+ }
+
+ void ggml_compute_forward_sqrt(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_sqrt>(params, dst);
+ }
+
+ void ggml_compute_forward_sin(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_sin>(params, dst);
+ }
+
+ void ggml_compute_forward_cos(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_cos>(params, dst);
+ }
+
+ void ggml_compute_forward_log(const ggml_compute_params * params, ggml_tensor * dst) {
+     unary_op<op_log>(params, dst);
+ }
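
The pattern above is the core of the refactor: one templated driver maps a scalar float op over a tensor, converting element types through f32, so each of the sixteen forward functions is a one-line wrapper. A minimal, self-contained sketch of that pattern, assuming a hypothetical f32-only conv_table in place of ggml's type_conversion_table (names here are illustrative, not ggml's):

#include <cstdint>
#include <cstdio>
#include <cmath>

// Hypothetical stand-in for ggml's type_conversion_table<T>; f32-only here.
// In ggml, specializations for ggml_fp16_t / ggml_bf16_t widen and narrow.
template <typename T> struct conv_table;
template <> struct conv_table<float> {
    static float to_f32(float x)   { return x; }
    static float from_f32(float x) { return x; }
};

static inline float op_abs(float x) { return std::fabs(x); }

// Same shape as vec_unary_op above: widen to f32, apply the scalar op, narrow back.
template <float (*op)(float), typename src_t, typename dst_t>
static void map_unary(int64_t n, dst_t * y, const src_t * x) {
    for (int64_t i = 0; i < n; i++) {
        y[i] = conv_table<dst_t>::from_f32(op(conv_table<src_t>::to_f32(x[i])));
    }
}

int main() {
    const float x[4] = { -2.0f, -0.5f, 0.0f, 3.0f };
    float y[4];
    map_unary<op_abs, float, float>(4, y, x);
    for (float v : y) std::printf("%g ", v);  // prints: 2 0.5 0 3
    std::printf("\n");
    return 0;
}

Because the op is a compile-time template argument rather than a function pointer passed at runtime, the compiler can inline it into the loop, so the abstraction costs nothing over the hand-written per-op loops it replaces.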
package/src/llama.cpp/ggml/src/ggml-cpu/unary-ops.h (new file)
@@ -0,0 +1,28 @@
+ #pragma once
+
+ #include "common.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ void ggml_compute_forward_abs(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_sgn(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_neg(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_step(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_tanh(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_elu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_sigmoid(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_hardsigmoid(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_exp(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_hardswish(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_sqr(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_sqrt(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_sin(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_cos(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+ void ggml_compute_forward_log(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+
+ #ifdef __cplusplus
+ }
+ #endif
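
The extern "C" guards matter here: the implementations in unary-ops.cpp are C++ templates, but the entry points are declared with C linkage, presumably so that C translation units in the same backend (ggml-cpu.c in this diff is still a .c file) can call them. A minimal sketch of the same split, with hypothetical names:

// mylib.h -- one header usable from both C and C++ translation units
#ifdef __cplusplus
extern "C" {
#endif
void my_forward_relu(const float * x, float * y, int n);
#ifdef __cplusplus
}
#endif

// mylib.cpp -- C++ implementation exported through the C ABI
extern "C" void my_forward_relu(const float * x, float * y, int n) {
    for (int i = 0; i < n; i++) {
        y[i] = x[i] > 0.0f ? x[i] : 0.0f;  // op_relu applied elementwise
    }
}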
package/src/llama.cpp/ggml/src/ggml-cpu/vec.cpp (new file)
@@ -0,0 +1,258 @@
+ #include "vec.h"
+
+ #include <cassert>
+
+ #if defined(_MSC_VER)
+ // disable "possible loss of data" to avoid hundreds of casts
+ // we should just be careful :)
+ #pragma warning(disable: 4244 4267)
+ #endif
+
+ // precomputed gelu table for f16 (128 KB)
+ ggml_fp16_t ggml_table_gelu_f16[1 << 16];
+
+ // precomputed quick gelu table for f16 (128 KB)
+ ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];
+
+ void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * GGML_RESTRICT x, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc) {
+     assert(nrc == 1);
+     GGML_UNUSED(nrc);
+     GGML_UNUSED(bx);
+     GGML_UNUSED(by);
+     GGML_UNUSED(bs);
+
+ #if defined(GGML_SIMD)
+     float sumf = 0.0f;
+     const int np = (n & ~(GGML_F32_STEP - 1));
+
+     GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
+
+     GGML_F32_VEC ax[GGML_F32_ARR];
+     GGML_F32_VEC ay[GGML_F32_ARR];
+
+     for (int i = 0; i < np; i += GGML_F32_STEP) {
+         for (int j = 0; j < GGML_F32_ARR; j++) {
+             ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
+             ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+
+             sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
+         }
+     }
+
+     // reduce sum0..sum3 to sum0
+     GGML_F32_VEC_REDUCE(sumf, sum);
+
+     // leftovers
+     for (int i = np; i < n; ++i) {
+         sumf += x[i]*y[i];
+     }
+ #else
+     // scalar
+     ggml_float sumf = 0.0;
+     for (int i = 0; i < n; ++i) {
+         sumf += (ggml_float)(x[i]*y[i]);
+     }
+ #endif
+
+     *s = sumf;
+ }
+
+ void ggml_vec_dot_bf16(int n, float * GGML_RESTRICT s, size_t bs, ggml_bf16_t * GGML_RESTRICT x, size_t bx, ggml_bf16_t * GGML_RESTRICT y, size_t by, int nrc) {
+     assert(nrc == 1);
+     GGML_UNUSED(nrc);
+     GGML_UNUSED(bx);
+     GGML_UNUSED(by);
+     GGML_UNUSED(bs);
+     int i = 0;
+     ggml_float sumf = 0;
+
+ #if defined(__AVX512BF16__)
+     __m512 c1 = _mm512_setzero_ps();
+     __m512 c2 = _mm512_setzero_ps();
+     for (; i + 64 <= n; i += 64) {
+         c1 = _mm512_dpbf16_ps(c1, m512bh(_mm512_loadu_si512((x + i))),
+                               m512bh(_mm512_loadu_si512((y + i))));
+         c2 = _mm512_dpbf16_ps(c2, m512bh(_mm512_loadu_si512((x + i + 32))),
+                               m512bh(_mm512_loadu_si512((y + i + 32))));
+     }
+     sumf += (ggml_float)_mm512_reduce_add_ps(c1);
+     sumf += (ggml_float)_mm512_reduce_add_ps(c2);
+
+ #elif defined(__AVX512F__)
+ #define LOAD(p) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)(p))), 16))
+     __m512 c1 = _mm512_setzero_ps();
+     __m512 c2 = _mm512_setzero_ps();
+     for (; i + 32 <= n; i += 32) {
+         c1 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
+         c2 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c2);
+     }
+     sumf += (ggml_float)_mm512_reduce_add_ps(c1);
+     sumf += (ggml_float)_mm512_reduce_add_ps(c2);
+
+ #undef LOAD
+ #elif defined(__AVX2__) || defined(__AVX__)
+ #if defined(__AVX2__)
+ #define LOAD(p) _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16))
+ #else
+ #define LOAD(p) _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(_mm_slli_epi32(_mm_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16)), (_mm_slli_epi32(_mm_cvtepu16_epi32(_mm_bsrli_si128(_mm_loadu_si128((const __m128i *)(p)), 8)), 16)), 1))
+ #endif
+     __m256 c1 = _mm256_setzero_ps();
+     __m256 c2 = _mm256_setzero_ps();
+     __m256 c3 = _mm256_setzero_ps();
+     __m256 c4 = _mm256_setzero_ps();
+     for (; i + 32 <= n; i += 32) {
+         c1 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
+         c2 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 8), LOAD(y + i + 8)), c2);
+         c3 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c3);
+         c4 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 24), LOAD(y + i + 24)), c4);
+     }
+     __m128 g;
+     c1 = _mm256_add_ps(_mm256_add_ps(c1, c3),
+                        _mm256_add_ps(c2, c4));
+     g = _mm_add_ps(_mm256_extractf128_ps(c1, 1),
+                    _mm256_castps256_ps128(c1));
+     g = _mm_add_ps(g, _mm_movehl_ps(g, g));
+     g = _mm_add_ss(g, _mm_movehdup_ps(g));
+     sumf += (ggml_float)_mm_cvtss_f32(g);
+
+ #undef LOAD
+ #endif
+
+     for (; i < n; ++i) {
+         sumf += (ggml_float)(GGML_BF16_TO_FP32(x[i]) *
+                              GGML_BF16_TO_FP32(y[i]));
+     }
+     *s = sumf;
+ }
+
+ void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * GGML_RESTRICT x, size_t bx, ggml_fp16_t * GGML_RESTRICT y, size_t by, int nrc) {
+     assert(nrc == 1);
+     GGML_UNUSED(nrc);
+     GGML_UNUSED(bx);
+     GGML_UNUSED(by);
+     GGML_UNUSED(bs);
+
+     ggml_float sumf = 0.0;
+
+ #if defined(GGML_SIMD)
+     const int np = (n & ~(GGML_F16_STEP - 1));
+
+     GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
+
+     GGML_F16_VEC ax[GGML_F16_ARR];
+     GGML_F16_VEC ay[GGML_F16_ARR];
+
+     for (int i = 0; i < np; i += GGML_F16_STEP) {
+         for (int j = 0; j < GGML_F16_ARR; j++) {
+             ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
+             ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
+
+             sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
+         }
+     }
+
+     // reduce sum0..sum3 to sum0
+     GGML_F16_VEC_REDUCE(sumf, sum);
+
+     // leftovers
+     for (int i = np; i < n; ++i) {
+         sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
+     }
+ #else
+     for (int i = 0; i < n; ++i) {
+         sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
+     }
+ #endif
+
+     *s = sumf;
+ }
+
+ void ggml_vec_silu_f32(const int n, float * y, const float * x) {
+     int i = 0;
+ #if defined(__AVX512F__) && defined(__AVX512DQ__)
+     for (; i + 15 < n; i += 16) {
+         _mm512_storeu_ps(y + i, ggml_v_silu(_mm512_loadu_ps(x + i)));
+     }
+ #elif defined(__AVX2__) && defined(__FMA__)
+     for (; i + 7 < n; i += 8) {
+         _mm256_storeu_ps(y + i, ggml_v_silu(_mm256_loadu_ps(x + i)));
+     }
+ #elif defined(__SSE2__)
+     for (; i + 3 < n; i += 4) {
+         _mm_storeu_ps(y + i, ggml_v_silu(_mm_loadu_ps(x + i)));
+     }
+ #elif defined(__ARM_NEON) && defined(__aarch64__)
+     for (; i + 3 < n; i += 4) {
+         vst1q_f32(y + i, ggml_v_silu(vld1q_f32(x + i)));
+     }
+ #endif
+     for (; i < n; ++i) {
+         y[i] = ggml_silu_f32(x[i]);
+     }
+ }
+
+ ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max) {
+     int i = 0;
+     ggml_float sum = 0;
+ #if defined(__AVX512F__) && defined(__AVX512DQ__)
+     for (; i + 15 < n; i += 16) {
+         __m512 val = ggml_v_expf(_mm512_sub_ps(_mm512_loadu_ps(x + i),
+                                                _mm512_set1_ps(max)));
+         _mm512_storeu_ps(y + i, val);
+         sum += (ggml_float)_mm512_reduce_add_ps(val);
+     }
+ #elif defined(__AVX2__) && defined(__FMA__)
+     for (; i + 7 < n; i += 8) {
+         __m256 val = ggml_v_expf(_mm256_sub_ps(_mm256_loadu_ps(x + i),
+                                                _mm256_set1_ps(max)));
+         _mm256_storeu_ps(y + i, val);
+         __m128 val2 = _mm_add_ps(_mm256_extractf128_ps(val, 1),
+                                  _mm256_castps256_ps128(val));
+         val2 = _mm_add_ps(val2, _mm_movehl_ps(val2, val2));
+         val2 = _mm_add_ss(val2, _mm_movehdup_ps(val2));
+         sum += (ggml_float)_mm_cvtss_f32(val2);
+     }
+ #elif defined(__SSE2__)
+     for (; i + 3 < n; i += 4) {
+         __m128 val = ggml_v_expf(_mm_sub_ps(_mm_loadu_ps(x + i),
+                                             _mm_set1_ps(max)));
+         _mm_storeu_ps(y + i, val);
+ #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+         val = _mm_add_ps(val, _mm_movehl_ps(val, val));
+         val = _mm_add_ss(val, _mm_movehdup_ps(val));
+ #else
+         __m128 tmp = _mm_shuffle_ps(val, val, _MM_SHUFFLE(2, 3, 0, 1));
+         val = _mm_add_ps(val, tmp);
+         tmp = _mm_movehl_ps(tmp, val);
+         val = _mm_add_ss(val, tmp);
+ #endif
+         sum += (ggml_float)_mm_cvtss_f32(val);
+     }
+ #elif defined(__ARM_NEON) && defined(__aarch64__)
+     for (; i + 3 < n; i += 4) {
+         float32x4_t val = ggml_v_expf(vsubq_f32(vld1q_f32(x + i),
+                                                 vdupq_n_f32(max)));
+         vst1q_f32(y + i, val);
+         sum += (ggml_float)vaddvq_f32(val);
+     }
+ #endif
+     for (; i < n; ++i) {
+         float val = expf(x[i] - max);
+         sum += (ggml_float)val;
+         y[i] = val;
+     }
+     return sum;
+ }
+
+ ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, float max) {
+     // log(soft_max) = log(soft_max_i / soft_max_sum) = log(soft_max_i) - log(soft_max_sum) = (logit_i - max) - log(soft_max_sum)
+
+     int i = 0;
+     ggml_float sum = 0;
+     for (; i < n; ++i) {
+         float val = x[i] - max;
+         y[i] = val;
+         sum += (ggml_float)expf(val);
+     }
+     return sum = (ggml_float)logf(sum);
+ }
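
Written out, the identity behind ggml_vec_log_soft_max_f32, with $m = \max_j x_j$:

$$\log \operatorname{softmax}(x)_i \;=\; \log \frac{e^{x_i - m}}{\sum_j e^{x_j - m}} \;=\; (x_i - m) \;-\; \log \sum_j e^{x_j - m}$$

The function stores $x_i - m$ into y and returns $\log \sum_j e^{x_j - m}$; the caller obtains the log-softmax by subtracting the returned value from each y[i]. Subtracting the max first keeps every exponent at or below zero, so expf cannot overflow, which is the same trick ggml_vec_soft_max_f32 uses above.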