whispercpp 1.2.0.2 → 1.3.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (135)
  1. checksums.yaml +4 -4
  2. data/.gitignore +5 -0
  3. data/LICENSE +1 -1
  4. data/README.md +165 -434
  5. data/Rakefile +46 -86
  6. data/ext/.gitignore +13 -0
  7. data/ext/cpu.mk +9 -0
  8. data/ext/{dr_wav.h → examples/dr_wav.h} +3560 -1179
  9. data/ext/extconf.rb +185 -7
  10. data/ext/ggml/include/ggml-alloc.h +76 -0
  11. data/ext/ggml/include/ggml-backend.h +352 -0
  12. data/ext/ggml/include/ggml-blas.h +25 -0
  13. data/ext/ggml/include/ggml-cann.h +123 -0
  14. data/ext/ggml/include/ggml-cpp.h +38 -0
  15. data/ext/ggml/include/ggml-cpu.h +135 -0
  16. data/ext/ggml/include/ggml-cuda.h +47 -0
  17. data/ext/ggml/include/ggml-kompute.h +50 -0
  18. data/ext/ggml/include/ggml-metal.h +66 -0
  19. data/ext/ggml/include/ggml-opencl.h +26 -0
  20. data/ext/ggml/include/ggml-opt.h +216 -0
  21. data/ext/ggml/include/ggml-rpc.h +28 -0
  22. data/ext/ggml/include/ggml-sycl.h +49 -0
  23. data/ext/ggml/include/ggml-vulkan.h +31 -0
  24. data/ext/ggml/include/ggml.h +2285 -0
  25. data/ext/ggml/src/ggml-alloc.c +1037 -0
  26. data/ext/ggml/src/ggml-amx/common.h +94 -0
  27. data/ext/ggml/src/ggml-amx/ggml-amx.cpp +446 -0
  28. data/ext/ggml/src/ggml-amx/mmq.cpp +2510 -0
  29. data/ext/ggml/src/ggml-amx/mmq.h +17 -0
  30. data/ext/ggml/src/ggml-backend-impl.h +256 -0
  31. data/ext/ggml/src/ggml-backend-reg.cpp +552 -0
  32. data/ext/ggml/src/ggml-backend.cpp +1999 -0
  33. data/ext/ggml/src/ggml-blas/ggml-blas.cpp +517 -0
  34. data/ext/ggml/src/ggml-cann/acl_tensor.cpp +175 -0
  35. data/ext/ggml/src/ggml-cann/acl_tensor.h +258 -0
  36. data/ext/ggml/src/ggml-cann/aclnn_ops.cpp +3427 -0
  37. data/ext/ggml/src/ggml-cann/aclnn_ops.h +592 -0
  38. data/ext/ggml/src/ggml-cann/common.h +286 -0
  39. data/ext/ggml/src/ggml-cann/ggml-cann.cpp +2188 -0
  40. data/ext/ggml/src/ggml-cann/kernels/ascendc_kernels.h +19 -0
  41. data/ext/ggml/src/ggml-cann/kernels/dup.cpp +236 -0
  42. data/ext/ggml/src/ggml-cann/kernels/get_row_f16.cpp +197 -0
  43. data/ext/ggml/src/ggml-cann/kernels/get_row_f32.cpp +190 -0
  44. data/ext/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +204 -0
  45. data/ext/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +191 -0
  46. data/ext/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +218 -0
  47. data/ext/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +216 -0
  48. data/ext/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +295 -0
  49. data/ext/ggml/src/ggml-common.h +1853 -0
  50. data/ext/ggml/src/ggml-cpu/amx/amx.cpp +220 -0
  51. data/ext/ggml/src/ggml-cpu/amx/amx.h +8 -0
  52. data/ext/ggml/src/ggml-cpu/amx/common.h +91 -0
  53. data/ext/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
  54. data/ext/ggml/src/ggml-cpu/amx/mmq.h +10 -0
  55. data/ext/ggml/src/ggml-cpu/cpu-feats-x86.cpp +323 -0
  56. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +4262 -0
  57. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
  58. data/ext/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
  59. data/ext/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
  60. data/ext/ggml/src/ggml-cpu/ggml-cpu-impl.h +386 -0
  61. data/ext/ggml/src/ggml-cpu/ggml-cpu-quants.c +10835 -0
  62. data/ext/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
  63. data/ext/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
  64. data/ext/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
  65. data/ext/ggml/src/ggml-cpu/ggml-cpu.c +14123 -0
  66. data/ext/ggml/src/ggml-cpu/ggml-cpu.cpp +622 -0
  67. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.cpp +1884 -0
  68. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
  69. data/ext/ggml/src/ggml-cuda/vendors/cuda.h +14 -0
  70. data/ext/ggml/src/ggml-cuda/vendors/hip.h +186 -0
  71. data/ext/ggml/src/ggml-cuda/vendors/musa.h +134 -0
  72. data/ext/ggml/src/ggml-impl.h +556 -0
  73. data/ext/ggml/src/ggml-kompute/ggml-kompute.cpp +2251 -0
  74. data/ext/ggml/src/ggml-metal/ggml-metal-impl.h +288 -0
  75. data/ext/ggml/src/ggml-metal/ggml-metal.m +4884 -0
  76. data/ext/ggml/src/ggml-metal/ggml-metal.metal +6732 -0
  77. data/ext/ggml/src/ggml-opt.cpp +854 -0
  78. data/ext/ggml/src/ggml-quants.c +5238 -0
  79. data/ext/ggml/src/ggml-quants.h +100 -0
  80. data/ext/ggml/src/ggml-rpc/ggml-rpc.cpp +1406 -0
  81. data/ext/ggml/src/ggml-sycl/common.cpp +95 -0
  82. data/ext/ggml/src/ggml-sycl/concat.cpp +196 -0
  83. data/ext/ggml/src/ggml-sycl/conv.cpp +99 -0
  84. data/ext/ggml/src/ggml-sycl/convert.cpp +547 -0
  85. data/ext/ggml/src/ggml-sycl/dmmv.cpp +1023 -0
  86. data/ext/ggml/src/ggml-sycl/element_wise.cpp +1030 -0
  87. data/ext/ggml/src/ggml-sycl/ggml-sycl.cpp +4729 -0
  88. data/ext/ggml/src/ggml-sycl/im2col.cpp +126 -0
  89. data/ext/ggml/src/ggml-sycl/mmq.cpp +3031 -0
  90. data/ext/ggml/src/ggml-sycl/mmvq.cpp +1015 -0
  91. data/ext/ggml/src/ggml-sycl/norm.cpp +378 -0
  92. data/ext/ggml/src/ggml-sycl/outprod.cpp +56 -0
  93. data/ext/ggml/src/ggml-sycl/rope.cpp +276 -0
  94. data/ext/ggml/src/ggml-sycl/softmax.cpp +251 -0
  95. data/ext/ggml/src/ggml-sycl/tsembd.cpp +72 -0
  96. data/ext/ggml/src/ggml-sycl/wkv6.cpp +141 -0
  97. data/ext/ggml/src/ggml-threading.cpp +12 -0
  98. data/ext/ggml/src/ggml-threading.h +14 -0
  99. data/ext/ggml/src/ggml-vulkan/ggml-vulkan.cpp +8657 -0
  100. data/ext/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +593 -0
  101. data/ext/ggml/src/ggml.c +7694 -0
  102. data/ext/include/whisper.h +672 -0
  103. data/ext/metal-embed.mk +17 -0
  104. data/ext/metal.mk +6 -0
  105. data/ext/ruby_whisper.cpp +1608 -159
  106. data/ext/ruby_whisper.h +10 -0
  107. data/ext/scripts/get-flags.mk +38 -0
  108. data/ext/src/coreml/whisper-decoder-impl.h +146 -0
  109. data/ext/src/coreml/whisper-decoder-impl.m +201 -0
  110. data/ext/src/coreml/whisper-encoder-impl.h +142 -0
  111. data/ext/src/coreml/whisper-encoder-impl.m +197 -0
  112. data/ext/src/coreml/whisper-encoder.h +26 -0
  113. data/ext/src/openvino/whisper-openvino-encoder.cpp +108 -0
  114. data/ext/src/openvino/whisper-openvino-encoder.h +31 -0
  115. data/ext/src/whisper.cpp +7393 -0
  116. data/extsources.rb +6 -0
  117. data/lib/whisper/model/uri.rb +157 -0
  118. data/lib/whisper.rb +2 -0
  119. data/tests/helper.rb +7 -0
  120. data/tests/jfk_reader/.gitignore +5 -0
  121. data/tests/jfk_reader/extconf.rb +3 -0
  122. data/tests/jfk_reader/jfk_reader.c +68 -0
  123. data/tests/test_callback.rb +160 -0
  124. data/tests/test_error.rb +20 -0
  125. data/tests/test_model.rb +71 -0
  126. data/tests/test_package.rb +31 -0
  127. data/tests/test_params.rb +160 -0
  128. data/tests/test_segment.rb +83 -0
  129. data/tests/test_whisper.rb +211 -123
  130. data/whispercpp.gemspec +36 -0
  131. metadata +137 -11
  132. data/ext/ggml.c +0 -8616
  133. data/ext/ggml.h +0 -748
  134. data/ext/whisper.cpp +0 -4829
  135. data/ext/whisper.h +0 -402
@@ -0,0 +1,258 @@
1
+ /*
2
+ * Copyright (c) 2023-2024 The ggml authors
3
+ *
4
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
5
+ * of this software and associated documentation files (the "Software"), to
6
+ * deal in the Software without restriction, including without limitation the
7
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8
+ * sell copies of the Software, and to permit persons to whom the Software is
9
+ * furnished to do so, subject to the following conditions:
10
+ *
11
+ * The above copyright notice and this permission notice shall be included in
12
+ * all copies or substantial portions of the Software.
13
+ *
14
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20
+ * IN THE SOFTWARE.
21
+ */
22
+
23
+ #ifndef CANN_ACL_TENSOR_H
24
+ #define CANN_ACL_TENSOR_H
25
+
26
+ #include <algorithm>
27
+ #include <cstring>
28
+
29
+ #include <aclnn/aclnn_base.h>
30
+ #include "common.h"
31
+
32
+ /**
33
+ * @brief Maps a ggml_type to its corresponding aclDataType.
34
+ *
35
+ * @details This function takes a ggml_type as input and returns the corresponding
36
+ * aclDataType. It supports mapping for various ggml_types. If the input type
37
+ * does not match any of the predefined ggml_types, the function returns
38
+ * ACL_DT_UNDEFINED.
39
+ *
40
+ * @param type The ggml_type to be mapped.
41
+ * @return The corresponding aclDataType. If the input type is not recognized,
42
+ * ACL_DT_UNDEFINED is returned.
43
+ */
44
+ aclDataType ggml_cann_type_mapping(ggml_type type);
45
+
46
+ /**
47
+ * @brief Creates an ACL tensor from a ggml_tensor with optional shape.
48
+ *
49
+ * @details This function creates an ACL tensor based on the properties of the
50
+ * provided ggml_tensor. It supports customer shape by adjusting dimensions
51
+ * and strides accordingly. If customer shape is applied, additional
52
+ * dimensions and strides are calculated based on the provided parameters.
53
+ *
54
+ * @param tensor Pointer to the ggml_tensor to be converted to ACL tensor.
55
+ * @param ne Pointer to an array containing dimensions. Defaults to nullptr
56
+ * if no customer shape is applied.
57
+ * @param nb Pointer to an array containing strides. Defaults to nullptr
58
+ * if no customer shape is applied.
59
+ * @param dims Number of dimensions in the tensor. Defaults to 0 if no customer
60
+ * shape is applied.
61
+ * @param format ACL tensor format. Defaults to ACL_FORMAT_ND.
62
+ * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
63
+ * @return Pointer to the created ACL tensor.
64
+ */
65
+ aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne = nullptr,
66
+ size_t* nb = nullptr, int64_t dims = 0,
67
+ aclFormat format = ACL_FORMAT_ND,
68
+ size_t offset = 0);
69
+
70
+ /**
71
+ * @brief Template for creating an ACL tensor from provided parameters. typename TYPE
72
+ * should be size_t or float.
73
+ *
74
+ * @details This function creates an ACL tensor using the provided data pointer,
75
+ * data type, dimensions, strides, format, offset, and additional parameters.
76
+ * It calculates necessary dimensions and strides based on the provided ne and nb
77
+ * arrays, adjusting them for the ACL tensor creation. The ACL storage length
78
+ * is also calculated based on the provided dimensions and strides.
79
+ *
80
+ * @param data_ptr Pointer to the data buffer for the ACL tensor.
81
+ * @param dtype ACL data type of the tensor.
82
+ * @param type_size Size of each element in the tensor data buffer.
83
+ * @param ne Pointer to an array containing tensor dimensions.
84
+ * @param nb Pointer to an array containing tensor strides.
85
+ * @param dims Number of dimensions of the tensor.
86
+ * @param format ACL tensor format. Defaults to ACL_FORMAT_ND.
87
+ * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
88
+ * @return Pointer to the created ACL tensor.
89
+ */
90
+ template<typename TYPE>
91
+ aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype,
92
+ TYPE type_size, int64_t* ne, TYPE* nb,
93
+ int64_t dims,
94
+ aclFormat format = ACL_FORMAT_ND,
95
+ size_t offset = 0) {
96
+ int64_t tmp_ne[GGML_MAX_DIMS * 2];
97
+ int64_t tmp_stride[GGML_MAX_DIMS * 2];
98
+
99
+ memcpy(tmp_ne, ne, dims * sizeof(int64_t));
100
+ for (int i = 0; i < dims; i++) {
101
+ tmp_stride[i] = nb[i] / type_size;
102
+ }
103
+
104
+ std::reverse(tmp_ne, tmp_ne + dims);
105
+ std::reverse(tmp_stride, tmp_stride + dims);
106
+
107
+ int64_t acl_storage_len = 0;
108
+ for (int i = 0; i < dims; i++) {
109
+ acl_storage_len += (ne[i] - 1) * nb[i];
110
+ }
111
+
112
+ aclTensor* acl_tensor =
113
+ aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size,
114
+ format, &acl_storage_len, 1, data_ptr);
115
+
116
+ return acl_tensor;
117
+ }
118
+
119
+ /**
120
+ * @brief Checks if tensors require broadcasting based on their shapes.
121
+ *
122
+ * @details This function determines if two ggml_tensors need to be broadcasted for
123
+ * element-wise operations. Broadcasting is necessary if the shapes of the
124
+ * tensors are not identical and no dimension in either tensor equals 1.
125
+ *
126
+ * @param t0 Pointer to the first ggml_tensor.
127
+ * @param t1 Pointer to the second ggml_tensor.
128
+ * @return True if broadcasting is needed, False otherwise.
129
+ *
130
+ * @remarks This function iterates over the dimensions of t0 and t1. It checks if each
131
+ * dimension in t1 differs from t0's corresponding dimension and is not equal
132
+ * to 1. If such a dimension is found, broadcasting is required to align t1
133
+ * with t0 for element-wise operations.
134
+ */
135
+ bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1);
136
+
137
+ /**
138
+ * @brief Computes broadcast shapes and strides for two ggml_tensors.
139
+ *
140
+ * @details This function calculates the broadcast shapes and strides for two ggml_tensors,
141
+ * following the broadcasting rules similar to numpy. It adjusts dimensions and
142
+ * strides to ensure compatibility for element-wise operations where one tensor
143
+ * can be broadcasted to match the shape of another tensor.
144
+ *
145
+ * @param src0 Pointer to the first ggml_tensor.
146
+ * @param src1 Pointer to the second ggml_tensor.
147
+ * @param bcast_ne_src0 Output array to store broadcasted dimensions for src0.
148
+ * @param bcast_ne_src1 Output array to store broadcasted dimensions for src1.
149
+ * @param bcast_nb_src0 Output array to store broadcasted strides for src0.
150
+ * @param bcast_nb_src1 Output array to store broadcasted strides for src1.
151
+ * @return Number of dimensions in the broadcasted shape.
152
+ *
153
+ * @pre ggml_can_repeat(src1, src0) must return true, indicating src1 can be broadcasted
154
+ * to match src0.
155
+ *
156
+ * @remarks This function iterates over the dimensions of src0 and src1, calculating the
157
+ * necessary broadcast dimensions and strides. If a dimension requires broadcasting
158
+ * (i.e., its size in src1 is smaller than in src0), an additional dimension is
159
+ * added with size calculated to match src0's dimension. This adjustment ensures
160
+ * that src1 can be element-wise broadcasted to src0's shape.
161
+ *
162
+ * How it works:
163
+ *
164
+ * if dim0 has padding.
165
+ * a -> (2, 2) padding = 2
166
+ * a: [[1, 2, *, *]
167
+ * [2, 3, *, *]]
168
+ * nb = (8, 4, 2)
169
+ *
170
+ * if a should bcast with b -> (2, 4)
171
+ * b' -> (2, 2, 2)
172
+ * b : [[1, 2, 3, 4, *, *]
173
+ * [5, 6, 7, 8, *, *]]
174
+ * nb = (12, 6, 1)
175
+ *
176
+ * after bcast:
177
+ * a' -> (2, 1, 2)
178
+ * a': [[[1, 2], *, *]
179
+ * [[2, 3], *, *]]
180
+ * nb = (8, 4, 2, 1)
181
+ *
182
+ * b' : [[[1, 2], [3, 4], *, *]
183
+ * [[5, 6], [7, 8], *, *]]
184
+ * nb = (12, 6, 2, 1)
185
+ * \endcode
186
+ *
187
+ * dim1 in a inserted dim, should add nb for dim1,
188
+ * and all other nb moves to next in order.
189
+ */
190
+ int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1,
191
+ int64_t* bcast_ne_src0, int64_t* bcast_ne_src1,
192
+ size_t* bcast_nb_src0, size_t* bcast_nb_src1);
193
+
194
+ // Bcast macro to avoid duplicate code.
195
+ #define BCAST_SHAPE(src0, src1) \
196
+ int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2]; \
197
+ int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2]; \
198
+ size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2]; \
199
+ size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2]; \
200
+ int64_t bcast_dims = ggml_cann_get_bcast_shape( \
201
+ src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, bcast_##src0##_nb, \
202
+ bcast_##src1##_nb);
203
+
204
+ #define BCAST_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
205
+
206
+ /**
207
+ * @brief Calculates broadcast shapes for matrix multiplication.
208
+ *
209
+ * @details This function computes the broadcast shapes required for matrix multiplication
210
+ * based on the input, weight, and destination tensor shapes. It ensures that the
211
+ * dimensions of weight tensors are expanded appropriately to satisfy matrix
212
+ * multiplication broadcast rules.
213
+ *
214
+ * @param input_ne Array containing the dimensions of the input tensor.
215
+ * @param weight_ne Array containing the dimensions of the weight tensor.
216
+ * @param dst_ne Array containing the dimensions of the destination tensor.
217
+ * @param input_nb Array containing the strides of the input tensor.
218
+ * @param weight_nb Array containing the strides of the weight tensor.
219
+ * @param dst_nb Array containing the strides of the destination tensor.
220
+ * @param bcast_input_ne Output array for broadcasted input tensor dimensions.
221
+ * @param bcast_weight_ne Output array for broadcasted weight tensor dimensions.
222
+ * @param bcast_dst_ne Output array for broadcasted destination tensor dimensions.
223
+ * @param bcast_input_nb Output array for broadcasted input tensor strides.
224
+ * @param bcast_weight_nb Output array for broadcasted weight tensor strides.
225
+ * @param bcast_dst_nb Output array for broadcasted destination tensor strides.
226
+ * @return The number of dimensions in the broadcasted tensors.
227
+ *
228
+ * @remarks This function iterates over the tensor dimensions and calculates the broadcast
229
+ * shapes needed for matrix multiplication. It ensures that dimensions where
230
+ * weight tensor requires expansion are appropriately handled to conform with
231
+ * broadcasting rules.
232
+ * @note compare with ggml_cann_get_bcast_shape, mul_mat broadcast need add this new dim
233
+ * before cast dim.
234
+ * @sa ggml_cann_get_bcast_shape
235
+ */
236
+ int64_t ggml_cann_get_mulmat_bcast_shape(
237
+ const int64_t* input_ne, const int64_t* weight_ne, const int64_t* dst_ne,
238
+ const size_t* input_nb, const size_t* weight_nb, const size_t* dst_nb,
239
+ int64_t* bcast_input_ne, int64_t* bcast_weight_ne, int64_t* bcast_dst_ne,
240
+ size_t* bcast_input_nb, size_t* bcast_weight_nb, size_t* bcast_dst_nb);
241
+
242
+ // Bcast macro to avoid duplicate code.
243
+ #define BCAST_MUL_MAT_SHAPE(input, weight, dst) \
244
+ int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2]; \
245
+ int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2]; \
246
+ int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2]; \
247
+ size_t bcast_##input##_nb[GGML_MAX_DIMS * 2]; \
248
+ size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2]; \
249
+ size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2]; \
250
+ int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape( \
251
+ input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, \
252
+ bcast_##input##_ne, bcast_##weight##_ne, bcast_##dst##_ne, \
253
+ bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb);
254
+
255
+ #define BCAST_MUL_MAT_PARAM(tensor) \
256
+ bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
257
+
258
+ #endif // CANN_ACL_TENSOR_H