mindspore-2.3.0-cp310-cp310-win_amd64.whl → mindspore-2.4.1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (275)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +3 -1
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +50 -9
  7. mindspore/_extends/parse/compile_config.py +41 -0
  8. mindspore/_extends/parse/parser.py +9 -7
  9. mindspore/_extends/parse/standard_method.py +52 -14
  10. mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
  11. mindspore/amp.py +24 -10
  12. mindspore/common/__init__.py +6 -4
  13. mindspore/common/_pijit_context.py +190 -0
  14. mindspore/common/_register_for_tensor.py +2 -1
  15. mindspore/common/_tensor_overload.py +139 -0
  16. mindspore/common/api.py +102 -87
  17. mindspore/common/dump.py +5 -6
  18. mindspore/common/generator.py +1 -7
  19. mindspore/common/hook_handle.py +14 -26
  20. mindspore/common/initializer.py +51 -15
  21. mindspore/common/mindir_util.py +2 -2
  22. mindspore/common/parameter.py +62 -15
  23. mindspore/common/recompute.py +39 -9
  24. mindspore/common/sparse_tensor.py +7 -3
  25. mindspore/common/tensor.py +183 -37
  26. mindspore/communication/__init__.py +1 -1
  27. mindspore/communication/_comm_helper.py +38 -3
  28. mindspore/communication/comm_func.py +315 -60
  29. mindspore/communication/management.py +14 -14
  30. mindspore/context.py +132 -22
  31. mindspore/dataset/__init__.py +1 -1
  32. mindspore/dataset/audio/__init__.py +1 -1
  33. mindspore/dataset/core/config.py +7 -0
  34. mindspore/dataset/core/validator_helpers.py +7 -0
  35. mindspore/dataset/engine/cache_client.py +1 -1
  36. mindspore/dataset/engine/datasets.py +72 -44
  37. mindspore/dataset/engine/datasets_audio.py +7 -7
  38. mindspore/dataset/engine/datasets_standard_format.py +53 -3
  39. mindspore/dataset/engine/datasets_text.py +20 -20
  40. mindspore/dataset/engine/datasets_user_defined.py +174 -104
  41. mindspore/dataset/engine/datasets_vision.py +33 -33
  42. mindspore/dataset/engine/iterators.py +29 -0
  43. mindspore/dataset/engine/obs/util.py +7 -0
  44. mindspore/dataset/engine/queue.py +114 -60
  45. mindspore/dataset/engine/serializer_deserializer.py +2 -2
  46. mindspore/dataset/engine/validators.py +34 -14
  47. mindspore/dataset/text/__init__.py +1 -4
  48. mindspore/dataset/transforms/__init__.py +0 -3
  49. mindspore/dataset/utils/line_reader.py +2 -0
  50. mindspore/dataset/vision/__init__.py +1 -4
  51. mindspore/dataset/vision/utils.py +1 -1
  52. mindspore/dataset/vision/validators.py +2 -1
  53. mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
  54. mindspore/experimental/es/embedding_service.py +883 -0
  55. mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
  56. mindspore/experimental/llm_boost/__init__.py +21 -0
  57. mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
  58. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  59. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  60. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  61. mindspore/experimental/llm_boost/register.py +129 -0
  62. mindspore/experimental/llm_boost/utils.py +31 -0
  63. mindspore/experimental/optim/adamw.py +85 -0
  64. mindspore/experimental/optim/optimizer.py +3 -0
  65. mindspore/hal/__init__.py +3 -3
  66. mindspore/hal/contiguous_tensors_handle.py +175 -0
  67. mindspore/hal/stream.py +18 -0
  68. mindspore/include/api/model_group.h +13 -1
  69. mindspore/include/api/types.h +10 -10
  70. mindspore/include/dataset/config.h +2 -2
  71. mindspore/include/dataset/constants.h +2 -2
  72. mindspore/include/dataset/execute.h +2 -2
  73. mindspore/include/dataset/vision.h +4 -0
  74. mindspore/log.py +1 -1
  75. mindspore/mindrecord/filewriter.py +68 -51
  76. mindspore/mindspore_backend.dll +0 -0
  77. mindspore/mindspore_common.dll +0 -0
  78. mindspore/mindspore_core.dll +0 -0
  79. mindspore/mindspore_np_dtype.dll +0 -0
  80. mindspore/mindspore_ops.dll +0 -0
  81. mindspore/mint/__init__.py +983 -46
  82. mindspore/mint/distributed/__init__.py +31 -0
  83. mindspore/mint/distributed/distributed.py +254 -0
  84. mindspore/mint/nn/__init__.py +268 -23
  85. mindspore/mint/nn/functional.py +125 -19
  86. mindspore/mint/nn/layer/__init__.py +39 -0
  87. mindspore/mint/nn/layer/activation.py +133 -0
  88. mindspore/mint/nn/layer/normalization.py +477 -0
  89. mindspore/mint/nn/layer/pooling.py +110 -0
  90. mindspore/mint/optim/adamw.py +26 -13
  91. mindspore/mint/special/__init__.py +63 -0
  92. mindspore/multiprocessing/__init__.py +2 -1
  93. mindspore/nn/__init__.py +0 -1
  94. mindspore/nn/cell.py +276 -96
  95. mindspore/nn/layer/activation.py +211 -44
  96. mindspore/nn/layer/basic.py +137 -10
  97. mindspore/nn/layer/embedding.py +137 -2
  98. mindspore/nn/layer/normalization.py +101 -5
  99. mindspore/nn/layer/padding.py +34 -48
  100. mindspore/nn/layer/pooling.py +161 -7
  101. mindspore/nn/layer/transformer.py +3 -3
  102. mindspore/nn/loss/__init__.py +2 -2
  103. mindspore/nn/loss/loss.py +84 -6
  104. mindspore/nn/optim/__init__.py +2 -1
  105. mindspore/nn/optim/adadelta.py +1 -1
  106. mindspore/nn/optim/adam.py +1 -1
  107. mindspore/nn/optim/lamb.py +1 -1
  108. mindspore/nn/optim/tft_wrapper.py +124 -0
  109. mindspore/nn/wrap/cell_wrapper.py +12 -23
  110. mindspore/nn/wrap/grad_reducer.py +5 -5
  111. mindspore/nn/wrap/loss_scale.py +17 -3
  112. mindspore/numpy/__init__.py +1 -1
  113. mindspore/numpy/array_creations.py +65 -68
  114. mindspore/numpy/array_ops.py +64 -60
  115. mindspore/numpy/fft.py +610 -75
  116. mindspore/numpy/logic_ops.py +11 -10
  117. mindspore/numpy/math_ops.py +85 -84
  118. mindspore/numpy/utils_const.py +4 -4
  119. mindspore/opencv_core452.dll +0 -0
  120. mindspore/opencv_imgcodecs452.dll +0 -0
  121. mindspore/opencv_imgproc452.dll +0 -0
  122. mindspore/ops/__init__.py +6 -4
  123. mindspore/ops/_grad_experimental/grad_array_ops.py +0 -11
  124. mindspore/ops/_grad_experimental/grad_comm_ops.py +67 -4
  125. mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
  126. mindspore/ops/_vmap/vmap_array_ops.py +2 -4
  127. mindspore/ops/_vmap/vmap_math_ops.py +17 -1
  128. mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
  129. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +91 -7
  130. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
  131. mindspore/ops/auto_generate/gen_extend_func.py +767 -13
  132. mindspore/ops/auto_generate/gen_ops_def.py +2452 -364
  133. mindspore/ops/auto_generate/gen_ops_prim.py +5442 -1756
  134. mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
  135. mindspore/ops/composite/base.py +85 -48
  136. mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
  137. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
  138. mindspore/ops/function/__init__.py +22 -0
  139. mindspore/ops/function/array_func.py +492 -153
  140. mindspore/ops/function/debug_func.py +113 -1
  141. mindspore/ops/function/fft_func.py +15 -2
  142. mindspore/ops/function/grad/grad_func.py +3 -2
  143. mindspore/ops/function/math_func.py +564 -207
  144. mindspore/ops/function/nn_func.py +817 -383
  145. mindspore/ops/function/other_func.py +3 -2
  146. mindspore/ops/function/random_func.py +402 -12
  147. mindspore/ops/function/reshard_func.py +13 -11
  148. mindspore/ops/function/sparse_unary_func.py +1 -1
  149. mindspore/ops/function/vmap_func.py +3 -2
  150. mindspore/ops/functional.py +24 -14
  151. mindspore/ops/op_info_register.py +3 -3
  152. mindspore/ops/operations/__init__.py +7 -2
  153. mindspore/ops/operations/_grad_ops.py +2 -76
  154. mindspore/ops/operations/_infer_ops.py +1 -1
  155. mindspore/ops/operations/_inner_ops.py +71 -94
  156. mindspore/ops/operations/array_ops.py +14 -146
  157. mindspore/ops/operations/comm_ops.py +63 -53
  158. mindspore/ops/operations/custom_ops.py +83 -19
  159. mindspore/ops/operations/debug_ops.py +42 -10
  160. mindspore/ops/operations/manually_defined/_inner.py +12 -0
  161. mindspore/ops/operations/manually_defined/ops_def.py +273 -20
  162. mindspore/ops/operations/math_ops.py +12 -223
  163. mindspore/ops/operations/nn_ops.py +20 -114
  164. mindspore/ops/operations/other_ops.py +7 -4
  165. mindspore/ops/operations/random_ops.py +46 -1
  166. mindspore/ops/primitive.py +18 -6
  167. mindspore/ops_generate/arg_dtype_cast.py +2 -0
  168. mindspore/ops_generate/gen_aclnn_implement.py +11 -11
  169. mindspore/ops_generate/gen_constants.py +36 -0
  170. mindspore/ops_generate/gen_ops.py +67 -52
  171. mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
  172. mindspore/ops_generate/gen_pyboost_func.py +131 -47
  173. mindspore/ops_generate/op_proto.py +10 -3
  174. mindspore/ops_generate/pyboost_utils.py +14 -1
  175. mindspore/ops_generate/template.py +43 -21
  176. mindspore/parallel/__init__.py +3 -1
  177. mindspore/parallel/_auto_parallel_context.py +31 -9
  178. mindspore/parallel/_cell_wrapper.py +85 -0
  179. mindspore/parallel/_parallel_serialization.py +47 -19
  180. mindspore/parallel/_tensor.py +127 -13
  181. mindspore/parallel/_utils.py +53 -22
  182. mindspore/parallel/algo_parameter_config.py +5 -5
  183. mindspore/parallel/checkpoint_transform.py +46 -39
  184. mindspore/parallel/cluster/process_entity/__init__.py +1 -1
  185. mindspore/parallel/cluster/process_entity/_api.py +31 -23
  186. mindspore/parallel/cluster/process_entity/_utils.py +2 -27
  187. mindspore/parallel/parameter_broadcast.py +3 -4
  188. mindspore/parallel/shard.py +162 -31
  189. mindspore/parallel/transform_safetensors.py +1146 -0
  190. mindspore/profiler/__init__.py +2 -1
  191. mindspore/profiler/common/constant.py +29 -0
  192. mindspore/profiler/common/registry.py +47 -0
  193. mindspore/profiler/common/util.py +28 -0
  194. mindspore/profiler/dynamic_profiler.py +694 -0
  195. mindspore/profiler/envprofiling.py +17 -19
  196. mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
  197. mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
  198. mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
  199. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
  200. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
  201. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
  202. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  203. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
  204. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
  205. mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
  206. mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
  207. mindspore/profiler/parser/base_timeline_generator.py +19 -25
  208. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
  209. mindspore/profiler/parser/framework_parser.py +1 -391
  210. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  211. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  212. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  213. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  214. mindspore/profiler/parser/memory_usage_parser.py +0 -154
  215. mindspore/profiler/parser/profiler_info.py +78 -6
  216. mindspore/profiler/profiler.py +153 -0
  217. mindspore/profiler/profiling.py +285 -413
  218. mindspore/rewrite/__init__.py +1 -2
  219. mindspore/rewrite/common/namespace.py +4 -4
  220. mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
  221. mindspore/run_check/_check_version.py +39 -104
  222. mindspore/safeguard/rewrite_obfuscation.py +591 -247
  223. mindspore/train/__init__.py +4 -3
  224. mindspore/train/_utils.py +105 -19
  225. mindspore/train/amp.py +171 -53
  226. mindspore/train/callback/__init__.py +2 -2
  227. mindspore/train/callback/_callback.py +4 -4
  228. mindspore/train/callback/_checkpoint.py +97 -31
  229. mindspore/train/callback/_cluster_monitor.py +1 -1
  230. mindspore/train/callback/_flops_collector.py +1 -0
  231. mindspore/train/callback/_loss_monitor.py +3 -3
  232. mindspore/train/callback/_on_request_exit.py +145 -31
  233. mindspore/train/callback/_summary_collector.py +5 -5
  234. mindspore/train/callback/_tft_register.py +375 -0
  235. mindspore/train/dataset_helper.py +15 -3
  236. mindspore/train/metrics/metric.py +3 -3
  237. mindspore/train/metrics/roc.py +4 -4
  238. mindspore/train/mind_ir_pb2.py +44 -39
  239. mindspore/train/model.py +154 -58
  240. mindspore/train/serialization.py +342 -128
  241. mindspore/utils/__init__.py +21 -0
  242. mindspore/utils/utils.py +60 -0
  243. mindspore/version.py +1 -1
  244. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/METADATA +13 -7
  245. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/RECORD +248 -242
  246. mindspore/include/c_api/ms/abstract.h +0 -67
  247. mindspore/include/c_api/ms/attribute.h +0 -197
  248. mindspore/include/c_api/ms/base/handle_types.h +0 -43
  249. mindspore/include/c_api/ms/base/macros.h +0 -32
  250. mindspore/include/c_api/ms/base/status.h +0 -33
  251. mindspore/include/c_api/ms/base/types.h +0 -283
  252. mindspore/include/c_api/ms/context.h +0 -102
  253. mindspore/include/c_api/ms/graph.h +0 -160
  254. mindspore/include/c_api/ms/node.h +0 -606
  255. mindspore/include/c_api/ms/tensor.h +0 -161
  256. mindspore/include/c_api/ms/value.h +0 -84
  257. mindspore/mindspore_shared_lib.dll +0 -0
  258. mindspore/nn/extend/basic.py +0 -140
  259. mindspore/nn/extend/embedding.py +0 -143
  260. mindspore/nn/extend/layer/normalization.py +0 -109
  261. mindspore/nn/extend/pooling.py +0 -117
  262. mindspore/nn/layer/embedding_service.py +0 -531
  263. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
  264. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
  265. mindspore/ops/extend/__init__.py +0 -53
  266. mindspore/ops/extend/array_func.py +0 -218
  267. mindspore/ops/extend/math_func.py +0 -76
  268. mindspore/ops/extend/nn_func.py +0 -308
  269. mindspore/ops/silent_check.py +0 -162
  270. mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
  271. mindspore/profiler/parser/msadvisor_parser.py +0 -240
  272. mindspore/train/callback/_mindio_ttp.py +0 -443
  273. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/WHEEL +0 -0
  274. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/entry_points.txt +0 -0
  275. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/top_level.txt +0 -0
@@ -54,6 +54,39 @@ def abs(input):
     return abs_op(input)
 
 
+def acos_ext(input):
+    r"""
+    Computes arccosine of input tensors element-wise.
+
+    .. math::
+
+        out_i = \cos^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+        >>> output = ops.acos_ext(input)
+        >>> print(output)
+        [0.7377037 1.5307857 1.2661037 0.9764114]
+    """
+    return acos_ext_op(input)
+
+
 def acos(input):
     r"""
     Computes arccosine of input tensors element-wise.
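Editor's note: the new `*_ext` variants added in this file all document the same promotion rule:
integer and bool inputs produce a float32 result, floating inputs keep their dtype. A minimal sketch
of that rule, assuming the 2.4.1 wheel is installed (the expected dtypes come from the docstring
above, not from running this snippet):

    import mindspore as ms
    import numpy as np
    from mindspore import Tensor, ops

    # Integer input: the docstring says the result is promoted to float32.
    x_int = Tensor(np.array([1, 0, -1]), ms.int32)
    print(ops.acos_ext(x_int).dtype)   # expected per the docstring: Float32

    # Floating input keeps its dtype.
    x_f16 = Tensor(np.array([0.5, -0.5]), ms.float16)
    print(ops.acos_ext(x_f16).dtype)   # expected per the docstring: Float16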
@@ -88,6 +121,42 @@ def acos(input):
     return acos_op(input)
 
 
+def acosh_ext(input):
+    r"""
+    Computes inverse hyperbolic cosine of the inputs element-wise.
+
+    .. math::
+
+        out_i = \cosh^{-1}(input_i)
+
+    .. note::
+        Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
+        Input range is [1, inf].
+
+    Args:
+        input (Tensor): The input tensor of inverse hyperbolic cosine function.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
+        >>> output = ops.acosh_ext(input)
+        >>> print(output)
+        [0.        0.9624236 1.7627472 5.298292 ]
+    """
+    return acosh_ext_op(input)
+
+
 def acosh(input):
     r"""
     Computes inverse hyperbolic cosine of the inputs element-wise.
@@ -96,7 +165,7 @@ def acosh(input):
 
         out_i = \cosh^{-1}(input_i)
 
-    .. warning::
+    .. note::
         Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
         Input range is [1, inf].
 
@@ -124,6 +193,52 @@ def acosh(input):
     return acosh_op(input)
 
 
+def adaptive_avg_pool1d(input, output_size):
+    r"""
+    Performs 1D adaptive average pooling on a multi-plane input signal.
+    That is, for any input size, the size of the specified output is L.
+    The number of output features is equal to the number of input features.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input of adaptive_avg_pool1d, which is a 2D or 3D tensor,
+            with float16 or float32 data type.
+        output_size (int): The target output feature size. `output_size` is an integer.
+
+    Returns:
+        Tensor, with the same type as the `input`.
+
+        Shape of the output is `input_shape[:len(input_shape) - 1] + [output_size]`.
+
+    Raises:
+        ValueError: If `output_size` is not an integer.
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not float16 or float32.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor([[2, 3], [3, 4]], dtype=mindspore.float16)
+        >>> output = mint.nn.functional.adaptive_avg_pool1d(input, 3)
+        >>> print(output)
+        [[2.  2.5 3. ]
+         [3.  3.5 4. ]]
+    """
+    return adaptive_avg_pool1d_op(input, output_size)
+
+
+def adaptive_avg_pool2d_grad_ext(grad_output, x):
+    r"""
+
+    """
+    return adaptive_avg_pool2d_grad_ext_op(grad_output, x)
+
+
 def add_ext(input, other, alpha=1):
     r"""
     Adds scaled other value to input Tensor.
@@ -141,12 +256,12 @@ def add_ext(input, other, alpha=1):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         alpha (number.Number): A scaling factor applied to `other`, default 1.
 
     Returns:
@@ -180,6 +295,20 @@ def add_ext(input, other, alpha=1):
     return add_ext_op(input, other, alpha)
 
 
+def add_layer_norm_grad(dy, x1, x2, rstd, mean, gamma, dsumOptional):
+    r"""
+
+    """
+    return add_layer_norm_grad_op(dy, x1, x2, rstd, mean, gamma, dsumOptional)
+
+
+def add_layernorm_v2(x1, x2, gamma, beta, epsilon=1e-5, additionalOut=False):
+    r"""
+
+    """
+    return add_layernorm_v2_op(x1, x2, gamma, beta, epsilon, additionalOut)
+
+
 def add(input, other):
     r"""
     Adds other value to input Tensor.
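Editor's note: per the `add_ext` docstring above, the op computes out = input + alpha * other. A NumPy
sketch of the same arithmetic, for reference (values are made up for illustration):

    import numpy as np

    x = np.array([1.0, 2.0])
    other = np.array([10.0, 20.0])
    alpha = 0.5

    # what ops.add_ext(x, other, alpha) is documented to compute
    out = x + alpha * other
    print(out)   # [ 6. 12.]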
@@ -200,12 +329,12 @@ def add(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
@@ -353,6 +482,72 @@ def argmax_ext(input, dim=None, keepdim=False):
     return argmax_ext_op(input, dim, keepdim)
 
 
+def argmin_ext(input, dim=None, keepdim=False):
+    r"""
+    Return the indices of the minimum values of a tensor across a dimension.
+
+    Args:
+        input (Tensor): Input tensor.
+        dim (Union[int, None], optional): Specify the axis for calculation. If `dim` is ``None`` , the indices of the minimum
+            value within the flattened input will be returned. Default: ``None`` .
+        keepdim (bool, optional): Whether the output tensor retains the specified
+            dimension. Ignored if `dim` is None. Default: ``False`` .
+
+    Returns:
+        Tensor, indices of the minimum values of the input tensor across a dimension.
+
+    Raises:
+        TypeError: If `keepdim` is not bool.
+        ValueError: If `dim` is out of range.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore import mint
+        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
+        >>> output = mint.argmin(x, dim=-1)
+        >>> print(output)
+        [0 1 2]
+    """
+    return argmin_ext_op(input, dim, keepdim)
+
+
+def asin_ext(input):
+    r"""
+    Computes arcsine of input tensors element-wise.
+
+    .. math::
+
+        out_i = \sin^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+        >>> output = ops.asin_ext(input)
+        >>> print(output)
+        [0.8330927  0.04001068 0.30469266 0.59438497]
+    """
+    return asin_ext_op(input)
+
+
 def asin(input):
     r"""
     Computes arcsine of input tensors element-wise.
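Editor's note: `mint.argmin` in the example above follows NumPy's axis semantics, so the sample result
can be sanity-checked against NumPy directly:

    import numpy as np

    x = np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]], dtype=np.float32)
    print(np.argmin(x, axis=-1))   # [0 1 2], matching the mint.argmin example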
@@ -387,6 +582,38 @@ def asin(input):
     return asin_op(input)
 
 
+def asinh_ext(input):
+    r"""
+    Computes inverse hyperbolic sine of the input element-wise.
+
+    .. math::
+
+        out_i = \sinh^{-1}(input_i)
+
+    Args:
+        input (Tensor): The input tensor of inverse hyperbolic sine function.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
+        >>> output = ops.asinh_ext(input)
+        >>> print(output)
+        [-2.3124385  1.1947632  1.8184465  5.298342 ]
+    """
+    return asinh_ext_op(input)
+
+
 def asinh(input):
     r"""
     Computes inverse hyperbolic sine of the input element-wise.
@@ -522,7 +749,9 @@ def atan2_ext(input, other):
             its shape is able to broadcast with `input`.
 
     Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
+        Tensor, the shape is the same as the one after broadcasting.
+        The dtype of output is float32 when dtype of `input` is in
+        [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
 
     Raises:
         TypeError: If `input` or `other` is not a Tensor or scalar.
@@ -586,6 +815,39 @@ def atan2(input, other):
     return atan2_op(input, other)
 
 
+def atan_ext(input):
+    r"""
+    Computes the trigonometric inverse tangent of the input element-wise.
+
+    .. math::
+
+        out_i = \tan^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([1.0, 0.0]), mindspore.float32)
+        >>> output = ops.atan_ext(input)
+        >>> print(output)
+        [0.7853982 0.       ]
+    """
+    return atan_ext_op(input)
+
+
 def atan(input):
     r"""
     Computes the trigonometric inverse tangent of the input element-wise.
@@ -635,14 +897,12 @@ def atanh(input):
     Args:
         input (Tensor): The shape of tensor is
             :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-            The data type should be one of the following types: float16, float32.
 
     Returns:
-        A Tensor, has the same type as the input.
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -659,6 +919,50 @@ def atanh(input):
     return atanh_op(input)
 
 
+def baddbmm(input, batch1, batch2, beta, alpha):
+    r"""
+    The result is the sum of the input and a batch matrix-matrix product of matrices in batch1 and batch2.
+    The formula is defined as follows:
+
+    .. math::
+        \text{out}_{i} = \beta \text{input}_{i} + \alpha (\text{batch1}_{i} \mathbin{@} \text{batch2}_{i})
+
+    Args:
+        input (Tensor): The input Tensor. When batch1 is a :math:`(C, W, T)` Tensor and batch2 is a
+            :math:`(C, T, H)` Tensor, input must be broadcastable with :math:`(C, W, H)` Tensor.
+        batch1 (Tensor): :math:`batch1` in the above formula. Must be 3-D Tensor, dtype is same as input.
+        batch2 (Tensor): :math:`batch2` in the above formula. Must be 3-D Tensor, dtype is same as input.
+
+    Keyword Args:
+        beta (Union[float, int], optional): multiplier for input. Default: ``1`` .
+        alpha (Union[float, int], optional): multiplier for :math:`batch1 @ batch2`. Default: ``1`` .
+
+    Returns:
+        Tensor, has the same dtype as input, shape will be :math:`(C, W, H)`.
+
+    Raises:
+        TypeError: If the type of `input`, `batch1`, `batch2` is not Tensor.
+        TypeError: If the types of `input`, `batch1`, `batch2` are different.
+        ValueError: If `batch1` and `batch2` are not 3-D tensors.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.ones([1, 3, 3]).astype(np.float32))
+        >>> batch1 = Tensor(np.ones([1, 3, 4]).astype(np.float32))
+        >>> batch2 = Tensor(np.ones([1, 4, 3]).astype(np.float32))
+        >>> output = ops.baddbmm(input, batch1, batch2, beta=1, alpha=1)
+        >>> print(output)
+        [[[5. 5. 5.]
+          [5. 5. 5.]
+          [5. 5. 5.]]]
+    """
+    return baddbmm_op(input, batch1, batch2, beta, alpha)
+
+
 def bmm_ext(input, mat2):
     r"""
     Performs batch matrix-matrix multiplication of two three-dimensional tensors.
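Editor's note: the baddbmm formula above can be checked with NumPy's batched matmul; with the all-ones
inputs from the example, each dot product sums four ones, so every output entry is 1 + 4 = 5:

    import numpy as np

    x = np.ones((1, 3, 3), np.float32)
    batch1 = np.ones((1, 3, 4), np.float32)
    batch2 = np.ones((1, 4, 3), np.float32)
    beta, alpha = 1, 1

    # out_i = beta * input_i + alpha * (batch1_i @ batch2_i)
    out = beta * x + alpha * (batch1 @ batch2)
    print(out[0])   # three rows of [5. 5. 5.], matching the example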
@@ -778,7 +1082,7 @@ def broadcast_to(input, shape):
 
 cast_op=Cast()
 
-def cast(input_x, dtype):
+def cast(input, dtype):
     r"""
     Returns a tensor with the new specified data type.
 
@@ -786,17 +1090,16 @@ def cast(input_x, dtype):
     When converting complex numbers to boolean type, the imaginary part of the complex number is not
     taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
 
-    Inputs:
-        - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-          The tensor to be cast.
-        - **type** (dtype.Number) - The valid data type of the output tensor. Only constant value is allowed.
+    Args:
+        input (Union[Tensor, Number]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The tensor to be cast.
+        dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
 
-    Outputs:
-        Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
+    Returns:
+        Tensor, the shape of tensor is the same as `input`, :math:`(x_1, x_2, ..., x_R)`.
 
     Raises:
-        TypeError: If `input_x` is neither Tensor nor Number.
-        TypeError: If `type` is not a Number.
+        TypeError: If `input` is neither Tensor nor Number.
+        TypeError: If `dtype` is not a Number.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -806,16 +1109,15 @@ def cast(input_x, dtype):
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
-        >>> input_x = Tensor(input_np)
-        >>> type_dst = mindspore.int32
-        >>> cast = ops.Cast()
-        >>> output = cast(input_x, type_dst)
+        >>> input = Tensor(input_np)
+        >>> dtype = mindspore.int32
+        >>> output = ops.cast(input, dtype)
         >>> print(output.dtype)
         Int32
         >>> print(output.shape)
         (2, 3, 4, 5)
     """
-    return cast_op(input_x, dtype)
+    return cast_op(input, dtype)
 
 
 def ceil(input):
@@ -1198,6 +1500,13 @@ def contiguous(input):
     return contiguous_op(input)
 
 
+def copy_ext(variable, value):
+    r"""
+
+    """
+    return copy_ext_op(variable, value)
+
+
 def copy(input):
     r"""
 
@@ -1313,16 +1622,19 @@ def cosh(input):
         out_i = \cosh(input_i)
 
     Args:
-        input (Tensor): The input tensor of hyperbolic cosine function, its data type
-            must be float16, float32, float64, complex64 or complex128.
+        input (Tensor): The input tensor of hyperbolic cosine function.
+            Supported dtypes:
+
+            - GPU/CPU: float16, float32, float64, complex64 or complex128.
+            - Ascend: bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
 
     Returns:
         Tensor, has the same shape as `input`.
 
-    Raises:
-        TypeError: If the dtype of `input` is not one of the following types:
-            float16, float32, float64, complex64, complex128.
-        TypeError: If `input` is not a Tensor.
+    :raise TypeError: If `input` is not a Tensor.
+    :raise TypeError:
+        * CPU/GPU: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
+        * Ascend: If dtype of `input` is not bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1343,15 +1655,63 @@ def cosh(input):
     return cosh_op(input)
 
 
-def cummax(input, axis):
+def count_nonzero(input, dim=None):
     r"""
-    Returns a tuple (values,indices) where 'values' is the cumulative maximum value of input Tensor `input`
-    along the dimension `axis`, and `indices` is the index location of each maximum value.
+    Counts the number of non-zero values in the tensor input along the given dim. If no dim is specified then all non-zeros in the tensor are counted.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Input data is used to count non-zero numbers. With shape
+            :math:`(*)` where :math:`*` means, any number of additional dimensions.
+        dim (Union[int, tuple(int), list(int)], optional): The dimension to reduce.
+            Default value: ``None``, which means the non-zero elements are counted over the whole tensor.
+
+    Returns:
+        Tensor, number of nonzero elements across dim specified by `dim`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `dim` is not int, tuple(int), list(int) or None.
+        ValueError: If any value in `dim` is not in range [-x.ndim, x.ndim).
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import Tensor, ops
+        >>> import numpy as np
+        >>> import mindspore
+        >>> # case 1: each value specified.
+        >>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
+        >>> nonzero_num = ops.count_nonzero(input=x, dim=[0, 1])
+        >>> print(nonzero_num)
+        [[3]]
+        >>> # case 2: all value is default.
+        >>> nonzero_num = ops.count_nonzero(input=x)
+        >>> print(nonzero_num)
+        3
+        >>> # case 3: dim value was specified 0.
+        >>> nonzero_num = ops.count_nonzero(input=x, dim=[0,])
+        >>> print(nonzero_num)
+        [1 2 0]
+        >>> # case 4: dim value was specified 1.
+        >>> nonzero_num = ops.count_nonzero(input=x, dim=[1,])
+        >>> print(nonzero_num)
+        [1 2]
+    """
+    return count_nonzero_op(input, dim)
 
-    .. math::
-        \begin{array}{ll} \\
-            y_{i} = \max(x_{1}, x_{2}, ... , x_{i})
-        \end{array}
+
+def cummax(input, axis):
+    r"""
+    Returns a tuple (values,indices) where 'values' is the cumulative maximum value of input Tensor `input`
+    along the dimension `axis`, and `indices` is the index location of each maximum value.
+
+    .. math::
+        \begin{array}{ll} \\
+            y_{i} = \max(x_{1}, x_{2}, ... , x_{i})
+        \end{array}
 
     Args:
@@ -1367,14 +1727,17 @@ def cummax(input, axis):
         TypeError: If `axis` is not an int.
         ValueError: If `axis` is out of the range of `[-input.ndim, input.ndim - 1]`.
 
+    .. note::
+        O2 mode is not supported in Ascend.
+
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
+        >>> from mindspore import ops
         >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
         >>> output = ops.cummax(x, axis=0)
         >>> print(output[0])
@@ -1388,8 +1751,50 @@ def cummax(input, axis):
          [2 1 2 0]
          [2 1 2 0]]
     """
-    cummax_op = _get_cache_prim(Cummax)(axis)
-    return cummax_op(input)
+    return cummax_impl(input, axis)
+
+
+def cummin_ext(input, dim):
+    r"""
+    Returns a tuple (values, indices) where `values` is the cumulative minimum value of input Tensor `input`
+    along the dimension `dim`, and `indices` is the index location of each minimum value.
+
+    .. math::
+        \begin{array}{ll} \\
+            y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
+        \end{array}
+
+    Args:
+        input (Tensor): The input Tensor, whose rank must be greater than 0.
+        dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
+
+    Returns:
+        tuple [Tensor], tuple of 2 Tensors, containing the cumulative minimum of elements and the index.
+        The shape of each output tensor is the same as that of input `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `input` is a Tensor, but the type is complex or bool.
+        TypeError: If `dim` is not an int.
+        ValueError: If `dim` is out of the range of `[-input.ndim, input.ndim - 1]`.
+
+    .. note::
+        O2 mode is not supported in Ascend.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import Tensor, ops
+        >>> import mindspore
+        >>> a = Tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], mindspore.float32)
+        >>> output = ops.cummin_ext(a, dim=0)
+        >>> print(output[0])
+        [-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
+        >>> print(output[1])
+        [0 1 1 1 4 4]
+    """
+    return cummin_ext_op(input, dim)
 
 
 def cumsum_ext(input, dim, dtype=None):
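Editor's note: a pure-NumPy reference for the cummin_ext example above (ties keep the earliest index
here, which matches the sample output):

    import numpy as np

    a = np.array([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], np.float32)
    values = np.minimum.accumulate(a)                                  # y_i = min(x_1, ..., x_i)
    indices = np.array([np.argmin(a[:i + 1]) for i in range(len(a))])  # position of each running min
    print(values)    # [-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
    print(indices)   # [0 1 1 1 4 4]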
@@ -1503,7 +1908,8 @@ def dense(input, weight, bias=None):
         output = input * weight^{T} + bias
 
     .. warning::
-        This is an experimental API that is subject to change or deletion.
+        - This is an experimental API that is subject to change or deletion.
+        - In PYNATIVE mode, if `bias` is not 1D, the `input` cannot be greater than 6D.
 
     Args:
         input (Tensor): Input Tensor of shape :math:`(*, in\_channels)`,
@@ -1520,6 +1926,7 @@ def dense(input, weight, bias=None):
         TypeError: If `input` is not Tensor.
         TypeError: If `weight` is not Tensor.
         TypeError: If `bias` is not Tensor.
+        RuntimeError: If `bias` is not 1D and `input` is greater than 6D in PYNATIVE mode.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
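Editor's note: the dense formula above (output = input * weight^T + bias) is ordinary matrix algebra;
a NumPy sketch with made-up shapes:

    import numpy as np

    x = np.random.randn(2, 3).astype(np.float32)   # (*, in_channels)
    w = np.random.randn(4, 3).astype(np.float32)   # (out_channels, in_channels)
    b = np.random.randn(4).astype(np.float32)      # 1D bias

    out = x @ w.T + b   # output = input * weight^T + bias
    print(out.shape)    # (2, 4)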
@@ -1731,6 +2138,104 @@ def elu(input_x, alpha=1.0):
     return elu_op(input_x)
 
 
+def embedding_apply_ada_grad(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_ada_grad_op(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_adam(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_adam_op(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_adam_w(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad=(0,), mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_adam_w_op(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_ftrl(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_ftrl_op(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_rmsprop(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_rmsprop_op(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_sgd(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_sgd_op(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_feature_mapping_export(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id):
+    r"""
+
+    """
+    return embedding_feature_mapping_export_op(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id)
+
+
+def embedding_feature_mapping_file_size(file_path, table_name, global_step, embedding_dim, only_offset_flag=True):
+    r"""
+
+    """
+    return embedding_feature_mapping_file_size_op(file_path, table_name, global_step, embedding_dim, only_offset_flag)
+
+
+def embedding_feature_mapping_find(table_name, feature_size, num=1):
+    r"""
+
+    """
+    return embedding_feature_mapping_find_op(table_name, feature_size, num)
+
+
+def embedding_feature_mapping_import(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag=True, num=1):
+    r"""
+
+    """
+    return embedding_feature_mapping_import_op(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num)
+
+
+def embedding_feature_mapping_insert(table_name, num, feature_id, offset_id):
+    r"""
+
+    """
+    return embedding_feature_mapping_insert_op(table_name, num, feature_id, offset_id)
+
+
+def embedding_feature_mapping_table_size(table_name):
+    r"""
+
+    """
+    return embedding_feature_mapping_table_size_op(table_name)
+
+
+def embedding_feature_mapping_v2(table_name, feature_id, table_total_size, table_actual_size):
+    r"""
+
+    """
+    return embedding_feature_mapping_v2_op(table_name, feature_id, table_total_size, table_actual_size)
+
+
+def embedding_table_evict(var_handle, global_step, steps_to_live=0):
+    r"""
+
+    """
+    return embedding_table_evict_op(var_handle, global_step, steps_to_live)
+
+
 def equal(input, other):
     r"""
     Computes the equivalence between two tensors element-wise.
@@ -1834,15 +2339,19 @@ def erfc(input):
         input (Tensor): The input tensor of the complementary error function, :math:`x` in the above formula.
             Supported dtypes:
 
-            - Ascend: float16, float32.
+            - Ascend: float16, float32, float64, int64, bool, bfloat16.
             - GPU/CPU: float16, float32, float64.
 
     Returns:
-        Tensor, has the same shape and dtype as `input`.
+        Tensor.
+        The dtype of output is float32 when dtype of `input` is in
+        [bool, int64]. Otherwise output has the same dtype as `input`.
 
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64.
+    :raise TypeError: If `input` is not a Tensor.
+    :raise TypeError: If dtype of `input` is not the following:
+
+        * Ascend: float16, float32, float64, int64, bool, bfloat16.
+        * GPU/CPU: float16, float32, float64.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
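Editor's note: 2.4.1 also corrects the sample output of `erfc` (the 2.3.0 values were slightly off).
The corrected values shown in the next hunk can be reproduced with SciPy's reference implementation,
if SciPy is available:

    import numpy as np
    from scipy import special

    x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
    print(special.erfc(x))
    # ~[1.8427008e+00 1.0000000e+00 1.5729921e-01 4.6777348e-03 2.2090497e-05]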
@@ -1851,17 +2360,17 @@ def erfc(input):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
-        >>> output = ops.erfc(x)
+        >>> input = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
+        >>> output = ops.erfc(input)
         >>> print(output)
-        [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
+        [1.8427008e+00 1.0000000e+00 1.5729921e-01 4.6777348e-03 2.2090497e-05]
     """
     return erfc_op(input)
 
 
 def erfinv(input):
     r"""
-    Returns the result of the inverse error function with `input`, which is defined in the range `(-1, 1)` as:
+    Returns the result of the inverse error function with `input`. It is defined in the range `(-1, 1)` as:
 
     .. math::
 
@@ -2119,7 +2628,7 @@ def fft2(input, s=None, dim=(-2, -1), norm=None):
         dim (tuple[int], optional): The dimension along which to take the one dimensional `fft2`.
             Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
         norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
-            Three modes are defined as,
+            Three modes are defined as, where :math:`n = prod(s)`:
 
             - ``"backward"`` (no normalization).
             - ``"forward"`` (normalize by :math:`1/n`).
@@ -2138,6 +2647,7 @@ def fft2(input, s=None, dim=(-2, -1), norm=None):
         ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
         ValueError: If `dim` has duplicate values.
         ValueError: If `s` is less than 1.
+        ValueError: If `s` and `dim` are given but have different shapes.
         ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
 
     Supported Platforms:
@@ -2147,12 +2657,12 @@ def fft2(input, s=None, dim=(-2, -1), norm=None):
         >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> input = ops.ones((4, 4))
-        >>> ops.fft2(input, s=(4, 4), dim=(0, 1), norm="backward")
-        Tensor(shape=[4, 4], dtype=Complex64, value=
-        [[16+0j, 0+0j, 0+0j, 0+0j],
-         [0+0j, 0+0j, 0+0j, 0+0j],
-         [0+0j, 0+0j, 0+0j, 0+0j],
-         [0+0j, 0+0j, 0+0j, 0+0j]])
+        >>> out = ops.fft2(input, s=(4, 4), dim=(0, 1), norm="backward")
+        >>> print(out)
+        [[16.+0.j  0.+0.j  0.+0.j  0.+0.j]
+         [ 0.+0.j  0.+0.j  0.+0.j  0.+0.j]
+         [ 0.+0.j  0.+0.j  0.+0.j  0.+0.j]
+         [ 0.+0.j  0.+0.j  0.+0.j  0.+0.j]]
     """
     return fft2_op(input, s, dim, norm)
 
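Editor's note: the normalization factor n in the modes listed above is n = prod(s). A NumPy sketch of
the "backward" vs. "forward" scaling on the same 4x4 all-ones input used in the example:

    import numpy as np

    x = np.ones((4, 4))
    out = np.fft.fft2(x)            # "backward": no scaling, DC bin holds 16
    print(out[0, 0].real)           # 16.0
    print(out[0, 0].real / x.size)  # 1.0 -- the "forward" result, since n = prod(s) = 16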
@@ -2205,12 +2715,51 @@ def fft(input, n=None, dim=-1, norm=None):
         >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
-        >>> ops.fft(input)
-        Tensor(shape=[4], dtype=Complex64, value= [-0.588551+0j, 2.15252-0.461212j, 2.7809+0j, 2.15252+0.461212j])
+        >>> out = ops.fft(input, n=4, dim=-1, norm="backward")
+        >>> print(out)
+        [-0.5885514+0.j         2.1525173-0.46121222j 2.7808986+0.j
+          2.1525173+0.46121222j]
     """
     return fft_op(input, n, dim, norm)
 
 
+def fftfreq(n, d=1.0, dtype=None):
+    r"""
+    Computes the discrete Fourier Transform sample frequencies for a signal of size `n`.
+    For instance, given a length `n` and a sample spacing `d` , the returned result `f` is:
+
+    .. math::
+        f = [0, 1, ..., (n - 1) // 2, -(n // 2), ..., -1] / (d * n)
+
+    Note:
+        - `fftfreq` is currently only used in `mindscience` scientific computing scenarios and
+          does not support other usage scenarios.
+        - `fftfreq` is not supported on Windows platform yet.
+
+    Args:
+        n (int): Window length.
+        d (float, optional): Sample spacing (inverse of the sampling rate). Default: ``1.0`` .
+        dtype (mindspore.dtype, optional): The dtype of the returned frequencies. Default: ``None``, which represents float32.
+
+    Returns:
+        Tensor, array of length ``n`` containing the sample frequencies.
+
+    Raises:
+        ValueError: If `n` is less than 1.
+
+    Supported Platforms:
+        ``Ascend`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import ops
+        >>> out = ops.fftfreq(n=4, d=1.0)
+        >>> print(out)
+        [ 0.    0.25 -0.5  -0.25]
+    """
+    return fftfreq_op(n, d, dtype)
+
+
 def fftn(input, s=None, dim=None, norm=None):
     r"""
     Computes the N dimensional discrete Fourier transform of `input`.
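Editor's note: the fftfreq formula above matches NumPy's np.fft.fftfreq, which can serve as a
reference:

    import numpy as np

    n, d = 4, 1.0
    k = np.concatenate([np.arange(0, (n - 1) // 2 + 1), np.arange(-(n // 2), 0)])
    print(k / (d * n))           # [ 0.    0.25 -0.5  -0.25], as in the example above
    print(np.fft.fftfreq(n, d))  # same result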
@@ -2232,7 +2781,7 @@ def fftn(input, s=None, dim=None, norm=None):
         dim (tuple[int], optional): The dimension along which to take the one dimensional `fftn`.
             Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if s is given.
         norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
-            Three modes are defined as,
+            Three modes are defined as, where :math:`n = prod(s)`:
 
             - ``"backward"`` (no normalization).
             - ``"forward"`` (normalize by :math:`1/n`).
@@ -2261,12 +2810,12 @@ def fftn(input, s=None, dim=None, norm=None):
         >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> input = ops.ones((2, 2, 2))
-        >>> ops.fftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
-        Tensor(shape=[2, 2, 2], dtype=Complex64, value=
-        [[[8+0j, 0+0j],
-          [0+0j, 0+0j]],
-         [[0+0j, 0+0j],
-          [0+0j, 0+0j]]])
+        >>> out = ops.fftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
+        >>> print(out)
+        [[[8.+0.j 0.+0.j]
+          [0.+0.j 0.+0.j]]
+         [[0.+0.j 0.+0.j]
+          [0.+0.j 0.+0.j]]]
     """
     return fftn_op(input, s, dim, norm)
 
@@ -2313,8 +2862,6 @@ def flatten_ext(input, start_dim=0, end_dim=-1):
 
     Args:
         input (Tensor): The input Tensor.
-
-    Keyword Args:
         start_dim (int, optional): The first dimension to flatten. Default: ``0`` .
         end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .
 
@@ -2334,9 +2881,9 @@ def flatten_ext(input, start_dim=0, end_dim=-1):
     Examples:
         >>> import mindspore
         >>> import numpy as np
-        >>> from mindspore import Tensor, mint
+        >>> from mindspore import Tensor, ops
         >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
-        >>> output = mint.flatten(input_x)
+        >>> output = ops.auto_generate.flatten_ext(input_x)
         >>> print(output.shape)
         (24,)
     """
@@ -2569,7 +3116,7 @@ def gather(input_params, input_indices, axis, batch_dims=0):
         On CPU and GPU, an error is raised if an out-of-bound index is found. On Ascend, the results may be
         undefined.
         2. The data type of input_params cannot be
-        `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
+        `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ on Ascend
         platform currently.
 
     Args:
@@ -2758,11 +3305,11 @@ def greater(input, other):
 
     Args:
         input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ .
         other (Union[Tensor, Number]): The second input, which is a Number or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
@@ -2783,42 +3330,14 @@ def greater(input, other):
2783
3330
  return greater_op(input, other)
2784
3331
 
2785
3332
 
2786
- def deepcopy(input_x):
2787
- r"""
2788
- Returns a deepcopy of input tensor.
2789
-
2790
- Args:
2791
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
2792
-
2793
- Returns:
2794
- Tensor, a deepcopy of `input_x`.
2795
-
2796
- Raises:
2797
- TypeError: If `input_x` is not a Tensor.
2798
-
2799
- Supported Platforms:
2800
- ``Ascend`` ``GPU`` ``CPU``
2801
-
2802
- Examples:
2803
- >>> import mindspore
2804
- >>> from mindspore import Tensor, ops
2805
- >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
2806
- >>> output = ops.deepcopy(input)
2807
- >>> print(output)
2808
- [[0 1]
2809
- [2 1]]
2810
- """
2811
- return identity_op(input_x)
2812
-
2813
-
2814
- def ifft2(input, s=None, dim=(-2, -1), norm=None):
3333
+ def hfft2(input, s=None, dim=(-2, -1), norm=None):
2815
3334
  r"""
2816
- Computes the two dimensional inverse discrete Fourier transform of `input`.
3335
+ Calculates the two dimensional discrete Fourier transform of a Hermitian symmetric `input`.
2817
3336
 
2818
3337
  Note:
2819
- - `ifft2` is currently only used in `mindscience` scientific computing scenarios and
3338
+ - `hfft2` is currently only used in `mindscience` scientific computing scenarios and
2820
3339
  does not support other usage scenarios.
2821
- - `ifft2` is not supported on Windows platform yet.
3340
+ - `hfft2` is not supported on Windows platform yet.
2822
3341
 
2823
3342
  Args:
2824
3343
  input (Tensor): The input tensor.
@@ -2827,20 +3346,21 @@ def ifft2(input, s=None, dim=(-2, -1), norm=None):
2827
3346
  - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
2828
3347
 
2829
3348
  s (tuple[int], optional): Length of the transformed `dim` of the result.
2830
- If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifft2`.
3349
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `hfft2`.
2831
3350
  Default: ``None`` , which does not need to process `input`.
2832
- dim (tuple[int], optional): The dimension along which to take the one dimensional `ifft2`.
3351
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `hfft2`.
2833
3352
  Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
2834
3353
  norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
2835
- Three modes are defined as,
3354
+ Three modes are defined as follows, where :math:`n = prod(s)`:
2836
3355
 
2837
3356
  - ``"backward"`` (no normalization).
2838
- - ``"forward"`` (normalize by :math:`1*n`).
2839
- - ``"ortho"`` (normalize by :math:`1*\sqrt{n}`).
3357
+ - ``"forward"`` (normalize by :math:`1/n`).
3358
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
2840
3359
 
2841
3360
  Returns:
2842
- Tensor, The result of `ifft2()` function. The default is the same shape as `input`.
2843
- If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
3361
+ Tensor, The result of `hfft2()` function.
3362
+ If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
3363
+ result.shape[dim[-1]] is :math:`(s[-1] - 1) * 2`, otherwise :math:`(input.shape[dim[-1]] - 1) * 2`.
2844
3364
  When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
2845
3365
  When the input is float64 or complex128, the return value type is complex128.
2846
3366
 
@@ -2851,6 +3371,7 @@ def ifft2(input, s=None, dim=(-2, -1), norm=None):
2851
3371
  ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
2852
3372
  ValueError: If `dim` has duplicate values.
2853
3373
  ValueError: If `s` is less than 1.
3374
+ ValueError: If `s` and `dim` are given but have different shapes.
2854
3375
  ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
2855
3376
 
2856
3377
  Supported Platforms:
@@ -2860,24 +3381,24 @@ def ifft2(input, s=None, dim=(-2, -1), norm=None):
2860
3381
  >>> import mindspore
2861
3382
  >>> from mindspore import Tensor, ops
2862
3383
  >>> input = ops.ones((4, 4))
2863
- >>> ops.ifft2(input, s=(4, 4), dim=(0, 1), norm="backward")
2864
- Tensor(shape=[4, 4], dtype=Complex64, value=
2865
- [[1+0j, 0+0j, 0+0j, 0+0j],
2866
- [0+0j, 0+0j, 0+0j, 0+0j],
2867
- [0+0j, 0+0j, 0+0j, 0+0j],
2868
- [0+0j, 0+0j, 0+0j, 0+0j]])
3384
+ >>> out = ops.hfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
3385
+ >>> print(out)
3386
+ [[16. 0. 0. 0.]
3387
+ [ 0. 0. 0. 0.]
3388
+ [ 0. 0. 0. 0.]
3389
+ [ 0. 0. 0. 0.]]
2869
3390
  """
2870
- return ifft2_op(input, s, dim, norm)
3391
+ return hfft2_op(input, s, dim, norm)
2871
3392
 
2872
3393
 
2873
- def ifft(input, n=None, dim=-1, norm=None):
3394
+ def hfft(input, n=None, dim=-1, norm=None):
2874
3395
  r"""
2875
- Calculates the inverse of `fft()`.
3396
+ Calculates the one dimensional discrete Fourier transform of a Hermitian symmetric `input` signal.
2876
3397
 
2877
3398
  Note:
2878
- - `ifft` is currently only used in `mindscience` scientific computing scenarios and
3399
+ - `hfft` is currently only used in `mindscience` scientific computing scenarios and
2879
3400
  does not support other usage scenarios.
2880
- - `ifft` is not supported on Windows platform yet.
3401
+ - `hfft` is not supported on Windows platform yet.
2881
3402
 
2882
3403
  Args:
2883
3404
  input (Tensor): The input tensor.
@@ -2886,19 +3407,22 @@ def ifft(input, n=None, dim=-1, norm=None):
2886
3407
  - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
2887
3408
 
2888
3409
  n (int, optional): Length of the transformed `dim` of the result.
2889
- If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `ifft`.
3410
+ If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `hfft`.
2890
3411
  Default: ``None`` , which does not need to process `input`.
2891
- dim (int, optional): The dimension along which to take the one dimensional `ifft`.
3412
+ dim (int, optional): The dimension along which to take the one dimensional `hfft`.
2892
3413
  Default: ``-1`` , which means transform the last dimension of `input`.
2893
3414
  norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
2894
3415
  Three modes are defined as,
2895
3416
 
2896
3417
  - ``"backward"`` (no normalization).
2897
- - ``"forward"`` (normalize by :math:`1*n`).
2898
- - ``"ortho"`` (normalize by :math:`1*\sqrt{n}`).
3418
+ - ``"forward"`` (normalize by :math:`1/n`).
3419
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
2899
3420
 
2900
3421
  Returns:
2901
- Tensor, The result of `ifft()` function.
3422
+ Tensor, The result of `hfft()` function.
3423
+ If `n` is given, result.shape[dim] is :math:`(n - 1) * 2`, otherwise :math:`(input.shape[dim] - 1) * 2`.
3424
+ When the `input` is int16, int32, int64, float16, float32, complex64, the return value type is float32.
3425
+ When the `input` is float64 or complex128, the return value type is float64.
2902
3426
 
2903
3427
  Raises:
2904
3428
  TypeError: If the `input` type is not Tensor.
@@ -2915,20 +3439,21 @@ def ifft(input, n=None, dim=-1, norm=None):
2915
3439
  >>> import mindspore
2916
3440
  >>> from mindspore import Tensor, ops
2917
3441
  >>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
2918
- >>> ops.ifft(input)
2919
- Tensor(shape=[4], dtype=Complex64, value= [-0.147138+0j, 0.538129+0.115303j, 0.695225+0j, 0.538129-0.115303j])
3442
+ >>> out = ops.hfft(input, n=4, dim=-1, norm="backward")
3443
+ >>> print(out)
3444
+ [-0.12733912 2.1525173 2.3196864 2.1525173 ]
2920
3445
  """
2921
- return ifft_op(input, n, dim, norm)
3446
+ return hfft_op(input, n, dim, norm)
2922
3447
 
2923
3448
 
2924
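NumPy's `numpy.fft.hfft` follows the same Hermitian-input convention, so the doctest output can be verified independently (illustrative only):

import numpy as np

a = np.array([1.6243454, -0.6117564, -0.5281718, -1.0729686])
# n=4 keeps n // 2 + 1 = 3 one-sided spectrum points and expands them to a
# length-4 real signal, as in the MindSpore example above.
print(np.fft.hfft(a, n=4))  # approx. [-0.1273391 2.1525173 2.3196864 2.1525173]
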
- def ifftn(input, s=None, dim=None, norm=None):
3449
+ def hfftn(input, s=None, dim=None, norm=None):
2925
3450
  r"""
2926
- Computes the N dimensional inverse discrete Fourier transform of `input`.
3451
+ Calculates the N dimensional discrete Fourier transform of a Hermitian symmetric `input`.
2927
3452
 
2928
3453
  Note:
2929
- - `ifftn` is currently only used in `mindscience` scientific computing scenarios and
3454
+ - `hfftn` is currently only used in `mindscience` scientific computing scenarios and
2930
3455
  does not support other usage scenarios.
2931
- - `ifftn` is not supported on Windows platform yet.
3456
+ - `hfftn` is not supported on Windows platform yet.
2932
3457
 
2933
3458
  Args:
2934
3459
  input (Tensor): The input tensor.
@@ -2937,20 +3462,21 @@ def ifftn(input, s=None, dim=None, norm=None):
2937
3462
  - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
2938
3463
 
2939
3464
  s (tuple[int], optional): Length of the transformed `dim` of the result.
2940
- If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifftn`.
3465
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `hfftn`.
2941
3466
  Default: ``None`` , which does not need to process `input`.
2942
- dim (tuple[int], optional): The dimension along which to take the one dimensional `ifftn`.
2943
- Default: ``None`` , which means transform the all dimension of `input`, or the last `len(s)` dimensions if s is given.
3467
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `hfftn`.
3468
+ Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
2944
3469
  norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
2945
- Three modes are defined as,
3470
+ Three modes are defined as follows, where :math:`n = prod(s)`:
2946
3471
 
2947
3472
  - ``"backward"`` (no normalization).
2948
- - ``"forward"`` (normalize by :math:`1*n`).
2949
- - ``"ortho"`` (normalize by :math:`1*\sqrt{n}`).
3473
+ - ``"forward"`` (normalize by :math:`1/n`).
3474
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
2950
3475
 
2951
3476
  Returns:
2952
- Tensor, The result of `ifftn()` function. The default is the same shape as `input`.
2953
- If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
3477
+ Tensor, The result of `hfftn()` function.
3478
+ If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
3479
+ result.shape[dim[-1]] is :math:`(s[-1] - 1) * 2`, otherwise :math:`(input.shape[dim[-1]] - 1) * 2`.
2954
3480
  When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
2955
3481
  When the input is float64 or complex128, the return value type is complex128.
2956
3482
 
@@ -2970,32 +3496,418 @@ def ifftn(input, s=None, dim=None, norm=None):
2970
3496
  Examples:
2971
3497
  >>> import mindspore
2972
3498
  >>> from mindspore import Tensor, ops
2973
- >>> input = ops.ones((2, 2, 2))
2974
- >>> ops.ifftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
2975
- Tensor(shape=[2, 2, 2], dtype=Complex64, value=
2976
- [[[1+0j, 0+0j],
2977
- [0+0j, 0+0j]],
2978
- [[0+0j, 0+0j],
2979
- [0+0j, 0+0j]]])
3499
+ >>> input = ops.ones((4, 4))
3500
+ >>> out = ops.hfftn(input, s=(4, 4), dim=(0, 1), norm="backward")
3501
+ >>> print(out)
3502
+ [[16. 0. 0. 0.]
3503
+ [ 0. 0. 0. 0.]
3504
+ [ 0. 0. 0. 0.]
3505
+ [ 0. 0. 0. 0.]]
2980
3506
  """
2981
- return ifftn_op(input, s, dim, norm)
3507
+ return hfftn_op(input, s, dim, norm)
2982
3508
 
2983
3509
 
2984
- def ifftshift(input, dim=None):
3510
+ def histc_ext(input, bins=100, min=0, max=0):
2985
3511
  r"""
2986
- The inverse of :func:`mindspore.ops.fftshift` .
3512
+ Computes the histogram of a tensor.
2987
3513
 
2988
- Note:
2989
- - `ifftshift` is currently only used in `mindscience` scientific computing scenarios and
2990
- dose not support other usage scenarios.
2991
- - `ifftshift` is not supported on Windows platform yet.
3514
+ The elements are sorted into equal width bins between `min` and `max`.
3515
+ If `min` and `max` are both zero, the minimum and maximum values of the data are used.
2992
3516
 
2993
- Args:
2994
- input (Tensor): Input tensor.
2995
- dim (Union[int, list(int), tuple(int)], optional): The dimensions which to shift.
2996
- Default is ``None``, which shifts all dimensions.
3517
+ Elements lower than `min` or higher than `max` are ignored.
2997
3518
 
2998
- Returns:
3519
+ .. warning::
3520
+ This is an experimental API that is subject to change or deletion.
3521
+ If `input` is int64, its valid values must fit within int32; values outside that range may cause precision errors.
3522
+
3523
+ Args:
3524
+ input (Tensor): The input tensor.
3525
+ bins (int, optional): Number of histogram bins. If specified, must be positive. Default: ``100`` .
3526
+ min (int, float, optional): The lower end of the range (inclusive). Default: ``0`` .
3527
+ max (int, float, optional): The upper end of the range (inclusive). Default: ``0`` .
3528
+
3529
+ Returns:
3530
+ A 1-D Tensor, has the same type as `input` with the shape :math:`(bins, )`.
3531
+
3532
+ Raises:
3533
+ TypeError: If `input` is not a Tensor.
3534
+ TypeError: If `input` datatype is not in support list.
3535
+ TypeError: If attr `min` or `max` is not float or int.
3536
+ TypeError: If attr `bins` is not int.
3537
+ ValueError: If attr value `min` > `max`.
3538
+ ValueError: If attr `bins` <= 0.
3539
+
3540
+ Supported Platforms:
3541
+ ``Ascend``
3542
+
3543
+ Examples:
3544
+ >>> from mindspore import Tensor, ops
3545
+ >>> x = Tensor([1., 2, 1])
3546
+ >>> y = ops.histc_ext(x, bins=4, min=0, max=3)
3547
+ >>> print(y)
3548
+ [0 2 1 0]
3549
+ """
3550
+ return histc_ext_op(input, bins, min, max)
3551
+
3552
+
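The binning rule is the same as `numpy.histogram` with an explicit `range`, which makes the doctest easy to verify (a sketch, not MindSpore code):

import numpy as np

x = np.array([1.0, 2.0, 1.0])
# Four equal-width bins over [0, 3]: [0, 0.75), [0.75, 1.5), [1.5, 2.25), [2.25, 3].
hist, edges = np.histogram(x, bins=4, range=(0, 3))
print(hist)  # [0 2 1 0]: both 1.0s fall in the second bin, 2.0 in the third
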
3553
+ def hardshrink(input, lambd=0.5):
3554
+ r"""
3555
+ Hard Shrink activation function. Calculates the output according to the input elements.
3556
+
3557
+ The formula is defined as follows:
3558
+
3559
+ .. math::
3560
+ \text{HardShrink}(x) =
3561
+ \begin{cases}
3562
+ x, & \text{ if } x > \lambda \\
3563
+ x, & \text{ if } x < -\lambda \\
3564
+ 0, & \text{ otherwise }
3565
+ \end{cases}
3566
+
3567
+ HShrink Activation Function Graph:
3568
+
3569
+ .. image:: ../images/HShrink.png
3570
+ :align: center
3571
+
3572
+ Args:
3573
+ input (Tensor): The input of Hard Shrink. Supported dtypes:
3574
+
3575
+ - Ascend: float16, float32, bfloat16.
3576
+ - CPU/GPU: float16, float32.
3577
+ lambd (number, optional): The threshold :math:`\lambda` defined by the Hard Shrink formula.
3578
+ Default: ``0.5`` .
3579
+
3580
+ Returns:
3581
+ Tensor, has the same data type and shape as the input `input`.
3582
+
3583
+ Raises:
3584
+ TypeError: If `lambd` is not a float, int or bool.
3585
+ TypeError: If `input` is not a tensor.
3586
+ TypeError: If dtype of `input` is not float16, float32 or bfloat16.
3587
+
3588
+ Supported Platforms:
3589
+ ``Ascend`` ``GPU`` ``CPU``
3590
+
3591
+ Examples:
3592
+ >>> import mindspore
3593
+ >>> import numpy as np
3594
+ >>> from mindspore import Tensor, ops
3595
+ >>> input = Tensor(np.array([[0.5, 1, 2.0], [0.0533, 0.0776, -2.1233]]), mindspore.float32)
3596
+ >>> output = ops.hardshrink(input)
3597
+ >>> print(output)
3598
+ [[ 0. 1. 2. ]
3599
+ [ 0. 0. -2.1233]]
3600
+ """
3601
+ return hshrink_impl(input, lambd)
3602
+
3603
+
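The piecewise definition above reduces to a single masked select; a minimal NumPy reference (the helper name `hardshrink_ref` is ours):

import numpy as np

def hardshrink_ref(x, lambd=0.5):
    # Keep elements whose magnitude strictly exceeds lambda, zero the rest.
    return np.where(np.abs(x) > lambd, x, 0.0)

x = np.array([[0.5, 1.0, 2.0], [0.0533, 0.0776, -2.1233]], dtype=np.float32)
print(hardshrink_ref(x))  # 0.5 is zeroed because the comparison is strict
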
3604
+ def hardsigmoid(input):
3605
+ r"""
3606
+ Hard Sigmoid activation function. Calculates the output according to the input elements.
3607
+
3608
+ Hard Sigmoid is defined as:
3609
+
3610
+ .. math::
3611
+ \text{HardSigmoid}(input) =
3612
+ \begin{cases}
3613
+ 0, & \text{ if } input \leq -3, \\
3614
+ 1, & \text{ if } input \geq +3, \\
3615
+ input/6 + 1/2, & \text{ otherwise }
3616
+ \end{cases}
3617
+
3618
+ HSigmoid Activation Function Graph:
3619
+
3620
+ .. image:: ../images/HSigmoid.png
3621
+ :align: center
3622
+
3623
+ Args:
3624
+ input (Tensor): The input Tensor.
3625
+
3626
+ Returns:
3627
+ Tensor, with the same type and shape as the `input`.
3628
+
3629
+ Raises:
3630
+ TypeError: If `input` is not a Tensor.
3631
+ TypeError: If dtype of `input` is neither int nor float.
3632
+
3633
+ Supported Platforms:
3634
+ ``Ascend`` ``GPU`` ``CPU``
3635
+
3636
+ Examples:
3637
+ >>> import mindspore
3638
+ >>> import numpy as np
3639
+ >>> from mindspore import Tensor, ops
3640
+ >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
3641
+ >>> output = ops.hardsigmoid(input)
3642
+ >>> print(output)
3643
+ [0.3333 0.1666 0.5 0.8335 0.6665]
3644
+ """
3645
+ return hsigmoid_op(input)
3646
+
3647
+
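The three branches collapse into one clip; a hedged NumPy reference (`hardsigmoid_ref` is our name):

import numpy as np

def hardsigmoid_ref(x):
    # Linear ramp x/6 + 1/2, saturated at 0 below -3 and at 1 above +3.
    return np.clip(x / 6.0 + 0.5, 0.0, 1.0)

print(hardsigmoid_ref(np.array([-1.0, -2.0, 0.0, 2.0, 1.0])))
# approx. [0.3333 0.1667 0.5 0.8333 0.6667], the float16 doctest values
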
3648
+ def hardswish(input):
3649
+ r"""
3650
+ Hard Swish activation function. The input is a Tensor with any valid shape.
3651
+
3652
+ Hard swish is defined as:
3653
+
3654
+ .. math::
3655
+ \text{Hardswish}(input) =
3656
+ \begin{cases}
3657
+ 0, & \text{ if } input \leq -3, \\
3658
+ input, & \text{ if } input \geq +3, \\
3659
+ input*(input + 3)/6, & \text{ otherwise }
3660
+ \end{cases}
3661
+
3662
+ HSwish Activation Function Graph:
3663
+
3664
+ .. image:: ../images/HSwish.png
3665
+ :align: center
3666
+
3667
+ Args:
3668
+ input (Tensor): The input Tensor.
3669
+
3670
+ Returns:
3671
+ Tensor, with the same type and shape as the `input`.
3672
+
3673
+ Raises:
3674
+ TypeError: If `input` is not a Tensor.
3675
+ TypeError: If dtype of `input` is neither int nor float.
3676
+
3677
+ Supported Platforms:
3678
+ ``Ascend`` ``GPU`` ``CPU``
3679
+
3680
+ Examples:
3681
+ >>> import mindspore
3682
+ >>> import numpy as np
3683
+ >>> from mindspore import Tensor, ops
3684
+ >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
3685
+ >>> output = ops.hardswish(input)
3686
+ >>> print(output)
3687
+ [-0.3333 -0.3333 0 1.667 0.6665]
3688
+ """
3689
+ return hswish_op(input)
3690
+
3691
+
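Hard Swish is the input gated by Hard Sigmoid, so the same clip trick yields a compact NumPy reference (`hardswish_ref` is our name):

import numpy as np

def hardswish_ref(x):
    # x * hardsigmoid(x): zero below -3, identity above +3, a smooth ramp between.
    return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0

print(hardswish_ref(np.array([-1.0, -2.0, 0.0, 2.0, 1.0])))
# approx. [-0.3333 -0.3333 0. 1.6667 0.6667], matching the float16 doctest
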
3692
+ def deepcopy(input_x):
3693
+ r"""
3694
+ Returns a deepcopy of input tensor.
3695
+
3696
+ Args:
3697
+ input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
3698
+
3699
+ Returns:
3700
+ Tensor, a deepcopy of `input_x`.
3701
+
3702
+ Raises:
3703
+ TypeError: If `input_x` is not a Tensor.
3704
+
3705
+ Supported Platforms:
3706
+ ``Ascend`` ``GPU`` ``CPU``
3707
+
3708
+ Examples:
3709
+ >>> import mindspore
3710
+ >>> from mindspore import Tensor, ops
3711
+ >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
3712
+ >>> output = ops.deepcopy(input)
3713
+ >>> print(output)
3714
+ [[0 1]
3715
+ [2 1]]
3716
+ """
3717
+ return identity_op(input_x)
3718
+
3719
+
3720
+ def ifft2(input, s=None, dim=(-2, -1), norm=None):
3721
+ r"""
3722
+ Computes the two dimensional inverse discrete Fourier transform of `input`.
3723
+
3724
+ Note:
3725
+ - `ifft2` is currently only used in `mindscience` scientific computing scenarios and
3726
+ does not support other usage scenarios.
3727
+ - `ifft2` is not supported on Windows platform yet.
3728
+
3729
+ Args:
3730
+ input (Tensor): The input tensor.
3731
+ Supported dtypes:
3732
+
3733
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
3734
+
3735
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
3736
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifft2`.
3737
+ Default: ``None`` , which does not need to process `input`.
3738
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `ifft2`.
3739
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
3740
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
3741
+ Three modes are defined as follows, where :math:`n = prod(s)`:
3742
+
3743
+ - ``"backward"`` (normalize by :math:`1/n`).
3744
+ - ``"forward"`` (no normalization).
3745
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
3746
+
3747
+ Returns:
3748
+ Tensor, The result of `ifft2()` function. The default is the same shape as `input`.
3749
+ If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
3750
+ When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
3751
+ When the input is float64 or complex128, the return value type is complex128.
3752
+
3753
+ Raises:
3754
+ TypeError: If the `input` type is not Tensor.
3755
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
3756
+ TypeError: If the type/dtype of `s` and `dim` is not int.
3757
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
3758
+ ValueError: If `dim` has duplicate values.
3759
+ ValueError: If `s` is less than 1.
3760
+ ValueError: If `s` and `dim` are given but have different shapes.
3761
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
3762
+
3763
+ Supported Platforms:
3764
+ ``Ascend`` ``CPU``
3765
+
3766
+ Examples:
3767
+ >>> import mindspore
3768
+ >>> from mindspore import Tensor, ops
3769
+ >>> input = ops.ones((4, 4))
3770
+ >>> out = ops.ifft2(input, s=(4, 4), dim=(0, 1), norm="backward")
3771
+ >>> print(out)
3772
+ [[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
3773
+ [0.+0.j 0.+0.j 0.+0.j 0.+0.j]
3774
+ [0.+0.j 0.+0.j 0.+0.j 0.+0.j]
3775
+ [0.+0.j 0.+0.j 0.+0.j 0.+0.j]]
3776
+ """
3777
+ return ifft2_op(input, s, dim, norm)
3778
+
3779
+
3780
+ def ifft(input, n=None, dim=-1, norm=None):
3781
+ r"""
3782
+ Calculates the inverse of `fft()`.
3783
+
3784
+ Note:
3785
+ - `ifft` is currently only used in `mindscience` scientific computing scenarios and
3786
+ does not support other usage scenarios.
3787
+ - `ifft` is not supported on Windows platform yet.
3788
+
3789
+ Args:
3790
+ input (Tensor): The input tensor.
3791
+ Supported dtypes:
3792
+
3793
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
3794
+
3795
+ n (int, optional): Length of the transformed `dim` of the result.
3796
+ If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `ifft`.
3797
+ Default: ``None`` , which does not need to process `input`.
3798
+ dim (int, optional): The dimension along which to take the one dimensional `ifft`.
3799
+ Default: ``-1`` , which means transform the last dimension of `input`.
3800
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
3801
+ Three modes are defined as,
3802
+
3803
+ - ``"backward"`` (normalize by :math:`1/n`).
3804
+ - ``"forward"`` (no normalization).
3805
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
3806
+
3807
+ Returns:
3808
+ Tensor, The result of `ifft()` function. The default is the same shape as `input`.
3809
+ If `n` is given, the size of the `dim` axis is changed to `n`.
3810
+ When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
3811
+ When the input is float64 or complex128, the return value type is complex128.
3812
+
3813
+ Raises:
3814
+ TypeError: If the `input` type is not Tensor.
3815
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
3816
+ TypeError: If `n` or `dim` type is not int.
3817
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
3818
+ ValueError: If `n` is less than 1.
3819
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
3820
+
3821
+ Supported Platforms:
3822
+ ``Ascend`` ``CPU``
3823
+
3824
+ Examples:
3825
+ >>> import mindspore
3826
+ >>> from mindspore import Tensor, ops
3827
+ >>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
3828
+ >>> out = ops.ifft(input, n=4, dim=-1, norm="backward")
3829
+ >>> print(out)
3830
+ [-0.14713785+0.j 0.5381293 +0.11530305j 0.69522464+0.j
3831
+ 0.5381293 -0.11530305j]
3832
+ """
3833
+ return ifft_op(input, n, dim, norm)
3834
+
3835
+
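Under the ``"backward"`` convention the inverse transform carries the :math:`1/n` factor, so the DC term of the doctest is just the mean of the input. NumPy confirms this independently (sketch only):

import numpy as np

a = np.array([1.6243454, -0.6117564, -0.5281718, -1.0729686])
out = np.fft.ifft(a)
print(np.isclose(out[0], a.mean()))  # True: DC term approx. -0.14713785
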
3836
+ def ifftn(input, s=None, dim=None, norm=None):
3837
+ r"""
3838
+ Computes the N dimensional inverse discrete Fourier transform of `input`.
3839
+
3840
+ Note:
3841
+ - `ifftn` is currently only used in `mindscience` scientific computing scenarios and
3842
+ does not support other usage scenarios.
3843
+ - `ifftn` is not supported on Windows platform yet.
3844
+
3845
+ Args:
3846
+ input (Tensor): The input tensor.
3847
+ Supported dtypes:
3848
+
3849
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
3850
+
3851
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
3852
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifftn`.
3853
+ Default: ``None`` , which does not need to process `input`.
3854
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `ifftn`.
3855
+ Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
3856
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
3857
+ Three modes are defined as follows, where :math:`n = prod(s)`:
3858
+
3859
+ - ``"backward"`` (normalize by :math:`1/n`).
3860
+ - ``"forward"`` (no normalization).
3861
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
3862
+
3863
+ Returns:
3864
+ Tensor, The result of `ifftn()` function. The default is the same shape as `input`.
3865
+ If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
3866
+ When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
3867
+ When the input is float64 or complex128, the return value type is complex128.
3868
+
3869
+ Raises:
3870
+ TypeError: If the `input` type is not Tensor.
3871
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
3872
+ TypeError: If the type/dtype of `s` and `dim` is not int.
3873
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
3874
+ ValueError: If `dim` has duplicate values.
3875
+ ValueError: If `s` is less than 1.
3876
+ ValueError: If `s` and `dim` are given but have different shapes.
3877
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
3878
+
3879
+ Supported Platforms:
3880
+ ``Ascend`` ``CPU``
3881
+
3882
+ Examples:
3883
+ >>> import mindspore
3884
+ >>> from mindspore import Tensor, ops
3885
+ >>> input = ops.ones((2, 2, 2))
3886
+ >>> out = ops.ifftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
3887
+ >>> print(out)
3888
+ [[[1.+0.j 0.+0.j]
3889
+ [0.+0.j 0.+0.j]]
3890
+ [[0.+0.j 0.+0.j]
3891
+ [0.+0.j 0.+0.j]]]
3892
+ """
3893
+ return ifftn_op(input, s, dim, norm)
3894
+
3895
+
3896
+ def ifftshift(input, dim=None):
3897
+ r"""
3898
+ The inverse of :func:`mindspore.ops.fftshift` .
3899
+
3900
+ Note:
3901
+ - `ifftshift` is currently only used in `mindscience` scientific computing scenarios and
3902
+ does not support other usage scenarios.
3903
+ - `ifftshift` is not supported on Windows platform yet.
3904
+
3905
+ Args:
3906
+ input (Tensor): Input tensor.
3907
+ dim (Union[int, list(int), tuple(int)], optional): The dimensions which to shift.
3908
+ Default is ``None``, which shifts all dimensions.
3909
+
3910
+ Returns:
2999
3911
  output (Tensor), the shifted tensor with the same shape and dtype as `input`.
3000
3912
 
3001
3913
  Raises:
@@ -3017,6 +3929,183 @@ def ifftshift(input, dim=None):
3017
3929
  return ifftshift_op(input, dim)
3018
3930
 
3019
3931
 
3932
+ def ihfft2(input, s=None, dim=(-2, -1), norm=None):
3933
+ r"""
3934
+ Computes the two dimensional inverse discrete Fourier transform of real `input`.
3935
+
3936
+ Note:
3937
+ - `ihfft2` is currently only used in `mindscience` scientific computing scenarios and
3938
+ does not support other usage scenarios.
3939
+ - `ihfft2` is not supported on Windows platform yet.
3940
+
3941
+ Args:
3942
+ input (Tensor): The input tensor.
3943
+ Supported dtypes:
3944
+
3945
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
3946
+
3947
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
3948
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ihfft2`.
3949
+ Default: ``None`` , which does not need to process `input`.
3950
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `ihfft2`.
3951
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
3952
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
3953
+ Three modes are defined as follows, where :math:`n = prod(s)`:
3954
+
3955
+ - ``"backward"`` (normalize by :math:`1/n`).
3956
+ - ``"forward"`` (no normalization).
3957
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
3958
+
3959
+ Returns:
3960
+ Tensor, The result of `ihfft2()` function.
3961
+ If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
3962
+ result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`, otherwise :math:`input.shape[dim[-1]] // 2 + 1`.
3963
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
3964
+ When the input is float64, the return value type is complex128.
3965
+
3966
+ Raises:
3967
+ TypeError: If the `input` type is not Tensor.
3968
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
3969
+ TypeError: If the type/dtype of `s` and `dim` is not int.
3970
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
3971
+ ValueError: If `dim` has duplicate values.
3972
+ ValueError: If `s` is less than 1.
3973
+ ValueError: If `s` and `dim` are given but have different shapes.
3974
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
3975
+
3976
+ Supported Platforms:
3977
+ ``Ascend`` ``CPU``
3978
+
3979
+ Examples:
3980
+ >>> import mindspore
3981
+ >>> from mindspore import Tensor, ops
3982
+ >>> input = ops.ones((4, 4))
3983
+ >>> out = ops.ihfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
3984
+ >>> print(out)
3985
+ [[1.-0.j 0.-0.j 0.-0.j]
3986
+ [0.-0.j 0.-0.j 0.-0.j]
3987
+ [0.-0.j 0.-0.j 0.-0.j]
3988
+ [0.-0.j 0.-0.j 0.-0.j]]
3989
+ """
3990
+ return ihfft2_op(input, s, dim, norm)
3991
+
3992
+
3993
+ def ihfft(input, n=None, dim=-1, norm=None):
3994
+ r"""
3995
+ Calculates the inverse of `hfft()`.
3996
+
3997
+ Note:
3998
+ - `ihfft` is currently only used in `mindscience` scientific computing scenarios and
3999
+ does not support other usage scenarios.
4000
+ - `ihfft` is not supported on Windows platform yet.
4001
+
4002
+ Args:
4003
+ input (Tensor): The input tensor.
4004
+ Supported dtypes:
4005
+
4006
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
4007
+
4008
+ n (int, optional): Length of the transformed `dim` of the result.
4009
+ If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `ihfft`.
4010
+ Default: ``None`` , which does not need to process `input`.
4011
+ dim (int, optional): The dimension along which to take the one dimensional `ihfft`.
4012
+ Default: ``-1`` , which means transform the last dimension of `input`.
4013
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
4014
+ Three modes are defined as,
4015
+
4016
+ - ``"backward"`` (no normalization).
4017
+ - ``"forward"`` (normalize by :math:`1*n`).
4018
+ - ``"ortho"`` (normalize by :math:`1*\sqrt{n}`).
4019
+
4020
+ Returns:
4021
+ Tensor, The result of `ihfft()` function.
4022
+ If `n` is given, result.shape[dim] is :math:`n // 2 + 1`, otherwise :math:`input.shape[dim] // 2 + 1`.
4023
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
4024
+ When the input is float64, the return value type is complex128.
4025
+
4026
+ Raises:
4027
+ TypeError: If the `input` type is not Tensor.
4028
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
4029
+ TypeError: If `n` or `dim` type is not int.
4030
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
4031
+ ValueError: If `n` is less than 1.
4032
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
4033
+
4034
+ Supported Platforms:
4035
+ ``Ascend`` ``CPU``
4036
+
4037
+ Examples:
4038
+ >>> import mindspore
4039
+ >>> from mindspore import Tensor, ops
4040
+ >>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
4041
+ >>> out = ops.ihfft(input, n=4, dim=-1, norm="backward")
4042
+ >>> print(out)
4043
+ [-0.14713785-0.j 0.5381293 +0.11530305j 0.69522464-0.j ]
4044
+ """
4045
+ return ihfft_op(input, n, dim, norm)
4046
+
4047
+
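Since `ihfft` is documented as the inverse of `hfft()`, a round trip should reproduce the original real signal. NumPy's pair uses the same conventions (sketch only):

import numpy as np

x = np.array([1.6243454, -0.6117564, -0.5281718, -1.0729686])
spec = np.fft.ihfft(x)  # one-sided spectrum, length len(x) // 2 + 1 = 3
print(np.allclose(np.fft.hfft(spec, n=len(x)), x))  # True: hfft undoes ihfft
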
4048
+ def ihfftn(input, s=None, dim=None, norm=None):
4049
+ r"""
4050
+ Computes the N dimensional inverse discrete Fourier transform of real `input`.
4051
+
4052
+ Note:
4053
+ - `ihfftn` is currently only used in `mindscience` scientific computing scenarios and
4054
+ does not support other usage scenarios.
4055
+ - `ihfftn` is not supported on Windows platform yet.
4056
+
4057
+ Args:
4058
+ input (Tensor): The input tensor.
4059
+ Supported dtypes:
4060
+
4061
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
4062
+
4063
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
4064
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ihfftn`.
4065
+ Default: ``None`` , which does not need to process `input`.
4066
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `ihfftn`.
4067
+ Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
4068
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
4069
+ Three modes are defined as follows, where :math:`n = prod(s)`:
4070
+
4071
+ - ``"backward"`` (normalize by :math:`1/n`).
4072
+ - ``"forward"`` (no normalization).
4073
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
4074
+
4075
+ Returns:
4076
+ Tensor, The result of `ihfftn()` function.
4077
+ If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
4078
+ result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`, otherwise :math:`input.shape[dim[-1]] // 2 + 1`.
4079
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
4080
+ When the input is float64, the return value type is complex128.
4081
+
4082
+ Raises:
4083
+ TypeError: If the `input` type is not Tensor.
4084
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
4085
+ TypeError: If the type/dtype of `s` and `dim` is not int.
4086
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
4087
+ ValueError: If `dim` has duplicate values.
4088
+ ValueError: If `s` is less than 1.
4089
+ ValueError: If `s` and `dim` are given but have different shapes.
4090
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
4091
+
4092
+ Supported Platforms:
4093
+ ``Ascend`` ``CPU``
4094
+
4095
+ Examples:
4096
+ >>> import mindspore
4097
+ >>> from mindspore import Tensor, ops
4098
+ >>> input = ops.ones((4, 4))
4099
+ >>> out = ops.ihfftn(input, s=(4, 4), dim=(0, 1), norm="backward")
4100
+ >>> print(out)
4101
+ [[1.-0.j 0.-0.j 0.-0.j]
4102
+ [0.-0.j 0.-0.j 0.-0.j]
4103
+ [0.-0.j 0.-0.j 0.-0.j]
4104
+ [0.-0.j 0.-0.j 0.-0.j]]
4105
+ """
4106
+ return ihfftn_op(input, s, dim, norm)
4107
+
4108
+
3020
4109
  def unfold_ext(input, kernel_size, dilation=1, padding=0, stride=1):
3021
4110
  r"""
3022
4111
  Extracts sliding local blocks from a batched input tensor.
@@ -3114,25 +4203,111 @@ def index_select_ext(input, dim, index):
3114
4203
  ValueError: If the dimension of `index` is not equal to 1.
3115
4204
 
3116
4205
  Supported Platforms:
3117
- ``Ascend``
4206
+ ``Ascend``
4207
+
4208
+ Examples:
4209
+ >>> import mindspore
4210
+ >>> from mindspore import Tensor, ops
4211
+ >>> import numpy as np
4212
+ >>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
4213
+ >>> print(input)
4214
+ [[[ 0. 1. 2. 3.]
4215
+ [ 4. 5. 6. 7.]]
4216
+ [[ 8. 9. 10. 11.]
4217
+ [12. 13. 14. 15.]]]
4218
+ >>> index = Tensor([0,], mindspore.int32)
4219
+ >>> y = ops.auto_generate.index_select_ext(input, 1, index)
4220
+ >>> print(y)
4221
+ [[[ 0. 1. 2. 3.]]
4222
+ [[ 8. 9. 10. 11.]]]
4223
+ """
4224
+ return index_select_op(input, dim, index)
4225
+
4226
+
4227
+ def inplace_add_ext(input, other, alpha=1):
4228
+ r"""
4229
+
4230
+ """
4231
+ return inplace_add_ext_op(input, other, alpha)
4232
+
4233
+
4234
+ def inplace_addmm(input, mat1, mat2, beta=1, alpha=1):
4235
+ r"""
4236
+
4237
+ """
4238
+ return inplace_addmm_op(input, mat1, mat2, beta, alpha)
4239
+
4240
+
4241
+ def inplace_adds_ext(input, other, alpha=1):
4242
+ r"""
4243
+
4244
+ """
4245
+ return inplace_adds_ext_op(input, other, alpha)
4246
+
4247
+
4248
+ def zero_(input):
4249
+ r"""
4250
+
4251
+ """
4252
+ return inplace_zero_op(input)
4253
+
4254
+
4255
+ def irfft2(input, s=None, dim=(-2, -1), norm=None):
4256
+ r"""
4257
+ Calculates the inverse of `rfft2()`.
4258
+
4259
+ Note:
4260
+ - `irfft2` is currently only used in `mindscience` scientific computing scenarios and
4261
+ does not support other usage scenarios.
4262
+ - `irfft2` is not supported on Windows platform yet.
4263
+
4264
+ Args:
4265
+ input (Tensor): The input tensor.
4266
+ Supported dtypes:
4267
+
4268
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
4269
+
4270
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
4271
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `irfft2`.
4272
+ Default: ``None`` , the dim[-1] of the `input` will be zero-padded to :math:`2*(input.shape[dim[-1]]-1)`.
4273
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `irfft2`.
4274
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
4275
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
4276
+ Three modes are defined as follows, where :math:`n = prod(s)`:
4277
+
4278
+ - ``"backward"`` (normalize by :math:`1/n`).
4279
+ - ``"forward"`` (no normalization).
4280
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
4281
+
4282
+ Returns:
4283
+ Tensor, The result of `irfft2()` function, result.shape[dim[i]] is s[i].
4284
+ When the input is int16, int32, int64, float16, float32, complex64, the return value type is float32.
4285
+ When the input is float64 or complex128, the return value type is float64.
4286
+
4287
+ Raises:
4288
+ TypeError: If the `input` type is not Tensor.
4289
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
4290
+ TypeError: If the type/dtype of `s` and `dim` is not int.
4291
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
4292
+ ValueError: If `dim` has duplicate values.
4293
+ ValueError: If `s` is less than 1.
4294
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
4295
+
4296
+ Supported Platforms:
4297
+ ``Ascend`` ``CPU``
3118
4298
 
3119
4299
  Examples:
3120
4300
  >>> import mindspore
3121
4301
  >>> from mindspore import Tensor, ops
3122
- >>> import numpy as np
3123
- >>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
3124
- >>> print(input)
3125
- [[[ 0. 1. 2. 3.]
3126
- [ 4. 5. 6. 7.]]
3127
- [[ 8. 9. 10. 11.]
3128
- [12. 13. 14. 15.]]]
3129
- >>> index = Tensor([0,], mindspore.int32)
3130
- >>> y = ops.auto_generate.index_select_ext(input, 1, index)
3131
- >>> print(y)
3132
- [[[ 0. 1. 2. 3.]]
3133
- [[ 8. 9. 10. 11.]]]
4302
+ >>> input = ops.ones((4, 4))
4303
+ >>> ops.irfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
4304
+ Tensor(shape=[4, 4], dtype=Float32, value=
4305
+ [[ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
4306
+ [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
4307
+ [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
4308
+ [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]])
3134
4309
  """
3135
- return index_select_op(input, dim, index)
4310
+ return irfft2_op(input, s, dim, norm)
3136
4311
 
3137
4312
 
3138
4313
  def irfft(input, n=None, dim=-1, norm=None):
@@ -3177,14 +4352,72 @@ def irfft(input, n=None, dim=-1, norm=None):
3177
4352
  >>> import mindspore
3178
4353
  >>> from mindspore import Tensor, ops
3179
4354
  >>> input = Tensor([1, 2, 3, 4])
3180
- >>> y = ops.irfft(input)
4355
+ >>> y = ops.irfft(input, n=6, dim=-1, norm='backward')
3181
4356
  >>> print(y)
3182
- [ 2.5000000e+00 -6.6666669e-01 1.2590267e-15 -1.6666667e-01
3183
- 4.2470195e-16 -6.6666669e-01]
4357
+ [ 2.5 -0.6666667 0. -0.16666667 0. -0.6666667 ]
3184
4358
  """
3185
4359
  return irfft_op(input, n, dim, norm)
3186
4360
 
3187
4361
 
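The updated doctest can likewise be checked against NumPy, whose `irfft` treats its input as a one-sided spectrum and applies the :math:`1/n` factor (illustrative only):

import numpy as np

out = np.fft.irfft(np.array([1.0, 2.0, 3.0, 4.0]), n=6)
print(out)  # approx. [ 2.5 -0.6667 0. -0.1667 0. -0.6667 ]
# The DC term is the reconstructed full-spectrum sum divided by n:
print(np.isclose(out[0], (1 + 2*2 + 2*3 + 4) / 6))  # True
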
4362
+ def irfftn(input, s=None, dim=None, norm=None):
4363
+ r"""
4364
+ Calculates the inverse of `rfftn()`.
4365
+
4366
+ Note:
4367
+ - `irfftn` is currently only used in `mindscience` scientific computing scenarios and
4368
+ does not support other usage scenarios.
4369
+ - `irfftn` is not supported on Windows platform yet.
4370
+
4371
+ Args:
4372
+ input (Tensor): The input tensor.
4373
+ Supported dtypes:
4374
+
4375
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
4376
+
4377
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
4378
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `irfftn`.
4379
+ Default: ``None`` , the dim[-1] of the `input` will be zero-padded to :math:`2*(input.shape[dim[-1]]-1)`.
4380
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `irfftn`.
4381
+ Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
4382
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
4383
+ Three modes are defined as follows, where :math:`n = prod(s)`:
4384
+
4385
+ - ``"backward"`` (normalize by :math:`1/n`).
4386
+ - ``"forward"`` (no normalization).
4387
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
4388
+
4389
+ Returns:
4390
+ Tensor, The result of `irfftn()` function, result.shape[dim[i]] is s[i].
4391
+ When the input is int16, int32, int64, float16, float32, the return value type is float32.
4392
+ When the input is float64, the return value type is float64.
4393
+
4394
+ Raises:
4395
+ TypeError: If the `input` type is not Tensor.
4396
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
4397
+ TypeError: If the type/dtype of `s` and `dim` is not int.
4398
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
4399
+ ValueError: If `dim` has duplicate values.
4400
+ ValueError: If `s` is less than 1.
4401
+ ValueError: If `s` and `dim` are given but have different shapes.
4402
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
4403
+
4404
+ Supported Platforms:
4405
+ ``Ascend`` ``CPU``
4406
+
4407
+ Examples:
4408
+ >>> import mindspore
4409
+ >>> from mindspore import Tensor, ops
4410
+ >>> input = ops.ones((2, 2, 2))
4411
+ >>> ops.irfftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
4412
+ Tensor(shape=[2, 2, 2], dtype=Float32, value=
4413
+ [[[ 1.00000000e+00, 0.00000000e+00],
4414
+ [ 0.00000000e+00, 0.00000000e+00]],
4415
+ [[ 0.00000000e+00, 0.00000000e+00],
4416
+ [ 0.00000000e+00, 0.00000000e+00]]])
4417
+ """
4418
+ return irfftn_op(input, s, dim, norm)
4419
+
4420
+
3188
4421
  def isfinite(x):
3189
4422
  r"""
3190
4423
  Determine which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
@@ -3225,6 +4458,65 @@ def isfinite(x):
3225
4458
  return isfinite_op(x)
3226
4459
 
3227
4460
 
4461
+ def l1_loss_ext(input, target, reduction='mean'):
4462
+ r"""
4463
+ Calculate the mean absolute error between the `input` value and the `target` value.
4464
+
4465
+ Assuming that :math:`x` and :math:`y` are the predicted and target values,
4466
+ both one-dimensional tensors of length :math:`N`, and `reduction` is set to ``'none'`` ,
4467
+ then the loss of :math:`x` and :math:`y` is calculated without dimensionality reduction.
4468
+
4469
+ The formula is as follows:
4470
+
4471
+ .. math::
4472
+ \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,
4473
+
4474
+ where :math:`N` is the batch size.
4475
+
4476
+ If `reduction` is ``'mean'`` or ``'sum'`` , then:
4477
+
4478
+ .. math::
4479
+ \ell(x, y) =
4480
+ \begin{cases}
4481
+ \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
4482
+ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
4483
+ \end{cases}
4484
+
4485
+ Args:
4486
+ input (Tensor): Predicted value, Tensor of any dimension.
4487
+ target (Tensor): Target value, usually has the same shape as the `input`.
4488
+ If `input` and `target` have different shapes, make sure they can broadcast to each other.
4489
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
4490
+ ``'sum'`` . Default: ``'mean'`` .
4491
+
4492
+ - ``'none'``: no reduction will be applied.
4493
+ - ``'mean'``: compute and return the mean of elements in the output. Note that at least one of `input` and `target` must be of float type when `reduction` is ``'mean'`` .
4494
+ - ``'sum'``: the output elements will be summed.
4495
+
4496
+ Returns:
4497
+ Tensor or Scalar, if `reduction` is ``'none'`` , return a Tensor with same shape and dtype as `input`.
4498
+ Otherwise, a scalar value will be returned.
4499
+
4500
+ Raises:
4501
+ TypeError: If `input` is not a Tensor.
4502
+ TypeError: If `target` is not a Tensor.
4503
+ ValueError: If `reduction` is not one of ``'none'`` , ``'mean'`` or ``'sum'`` .
4504
+
4505
+ Supported Platforms:
4506
+ ``Ascend``
4507
+
4508
+ Examples:
4509
+ >>> from mindspore import Tensor, ops
4510
+ >>> from mindspore import dtype as mstype
4511
+ >>> x = Tensor([[1, 2, 3], [4, 5, 6]], mstype.float32)
4512
+ >>> target = Tensor([[6, 5, 4], [3, 2, 1]], mstype.float32)
4513
+ >>> output = ops.l1_loss_ext(x, target, reduction="mean")
4514
+ >>> print(output)
4515
+ 3.0
4516
+ """
4517
+ return l1_loss_ext_op(input, target, reduction)
4518
+
4519
+
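The doctest value is easy to verify by hand: the absolute errors are 5, 3, 1, 1, 3, 5, whose mean is 3.0. The same arithmetic in NumPy (illustrative only):

import numpy as np

x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
target = np.array([[6.0, 5.0, 4.0], [3.0, 2.0, 1.0]], dtype=np.float32)
err = np.abs(x - target)      # the 'none' reduction
print(err.mean(), err.sum())  # 3.0 18.0: the 'mean' and 'sum' reductions
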
3228
4520
  def leaky_relu_ext(input, negative_slope=0.01):
3229
4521
  r"""
3230
4522
  leaky_relu activation function. The element of `input` less than 0 times `negative_slope` .
@@ -3361,7 +4653,7 @@ def log1p(input):
3361
4653
  Returns the natural logarithm of one plus the input tensor element-wise.
3362
4654
 
3363
4655
  .. math::
3364
- out_i = \{log_e}(input_i + 1)
4656
+ out_i = \log_e(input_i + 1)
3365
4657
 
3366
4658
  Args:
3367
4659
  input (Tensor): The input tensor. The value must be greater than -1.
@@ -3422,6 +4714,48 @@ def log(input):
3422
4714
  return log_op(input)
3423
4715
 
3424
4716
 
4717
+ def log_softmax_ext(input, dim=None, dtype=None):
4718
+ r"""
4719
+ Applies the Log Softmax function to the input tensor on the specified axis.
4720
+ For a slice :math:`x` along the given axis and each of its elements :math:`x_i`,
4721
+ the Log Softmax function is computed as follows:
4722
+
4723
+ .. math::
4724
+ \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
4725
+
4726
+ where :math:`N` is the length of the Tensor.
4727
+
4728
+ Args:
4729
+ input (Tensor): The input Tensor.
4730
+ dim (int, optional): The axis along which to perform the Log Softmax operation. Default: ``None`` .
4731
+
4732
+ Keyword Args:
4733
+ dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If not set to None, the input
4734
+ Tensor will be cast to `dtype` before the operation is performed. This is useful for preventing overflows.
4735
+ If set to ``None``, the dtype stays the same as that of the input Tensor. Default: ``None`` . Supported data types: {float16, float32, double, bfloat16}.
4736
+
4737
+ Returns:
4738
+ Tensor, with the same shape as the input.
4739
+
4740
+ Raises:
4741
+ TypeError: If `dim` is not an int.
4742
+ ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
4743
+
4744
+ Supported Platforms:
4745
+ ``Ascend``
4746
+
4747
+ Examples:
4748
+ >>> import mindspore
4749
+ >>> import numpy as np
4750
+ >>> from mindspore import Tensor, ops
4751
+ >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
4752
+ >>> output = ops.auto_generate.log_softmax_ext(logits, dim=-1)
4753
+ >>> print(output)
4754
+ [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
4755
+ """
4756
+ return log_softmax_ext_op(input, dim, dtype)
4757
+
4758
+
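A common reason to pass `dtype` is numerical range; the standard max-subtraction identity shows why log-softmax stays stable even for large logits. A hedged NumPy reference (`log_softmax_ref` is our name):

import numpy as np

def log_softmax_ref(x, axis=-1):
    # Subtracting the max leaves the result unchanged but keeps exp() in range.
    shifted = x - x.max(axis=axis, keepdims=True)
    return shifted - np.log(np.exp(shifted).sum(axis=axis, keepdims=True))

print(log_softmax_ref(np.array([1.0, 2.0, 3.0, 4.0, 5.0])))
# approx. [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519], as in the doctest
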
3425
4759
  def log_softmax(logits, axis=-1):
3426
4760
  r"""
3427
4761
  Applies the Log Softmax function to the input tensor on the specified axis.
@@ -3459,8 +4793,55 @@ def log_softmax(logits, axis=-1):
3459
4793
  >>> print(output)
3460
4794
  [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
3461
4795
  """
3462
- log_softmax_op = _get_cache_prim(LogSoftmax)(axis)
3463
- return log_softmax_op(logits)
4796
+ return log_softmax_impl(logits, axis)
4797
+
4798
+
4799
+ def logaddexp_ext(input, other):
4800
+ r"""
4801
+ Computes the logarithm of the sum of exponentiations of the inputs.
4802
+ This function is useful in statistics where the calculated probabilities of events may be
4803
+ so small as to exceed the range of normal floating point numbers.
4804
+
4805
+ .. math::
4806
+
4807
+ out_i = \log(\exp(input_i) + \exp(other_i))
4808
+
4809
+ .. warning::
4810
+ This is an experimental API that is subject to change or deletion.
4811
+
4812
+ Args:
4813
+ input (Tensor): Input Tensor. The dtype of `input` must be float.
4814
+ other (Tensor): Input Tensor. The dtype of `other` must be float.
4815
+ If the shape of `input` is not equal to the shape of `other`,
4816
+ they must be broadcastable to a common shape (which becomes the shape of the output).
4817
+
4818
+ Returns:
4819
+ Tensor, with the same dtype as `input` and `other`.
4820
+
4821
+ Raises:
4822
+ TypeError: If `input` or `other` is not a Tensor.
4823
+ TypeError: If the dtype of `input` or `other` is not float.
4824
+
4825
+ Supported Platforms:
4826
+ ``Ascend``
4827
+
4828
+ Examples:
4829
+ >>> import numpy as np
4830
+ >>> from mindspore import Tensor, ops
4831
+ >>> x1 = Tensor(np.array([1, 2, 3]).astype(np.float16))
4832
+ >>> x2 = Tensor(np.array(2).astype(np.float16))
4833
+ >>> output = ops.logaddexp_ext(x1, x2)
4834
+ >>> print(output)
4835
+ [2.312 2.693 3.312]
4836
+ """
4837
+ return logaddexp_op(input, other)
4838
+
4839
+
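NumPy ships the same primitive, so the float16 doctest is directly reproducible; both rely on the stable form :math:`\max(a, b) + \log(1 + e^{-|a - b|})` (sketch only):

import numpy as np

x1 = np.array([1.0, 2.0, 3.0], dtype=np.float16)
x2 = np.float16(2.0)
print(np.logaddexp(x1, x2))  # [2.312 2.693 3.312]
stable = np.maximum(x1, x2) + np.log1p(np.exp(-np.abs(x1 - x2)))
print(np.allclose(stable, np.logaddexp(x1, x2), rtol=1e-3))  # True
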
4840
+ def logsigmoid_grad(dy, input, buffer):
4841
+ r"""
4842
+
4843
+ """
4844
+ return logsigmoid_grad_op(dy, input, buffer)
3464
4845
 
3465
4846
 
3466
4847
  def masked_fill(input_x, mask, value):
@@ -3502,6 +4883,38 @@ def masked_fill(input_x, mask, value):
3502
4883
  return masked_fill_op(input_x, mask, value)
3503
4884
 
3504
4885
 
4886
+ def masked_select(input, mask):
4887
+ r"""
4888
+ Returns a new 1-D Tensor which indexes the `input` tensor according to the boolean `mask`.
4889
+ The shapes of the `mask` tensor and the `input` tensor don't need to match, but they must be broadcastable.
4890
+
4891
+ Args:
4892
+ input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
4893
+ mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
4894
+
4895
+ Returns:
4896
+ A 1-D Tensor, with the same type as `input`.
4897
+
4898
+ Raises:
4899
+ TypeError: If `input` or `mask` is not a Tensor.
4900
+ TypeError: If dtype of `mask` is not bool.
4901
+
4902
+ Supported Platforms:
4903
+ ``Ascend`` ``GPU`` ``CPU``
4904
+
4905
+ Examples:
4906
+ >>> import numpy as np
4907
+ >>> import mindspore
4908
+ >>> from mindspore import Tensor, ops
4909
+ >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
4910
+ >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
4911
+ >>> output = ops.masked_select(x, mask)
4912
+ >>> print(output)
4913
+ [1 3]
4914
+ """
4915
+ return masked_select_op(input, mask)
4916
+
4917
+
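The broadcastability note is the only subtlety; plain NumPy boolean indexing covers both the doctest case and a broadcast mask (hedged illustration):

import numpy as np

x = np.array([1, 2, 3, 4], dtype=np.int64)
mask = np.array([True, False, True, False])
print(x[mask])  # [1 3], the doctest case

# A (3,) mask against a (2, 3) input: broadcast first, then gather into 1-D.
x2 = np.arange(6).reshape(2, 3)
m2 = np.array([True, False, True])
xb, mb = np.broadcast_arrays(x2, m2)
print(xb[mb])  # [0 2 3 5]
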
3505
4918
  def matmul_ext(input, mat2):
3506
4919
  r"""
3507
4920
 
@@ -3592,10 +5005,12 @@ def maximum(input, other):
3592
5005
  r"""
3593
5006
  Computes the maximum of input tensors element-wise.
3594
5007
 
5008
+ .. math::
5009
+ output_i = \max(input_i, other_i)
5010
+
3595
5011
  Note:
3596
5012
  - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
3597
5013
  consistent.
3598
- - The input must be two Tensors, or a Tensor and a Scalar.
3599
5014
  - When the inputs are two tensors,
3600
5015
  dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
3601
5016
  - When the inputs are one tensor and one scalar,
@@ -3603,8 +5018,9 @@ def maximum(input, other):
3603
5018
  - Broadcasting is supported.
3604
5019
  - If one of the elements being compared is a NaN, then that element is returned.
3605
5020
 
3606
- .. math::
3607
- output_i = \max(input_i, other_i)
5021
+ .. warning::
5022
+ If all inputs are integer scalars, the output will be a Tensor of int32 in GRAPH mode,
5023
+ and a Tensor of int64 in PYNATIVE mode.
3608
5024
 
3609
5025
  Args:
3610
5026
  input (Union[Tensor, Number, bool]): The first input is a number or
@@ -3751,44 +5167,134 @@ def minimum(input, other):
3751
5167
  - Shapes of them are supposed to be broadcast.
3752
5168
  - If one of the elements being compared is a NaN, then that element is returned.
3753
5169
 
3754
- .. math::
3755
- output_i = \min(input_i, other_i)
5170
+ .. math::
5171
+ output_i = \min(input_i, other_i)
5172
+
5173
+ Args:
5174
+ input (Union[Tensor, Number, bool]): The first input is a number or
5175
+ a bool or a tensor whose data type is number or bool.
5176
+ other (Union[Tensor, Number, bool]): The second input is a number or
5177
+ a bool when the first input is a tensor or a tensor whose data type is number or bool.
5178
+
5179
+ Returns:
5180
+ Tensor, the shape is the same as the one after broadcasting,
5181
+ and the data type is the one with higher precision or higher digits among the two inputs.
5182
+
5183
+ Raises:
5184
+ TypeError: If `input` or `other` is not one of the following: Tensor, Number, bool.
5185
+ ValueError: If `input` and `other` are not the same shape after broadcast.
5186
+
5187
+ Supported Platforms:
5188
+ ``Ascend`` ``GPU`` ``CPU``
5189
+
5190
+ Examples:
5191
+ >>> import mindspore
5192
+ >>> import numpy as np
5193
+ >>> from mindspore import Tensor, ops
5194
+ >>> # case 1 : same data type
5195
+ >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
5196
+ >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
5197
+ >>> output = ops.minimum(input, other)
5198
+ >>> print(output)
5199
+ [1. 2. 3.]
5200
+ >>> # case 2 : different data type
5201
+ >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
5202
+ >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
5203
+ >>> output = ops.minimum(input, other)
5204
+ >>> print(output.dtype)
5205
+ Float32
5206
+ """
5207
+ return minimum_op(input, other)
5208
+
5209
+
5210
+ def mish_ext(input):
5211
+ r"""
5212
+ Computes MISH (A Self Regularized Non-Monotonic Neural Activation Function)
5213
+ of input tensors element-wise.
5214
+
5215
+ The formula is defined as follows:
5216
+
5217
+ .. math::
5218
+ \text{mish}(input) = input * \tanh(\text{softplus}(input))
5219
+
5220
+ See more details in `A Self Regularized Non-Monotonic Neural Activation Function
5221
+ <https://arxiv.org/abs/1908.08681>`_.
5222
+
5223
+ Mish Activation Function Graph:
5224
+
5225
+ .. image:: ../images/Mish.png
5226
+ :align: center
5227
+
5228
+ Args:
5229
+ input (Tensor): The input of MISH. Supported dtypes:
5230
+
5231
+ - Ascend: float16, float32.
5232
+
5233
+ Returns:
5234
+ Tensor, has the same type and shape as the `input`.
5235
+
5236
+ Raises:
5237
+ TypeError: If `input` is not a Tensor.
5238
+ TypeError: If dtype of `input` is not float16 or float32.
5239
+
5240
+ Supported Platforms:
5241
+ ``Ascend``
5242
+
5243
+ Examples:
5244
+ >>> import mindspore
5245
+ >>> from mindspore import Tensor, ops
5246
+ >>> import numpy as np
5247
+ >>> x = Tensor(np.array([[-1.1, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
5248
+ >>> output = ops.mish_ext(x)
5249
+ >>> print(output)
5250
+ [[-3.0764845e-01 3.9974124e+00 -2.6832507e-03]
5251
+ [ 1.9439589e+00 -3.3576239e-02 8.9999990e+00]]
5252
+ """
5253
+ return mish_ext_op(input)
5254
+
5255
+
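A compact NumPy reference of the formula (the naive softplus is adequate for the demo range; `mish_ref` is our name):

import numpy as np

def mish_ref(x):
    softplus = np.log1p(np.exp(x))  # softplus(x) = ln(1 + e^x)
    return x * np.tanh(softplus)

x = np.array([[-1.1, 4.0, -8.0], [2.0, -5.0, 9.0]], dtype=np.float32)
print(mish_ref(x))  # approx. the doctest values, e.g. -0.30765 at (0, 0)
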
5256
+ def mse_loss_ext(input, target, reduction='mean'):
5257
+ r"""
5258
+ Calculates the mean squared error between the predicted value and the label value.
5259
+
5260
+ For detailed information, please refer to :class:`mindspore.nn.MSELoss`.
3756
5261
 
3757
5262
  Args:
3758
- input (Union[Tensor, Number, bool]): The first input is a number or
3759
- a bool or a tensor whose data type is number or bool.
3760
- other (Union[Tensor, Number, bool]): The second input is a number or
3761
- a bool when the first input is a tensor or a tensor whose data type is number or bool.
5263
+ input (Tensor): Tensor of any dimension. The data type needs to be consistent with the `target`.
5264
+ It should also be broadcastable with the `target`.
5265
+ target (Tensor): The input label. Tensor of any dimension. The data type needs to be consistent with the `input`.
5266
+ It should also be broadcastable with the `input`.
5267
+ reduction (str, optional): Apply specific reduction method to the output: ``'mean'`` , ``'none'`` ,
5268
+ ``'sum'`` . Default: ``'mean'`` .
5269
+
5270
+ - ``'none'``: no reduction will be applied.
5271
+ - ``'mean'``: compute and return the mean of elements in the output.
5272
+ - ``'sum'``: the output elements will be summed.
3762
5273
 
3763
5274
  Returns:
3764
- Tensor, the shape is the same as the one after broadcasting,
3765
- and the data type is the one with higher precision or higher digits among the two inputs.
5275
+ - Tensor. If `reduction` is ``'mean'`` or ``'sum'``, the output is a 0-D tensor (scalar).
5276
+ - If `reduction` is ``'none'``, the shape of output is the broadcasted shape of **input** and **target** .
3766
5277
 
3767
5278
  Raises:
3768
- TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
3769
- ValueError: If `input` and `other` are not the same shape after broadcast.
5279
+ ValueError: If `reduction` is not one of ``'mean'`` , ``'sum'`` or ``'none'``.
5280
+ ValueError: If `input` and `target` are not broadcastable.
5281
+ TypeError: If `input` and `target` have different data types.
3770
5282
 
3771
5283
  Supported Platforms:
3772
- ``Ascend`` ``GPU`` ``CPU``
5284
+ ``Ascend``
3773
5285
 
3774
5286
  Examples:
3775
5287
  >>> import mindspore
3776
5288
  >>> import numpy as np
3777
5289
  >>> from mindspore import Tensor, ops
3778
- >>> # case 1 : same data type
3779
- >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
3780
- >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
3781
- >>> output = ops.minimum(input, other)
5290
+ >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
5291
+ >>> labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)
5292
+ >>> output = ops.mse_loss_ext(logits, labels, reduction='none')
3782
5293
  >>> print(output)
3783
- [1. 2. 3.]
3784
- >>> # case 2 : different data type
3785
- >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
3786
- >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
3787
- >>> output = ops.minimum(input, other)
3788
- >>> print(output.dtype)
3789
- Float32
5294
+ [[0. 1. 4.]
5295
+ [0. 0. 1.]]
3790
5296
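+ >>> # Editor's sketch (values computed by hand): 'mean' averages the 6 squared errors, (0+1+4+0+0+1)/6
+ >>> output = ops.mse_loss_ext(logits, labels, reduction='mean')
+ >>> print(output)
+ 1.0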
  """
3791
- return minimum_op(input, other)
5297
+ return mse_loss_ext_op(input, target, reduction)
3792
5298
 
3793
5299
 
3794
5300
  def mul(input, other):
@@ -3810,12 +5316,12 @@ def mul(input, other):
3810
5316
  Args:
3811
5317
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
3812
5318
  a bool or a tensor whose data type is
3813
- `number <https://www.mindspore.cn/docs/en/r2.3/api_python/mindspore.html#mindspore.dtype>`_ or
3814
- `bool_ <https://www.mindspore.cn/docs/en/r2.3/api_python/mindspore.html#mindspore.dtype>`_.
5319
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
5320
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
3815
5321
  other (Union[Tensor, number.Number, bool]): The second input, which is a number.Number or
3816
5322
  a bool or a tensor whose data type is
3817
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
3818
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
5323
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
5324
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
3819
5325
 
3820
5326
  Returns:
3821
5327
  Tensor, the shape is the same as the one after broadcasting,
@@ -3841,6 +5347,13 @@ def mul(input, other):
3841
5347
  return mul_op(input, other)
3842
5348
 
3843
5349
 
5350
+ def muls(input, other):
5351
+ r"""
5352
+
5353
+ """
5354
+ return muls_op(input, other)
5355
+
5356
+
3844
5357
  def mv(input, vec):
3845
5358
  r"""
3846
5359
 
@@ -3848,6 +5361,44 @@ def mv(input, vec):
3848
5361
  return mv_op(input, vec)
3849
5362
 
3850
5363
 
5364
+ def nan_to_num(input, nan=None, posinf=None, neginf=None):
5365
+ r"""
5366
+ Replace the `NaN`, positive infinity and negative infinity values in `input` with the
5367
+ specified values in `nan`, `posinf` and `neginf` respectively.
5368
+
5369
+ .. warning::
5370
+ For Ascend, it is only supported on Atlas A2 Training Series Products.
5371
+ This is an experimental API that is subject to change or deletion.
5372
+
5373
+ Args:
5374
+ input (Tensor): The shape of tensor is :math:`(input_1, input_2, ..., input_R)`.
5375
+ nan (number, optional): The value used to replace `NaN`. Default: ``None``, in which case `NaN` is replaced with zero.
5376
+ posinf (number, optional): the value to replace positive infinity values with. Default: ``None``,
5377
+ replacing positive infinity with the maximum value supported by the data type of `input`.
5378
+ neginf (number, optional): the value to replace negative infinity values with. Default: ``None``,
5379
+ replacing negative infinity with the minimum value supported by the data type of `input`.
5380
+
5381
+ Returns:
5382
+ Tensor, has the same shape and dtype as the `input`.
5383
+
5384
+ Raises:
5385
+ TypeError: If `input` is not a Tensor.
5386
+
5387
+ Supported Platforms:
5388
+ ``Ascend`` ``CPU``
5389
+
5390
+ Examples:
5391
+ >>> import mindspore
5392
+ >>> import numpy as np
5393
+ >>> from mindspore import Tensor, ops
5394
+ >>> input = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 5.0]), mindspore.float32)
5395
+ >>> output = ops.nan_to_num(input, 1.0, 2.0, 3.0)
5396
+ >>> print(output)
5397
+ [1. 2. 3. 5.]
5398
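+ >>> # Editor's sketch (assumed defaults): with no replacement values given, NaN becomes 0.0
+ >>> # and +/-inf become the largest/smallest finite values of the input dtype (here float32).
+ >>> output = ops.nan_to_num(input)
+ >>> print(output)
+ [ 0.0000000e+00  3.4028235e+38 -3.4028235e+38  5.0000000e+00]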
+ """
5399
+ return nan_to_num_impl(input, nan, posinf, neginf)
5400
+
5401
+
3851
5402
  def neg(input):
3852
5403
  r"""
3853
5404
  Returns a tensor with negative values of the input tensor element-wise.
@@ -3971,101 +5522,46 @@ def ones(shape, dtype=None):
3971
5522
  return ones_op(shape, dtype)
3972
5523
 
3973
5524
 
3974
- def paged_attention_mask(query, key_cache, value_cache, block_tables, context_lens, alibi_mask, head_num, scale_value, kv_head_num):
5525
+ def outer_ext(input, vec2):
3975
5526
  r"""
3976
- The PagedAttentionMask is the fusion of block-wise KV Cache access and self-attention(with alibi-mask) computing.
3977
-
3978
- Args:
3979
- query (Tensor): The query tensor with data type of float16.
3980
- :math:`(num\_tokens, num\_head, head\_dim)`.
3981
- key_cache (Tensor): The cache tensor with data type of float16.
3982
- :math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
3983
- value_cache (Tensor): The cache tensor with data type of float16.
3984
- :math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
3985
- block_tables (Tensor): The block mapping table with data type of int32.
3986
- :math:`(num\_tokens, max_num_blocks_per_batch)`.
3987
- context_lens (Tensor): The context length of each sequence with data type of int32.
3988
- :math:`(num\_tokens,)`.
3989
- alibi_mask (Tensor): The context length of each sequence with data type of float16.
3990
- :math:`(num\_tokens, num\_head, 1, max\_context\_len)`.
3991
-
3992
- Outputs:
3993
- attention output.
5527
+ Return outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
5528
+ and `vec2` is a vector of size :math:`m` , then output must be a matrix of shape :math:`(n, m)` .
3994
5529
 
3995
- Notes:
3996
- No backend implementation in MindSpore, only use to export MindIr and run in MindSpore Lite.
5530
+ .. warning::
5531
+ This is an experimental API that is subject to change or deletion.
3997
5532
 
3998
- Examples:
3999
- >>> from mindspore.ops.operations import _inner_ops
4000
- >>> num_tokens = = 4
4001
- >>> num_head = 40
4002
- >>> num_kv_head = 40
4003
- >>> head_dim = 128
4004
- >>> block_size = 16
4005
- >>> num_blocks = 128
4006
- >>> max_seq = 1024
4007
- >>> max_num_blocks_per_batch = max_seq // block_size
4008
- >>> scale_value = 1.0 / math.sqrt(head_dim)
4009
- >>> query = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16))
4010
- >>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
4011
- >>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
4012
- >>> dummy_block_indice = np.random.shuffle(np.arange(num_tokens * max_num_blocks_per_batch, dtype=np.int32))
4013
- >>> block_tables = Tensor(np.reshape(dummy_block_indice, (num_tokens, max_num_blocks_per_batch)))
4014
- >>> context_lens = Tensor(np.random.randint(max_seq, size=num_tokens).astype(np.int32)))
4015
- >>> alibi_mask = Tensor(np.random.randn(num_tokens, num_head, 1, max_seq).astype(np.int32)))
4016
- >>> paged_attention_mask = _inner_ops.PagedAttentionMask()
4017
- >>> output = paged_attention_mask(query, key_cache, value_cache, block_tables, context_lens, alibi_mask)
4018
- >>> print(output)
4019
- """
4020
- paged_attention_mask_op = _get_cache_prim(PagedAttentionMask)(head_num, scale_value, kv_head_num)
4021
- return paged_attention_mask_op(query, key_cache, value_cache, block_tables, context_lens, alibi_mask)
5533
+ .. note::
5534
+ This function does not broadcast.
4022
5535
 
5536
+ Args:
5537
+ input (Tensor): 1-D input vector.
5538
+ vec2 (Tensor): 1-D input vector.
4023
5539
 
4024
- def paged_attention(query, key_cache, value_cache, block_tables, context_lens, head_num, scale_value, kv_head_num):
4025
- r"""
4026
- The PagedAttention is the fusion of block-wise KV Cache access and self-attention computing.
5540
+ Returns:
5541
+ out, 2-D matrix, the outer product of two vectors.
4027
5542
 
4028
- Args:
4029
- query (Tensor): The query tensor with data type of float16.
4030
- :math:`(num\_tokens, num\_head, head\_dim)`.
4031
- key_cache (Tensor): The cache tensor with data type of float16.
4032
- :math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
4033
- value_cache (Tensor): The cache tensor with data type of float16.
4034
- :math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
4035
- block_tables (Tensor): The block mapping table with data type of int32.
4036
- :math:`(num\_tokens, max_num_blocks_per_batch)`.
4037
- context_lens (Tensor): The context length of each sequence with data type of int32.
4038
- :math:`(num\_tokens,)`.
4039
-
4040
- Outputs:
4041
- attention output.
5543
+ Raises:
5544
+ TypeError: If `input` or `vec2` is not a Tensor.
5545
+ TypeError: If the implicitly converted data types of `input` and `vec2` are not one of float16, float32, float64, bool, uint8, int8, int16, int32, int64, complex64, complex128, bfloat16.
5546
+ ValueError: If the dimension of `input` or `vec2` is not equal to 1.
4042
5547
 
4043
- Notes:
4044
- No backend implementation in MindSpore, only use to export MindIr and run in MindSpore Lite.
5548
+ Supported Platforms:
5549
+ ``Ascend``
4045
5550
 
4046
5551
  Examples:
4047
- >>> from mindspore.ops.operations import _inner_ops
4048
- >>> num_tokens = = 4
4049
- >>> num_head = 40
4050
- >>> num_kv_head = 40
4051
- >>> head_dim = 128
4052
- >>> block_size = 16
4053
- >>> num_blocks = 128
4054
- >>> max_seq = 1024
4055
- >>> max_num_blocks_per_batch = max_seq // block_size
4056
- >>> scale_value = 1.0 / math.sqrt(head_dim)
4057
- >>> query = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16))
4058
- >>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
4059
- >>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
4060
- >>> dummy_block_indice = np.random.shuffle(np.arange(num_tokens * max_num_blocks_per_batch, dtype=np.int32))
4061
- >>> block_tables = Tensor(np.reshape(dummy_block_indice, (num_tokens, max_num_blocks_per_batch)))
4062
- >>> context_lens = Tensor(np.random.randint(max_seq, size=num_tokens).astype(np.int32)))
4063
- >>> paged_attention = _inner_ops.PagedAttention()
4064
- >>> output = paged_attention(query, key_cache, value_cache, block_tables, context_lens)
4065
- >>> print(output)
5552
+ >>> import mindspore
5553
+ >>> import numpy as np
5554
+ >>> from mindspore import Tensor
5555
+ >>> from mindspore import ops
5556
+ >>> input = Tensor(np.array([7, 8, 9]), mindspore.int32)
5557
+ >>> vec2 = Tensor(np.array([7, 10, 11]), mindspore.int32)
5558
+ >>> out = ops.outer(input, vec2)
5559
+ >>> print(out)
5560
+ [[49 70 77]
5561
+ [56 80 88]
5562
+ [63 90 99]]
4066
5563
  """
4067
- paged_attention_op = _get_cache_prim(PagedAttention)(head_num, scale_value, kv_head_num)
4068
- return paged_attention_op(query, key_cache, value_cache, block_tables, context_lens)
5564
+ return outer_op(input, vec2)
4069
5565
 
4070
5566
 
4071
5567
  def pow(input, exponent):
@@ -4080,11 +5576,11 @@ def pow(input, exponent):
4080
5576
 
4081
5577
  Args:
4082
5578
  input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
4083
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
4084
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
5579
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
5580
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
4085
5581
  exponent (Union[Tensor, Number]): The second input is a Number or a tensor whose data type is
4086
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
4087
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
5582
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
5583
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
4088
5584
 
4089
5585
  Returns:
4090
5586
  Tensor, the shape is the same as the one after broadcasting,
@@ -4112,7 +5608,7 @@ def pow(input, exponent):
4112
5608
  return pow_op(input, exponent)
4113
5609
 
4114
5610
 
4115
- def prelu(x, weight):
5611
+ def prelu(input, weight):
4116
5612
  r"""
4117
5613
  Parametric Rectified Linear Unit activation function.
4118
5614
 
@@ -4124,30 +5620,26 @@ def prelu(x, weight):
4124
5620
 
4125
5621
  where :math:`x_i` is an element of a channel of the input, `w` is the weight of the channel.
4126
5622
 
4127
- Note:
4128
- Scalar or 1-D Tensor is not supported on Ascend.
4129
-
4130
5623
  PReLU Activation Function Graph:
4131
5624
 
4132
- .. image:: ../images/PReLU.png
5625
+ .. image:: ../images/PReLU2.png
4133
5626
  :align: center
4134
5627
 
5628
+ .. note::
5629
+ The channel dim is the 2nd dim of `input`. When `input` has fewer than 2 dims, there is
5630
+ no channel dim and the number of channels = 1.
5631
+
4135
5632
  Args:
4136
- x (Tensor): The input Tensor of the activation function. The data type is float16 or float32.
4137
- The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4138
- weight (Tensor): Weight Tensor. The data type is float16 or float32.
4139
- The weight can only be a Tensor, and the length is the same as the number of channels C of the `input_x`.
4140
- On GPU devices, when the input is a scalar, the shape is :math:`(1,)` .
5633
+ input (Tensor): The input Tensor of the activation function.
5634
+ weight (Tensor): Weight Tensor. The size of the weight should be 1 or the number of channels of Tensor `input`.
4141
5635
 
4142
5636
  Returns:
4143
- Tensor, with the same shape and dtype as `x`.
4144
- For detailed information, please refer to :class:`mindspore.nn.PReLU`.
5637
+ Tensor, with the same shape and dtype as `input`.
5638
+ For detailed information, please refer to :class:`mindspore.mint.nn.PReLU`.
4145
5639
 
4146
5640
  Raises:
4147
- TypeError: If dtype of `x` or `weight` is neither float16 nor float32.
4148
- TypeError: If the `x` or the `weight` is not a Tensor.
4149
- ValueError: If the `x` is a 0-D or 1-D Tensor on Ascend.
4150
- ValueError: If the `weight` is not a 1-D Tensor.
5641
+ TypeError: If the `input` or the `weight` is not a Tensor.
5642
+ ValueError: If the `weight` is not a 0-D or 1-D Tensor.
4151
5643
 
4152
5644
  Supported Platforms:
4153
5645
  ``Ascend`` ``GPU`` ``CPU``
@@ -4167,7 +5659,7 @@ def prelu(x, weight):
4167
5659
  [ 2.00 3.00]
4168
5660
  [ 4.0 5.00]]]
4169
5661
  """
4170
- return prelu_op(x, weight)
5662
+ return prelu_op(input, weight)
4171
5663
 
4172
5664
 
4173
5665
  def prod_ext(input, axis=None, keep_dims=False, dtype=None):
@@ -4315,7 +5807,10 @@ def randperm(n, seed=0, offset=0, dtype=mstype.int64):
4315
5807
  that a given type can represent.
4316
5808
 
4317
5809
  .. warning::
4318
- This is an experimental API that is subject to change or deletion.
5810
+ - This is an experimental API that is subject to change or deletion.
5811
+ - The Ascend backend does not support the reproducibility of random numbers, so
5812
+ the `seed` parameter has no effect.
5813
+
4319
5814
 
4320
5815
  Args:
4321
5816
  n (Union[Tensor, int]): The input n Tensor with shape: () or (1,) and with data type of int64.
@@ -4697,6 +6192,63 @@ def flip(input, axis):
4697
6192
  return reverse_v2_impl(input, axis)
4698
6193
 
4699
6194
 
6195
+ def rfft2(input, s=None, dim=(-2, -1), norm=None):
6196
+ r"""
6197
+ Calculates the two dimensional discrete Fourier transform for real input `input`.
6198
+
6199
+ Note:
6200
+ - `rfft2` is currently only used in `mindscience` scientific computing scenarios and
6201
+ does not support other usage scenarios.
6202
+ - `rfft2` is not supported on Windows platform yet.
6203
+
6204
+ Args:
6205
+ input (Tensor): The input tensor.
6206
+ Supported dtypes:
6207
+
6208
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
6209
+
6210
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
6211
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `rfft2`.
6212
+ Default: ``None`` , in which case no padding or truncation is applied to `input`.
6213
+ dim (tuple[int], optional): The dimensions along which to perform `rfft2`.
6214
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
6215
+ norm (str, optional): Normalization mode. Default: ``None`` , which means ``"backward"`` .
6216
+ Three modes are defined below, where :math:`n = prod(s)`:
6217
+
6218
+ - ``"backward"`` (no normalization).
6219
+ - ``"forward"`` (normalize by :math:`1/n`).
6220
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
6221
+
6222
+ Returns:
6223
+ Tensor, the result of the `rfft2()` function; result.shape[dim[i]] is s[i], and for the last transformed dim,
6224
+ result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`.
6225
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
6226
+ When the input is float64, the return value type is complex128.
6227
+
6228
+ Raises:
6229
+ TypeError: If the `input` type is not Tensor.
6230
+ TypeError: If the `input` data type is not one of: int16, int32, int64, float16, float32, float64.
6231
+ TypeError: If the type/dtype of `s` and `dim` is not int.
6232
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
6233
+ ValueError: If `dim` has duplicate values.
6234
+ ValueError: If `s` is less than 1.
6235
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
6236
+
6237
+ Supported Platforms:
6238
+ ``Ascend`` ``CPU``
6239
+
6240
+ Examples:
6241
+ >>> import mindspore
6242
+ >>> from mindspore import Tensor, ops
6243
+ >>> input = ops.ones((2, 2))
6244
+ >>> ops.rfft2(input, s=(2, 2), dim=(0, 1), norm="backward")
6245
+ Tensor(shape=[2, 2], dtype=Complex64, value=
6246
+ [[4+0j, 0+0j],
6247
+ [0+0j, 0+0j]])
6248
+ """
6249
+ return rfft2_op(input, s, dim, norm)
6250
+
6251
+
4700
6252
  def rfft(input, n=None, dim=-1, norm=None):
4701
6253
  r"""
4702
6254
  Calculates the one dimensional discrete Fourier transform for real input `input`.
@@ -4739,20 +6291,117 @@ def rfft(input, n=None, dim=-1, norm=None):
4739
6291
  >>> import mindspore
4740
6292
  >>> from mindspore import Tensor, ops
4741
6293
  >>> input = Tensor([1, 2, 3, 4])
4742
- >>> y = ops.rfft(input)
6294
+ >>> y = ops.rfft(input, n=4, dim=-1, norm='backward')
4743
6295
  >>> print(y)
4744
6296
  [10.+0.j -2.+2.j -2.+0.j]
4745
6297
  """
4746
6298
  return rfft_op(input, n, dim, norm)
4747
6299
 
4748
6300
 
6301
+ def rfftfreq(n, d=1.0, dtype=None):
6302
+ r"""
6303
+ Computes the sample frequencies for `rfft` with a signal of size `n`.
6304
+ For instance, given a length `n` and a sample spacing `d` , the returned result `f` is:
6305
+
6306
+ .. math::
6307
+ f = [0, 1, ..., n // 2] / (d * n)
6308
+
6309
+ Note:
6310
+ - `rfftfreq` is currently only used in `mindscience` scientific computing scenarios and
6311
+ does not support other usage scenarios.
6312
+ - `rfftfreq` is not supported on Windows platform yet.
6313
+
6314
+ Args:
6315
+ n (int): Window length.
6316
+ d (float, optional): Sample spacing (inverse of the sampling rate). Default: ``1.0`` .
6317
+ dtype (mindspore.dtype, optional): The dtype of the returned frequencies. Default: ``None`` represents float32.
6318
+
6319
+ Returns:
6320
+ Tensor, array of length ``n // 2 + 1`` containing the sample frequencies.
6321
+
6322
+ Raises:
6323
+ ValueError: If `n` is less than 1.
6324
+
6325
+ Supported Platforms:
6326
+ ``Ascend`` ``CPU``
6327
+
6328
+ Examples:
6329
+ >>> import mindspore
6330
+ >>> from mindspore import ops
6331
+ >>> out = ops.rfftfreq(n=4, d=1.0)
6332
+ >>> print(out)
6333
+ [0. 0.25 0.5 ]
6334
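+ >>> # Editor's sketch (computed from the formula above): halving d doubles each frequency
+ >>> out = ops.rfftfreq(n=4, d=0.5)
+ >>> print(out)
+ [0.  0.5 1. ]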
+ """
6335
+ return rfftfreq_op(n, d, dtype)
6336
+
6337
+
6338
+ def rfftn(input, s=None, dim=None, norm=None):
6339
+ r"""
6340
+ Computes the N dimensional discrete Fourier transform for real input `input`.
6341
+
6342
+ Note:
6343
+ - `rfftn` is currently only used in `mindscience` scientific computing scenarios and
6344
+ does not support other usage scenarios.
6345
+ - `rfftn` is not supported on Windows platform yet.
6346
+
6347
+ Args:
6348
+ input (Tensor): The input tensor.
6349
+ Supported dtypes:
6350
+
6351
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
6352
+
6353
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
6354
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `rfftn`.
6355
+ Default: ``None`` , in which case no padding or truncation is applied to `input`.
6356
+ dim (tuple[int], optional): The dimensions along which to perform `rfftn`.
6357
+ Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
6358
+ norm (str, optional): Normalization mode. Default: ``None`` , which means ``"backward"`` .
6359
+ Three modes are defined below, where :math:`n = prod(s)`:
6360
+
6361
+ - ``"backward"`` (no normalization).
6362
+ - ``"forward"`` (normalize by :math:`1/n`).
6363
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
6364
+
6365
+ Returns:
6366
+ Tensor, the result of the `rfftn()` function; result.shape[dim[i]] is s[i], and for the last transformed dim,
6367
+ result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`.
6368
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
6369
+ When the input is float64, the return value type is complex128.
6370
+
6371
+ Raises:
6372
+ TypeError: If the `input` type is not Tensor.
6373
+ TypeError: If the `input` data type is not one of: int16, int32, int64, float16, float32, float64.
6374
+ TypeError: If the type/dtype of `s` and `dim` is not int.
6375
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
6376
+ ValueError: If `dim` has duplicate values.
6377
+ ValueError: If `s` is less than 1.
6378
+ ValueError: If `s` and `dim` are given but have different shapes.
6379
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
6380
+
6381
+ Supported Platforms:
6382
+ ``Ascend`` ``CPU``
6383
+
6384
+ Examples:
6385
+ >>> import mindspore
6386
+ >>> from mindspore import Tensor, ops
6387
+ >>> input = ops.ones((2, 2, 2))
6388
+ >>> ops.rfftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
6389
+ Tensor(shape=[2, 2, 2], dtype=Complex64, value=
6390
+ [[[8+0j, 0+0j],
6391
+ [0+0j, 0+0j]],
6392
+ [[0+0j, 0+0j],
6393
+ [0+0j, 0+0j]]])
6394
+ """
6395
+ return rfftn_op(input, s, dim, norm)
6396
+
6397
+
4749
6398
  def rms_norm(x, gamma, epsilon=1e-6):
4750
6399
  r"""
4751
6400
  The RmsNorm(Root Mean Square Layer Normalization) operator is a normalization operation. Compared to
4752
6401
  LayerNorm, it retains scaling invariance and removes translation invariance. Its formula is:
4753
6402
 
4754
6403
  .. math::
4755
- y=\frac{x_i}{\sqrt{\frac{1}{n}}\sum_{i=1}^{n}{ x_i^2}+\varepsilon }\gamma_i
6404
+ y=\frac{x_i}{\sqrt{\frac{1}{n}\sum_{i=1}^{n}{ x_i^2}+\varepsilon}}\gamma_i
4756
6405
 
4757
6406
  .. warning::
4758
6407
  This is an experimental API that is subject to change or deletion. This API is only supported in Atlas A2
@@ -4795,36 +6444,82 @@ def rms_norm(x, gamma, epsilon=1e-6):
4795
6444
  return rms_norm_impl(x, gamma, epsilon)
4796
6445
 
4797
6446
 
4798
- def round(input):
6447
+ def rotary_position_embedding(x, cos, sin, mode=0):
4799
6448
  r"""
4800
- Returns half to even of a tensor element-wise.
4801
-
4802
- .. math::
6449
+ Implements the Rotary Position Embedding algorithm.
6450
+ Refer to paper `Enhanced Transformer with Rotary Position Embedding <https://arxiv.org/pdf/2104.09864.pdf>`_.
4803
6451
 
4804
- out_i \approx input_i
6452
+ .. warning::
6453
+ This is an experimental API that is subject to change or deletion.
4805
6454
 
4806
6455
  Args:
4807
- input (Tensor): The input tensor.
6456
+ x (Tensor): 4D tensor, with float16, bfloat16 or float32 data type.
6457
+ cos (Tensor): 4D constant, has the same type as `x` , in the range of [-1, 1].
6458
+ sin (Tensor): Same as `cos` .
6459
+ mode (int): An optional attribute. Used to select a calculation mode. 0: rotate_half(GPT-NeoX style); 1: rotate_interleaved(GPT-J style). Defaults to ``0`` .
6460
+
6461
+ .. list-table:: Config layout constraints
6462
+ :widths: 5 20 20
6463
+ :header-rows: 1
6464
+
6465
+ * - Args
6466
+ - RotateHalf(mode:0)
6467
+ - RotateInterleaved(mode:1)
6468
+ * - x
6469
+ - Supported layout:
6470
+
6471
+ 11SD, B1SD, BNSD; D < 896 and D is even. B, N < 1000;
6472
+
6473
+ - Supported layout: 11SD, B1SD, BNSD;
6474
+
6475
+ D < 896 and D is even.
6476
+
6477
+ B, N < 1000;
6478
+ * - cos
6479
+ - Support layout for different values of `x`:
6480
+
6481
+ `x` is BNSD: 11SD, B1SD, BNSD;
6482
+
6483
+ `x` is BSND: 1S1D, BS1D, BSND;
6484
+
6485
+ `x` is SBND: S11D, SB1D, SBND
6486
+ - Support layout for different values of `x`:
6487
+
6488
+ `x` is BNSD: 11SD;
6489
+
6490
+ `x` is BSND: 1S1D;
6491
+
6492
+ `x` is SBND: S11D
6493
+ * - sin
6494
+ - Same with `cos` .
6495
+ - Same with `cos` .
6496
+
6497
+ .. note::
6498
+ When the layout is BNSD, B * N > 8S, and D is 32-byte aligned, the performance is poor; therefore, this interface cannot be called.
4808
6499
 
4809
6500
  Returns:
4810
- Tensor, has the same shape and type as the `input`.
6501
+ Tensor, has the same dtype and shape as the `x`.
4811
6502
 
4812
6503
  Raises:
4813
- TypeError: If `input` is not a Tensor.
6504
+ TypeError: If `x` is not a Tensor.
6505
+ TypeError: If `cos` is not a Tensor.
6506
+ TypeError: If `sin` is not a Tensor.
6507
+ TypeError: If `mode` is not an int.
4814
6508
 
4815
6509
  Supported Platforms:
4816
- ``Ascend`` ``GPU`` ``CPU``
6510
+ ``Ascend``
4817
6511
 
4818
6512
  Examples:
4819
- >>> import mindspore
4820
6513
  >>> import numpy as np
4821
6514
  >>> from mindspore import Tensor, ops
4822
- >>> input = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
4823
- >>> output = ops.round(input)
4824
- >>> print(output)
4825
- [ 1. 2. 2. 2. -4.]
6515
+ >>> x = Tensor(np.random.uniform(-2, 2, (4, 8192, 4, 128)))
6516
+ >>> cos = Tensor(np.random.uniform(-1, 1, (1, 8192, 1, 128)))
6517
+ >>> sin = Tensor(np.random.uniform(-1, 1, (1, 8192, 1, 128)))
6518
+ >>> output = ops.rotary_position_embedding(x, cos, sin, 0)
6519
+ >>> print(output.shape)
6520
+ (4, 8192, 4, 128)
4826
6521
  """
4827
- return round_op(input)
6522
+ return rotary_position_embedding_op(x, cos, sin, mode)
4828
6523
 
4829
6524
 
4830
6525
  def rsqrt(input):
@@ -4850,7 +6545,7 @@ def rsqrt(input):
4850
6545
 
4851
6546
  Examples:
4852
6547
  >>> import mindspore as ms
4853
- >>> import mindspore.ops as ops
6548
+ >>> from mindspore import ops
4854
6549
  >>> input = ms.Tensor([-0.0370, 0.2970, 1.5420, -0.9105])
4855
6550
  >>> output = ops.rsqrt(input)
4856
6551
  >>> print(output)
@@ -4999,7 +6694,41 @@ def scatter_nd(indices, updates, shape):
4999
6694
  [0. 1.1 0.]
5000
6695
  [0. 0. 0.]]
5001
6696
  """
5002
- return scatter_nd_op(indices, updates, shape)
6697
+ return scatter_nd_op(indices, updates, shape)
6698
+
6699
+
6700
+ def select_ext(input, dim, index):
6701
+ r"""
6702
+ Slices the input tensor along the selected dimension at the given index.
6703
+
6704
+ .. warning::
6705
+ This is an experimental API that is subject to change or deletion.
6706
+
6707
+ Args:
6708
+ input (Tensor): the input tensor.
6709
+ dim (int): the dimension to slice.
6710
+ index (int): the index to select with.
6711
+
6712
+ Returns:
6713
+ Tensor.
6714
+
6715
+ Raises:
6716
+ TypeError: If input is not a Tensor.
6717
+
6718
+ Supported Platforms:
6719
+ ``Ascend``
6720
+
6721
+ Examples:
6722
+ >>> import mindspore
6723
+ >>> from mindspore import Tensor, mint
6724
+ >>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
6725
+ >>> y = mint.select(input, 0, 0)
6726
+ >>> y = Tensor([1,2], mindspore.float32)
6727
+ >>> print(y)
6728
+ [2 3 4 5]
6729
+
6730
+ """
6731
+ return select_ext_op(input, dim, index)
5003
6732
 
5004
6733
 
5005
6734
  def select(condition, input, other):
@@ -5021,12 +6750,12 @@ def select(condition, input, other):
5021
6750
  The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
5022
6751
  input (Union[Tensor, int, float]): The first Tensor to be selected.
5023
6752
  If input is a Tensor, its shape should be or be braodcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
5024
- If input is int or float, it will be casted to int32 or float32, and broadcast to the same shape as y.
5025
- There must be at least one Tensor between x and y.
6753
+ If input is int or float, it will be casted to int32 or float32, and broadcast to the same shape as other.
6754
+ There must be at least one Tensor between input and other.
5026
6755
  other (Union[Tensor, int, float]): The second Tensor to be selected.
5027
6756
  If other is a Tensor, its shape should be or be braodcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
5028
- If other is int or float, it will be casted to int32 or float32, and broadcast to the same shape as y.
5029
- There must be at least one Tensor between x and y.
6757
+ If other is int or float, it will be casted to int32 or float32, and broadcast to the same shape as input.
6758
+ There must be at least one Tensor between input and other.
5030
6759
 
5031
6760
  Returns:
5032
6761
  Tensor, has the same shape as `condition`.
@@ -5053,6 +6782,70 @@ def select(condition, input, other):
5053
6782
  return select_op(condition, input, other)
5054
6783
 
5055
6784
 
6785
+ def select_v2(condition, input, other):
6786
+ r"""
6787
+
6788
+ """
6789
+ return select_v2_op(condition, input, other)
6790
+
6791
+
6792
+ def selu_ext(input):
6793
+ r"""
6794
+ Activation function SELU (Scaled exponential Linear Unit).
6795
+
6796
+ The activation function is defined as:
6797
+
6798
+ .. math::
6799
+ E_{i} =
6800
+ scale *
6801
+ \begin{cases}
6802
+ x_{i}, &\text{if } x_{i} \geq 0; \cr
6803
+ \text{alpha} * (\exp(x_i) - 1), &\text{otherwise.}
6804
+ \end{cases}
6805
+
6806
+ where :math:`alpha` and :math:`scale` are pre-defined constants(:math:`alpha=1.67326324`
6807
+ and :math:`scale=1.05070098`).
6808
+
6809
+ See more details in `Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_.
6810
+
6811
+ SELU Activation Function Graph:
6812
+
6813
+ .. image:: ../images/SeLU.png
6814
+ :align: center
6815
+
6816
+ Args:
6817
+ input (Tensor): Tensor of any dimension.
6818
+ The data type is float16, float32, bfloat16.
6819
+
6820
+ Returns:
6821
+ Tensor, with the same type and shape as the `input`.
6822
+
6823
+ Raises:
6824
+ TypeError: If dtype of `input` is not float16, float32, bfloat16.
6825
+
6826
+ Supported Platforms:
6827
+ ``Ascend``
6828
+
6829
+ Examples:
6830
+ >>> import mindspore
6831
+ >>> from mindspore import Tensor, mint
6832
+ >>> import numpy as np
6833
+ >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
6834
+ >>> output = mint.nn.functional.selu(input)
6835
+ >>> print(output)
6836
+ [[-1.1113307 4.202804 -1.7575096]
6837
+ [ 2.101402 -1.7462534 9.456309 ]]
6838
+ """
6839
+ return selu_ext_op(input)
6840
+
6841
+
6842
+ def selu_grad(gradient, result):
6843
+ r"""
6844
+
6845
+ """
6846
+ return selu_grad_op(gradient, result)
6847
+
6848
+
5056
6849
  def sequence_concat(x, axis=0):
5057
6850
  r"""
5058
6851
  Support sequence Concat operation.
@@ -5079,6 +6872,14 @@ def sequence_concat(x, axis=0):
5079
6872
  return sequence_concat_op(x)
5080
6873
 
5081
6874
 
6875
+ def shard_identity(input):
6876
+ r"""
6877
+ A intermediate operator only be created when using mindspore.shard or
6878
+ cell.shard during parallel procedure. Will not be exposed to the users.
6879
+ """
6880
+ return shard_identity_op(input)
6881
+
6882
+
5082
6883
  def sigmoid(input):
5083
6884
  r"""
5084
6885
  Computes Sigmoid of input element-wise. The Sigmoid function is defined as:
@@ -5122,7 +6923,7 @@ def sigmoid(input):
5122
6923
 
5123
6924
  def sign(input):
5124
6925
  r"""
5125
- Returns an element-wise indication of the sign of a number. Notice: When the input dtype is float64, the gradient of this operator is NaN.
6926
+ Returns an element-wise indication of the sign of a number. Notice: When the input is NaN and dtype is float64, the output of this operator is NaN.
5126
6927
 
5127
6928
  .. math::
5128
6929
  \text{out}_{i} = \begin{cases}
@@ -5285,12 +7086,20 @@ def sinh(input):
5285
7086
 
5286
7087
  Args:
5287
7088
  input (Tensor): The input tensor of hyperbolic sine function.
7089
+ Supported dtypes:
7090
+
7091
+ - GPU/CPU: float16, float32, float64, complex64 or complex128.
7092
+ - Ascend: bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
5288
7093
 
5289
7094
  Returns:
5290
- Tensor, has the same shape as `input`.
7095
+ Tensor, has the same shape as the `input`.
7096
+ The dtype of output is float32 when dtype of `input` is in
7097
+ [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as the `input`.
5291
7098
 
5292
- Raises:
5293
- TypeError: If `input` is not a Tensor.
7099
+ :raise TypeError: If `input` is not a Tensor.
7100
+ :raise TypeError:
7101
+ * CPU/GPU: If dtype of `input` is not float16, float32, float64, complex64 or complex128.
7102
+ * Ascend: If dtype of `input` is not bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
5294
7103
 
5295
7104
  Supported Platforms:
5296
7105
  ``Ascend`` ``GPU`` ``CPU``
@@ -5349,6 +7158,88 @@ def softplus_ext(input, beta=1, threshold=20):
5349
7158
  return softplus_ext_op(input, beta, threshold)
5350
7159
 
5351
7160
 
7161
+ def softshrink_grad(input_grad, input_x, lambd=0.5):
7162
+ r"""
7163
+ Computes gradients for the SoftShrink operation.
7164
+
7165
+ Args:
7166
+ input_grad (Tensor): the gradients of loss to output of SoftShrink function. Supported dtypes:
7167
+
7168
+ - Ascend: float16, float32, bfloat16.
7169
+ - CPU/GPU: float16, float32.
7170
+ input_x (Tensor): Must be the input `input` of the forward operator SoftShrink. Supported dtypes:
7171
+
7172
+ - Ascend: float16, float32, bfloat16.
7173
+ - CPU/GPU: float16, float32.
7174
+ lambd (float): the lambda value for the Softshrink formulation. Default: ``0.5`` .
7175
+
7176
+ Returns:
7177
+ backprops, a Tensor with the same shape and data type as `input_x`.
7178
+
7179
+ Raises:
7180
+ ValueError: If `lambd` is not a float.
7181
+ ValueError: If shape of `input_grad` is not the same as `input_x`.
7182
+ TypeError: If dtype of `input_grad` is not the same as `input_x`.
7183
+ TypeError: If dtype of `input_grad` or `input_x` is not float16, float32 or bfloat16.
7184
+
7185
+ Supported Platforms:
7186
+ ``Ascend`` ``GPU`` ``CPU``
7187
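+
+ Examples:
+ >>> # Editor's illustrative sketch (not in the original docstring; assumes this
+ >>> # function is reachable as ops.softshrink_grad): the incoming gradient passes
+ >>> # through where |input_x| > lambd and is zeroed elsewhere.
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input_grad = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
+ >>> input_x = Tensor(np.array([-1.5, 0.2, 0.8]), mindspore.float32)
+ >>> output = ops.softshrink_grad(input_grad, input_x, 0.5)
+ >>> print(output)
+ [1. 0. 1.]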
+ """
7188
+ return softshrink_grad_impl(input_grad, input_x, lambd)
7189
+
7190
+
7191
+ def softshrink(input, lambd=0.5):
7192
+ r"""
7193
+ Soft Shrink activation function. Calculates the output according to the input elements.
7194
+
7195
+ The formula is defined as follows:
7196
+
7197
+ .. math::
7198
+ \text{SoftShrink}(x) =
7199
+ \begin{cases}
7200
+ x - \lambda, & \text{ if } x > \lambda \\
7201
+ x + \lambda, & \text{ if } x < -\lambda \\
7202
+ 0, & \text{ otherwise }
7203
+ \end{cases}
7204
+
7205
+ SoftShrink Activation Function Graph:
7206
+
7207
+ .. image:: ../images/Softshrink.png
7208
+ :align: center
7209
+
7210
+ Args:
7211
+ input (Tensor): The input of Soft Shrink. Supported dtypes:
7212
+
7213
+ - Ascend: float16, float32, bfloat16.
7214
+ - CPU/GPU: float16, float32.
7215
+ lambd (number, optional): The threshold :math:`\lambda` defined by the Soft Shrink formula.
7216
+ It should be greater than or equal to 0, default: ``0.5`` .
7217
+
7218
+ Returns:
7219
+ Tensor, has the same data type and shape as the input `input`.
7220
+
7221
+ Raises:
7222
+ TypeError: If `lambd` is not a float, int or bool.
7223
+ TypeError: If `input` is not a tensor.
7224
+ TypeError: If dtype of `input` is not float16, float32 or bfloat16.
7225
+
7226
+ Supported Platforms:
7227
+ ``Ascend`` ``GPU`` ``CPU``
7228
+
7229
+ Examples:
7230
+ >>> import mindspore
7231
+ >>> from mindspore import Tensor
7232
+ >>> from mindspore import ops
7233
+ >>> import numpy as np
7234
+ >>> x = Tensor(np.array([[ 0.5297, 0.7871, 1.1754], [ 0.7836, 0.6218, -1.1542]]), mindspore.float32)
7235
+ >>> output = ops.softshrink(x)
7236
+ >>> print(output)
7237
+ [[ 0.0297 0.2871 0.6754]
7238
+ [ 0.2836 0.1218 -0.6542]]
7239
+ """
7240
+ return softshrink_impl(input, lambd)
7241
+
7242
+
5352
7243
  def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False):
5353
7244
  r"""
5354
7245
  Solve the linear system :math:`a x = b` for `x`, Assuming `a` is a triangular matrix.
@@ -5694,12 +7585,12 @@ def sub_ext(input, other, alpha=1):
5694
7585
  Args:
5695
7586
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
5696
7587
  a bool or a tensor whose data type is
5697
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
5698
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
7588
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
7589
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
5699
7590
  other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
5700
7591
  a bool or a tensor whose data type is
5701
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
5702
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
7592
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
7593
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
5703
7594
  alpha (number.Number): A scaling factor applied to `other`, default 1.
5704
7595
 
5705
7596
  Returns:
@@ -5751,8 +7642,8 @@ def sub(input, other):
5751
7642
  Args:
5752
7643
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
5753
7644
  a bool or a tensor whose data type is
5754
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
5755
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
7645
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
7646
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
5756
7647
  other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
5757
7648
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
5758
7649
 
@@ -5779,6 +7670,83 @@ def sub(input, other):
5779
7670
  return sub_op(input, other)
5780
7671
 
5781
7672
 
7673
+ def swiglu_grad(grad_output, input, dim=-1):
7674
+ r"""
7675
+
7676
+ """
7677
+ return swiglu_grad_op(grad_output, input, dim)
7678
+
7679
+
7680
+ def swiglu(input, dim=-1):
7681
+ r"""
7682
+ Computes SwiGLU (Swish-Gated Linear Unit activation function) of input tensor.
7683
+ SwiGLU is a variant of the :class:`mindspore.ops.GLU` activation function, it is defined as:
7684
+
7685
+ .. math::
7686
+ {SwiGLU}(a, b)= Swish(a) \otimes b
7687
+
7688
+ where :math:`a` is the first half of the `input` matrices and :math:`b` is the second half,
7689
+ Swish(a)=a :math:`\sigma` (a), :math:`\sigma` is the :func:`mindspore.ops.sigmoid` activation function
7690
+ and :math:`\otimes` is the Hadamard product.
7691
+
7692
+ Args:
7693
+ input (Tensor): Tensor to be split. It has shape :math:`(\ast_1, N, \ast_2)`
7694
+ where :math:`\ast` means any number of additional dimensions. :math:`N` must be divisible by 2.
7695
+ dim (int, optional): the axis to split the input. It must be int. Default: ``-1`` , the last axis of `input`.
7696
+
7697
+ Returns:
7698
+ Tensor, the same dtype as the `input`, with the shape :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`.
7699
+
7700
+ Raises:
7701
+ TypeError: If dtype of `input` is not float16, float32 or bfloat16.
7702
+ TypeError: If `input` is not a Tensor.
7703
+ RuntimeError: If the size of the dimension specified by `dim` is not divisible by 2.
7704
+
7705
+ Supported Platforms:
7706
+ ``Ascend``
7707
+
7708
+ Examples:
7709
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
7710
+ >>> input = Tensor([[-0.12, 0.123, 31.122], [2.1223, 4.1212121217, 0.3123]], dtype=mindspore.float32)
7711
+ >>> output = ops.swiglu(input, 0)
7712
+ >>> print(output)
7713
+ [[-0.11970687 0.2690224 9.7194 ]]
7714
+ """
7715
+ return swiglu_op(input, dim)
7716
+
7717
+
7718
+ def tan(input):
7719
+ r"""
7720
+ Computes tangent of `input` element-wise.
7721
+
7722
+ .. math::
7723
+
7724
+ out_i = \tan(input_i)
7725
+
7726
+ Args:
7727
+ input (Tensor): The input Tensor, valid for any dimensions.
7728
+
7729
+ Returns:
7730
+ Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
7731
+
7732
+ Raises:
7733
+ TypeError: If `input` is not a Tensor.
7734
+
7735
+ Supported Platforms:
7736
+ ``Ascend`` ``GPU`` ``CPU``
7737
+
7738
+ Examples:
7739
+ >>> import mindspore
7740
+ >>> import numpy as np
7741
+ >>> from mindspore import Tensor, ops
7742
+ >>> input = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
7743
+ >>> output = ops.tan(input)
7744
+ >>> print(output)
7745
+ [-1.5574077 0. 1.5574077]
7746
+ """
7747
+ return tan_op(input)
7748
+
7749
+
5782
7750
  def tanh(input):
5783
7751
  r"""
5784
7752
  Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
@@ -5818,6 +7786,14 @@ def tanh(input):
5818
7786
  return tanh_op(input)
5819
7787
 
5820
7788
 
7789
+ def tensor_scatter_elements(data, indices, updates, axis=0, reduce='none'):
7790
+ r"""
7791
+
7792
+ """
7793
+ tensor_scatter_elements_op = _get_cache_prim(TensorScatterElements)(axis, reduce)
7794
+ return tensor_scatter_elements_op(data, indices, updates)
7795
+
7796
+
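+ # Editor's note (not in the source): a minimal sketch of the expected semantics,
+ # assuming this wrapper matches the usual scatter-elements behaviour, i.e. for
+ # axis=0 and reduce='none': output[indices[i][j]][j] = updates[i][j].
+ # >>> data = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
+ # >>> indices = Tensor(np.array([[1, 0]]), mindspore.int32)
+ # >>> updates = Tensor(np.array([[9.0, 8.0]]), mindspore.float32)
+ # >>> print(tensor_scatter_elements(data, indices, updates, axis=0))
+ # [[1. 8. 3.]
+ #  [9. 5. 6.]]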
5821
7797
  def topk_ext(input, k, dim=-1, largest=True, sorted=True):
5822
7798
  r"""
5823
7799
  Finds values and indices of the `k` largest or smallest entries along a given dimension.
@@ -5891,7 +7867,7 @@ def topk_ext(input, k, dim=-1, largest=True, sorted=True):
5891
7867
  return topk_ext_op(input, k, dim, largest, sorted)
5892
7868
 
5893
7869
 
5894
- def topkrouter(input, capacity, expert_num):
7870
+ def topkrouter(input, capacity, expert_num, drop_type=0):
5895
7871
  r"""
5896
7872
  TopkRouter implementation in MOE.
5897
7873
 
@@ -5899,6 +7875,7 @@ def topkrouter(input, capacity, expert_num):
5899
7875
  - **x** (Tensor) - Input Tensor of 3D, Supporting types:[int32, int64]
5900
7876
  - **capacity** (Int64) - The maximum number of tokens each expert can handle
5901
7877
  - **expert_num** (Int64) - The number of expert.
7878
+ - **drop_type** (Int64) - Drop mode: ``0`` means S-Drop, ``1`` means K-Drop. Default: ``0`` .
5902
7879
 
5903
7880
  Outputs:
5904
7881
  tuple(Tensor), tuple of 2 tensors, `dispatch_index` and `combine_index`.
@@ -5908,7 +7885,48 @@ def topkrouter(input, capacity, expert_num):
5908
7885
  Supported Platforms:
5909
7886
  ``Ascend``
5910
7887
  """
5911
- return topkrouter_op(input, capacity, expert_num)
7888
+ return topkrouter_op(input, capacity, expert_num, drop_type)
7889
+
7890
+
7891
+ def trace_ext(input):
7892
+ r"""
7893
+ Returns a new tensor that is the sum of the elements on the main diagonal of `input`.
7894
+
7895
+ Note:
7896
+ Input must be a tensor.
7897
+
7898
+ Args:
7899
+ input (Tensor): 2-D Tensor.
7900
+
7901
+ Returns:
7902
+ Tensor, with size 1. When the data type of `input` is integer or bool, its data type is int64; otherwise it is the same as `input`.
7903
+
7904
+ Raises:
7905
+ TypeError: If `input` is not a Tensor.
7906
+ ValueError: If the dimension of `input` is not equal to 2.
7907
+ TypeError: If the dtype of `input` is not one of float16, float32, float64, bool, uint8, int8, int16, int32, int64, complex64, complex128, bfloat16.
7908
+
7909
+ Supported Platforms:
7910
+ ``Ascend``
7911
+
7912
+ Examples:
7913
+ >>> import mindspore
7914
+ >>> import numpy as np
7915
+ >>> from mindspore import Tensor, ops
7916
+ >>> input = Tensor(np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]]), mindspore.float32)
7917
+ >>> output = ops.trace_ext(input)
7918
+ >>> print(output)
7919
+ 42.0
7920
+ >>> input = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
7921
+ >>> output = ops.trace_ext(input)
7922
+ >>> print(output)
7923
+ 18.0
7924
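+ >>> # Editor's note: for non-square input the main diagonal has min(rows, cols)
+ >>> # entries; here the diagonal is 1, 6, 11, so the trace is 18.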
+ >>> input = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
7925
+ >>> output = ops.trace_ext(input)
7926
+ >>> print(output)
7927
+ 24.0
7928
+ """
7929
+ return trace_ext_op(input)
5912
7930
 
5913
7931
 
5914
7932
  def trace(input):
@@ -5951,6 +7969,41 @@ def trace(input):
5951
7969
  return trace_op(input)
5952
7970
 
5953
7971
 
7972
+ def transpose_ext(input, dim0, dim1):
7973
+ r"""
7974
+ Interchange two axes of a tensor.
7975
+
7976
+ .. warning::
7977
+ This is an experimental API that is subject to change or deletion.
7978
+
7979
+ Args:
7980
+ input (Tensor): Input tensor.
7981
+ dim0 (int): First axis.
7982
+ dim1 (int): Second axis.
7983
+
7984
+ Returns:
7985
+ Transposed tensor, has the same data type as `input`.
7986
+
7987
+ Raises:
7988
+ TypeError: If argument `input` is not Tensor.
7989
+ TypeError: If `dim0` or `dim1` is not integer.
7990
+ ValueError: If `dim0` or `dim1` is not in the range of :math:`[-ndim, ndim-1]`.
7991
+
7992
+ Supported Platforms:
7993
+ ``Ascend``
7994
+
7995
+ Examples:
7996
+ >>> import numpy as np
7997
+ >>> from mindspore import mint
7998
+ >>> from mindspore import Tensor
7999
+ >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
8000
+ >>> output = mint.transpose(input, 0, 2)
8001
+ >>> print(output.shape)
8002
+ (4, 3, 2)
8003
+ """
8004
+ return transpose_ext_op(input, dim0, dim1)
8005
+
8006
+
5954
8007
  def transpose(input, input_perm):
5955
8008
  r"""
5956
8009
  Permutes the dimensions of the input tensor according to input permutation.
@@ -6002,9 +8055,16 @@ def transpose(input, input_perm):
6002
8055
  return transpose_op(input, input_perm)
6003
8056
 
6004
8057
 
8058
+ def tril_ext(input, diagonal=0):
8059
+ r"""
8060
+
8061
+ """
8062
+ return tril_ext_impl(input, diagonal)
8063
+
8064
+
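+ # Editor's note (not in the source): presumably the extended counterpart of tril;
+ # a reference sketch of the standard lower-triangle semantics via NumPy:
+ # >>> np.tril(np.arange(1, 10).reshape(3, 3))
+ # array([[1, 0, 0],
+ #        [4, 5, 0],
+ #        [7, 8, 9]])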
6005
8065
  def triu(input, diagonal=0):
6006
8066
  r"""
6007
- Returns the upper triangle part of 'input' (elements that contain the diagonal and below),
8067
+ Returns the upper triangle part of `input` (elements that contain the diagonal and below),
6008
8068
  and set the other elements to zeros.
6009
8069
 
6010
8070
  .. warning::
@@ -6016,7 +8076,7 @@ def triu(input, diagonal=0):
6016
8076
  indicating the main diagonal.
6017
8077
 
6018
8078
  Returns:
6019
- Tensor, a tensor has the same shape and data type as input.
8079
+ Tensor, a tensor has the same shape and data type as `input`.
6020
8080
 
6021
8081
  Raises:
6022
8082
  TypeError: If `diagonal` is not an int.
@@ -6063,6 +8123,34 @@ def triu(input, diagonal=0):
6063
8123
  return triu_impl(input, diagonal)
6064
8124
 
6065
8125
 
8126
+ def trunc(input):
8127
+ r"""
8128
+ Returns a new tensor with the truncated integer values of the elements of the input tensor.
8129
+
8130
+ Args:
8131
+ input (Tensor): The input tensor.
8132
+
8133
+ Returns:
8134
+ Tensor, the same shape and data type as the input.
8135
+
8136
+ Raises:
8137
+ TypeError: If `input` is not a Tensor.
8138
+
8139
+ Supported Platforms:
8140
+ ``Ascend`` ``GPU`` ``CPU``
8141
+
8142
+ Examples:
8143
+ >>> import mindspore
8144
+ >>> import numpy as np
8145
+ >>> from mindspore import Tensor, ops
8146
+ >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
8147
+ >>> output = ops.trunc(x)
8148
+ >>> print(output)
8149
+ [3. 0. 0. -3.]
8150
+ """
8151
+ return trunc_op(input)
8152
+
8153
+
6066
8154
  def tuple_to_tensor(input_tuple, dtype=None):
6067
8155
  r"""
6068
8156
 
@@ -6429,11 +8517,11 @@ def moe_finalize_routing(expanded_x, x1, x2=None, bias=None, scales=None, expand
6429
8517
  return moe_finalize_routing_op(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
6430
8518
 
6431
8519
 
6432
- def quant_batch_matmul(x1, x2, scale, offset=None, bias=None, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
8520
+ def quant_batch_matmul(x1, x2, scale, offset=None, bias=None, pertokenScaleOptional=None, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
6433
8521
  r"""
6434
8522
 
6435
8523
  """
6436
- return quant_batch_matmul_impl(x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype)
8524
+ return quant_batch_matmul_impl(x1, x2, scale, offset, bias, pertokenScaleOptional, transpose_x1, transpose_x2, dtype)
6437
8525
 
6438
8526
 
6439
8527
  def weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None, transpose_x=False, transpose_weight=False, antiquant_group_size=0):