mindspore 2.3.0__cp310-cp310-win_amd64.whl → 2.4.0__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (308)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +3 -1
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +50 -9
  9. mindspore/_extends/parse/compile_config.py +41 -0
  10. mindspore/_extends/parse/parser.py +9 -7
  11. mindspore/_extends/parse/standard_method.py +52 -14
  12. mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
  13. mindspore/amp.py +24 -10
  14. mindspore/atlprov.dll +0 -0
  15. mindspore/avcodec-59.dll +0 -0
  16. mindspore/avdevice-59.dll +0 -0
  17. mindspore/avfilter-8.dll +0 -0
  18. mindspore/avformat-59.dll +0 -0
  19. mindspore/avutil-57.dll +0 -0
  20. mindspore/c1.dll +0 -0
  21. mindspore/c1xx.dll +0 -0
  22. mindspore/c2.dll +0 -0
  23. mindspore/common/__init__.py +6 -4
  24. mindspore/common/_pijit_context.py +190 -0
  25. mindspore/common/_register_for_tensor.py +2 -1
  26. mindspore/common/_tensor_overload.py +139 -0
  27. mindspore/common/api.py +102 -87
  28. mindspore/common/dump.py +5 -6
  29. mindspore/common/generator.py +1 -7
  30. mindspore/common/hook_handle.py +14 -26
  31. mindspore/common/mindir_util.py +2 -2
  32. mindspore/common/parameter.py +46 -13
  33. mindspore/common/recompute.py +39 -9
  34. mindspore/common/sparse_tensor.py +7 -3
  35. mindspore/common/tensor.py +209 -29
  36. mindspore/communication/__init__.py +1 -1
  37. mindspore/communication/_comm_helper.py +38 -3
  38. mindspore/communication/comm_func.py +310 -55
  39. mindspore/communication/management.py +14 -14
  40. mindspore/context.py +123 -22
  41. mindspore/dataset/__init__.py +1 -1
  42. mindspore/dataset/audio/__init__.py +1 -1
  43. mindspore/dataset/core/config.py +7 -0
  44. mindspore/dataset/core/validator_helpers.py +7 -0
  45. mindspore/dataset/engine/cache_client.py +1 -1
  46. mindspore/dataset/engine/datasets.py +72 -44
  47. mindspore/dataset/engine/datasets_audio.py +7 -7
  48. mindspore/dataset/engine/datasets_standard_format.py +53 -3
  49. mindspore/dataset/engine/datasets_text.py +20 -20
  50. mindspore/dataset/engine/datasets_user_defined.py +174 -104
  51. mindspore/dataset/engine/datasets_vision.py +33 -33
  52. mindspore/dataset/engine/iterators.py +29 -0
  53. mindspore/dataset/engine/obs/util.py +7 -0
  54. mindspore/dataset/engine/queue.py +114 -60
  55. mindspore/dataset/engine/serializer_deserializer.py +2 -2
  56. mindspore/dataset/engine/validators.py +34 -14
  57. mindspore/dataset/text/__init__.py +1 -4
  58. mindspore/dataset/transforms/__init__.py +0 -3
  59. mindspore/dataset/utils/line_reader.py +2 -0
  60. mindspore/dataset/vision/__init__.py +1 -4
  61. mindspore/dataset/vision/utils.py +1 -1
  62. mindspore/dataset/vision/validators.py +2 -1
  63. mindspore/dnnl.dll +0 -0
  64. mindspore/dpcmi.dll +0 -0
  65. mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
  66. mindspore/experimental/es/embedding_service.py +883 -0
  67. mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
  68. mindspore/experimental/llm_boost/__init__.py +21 -0
  69. mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
  70. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  71. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  72. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  73. mindspore/experimental/llm_boost/register.py +129 -0
  74. mindspore/experimental/llm_boost/utils.py +31 -0
  75. mindspore/experimental/optim/adamw.py +85 -0
  76. mindspore/experimental/optim/optimizer.py +3 -0
  77. mindspore/hal/__init__.py +3 -3
  78. mindspore/hal/contiguous_tensors_handle.py +175 -0
  79. mindspore/hal/stream.py +18 -0
  80. mindspore/include/api/model_group.h +13 -1
  81. mindspore/include/api/types.h +10 -10
  82. mindspore/include/dataset/config.h +2 -2
  83. mindspore/include/dataset/constants.h +2 -2
  84. mindspore/include/dataset/execute.h +2 -2
  85. mindspore/include/dataset/vision.h +4 -0
  86. mindspore/jpeg62.dll +0 -0
  87. mindspore/log.py +1 -1
  88. mindspore/mindrecord/filewriter.py +68 -51
  89. mindspore/mindspore_backend.dll +0 -0
  90. mindspore/mindspore_common.dll +0 -0
  91. mindspore/mindspore_core.dll +0 -0
  92. mindspore/mindspore_glog.dll +0 -0
  93. mindspore/mindspore_np_dtype.dll +0 -0
  94. mindspore/mindspore_ops.dll +0 -0
  95. mindspore/mint/__init__.py +495 -46
  96. mindspore/mint/distributed/__init__.py +31 -0
  97. mindspore/mint/distributed/distributed.py +254 -0
  98. mindspore/mint/nn/__init__.py +266 -21
  99. mindspore/mint/nn/functional.py +125 -19
  100. mindspore/mint/nn/layer/__init__.py +39 -0
  101. mindspore/mint/nn/layer/activation.py +133 -0
  102. mindspore/mint/nn/layer/normalization.py +477 -0
  103. mindspore/mint/nn/layer/pooling.py +110 -0
  104. mindspore/mint/optim/adamw.py +28 -7
  105. mindspore/mint/special/__init__.py +63 -0
  106. mindspore/msobj140.dll +0 -0
  107. mindspore/mspdb140.dll +0 -0
  108. mindspore/mspdbcore.dll +0 -0
  109. mindspore/mspdbst.dll +0 -0
  110. mindspore/mspft140.dll +0 -0
  111. mindspore/msvcdis140.dll +0 -0
  112. mindspore/msvcp140_1.dll +0 -0
  113. mindspore/msvcp140_2.dll +0 -0
  114. mindspore/msvcp140_atomic_wait.dll +0 -0
  115. mindspore/msvcp140_codecvt_ids.dll +0 -0
  116. mindspore/multiprocessing/__init__.py +2 -1
  117. mindspore/nn/__init__.py +0 -1
  118. mindspore/nn/cell.py +275 -93
  119. mindspore/nn/layer/activation.py +211 -44
  120. mindspore/nn/layer/basic.py +113 -3
  121. mindspore/nn/layer/embedding.py +120 -2
  122. mindspore/nn/layer/normalization.py +101 -5
  123. mindspore/nn/layer/padding.py +34 -48
  124. mindspore/nn/layer/pooling.py +161 -7
  125. mindspore/nn/layer/transformer.py +3 -3
  126. mindspore/nn/loss/__init__.py +2 -2
  127. mindspore/nn/loss/loss.py +84 -6
  128. mindspore/nn/optim/__init__.py +2 -1
  129. mindspore/nn/optim/adadelta.py +1 -1
  130. mindspore/nn/optim/adam.py +1 -1
  131. mindspore/nn/optim/lamb.py +1 -1
  132. mindspore/nn/optim/tft_wrapper.py +127 -0
  133. mindspore/nn/wrap/cell_wrapper.py +12 -23
  134. mindspore/nn/wrap/grad_reducer.py +5 -5
  135. mindspore/nn/wrap/loss_scale.py +17 -3
  136. mindspore/numpy/__init__.py +1 -1
  137. mindspore/numpy/array_creations.py +65 -68
  138. mindspore/numpy/array_ops.py +64 -60
  139. mindspore/numpy/fft.py +610 -75
  140. mindspore/numpy/logic_ops.py +11 -10
  141. mindspore/numpy/math_ops.py +85 -84
  142. mindspore/numpy/utils_const.py +4 -4
  143. mindspore/opencv_core452.dll +0 -0
  144. mindspore/opencv_imgcodecs452.dll +0 -0
  145. mindspore/opencv_imgproc452.dll +0 -0
  146. mindspore/ops/__init__.py +6 -4
  147. mindspore/ops/_grad_experimental/grad_comm_ops.py +47 -3
  148. mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
  149. mindspore/ops/_vmap/vmap_array_ops.py +2 -4
  150. mindspore/ops/_vmap/vmap_math_ops.py +17 -1
  151. mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
  152. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +85 -7
  153. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
  154. mindspore/ops/auto_generate/gen_extend_func.py +734 -13
  155. mindspore/ops/auto_generate/gen_ops_def.py +2420 -381
  156. mindspore/ops/auto_generate/gen_ops_prim.py +5196 -1659
  157. mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
  158. mindspore/ops/composite/base.py +85 -48
  159. mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
  160. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
  161. mindspore/ops/function/__init__.py +22 -0
  162. mindspore/ops/function/array_func.py +490 -153
  163. mindspore/ops/function/debug_func.py +113 -1
  164. mindspore/ops/function/fft_func.py +15 -2
  165. mindspore/ops/function/grad/grad_func.py +3 -2
  166. mindspore/ops/function/math_func.py +558 -207
  167. mindspore/ops/function/nn_func.py +817 -383
  168. mindspore/ops/function/other_func.py +3 -2
  169. mindspore/ops/function/random_func.py +184 -8
  170. mindspore/ops/function/reshard_func.py +13 -11
  171. mindspore/ops/function/sparse_unary_func.py +1 -1
  172. mindspore/ops/function/vmap_func.py +3 -2
  173. mindspore/ops/functional.py +24 -14
  174. mindspore/ops/op_info_register.py +3 -3
  175. mindspore/ops/operations/__init__.py +6 -1
  176. mindspore/ops/operations/_grad_ops.py +2 -76
  177. mindspore/ops/operations/_infer_ops.py +1 -1
  178. mindspore/ops/operations/_inner_ops.py +71 -94
  179. mindspore/ops/operations/array_ops.py +12 -146
  180. mindspore/ops/operations/comm_ops.py +42 -53
  181. mindspore/ops/operations/custom_ops.py +83 -19
  182. mindspore/ops/operations/debug_ops.py +42 -10
  183. mindspore/ops/operations/manually_defined/_inner.py +12 -0
  184. mindspore/ops/operations/manually_defined/ops_def.py +265 -10
  185. mindspore/ops/operations/math_ops.py +12 -223
  186. mindspore/ops/operations/nn_ops.py +20 -114
  187. mindspore/ops/operations/other_ops.py +7 -4
  188. mindspore/ops/operations/random_ops.py +46 -1
  189. mindspore/ops/primitive.py +18 -6
  190. mindspore/ops_generate/arg_dtype_cast.py +2 -0
  191. mindspore/ops_generate/gen_aclnn_implement.py +11 -11
  192. mindspore/ops_generate/gen_constants.py +36 -0
  193. mindspore/ops_generate/gen_ops.py +67 -52
  194. mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
  195. mindspore/ops_generate/gen_pyboost_func.py +131 -47
  196. mindspore/ops_generate/op_proto.py +10 -3
  197. mindspore/ops_generate/pyboost_utils.py +14 -1
  198. mindspore/ops_generate/template.py +43 -21
  199. mindspore/parallel/__init__.py +3 -1
  200. mindspore/parallel/_auto_parallel_context.py +28 -8
  201. mindspore/parallel/_cell_wrapper.py +83 -0
  202. mindspore/parallel/_parallel_serialization.py +47 -19
  203. mindspore/parallel/_tensor.py +81 -11
  204. mindspore/parallel/_utils.py +13 -1
  205. mindspore/parallel/algo_parameter_config.py +5 -5
  206. mindspore/parallel/checkpoint_transform.py +46 -39
  207. mindspore/parallel/cluster/process_entity/__init__.py +1 -1
  208. mindspore/parallel/cluster/process_entity/_api.py +31 -23
  209. mindspore/parallel/cluster/process_entity/_utils.py +2 -27
  210. mindspore/parallel/parameter_broadcast.py +3 -4
  211. mindspore/parallel/shard.py +162 -31
  212. mindspore/parallel/transform_safetensors.py +993 -0
  213. mindspore/pgodb140.dll +0 -0
  214. mindspore/pgort140.dll +0 -0
  215. mindspore/profiler/__init__.py +2 -1
  216. mindspore/profiler/common/constant.py +29 -0
  217. mindspore/profiler/common/registry.py +47 -0
  218. mindspore/profiler/common/util.py +28 -0
  219. mindspore/profiler/dynamic_profiler.py +694 -0
  220. mindspore/profiler/envprofiling.py +17 -19
  221. mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
  222. mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
  223. mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
  224. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
  225. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
  226. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
  227. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  228. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
  229. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
  230. mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
  231. mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
  232. mindspore/profiler/parser/base_timeline_generator.py +19 -25
  233. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
  234. mindspore/profiler/parser/framework_parser.py +1 -391
  235. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  236. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  237. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  238. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  239. mindspore/profiler/parser/memory_usage_parser.py +0 -154
  240. mindspore/profiler/parser/profiler_info.py +78 -6
  241. mindspore/profiler/profiler.py +153 -0
  242. mindspore/profiler/profiling.py +280 -412
  243. mindspore/rewrite/__init__.py +1 -2
  244. mindspore/rewrite/common/namespace.py +4 -4
  245. mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
  246. mindspore/run_check/_check_version.py +36 -103
  247. mindspore/safeguard/rewrite_obfuscation.py +591 -247
  248. mindspore/swresample-4.dll +0 -0
  249. mindspore/swscale-6.dll +0 -0
  250. mindspore/tbbmalloc.dll +0 -0
  251. mindspore/tinyxml2.dll +0 -0
  252. mindspore/train/__init__.py +4 -3
  253. mindspore/train/_utils.py +28 -2
  254. mindspore/train/amp.py +171 -53
  255. mindspore/train/callback/__init__.py +2 -2
  256. mindspore/train/callback/_callback.py +4 -4
  257. mindspore/train/callback/_checkpoint.py +85 -22
  258. mindspore/train/callback/_cluster_monitor.py +1 -1
  259. mindspore/train/callback/_flops_collector.py +1 -0
  260. mindspore/train/callback/_loss_monitor.py +3 -3
  261. mindspore/train/callback/_on_request_exit.py +134 -31
  262. mindspore/train/callback/_summary_collector.py +5 -5
  263. mindspore/train/callback/_tft_register.py +352 -0
  264. mindspore/train/dataset_helper.py +7 -3
  265. mindspore/train/metrics/metric.py +3 -3
  266. mindspore/train/metrics/roc.py +4 -4
  267. mindspore/train/mind_ir_pb2.py +44 -39
  268. mindspore/train/model.py +134 -58
  269. mindspore/train/serialization.py +336 -112
  270. mindspore/turbojpeg.dll +0 -0
  271. mindspore/utils/__init__.py +21 -0
  272. mindspore/utils/utils.py +60 -0
  273. mindspore/vcmeta.dll +0 -0
  274. mindspore/vcruntime140.dll +0 -0
  275. mindspore/vcruntime140_1.dll +0 -0
  276. mindspore/version.py +1 -1
  277. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/METADATA +6 -2
  278. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/RECORD +281 -275
  279. mindspore/include/c_api/ms/abstract.h +0 -67
  280. mindspore/include/c_api/ms/attribute.h +0 -197
  281. mindspore/include/c_api/ms/base/handle_types.h +0 -43
  282. mindspore/include/c_api/ms/base/macros.h +0 -32
  283. mindspore/include/c_api/ms/base/status.h +0 -33
  284. mindspore/include/c_api/ms/base/types.h +0 -283
  285. mindspore/include/c_api/ms/context.h +0 -102
  286. mindspore/include/c_api/ms/graph.h +0 -160
  287. mindspore/include/c_api/ms/node.h +0 -606
  288. mindspore/include/c_api/ms/tensor.h +0 -161
  289. mindspore/include/c_api/ms/value.h +0 -84
  290. mindspore/mindspore_shared_lib.dll +0 -0
  291. mindspore/nn/extend/basic.py +0 -140
  292. mindspore/nn/extend/embedding.py +0 -143
  293. mindspore/nn/extend/layer/normalization.py +0 -109
  294. mindspore/nn/extend/pooling.py +0 -117
  295. mindspore/nn/layer/embedding_service.py +0 -531
  296. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
  297. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
  298. mindspore/ops/extend/__init__.py +0 -53
  299. mindspore/ops/extend/array_func.py +0 -218
  300. mindspore/ops/extend/math_func.py +0 -76
  301. mindspore/ops/extend/nn_func.py +0 -308
  302. mindspore/ops/silent_check.py +0 -162
  303. mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
  304. mindspore/profiler/parser/msadvisor_parser.py +0 -240
  305. mindspore/train/callback/_mindio_ttp.py +0 -443
  306. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
  307. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +0 -0
  308. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
@@ -54,6 +54,39 @@ def abs(input):
     return abs_op(input)
 
 
+def acos_ext(input):
+    r"""
+    Computes arccosine of input tensors element-wise.
+
+    .. math::
+
+        out_i = \cos^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+        >>> output = ops.acos_ext(input)
+        >>> print(output)
+        [0.7377037 1.5307857 1.2661037 0.9764114]
+    """
+    return acos_ext_op(input)
+
+
 def acos(input):
     r"""
     Computes arccosine of input tensors element-wise.
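The `_ext` variants introduced throughout this file share the dtype-promotion rule stated in the docstring above: bool and integer inputs are promoted to float32 instead of being rejected. A minimal sketch of that behavior, assuming a MindSpore 2.4.0 install where `ops.acos_ext` is exported exactly as the docstring example shows (the expected dtypes come from the documented rule, not from running this):

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    # int32 input: promoted to float32 rather than raising a dtype error
    x = Tensor(np.array([0, 1]), mindspore.int32)
    print(ops.acos_ext(x).dtype)   # expected: Float32

    # float16 input: dtype is preserved
    y = Tensor(np.array([0.5]), mindspore.float16)
    print(ops.acos_ext(y).dtype)   # expected: Float16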
@@ -88,6 +121,42 @@ def acos(input):
     return acos_op(input)
 
 
+def acosh_ext(input):
+    r"""
+    Computes inverse hyperbolic cosine of the inputs element-wise.
+
+    .. math::
+
+        out_i = \cosh^{-1}(input_i)
+
+    .. note::
+        Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
+        Input range is [1, inf].
+
+    Args:
+        input (Tensor): The input tensor of inverse hyperbolic cosine function.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
+        >>> output = ops.acosh_ext(input)
+        >>> print(output)
+        [0.        0.9624236 1.7627472 5.298292 ]
+    """
+    return acosh_ext_op(input)
+
+
 def acosh(input):
     r"""
     Computes inverse hyperbolic cosine of the inputs element-wise.
@@ -96,7 +165,7 @@ def acosh(input):
 
         out_i = \cosh^{-1}(input_i)
 
-    .. warning::
+    .. note::
         Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
         Input range is [1, inf].
 
@@ -124,6 +193,52 @@ def acosh(input):
     return acosh_op(input)
 
 
+def adaptive_avg_pool1d(input, output_size):
+    r"""
+    Performs 1D adaptive average pooling on a multi-plane input signal.
+    That is, for any input size, the size of the specified output is L.
+    The number of output features is equal to the number of input features.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input of adaptive_avg_pool1d, which is a 2D or 3D tensor,
+            with float16 or float32 data type.
+        output_size (int): The target output feature size. `output_size` is an integer.
+
+    Returns:
+        Tensor, with the same type as the `input`.
+
+        Shape of the output is `input_shape[:len(input_shape) - 1] + [output_size]`.
+
+    Raises:
+        ValueError: If `output_size` is not an integer.
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not float16, float32.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor([[2, 3], [3, 4]], dtype=mindspore.float16)
+        >>> output = mint.nn.functional.adaptive_avg_pool1d(input, 3)
+        >>> print(output)
+        [[2.  2.5 3. ]
+         [3.  3.5 4. ]]
+    """
+    return adaptive_avg_pool1d_op(input, output_size)
+
+
+def adaptive_avg_pool2d_grad_ext(grad_output, x):
+    r"""
+
+    """
+    return adaptive_avg_pool2d_grad_ext_op(grad_output, x)
+
+
 def add_ext(input, other, alpha=1):
     r"""
     Adds scaled other value to input Tensor.
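The shape rule in the `adaptive_avg_pool1d` docstring, `input_shape[:-1] + [output_size]`, means only the last axis is resized regardless of whether the input is 2D or 3D. A hedged sketch of the 3D case (per the docstring this op is Ascend-only, so treat it as illustrative):

    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    # 3D input (N, C, L_in) -> (N, C, output_size)
    x = Tensor(np.ones((2, 4, 8)), mindspore.float32)
    out = mint.nn.functional.adaptive_avg_pool1d(x, 3)
    print(out.shape)   # expected: (2, 4, 3), i.e. input_shape[:-1] + [output_size]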
@@ -141,12 +256,12 @@ def add_ext(input, other, alpha=1):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         alpha (number.Number): A scaling factor applied to `other`, default 1.
 
     Returns:
@@ -180,6 +295,20 @@ def add_ext(input, other, alpha=1):
     return add_ext_op(input, other, alpha)
 
 
+def add_layer_norm_grad(dy, x1, x2, rstd, mean, gamma, dsumOptional):
+    r"""
+
+    """
+    return add_layer_norm_grad_op(dy, x1, x2, rstd, mean, gamma, dsumOptional)
+
+
+def add_layernorm_v2(x1, x2, gamma, beta, epsilon=1e-5, additionalOut=False):
+    r"""
+
+    """
+    return add_layernorm_v2_op(x1, x2, gamma, beta, epsilon, additionalOut)
+
+
 def add(input, other):
     r"""
     Adds other value to input Tensor.
@@ -200,12 +329,12 @@ def add(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
@@ -353,6 +482,72 @@ def argmax_ext(input, dim=None, keepdim=False):
     return argmax_ext_op(input, dim, keepdim)
 
 
+def argmin_ext(input, dim=None, keepdim=False):
+    r"""
+    Return the indices of the minimum values of a tensor across a dimension.
+
+    Args:
+        input (Tensor): Input tensor.
+        dim (Union[int, None], optional): Specify the axis for calculation. If `dim` is ``None`` ,
+            the indices of the minimum value within the flattened input will be returned. Default: ``None`` .
+        keepdim (bool, optional): Whether the output tensor retains the specified
+            dimension. Ignored if `dim` is None. Default: ``False`` .
+
+    Returns:
+        Tensor, indices of the minimum values of the input tensor across a dimension.
+
+    Raises:
+        TypeError: If `keepdim` is not bool.
+        ValueError: If `dim` is out of range.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore import mint
+        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
+        >>> output = mint.argmin(x, dim=-1)
+        >>> print(output)
+        [0 1 2]
+    """
+    return argmin_ext_op(input, dim, keepdim)
+
+
+def asin_ext(input):
+    r"""
+    Computes arcsine of input tensors element-wise.
+
+    .. math::
+
+        out_i = \sin^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+        >>> output = ops.asin_ext(input)
+        >>> print(output)
+        [0.8330927  0.04001068 0.30469266 0.59438497]
+    """
+    return asin_ext_op(input)
+
+
 def asin(input):
     r"""
     Computes arcsine of input tensors element-wise.
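`keepdim` in the new `argmin_ext` (surfaced as `mint.argmin`) follows the usual reduction convention: the reduced axis is kept with size 1 instead of being squeezed away. A short sketch extending the docstring's own example; the expected shapes follow from the parameter description rather than a recorded run:

    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
    print(mint.argmin(x, dim=-1).shape)                 # (3,): dim -1 squeezed away
    print(mint.argmin(x, dim=-1, keepdim=True).shape)   # expected: (3, 1)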
@@ -387,6 +582,38 @@ def asin(input):
     return asin_op(input)
 
 
+def asinh_ext(input):
+    r"""
+    Computes inverse hyperbolic sine of the input element-wise.
+
+    .. math::
+
+        out_i = \sinh^{-1}(input_i)
+
+    Args:
+        input (Tensor): The input tensor of inverse hyperbolic sine function.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
+        >>> output = ops.asinh_ext(input)
+        >>> print(output)
+        [-2.3124385  1.1947632  1.8184465  5.298342 ]
+    """
+    return asinh_ext_op(input)
+
+
 def asinh(input):
     r"""
     Computes inverse hyperbolic sine of the input element-wise.
@@ -522,7 +749,9 @@ def atan2_ext(input, other):
             its shape is able to broadcast with `input`.
 
     Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
+        Tensor, the shape is the same as the one after broadcasting.
+        The dtype of output is float32 when dtype of `input` is in
+        [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
 
     Raises:
         TypeError: If `input` or `other` is not a Tensor or scalar.
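Like the other `_ext` updates in this file, this hunk documents integer-to-float32 promotion instead of a same-dtype guarantee. A quick hedged check, assuming `ops.atan2_ext` is reachable the same way as the neighboring `_ext` functions (the expected dtype comes from the rule just added, not from a recorded run):

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    y = Tensor(np.array([1, 1]), mindspore.int64)
    x = Tensor(np.array([1, 2]), mindspore.int64)
    print(ops.atan2_ext(y, x).dtype)   # expected: Float32 under the promotion rule above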
@@ -586,6 +815,39 @@ def atan2(input, other):
     return atan2_op(input, other)
 
 
+def atan_ext(input):
+    r"""
+    Computes the trigonometric inverse tangent of the input element-wise.
+
+    .. math::
+
+        out_i = \tan^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([1.0, 0.0]), mindspore.float32)
+        >>> output = ops.atan_ext(input)
+        >>> print(output)
+        [0.7853982 0.       ]
+    """
+    return atan_ext_op(input)
+
+
 def atan(input):
     r"""
     Computes the trigonometric inverse tangent of the input element-wise.
@@ -635,14 +897,12 @@ def atanh(input):
     Args:
         input (Tensor): The shape of tensor is
             :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-            The data type should be one of the following types: float16, float32.
 
     Returns:
-        A Tensor, has the same type as the input.
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -659,6 +919,50 @@ def atanh(input):
     return atanh_op(input)
 
 
+def baddbmm(input, batch1, batch2, beta, alpha):
+    r"""
+    The result is the sum of the input and a batch matrix-matrix product of matrices in batch1 and batch2.
+    The formula is defined as follows:
+
+    .. math::
+        \text{out}_{i} = \beta \text{input}_{i} + \alpha (\text{batch1}_{i} \mathbin{@} \text{batch2}_{i})
+
+    Args:
+        input (Tensor): The input Tensor. When batch1 is a :math:`(C, W, T)` Tensor and batch2 is a
+            :math:`(C, T, H)` Tensor, input must be broadcastable with :math:`(C, W, H)` Tensor.
+        batch1 (Tensor): :math:`batch1` in the above formula. Must be 3-D Tensor, dtype is same as input.
+        batch2 (Tensor): :math:`batch2` in the above formula. Must be 3-D Tensor, dtype is same as input.
+
+    Keyword Args:
+        beta (Union[float, int], optional): multiplier for input. Default: ``1`` .
+        alpha (Union[float, int], optional): multiplier for :math:`batch1 @ batch2`. Default: ``1`` .
+
+    Returns:
+        Tensor, has the same dtype as input, shape will be :math:`(C, W, H)`.
+
+    Raises:
+        TypeError: If the type of `input`, `batch1`, `batch2` is not Tensor.
+        TypeError: If the types of `input`, `batch1`, `batch2` are different.
+        ValueError: If `batch1` and `batch2` are not 3-D tensors.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.ones([1, 3, 3]).astype(np.float32))
+        >>> batch1 = Tensor(np.ones([1, 3, 4]).astype(np.float32))
+        >>> batch2 = Tensor(np.ones([1, 4, 3]).astype(np.float32))
+        >>> output = ops.baddbmm_ext(input, batch1, batch2)
+        >>> print(output)
+        [[[5. 5. 5.]
+          [5. 5. 5.]
+          [5. 5. 5.]]]
+    """
+    return baddbmm_op(input, batch1, batch2, beta, alpha)
+
+
 def bmm_ext(input, mat2):
     r"""
     Performs batch matrix-matrix multiplication of two three-dimensional tensors.
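The `baddbmm` example output follows directly from the formula: every entry of `batch1 @ batch2` sums T = 4 ones, and with beta = alpha = 1 each output element is 1*1 + 1*4 = 5. Plain NumPy reproduces the arithmetic (NumPy stands in for the op here; the MindSpore call is the one shown in the docstring above):

    import numpy as np

    inp = np.ones((1, 3, 3), dtype=np.float32)
    b1 = np.ones((1, 3, 4), dtype=np.float32)
    b2 = np.ones((1, 4, 3), dtype=np.float32)
    beta, alpha = 1.0, 1.0

    out = beta * inp + alpha * (b1 @ b2)   # out = beta*input + alpha*(batch1 @ batch2)
    print(out[0, 0, 0])                    # 5.0, matching the docstring output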
@@ -778,7 +1082,7 @@ def broadcast_to(input, shape):
 
 cast_op=Cast()
 
-def cast(input_x, dtype):
+def cast(input, dtype):
     r"""
     Returns a tensor with the new specified data type.
 
@@ -786,17 +1090,16 @@ def cast(input_x, dtype):
         When converting complex numbers to boolean type, the imaginary part of the complex number is not
         taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
 
-    Inputs:
-        - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-          The tensor to be cast.
-        - **type** (dtype.Number) - The valid data type of the output tensor. Only constant value is allowed.
+    Args:
+        input (Union[Tensor, Number]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The tensor to be cast.
+        dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
 
-    Outputs:
-        Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
+    Returns:
+        Tensor, the shape of tensor is the same as `input`, :math:`(x_1, x_2, ..., x_R)`.
 
     Raises:
-        TypeError: If `input_x` is neither Tensor nor Number.
-        TypeError: If `type` is not a Number.
+        TypeError: If `input` is neither Tensor nor Number.
+        TypeError: If `dtype` is not a Number.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -806,16 +1109,15 @@ def cast(input_x, dtype):
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
-        >>> input_x = Tensor(input_np)
-        >>> type_dst = mindspore.int32
-        >>> cast = ops.Cast()
-        >>> output = cast(input_x, type_dst)
+        >>> input = Tensor(input_np)
+        >>> dtype = mindspore.int32
+        >>> output = ops.cast(input, dtype)
         >>> print(output.dtype)
         Int32
         >>> print(output.shape)
         (2, 3, 4, 5)
     """
-    return cast_op(input_x, dtype)
+    return cast_op(input, dtype)
 
 
 def ceil(input):
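The two `cast` hunks are a docs-and-signature modernization: the parameter `input_x` becomes `input`, and the primitive-style sections (`Inputs:`/`Outputs:`) become function-style `Args:`/`Returns:`. Both calling styles appear in this file's own examples (old and new), and the function still dispatches to a cached `Cast()` primitive, so a hedged sketch of the two forms side by side:

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.random.randn(2, 3).astype(np.float32))
    a = ops.cast(x, mindspore.int32)    # functional form, as in the updated example
    b = ops.Cast()(x, mindspore.int32)  # primitive form, as in the pre-2.4.0 example
    print(a.dtype, b.dtype)             # Int32 Int32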
@@ -1198,6 +1500,13 @@ def contiguous(input):
     return contiguous_op(input)
 
 
+def copy_ext(variable, value):
+    r"""
+
+    """
+    return copy_ext_op(variable, value)
+
+
 def copy(input):
     r"""
 
@@ -1313,16 +1622,19 @@ def cosh(input):
         out_i = \cosh(input_i)
 
     Args:
-        input (Tensor): The input tensor of hyperbolic cosine function, its data type
-            must be float16, float32, float64, complex64 or complex128.
+        input (Tensor): The input tensor of hyperbolic cosine function.
+            Supported dtypes:
+
+            - GPU/CPU: float16, float32, float64, complex64 or complex128.
+            - Ascend: bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
 
     Returns:
         Tensor, has the same shape as `input`.
 
-    Raises:
-        TypeError: If the dtype of `input` is not one of the following types:
-            float16, float32, float64, complex64, complex128.
-        TypeError: If `input` is not a Tensor.
+    :raise TypeError: If `input` is not a Tensor.
+    :raise TypeError:
+        * CPU/GPU: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
+        * Ascend: If dtype of `input` is not bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1367,14 +1679,17 @@ def cummax(input, axis):
         TypeError: If `axis` is not an int.
         ValueError: If `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
 
+    .. note::
+        O2 mode is not supported in Ascend.
+
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
+        >>> from mindspore import ops
         >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
        >>> output = ops.cummax(x, axis=0)
         >>> print(output[0])
@@ -1388,11 +1703,53 @@ def cummax(input, axis):
          [2 1 2 0]
          [2 1 2 0]]
     """
-    cummax_op = _get_cache_prim(Cummax)(axis)
-    return cummax_op(input)
+    return cummax_impl(input, axis)
 
 
-def cumsum_ext(input, dim, dtype=None):
+def cummin_ext(input, dim):
+    r"""
+    Returns a tuple (values, indices) where `values` is the cumulative minimum value of input Tensor `input`
+    along the dimension `dim`, and `indices` is the index location of each minimum value.
+
+    .. math::
+        \begin{array}{ll} \\
+            y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
+        \end{array}
+
+    Args:
+        input (Tensor): The input Tensor, The dimension must be greater than 0.
+        dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
+
+    Returns:
+        tuple [Tensor], tuple of 2 Tensors, containing the cumulative minimum of elements and the index.
+        The shape of each output tensor is the same as that of input `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `input` is a Tensor, but the type is complex or bool.
+        TypeError: If `dim` is not an int.
+        ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
+
+    .. note::
+        O2 mode is not supported in Ascend.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import Tensor, ops
+        >>> import mindspore
+        >>> a = Tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], mindspore.float32)
+        >>> output = ops.cummin_ext(a, dim=0)
+        >>> print(output[0])
+        [-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
+        >>> print(output[1])
+        [0 1 1 1 4 4]
+    """
+    return cummin_ext_op(input, dim)
+
+
+def cumsum_ext(input, dim, dtype=None):
     r"""
     Computes the cumulative sum of input Tensor along `dim`.
 
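The running-minimum formula in the new `cummin_ext` docstring is easy to sanity-check by hand, since each output element only depends on the prefix ending at its position. A plain-Python trace of the docstring's example (no MindSpore needed for the arithmetic):

    # y_i = min(x_1, ..., x_i), plus the index where that minimum first occurs
    x = [-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220]
    values, indices = [], []
    for i, v in enumerate(x):
        if not values or v < values[-1]:
            values.append(v); indices.append(i)
        else:
            values.append(values[-1]); indices.append(indices[-1])
    print(values)   # [-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298]
    print(indices)  # [0, 1, 1, 1, 4, 4]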
@@ -1731,6 +2088,104 @@ def elu(input_x, alpha=1.0):
     return elu_op(input_x)
 
 
+def embedding_apply_ada_grad(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_ada_grad_op(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_adam(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_adam_op(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_adam_w(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad=(0,), mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_adam_w_op(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_ftrl(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_ftrl_op(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_rmsprop(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_rmsprop_op(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_apply_sgd(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
+    r"""
+
+    """
+    return embedding_apply_sgd_op(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
+
+
+def embedding_feature_mapping_export(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id):
+    r"""
+
+    """
+    return embedding_feature_mapping_export_op(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id)
+
+
+def embedding_feature_mapping_file_size(file_path, table_name, global_step, embedding_dim, only_offset_flag=True):
+    r"""
+
+    """
+    return embedding_feature_mapping_file_size_op(file_path, table_name, global_step, embedding_dim, only_offset_flag)
+
+
+def embedding_feature_mapping_find(table_name, feature_size, num=1):
+    r"""
+
+    """
+    return embedding_feature_mapping_find_op(table_name, feature_size, num)
+
+
+def embedding_feature_mapping_import(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag=True, num=1):
+    r"""
+
+    """
+    return embedding_feature_mapping_import_op(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num)
+
+
+def embedding_feature_mapping_insert(table_name, num, feature_id, offset_id):
+    r"""
+
+    """
+    return embedding_feature_mapping_insert_op(table_name, num, feature_id, offset_id)
+
+
+def embedding_feature_mapping_table_size(table_name):
+    r"""
+
+    """
+    return embedding_feature_mapping_table_size_op(table_name)
+
+
+def embedding_feature_mapping_v2(table_name, feature_id, table_total_size, table_actual_size):
+    r"""
+
+    """
+    return embedding_feature_mapping_v2_op(table_name, feature_id, table_total_size, table_actual_size)
+
+
+def embedding_table_evict(var_handle, global_step, steps_to_live=0):
+    r"""
+
+    """
+    return embedding_table_evict_op(var_handle, global_step, steps_to_live)
+
+
 def equal(input, other):
     r"""
     Computes the equivalence between two tensors element-wise.
@@ -1834,15 +2289,19 @@ def erfc(input):
         input (Tensor): The input tensor of the complementary error function, :math:`x` in the above formula.
             Supported dtypes:
 
-            - Ascend: float16, float32.
+            - Ascend: float16, float32, float64, int64, bool, bfloat16.
             - GPU/CPU: float16, float32, float64.
 
     Returns:
-        Tensor, has the same shape and dtype as `input`.
+        Tensor.
+        The dtype of output is float32 when dtype of `input` is in
+        [bool, int64]. Otherwise output has the same dtype as the `input`.
 
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64.
+    :raise TypeError: If `input` is not a Tensor.
+    :raise TypeError: If dtype of `input` is not the following:
+
+        * Ascend: float16, float32, float64, int64, bool, bfloat16.
+        * GPU/CPU: float16, float32, float64.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1851,17 +2310,17 @@ def erfc(input):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
-        >>> output = ops.erfc(x)
+        >>> input = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
+        >>> output = ops.erfc(input)
         >>> print(output)
-        [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
+        [1.8427008e+00 1.0000000e+00 1.5729921e-01 4.6777348e-03 2.2090497e-05]
     """
     return erfc_op(input)
 
 
 def erfinv(input):
     r"""
-    Returns the result of the inverse error function with `input`, which is defined in the range `(-1, 1)` as:
+    Returns the result of the inverse error function with `input`. It is defined in the range `(-1, 1)` as:
 
     .. math::
 
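The corrected `erfc` example output is not cosmetic: the new numbers agree with the true complementary error function to float32 precision, while the old ones were off from the third significant digit on. Python's standard library can confirm this without MindSpore:

    import math

    for v in [-1, 0, 1, 2, 3]:
        print(math.erfc(v))
    # 1.8427007929497148, 1.0, 0.15729920705028513,
    # 0.004677734981063128, 2.2090496998585445e-05  -> matches the updated example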
@@ -2119,7 +2578,7 @@ def fft2(input, s=None, dim=(-2, -1), norm=None):
         dim (tuple[int], optional): The dimension along which to take the one dimensional `fft2`.
             Default: ``(-2, -1)`` , which means transform the last two dimension of `input`.
         norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
-            Three modes are defined as,
+            Three modes are defined as, where :math: `n = prod(s)`
 
             - ``"backward"`` (no normalization).
             - ``"forward"`` (normalize by :math:`1/n`).
@@ -2138,6 +2597,7 @@ def fft2(input, s=None, dim=(-2, -1), norm=None):
         ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
         ValueError: If `dim` has duplicate values.
         ValueError: If `s` is less than 1.
+        ValueError: If `s` and `dim` are given but have different shapes.
         ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
 
     Supported Platforms:
@@ -2147,12 +2607,12 @@ def fft2(input, s=None, dim=(-2, -1), norm=None):
         >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> input = ops.ones((4, 4))
-        >>> ops.fft2(input, s=(4, 4), dim=(0, 1), norm="backward")
-        Tensor(shape=[4, 4], dtype=Complex64, value=
-        [[16+0j, 0+0j, 0+0j, 0+0j],
-         [0+0j, 0+0j, 0+0j, 0+0j],
-         [0+0j, 0+0j, 0+0j, 0+0j],
-         [0+0j, 0+0j, 0+0j, 0+0j]])
+        >>> out = ops.fft2(input, s=(4, 4), dim=(0, 1), norm="backward")
+        >>> print(out)
+        [[16.+0.j  0.+0.j  0.+0.j  0.+0.j]
+         [ 0.+0.j  0.+0.j  0.+0.j  0.+0.j]
+         [ 0.+0.j  0.+0.j  0.+0.j  0.+0.j]
+         [ 0.+0.j  0.+0.j  0.+0.j  0.+0.j]]
     """
     return fft2_op(input, s, dim, norm)
 
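The newly added clarification `n = prod(s)` pins down what each normalization mode divides by. NumPy's FFT follows the same convention, so it can illustrate the three modes on the docstring's 4x4 ones input (a cross-check sketch in NumPy, not MindSpore code):

    import numpy as np

    x = np.ones((4, 4))
    n = x.size                       # n = prod(s) = 16
    dc = np.fft.fft2(x)[0, 0].real   # the only non-zero bin for an all-ones input
    print(dc)                        # 16.0 -> "backward": no scaling
    print(dc / n)                    # 1.0  -> "forward":  scale by 1/n
    print(dc / np.sqrt(n))           # 4.0  -> "ortho":    scale by 1/sqrt(n)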
@@ -2205,12 +2665,51 @@ def fft(input, n=None, dim=-1, norm=None):
         >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
-        >>> ops.fft(input)
-        Tensor(shape=[4], dtype=Complex64, value= [-0.588551+0j, 2.15252-0.461212j, 2.7809+0j, 2.15252+0.461212j])
+        >>> out = ops.fft(input, n=4, dim=-1, norm="backward")
+        >>> print(out)
+        [-0.5885514+0.j          2.1525173-0.46121222j  2.7808986+0.j
+          2.1525173+0.46121222j]
     """
     return fft_op(input, n, dim, norm)
 
 
+def fftfreq(n, d=1.0, dtype=None):
+    r"""
+    Computes the discrete Fourier Transform sample frequencies for a signal of size `n`.
+    For instance, given a length `n` and a sample spacing `d` , the returned result `f` is:
+
+    .. math::
+        f = [0, 1, ..., (n - 1) // 2, -(n // 2), ..., -1] / (d * n)
+
+    Note:
+        - `fftfreq` is currently only used in `mindscience` scientific computing scenarios and
+          does not support other usage scenarios.
+        - `fftfreq` is not supported on Windows platform yet.
+
+    Args:
+        n (int): Window length.
+        d (float, optional): Sample spacing (inverse of the sampling rate). Default: ``1.0`` .
+        dtype (mindspore.dtype, optional): The dtype of the returned frequencies. Default: ``None`` represents float32.
+
+    Returns:
+        Tensor, Array of length ``n`` containing the sample frequencies.
+
+    Raises:
+        ValueError: If `n` is less than 1.
+
+    Supported Platforms:
+        ``Ascend`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import ops
+        >>> out = ops.fftfreq(n=4, d=1.0)
+        >>> print(out)
+        [ 0.    0.25 -0.5  -0.25]
+    """
+    return fftfreq_op(n, d, dtype)
+
+
 def fftn(input, s=None, dim=None, norm=None):
     r"""
     Computes the N dimensional discrete Fourier transform of `input`.
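The `fftfreq` formula is the standard FFT bin-frequency layout: non-negative frequencies first, then the negative ones. NumPy implements the same definition, which makes the docstring's example easy to reproduce (a sketch; `ops.fftfreq` itself is the new MindSpore entry point above):

    import numpy as np

    n, d = 4, 1.0
    k = [0, 1, -2, -1]                # index pattern [0, 1, ..., (n-1)//2, -(n//2), ..., -1] for n = 4
    print([i / (d * n) for i in k])   # [0.0, 0.25, -0.5, -0.25]
    print(np.fft.fftfreq(n, d))       # [ 0.    0.25 -0.5  -0.25], matching ops.fftfreq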
@@ -2232,7 +2731,7 @@ def fftn(input, s=None, dim=None, norm=None):
         dim (tuple[int], optional): The dimension along which to take the one dimensional `fftn`.
             Default: ``None`` , which means transform the all dimension of `input`, or the last `len(s)` dimensions if s is given.
         norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
-            Three modes are defined as,
+            Three modes are defined as, where :math: `n = prod(s)`
 
             - ``"backward"`` (no normalization).
             - ``"forward"`` (normalize by :math:`1/n`).
@@ -2261,12 +2760,12 @@ def fftn(input, s=None, dim=None, norm=None):
         >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> input = ops.ones((2, 2, 2))
-        >>> ops.fftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
-        Tensor(shape=[2, 2, 2], dtype=Complex64, value=
-        [[[8+0j, 0+0j],
-          [0+0j, 0+0j]],
-         [[0+0j, 0+0j],
-          [0+0j, 0+0j]]])
+        >>> out = ops.fftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
+        >>> print(out)
+        [[[8.+0.j 0.+0.j]
+          [0.+0.j 0.+0.j]]
+         [[0.+0.j 0.+0.j]
+          [0.+0.j 0.+0.j]]]
     """
     return fftn_op(input, s, dim, norm)
 
@@ -2313,8 +2812,6 @@ def flatten_ext(input, start_dim=0, end_dim=-1):
 
     Args:
         input (Tensor): The input Tensor.
-
-    Keyword Args:
         start_dim (int, optional): The first dimension to flatten. Default: ``0`` .
         end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .
 
@@ -2334,9 +2831,9 @@ def flatten_ext(input, start_dim=0, end_dim=-1):
     Examples:
         >>> import mindspore
         >>> import numpy as np
-        >>> from mindspore import Tensor, mint
+        >>> from mindspore import Tensor, ops
         >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
-        >>> output = mint.flatten(input_x)
+        >>> output = ops.auto_generate.flatten_ext(input_x)
         >>> print(output.shape)
         (24,)
     """
@@ -2569,7 +3066,7 @@ def gather(input_params, input_indices, axis, batch_dims=0):
            On CPU and GPU, an error is raised if an out of bound indice is found. On Ascend, the results may be
            undefined.
         2. The data type of input_params cannot be
-           `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
+           `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ on Ascend
            platform currently.
 
     Args:
@@ -2758,11 +3255,11 @@ def greater(input, other):
 
     Args:
         input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ .
         other (Union[Tensor, Number]): The second input, which is a Number or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
@@ -2783,42 +3280,14 @@ def greater(input, other):
     return greater_op(input, other)
 
 
-def deepcopy(input_x):
-    r"""
-    Returns a deepcopy of input tensor.
-
-    Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
-    Returns:
-        Tensor, a deepcopy of `input_x`.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
-        >>> output = ops.deepcopy(input)
-        >>> print(output)
-        [[0 1]
-         [2 1]]
-    """
-    return identity_op(input_x)
-
-
-def ifft2(input, s=None, dim=(-2, -1), norm=None):
+def hfft2(input, s=None, dim=(-2, -1), norm=None):
     r"""
-    Computes the two dimensional inverse discrete Fourier transform of `input`.
+    Calculates the two dimensional discrete Fourier transform of a Hermitian symmetric `input`.
 
     Note:
-        - `ifft2` is currently only used in `mindscience` scientific computing scenarios and
+        - `hfft2` is currently only used in `mindscience` scientific computing scenarios and
           does not support other usage scenarios.
-        - `ifft2` is not supported on Windows platform yet.
+        - `hfft2` is not supported on Windows platform yet.
 
     Args:
         input (Tensor): The input tensor.
@@ -2827,20 +3296,21 @@ def ifft2(input, s=None, dim=(-2, -1), norm=None):
             - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
 
         s (tuple[int], optional): Length of the transformed `dim` of the result.
-            If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifft2`.
+            If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `hfft2`.
             Default: ``None`` , which does not need to process `input`.
-        dim (tuple[int], optional): The dimension along which to take the one dimensional `ifft2`.
+        dim (tuple[int], optional): The dimension along which to take the one dimensional `hfft2`.
             Default: ``(-2, -1)`` , which means transform the last two dimension of `input`.
         norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
-            Three modes are defined as,
+            Three modes are defined as, where :math: `n = prod(s)`
 
             - ``"backward"`` (no normalization).
-            - ``"forward"`` (normalize by :math:`1*n`).
-            - ``"ortho"`` (normalize by :math:`1*\sqrt{n}`).
+            - ``"forward"`` (normalize by :math:`1/n`).
+            - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
 
     Returns:
-        Tensor, The result of `ifft2()` function. The default is the same shape as `input`.
-        If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
+        Tensor, The result of `hfft2()` function.
+        If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
+        result.shape[dim[-1]] is :math:`(s[-1] - 1) * 2`, otherwise :math:`(input.shape[dim[-1]] - 1) * 2`.
         When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
         When the input is float64 or complex128, the return value type is complex128.
 
@@ -2851,6 +3321,7 @@ def ifft2(input, s=None, dim=(-2, -1), norm=None):
2851
3321
  ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
2852
3322
  ValueError: If `dim` has duplicate values.
2853
3323
  ValueError: If `s` is less than 1.
3324
+ ValueError: If `s` and `dim` are given but have different shapes.
2854
3325
  ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
2855
3326
 
2856
3327
  Supported Platforms:
@@ -2860,24 +3331,24 @@ def ifft2(input, s=None, dim=(-2, -1), norm=None):
2860
3331
  >>> import mindspore
2861
3332
  >>> from mindspore import Tensor, ops
2862
3333
  >>> input = ops.ones((4, 4))
2863
- >>> ops.ifft2(input, s=(4, 4), dim=(0, 1), norm="backward")
2864
- Tensor(shape=[4, 4], dtype=Complex64, value=
2865
- [[1+0j, 0+0j, 0+0j, 0+0j],
2866
- [0+0j, 0+0j, 0+0j, 0+0j],
2867
- [0+0j, 0+0j, 0+0j, 0+0j],
2868
- [0+0j, 0+0j, 0+0j, 0+0j]])
3334
+ >>> out = ops.hfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
3335
+ >>> print(out)
3336
+ [[16. 0. 0. 0.]
3337
+ [ 0. 0. 0. 0.]
3338
+ [ 0. 0. 0. 0.]
3339
+ [ 0. 0. 0. 0.]]
2869
3340
  """
2870
- return ifft2_op(input, s, dim, norm)
3341
+ return hfft2_op(input, s, dim, norm)
2871
3342
 
2872
3343
 
2873
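The three `norm` modes above differ only by a constant factor of :math:`n = prod(s)`; a minimal sanity-check sketch (assumes a non-Windows platform, per the Note, and the semantics documented above):

    from mindspore import ops

    x = ops.ones((4, 4))
    back = ops.hfft2(x, s=(4, 4), dim=(0, 1), norm="backward")
    fwd = ops.hfft2(x, s=(4, 4), dim=(0, 1), norm="forward")
    ortho = ops.hfft2(x, s=(4, 4), dim=(0, 1), norm="ortho")
    n = 4 * 4  # n = prod(s)
    # All three should print 16.0 once the normalization factor is undone.
    print(back[0][0], fwd[0][0] * n, ortho[0][0] * (n ** 0.5))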
- def ifft(input, n=None, dim=-1, norm=None):
+ def hfft(input, n=None, dim=-1, norm=None):
  r"""
- Calculates the inverse of `fft()`.
+ Calculates the one dimensional discrete Fourier transform of a Hermitian symmetric `input` signal.

  Note:
- - `ifft` is currently only used in `mindscience` scientific computing scenarios and
+ - `hfft` is currently only used in `mindscience` scientific computing scenarios and
  does not support other usage scenarios.
- - `ifft` is not supported on Windows platform yet.
+ - `hfft` is not supported on Windows platform yet.

  Args:
  input (Tensor): The input tensor.
@@ -2886,19 +3357,22 @@ def ifft(input, n=None, dim=-1, norm=None):
  - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.

  n (int, optional): Length of the transformed `dim` of the result.
- If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `ifft`.
+ If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `hfft`.
  Default: ``None`` , in which case `input` is not processed.
- dim (int, optional): The dimension along which to take the one dimensional `ifft`.
+ dim (int, optional): The dimension along which to take the one dimensional `hfft`.
  Default: ``-1`` , which means transform the last dimension of `input`.
  norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
  Three modes are defined as follows:

  - ``"backward"`` (no normalization).
- - ``"forward"`` (normalize by :math:`1*n`).
- - ``"ortho"`` (normalize by :math:`1*\sqrt{n}`).
+ - ``"forward"`` (normalize by :math:`1/n`).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).

  Returns:
- Tensor, The result of `ifft()` function.
+ Tensor, the result of the `hfft()` function.
+ If `n` is given, result.shape[dim] is :math:`(n - 1) * 2`, otherwise :math:`(input.shape[dim] - 1) * 2`.
+ When the `input` is int16, int32, int64, float16, float32, complex64, the return value type is float32.
+ When the `input` is float64 or complex128, the return value type is float64.

  Raises:
  TypeError: If the `input` type is not Tensor.
@@ -2915,20 +3389,21 @@ def ifft(input, n=None, dim=-1, norm=None):
  >>> import mindspore
  >>> from mindspore import Tensor, ops
  >>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
- >>> ops.ifft(input)
- Tensor(shape=[4], dtype=Complex64, value= [-0.147138+0j, 0.538129+0.115303j, 0.695225+0j, 0.538129-0.115303j])
+ >>> out = ops.hfft(input, n=4, dim=-1, norm="backward")
+ >>> print(out)
+ [-0.12733912 2.1525173 2.3196864 2.1525173 ]
  """
- return ifft_op(input, n, dim, norm)
+ return hfft_op(input, n, dim, norm)

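For an independent reference, `numpy.fft.hfft` follows the same Hermitian-FFT convention, so it reproduces the documented Example output (the agreement is an assumption based on the semantics described above):

    import numpy as np

    a = np.array([1.6243454, -0.6117564, -0.5281718, -1.0729686])
    # Hermitian FFT of the same input as the Example above.
    print(np.fft.hfft(a, n=4))  # [-0.1273391  2.1525173  2.3196864  2.1525173]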
- def ifftn(input, s=None, dim=None, norm=None):
+ def hfftn(input, s=None, dim=None, norm=None):
  r"""
- Computes the N dimensional inverse discrete Fourier transform of `input`.
+ Calculates the N dimensional discrete Fourier transform of a Hermitian symmetric `input`.

  Note:
- - `ifftn` is currently only used in `mindscience` scientific computing scenarios and
+ - `hfftn` is currently only used in `mindscience` scientific computing scenarios and
  does not support other usage scenarios.
- - `ifftn` is not supported on Windows platform yet.
+ - `hfftn` is not supported on Windows platform yet.

  Args:
  input (Tensor): The input tensor.
@@ -2937,20 +3412,21 @@ def ifftn(input, s=None, dim=None, norm=None):
  - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.

  s (tuple[int], optional): Length of the transformed `dim` of the result.
- If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifftn`.
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `hfftn`.
  Default: ``None`` , in which case `input` is not processed.
- dim (tuple[int], optional): The dimension along which to take the one dimensional `ifftn`.
- Default: ``None`` , which means transform the all dimension of `input`, or the last `len(s)` dimensions if s is given.
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `hfftn`.
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
  norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
- Three modes are defined as,
+ Three modes are defined as follows, where :math:`n = prod(s)`:

  - ``"backward"`` (no normalization).
- - ``"forward"`` (normalize by :math:`1*n`).
- - ``"ortho"`` (normalize by :math:`1*\sqrt{n}`).
+ - ``"forward"`` (normalize by :math:`1/n`).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).

  Returns:
- Tensor, The result of `ifftn()` function. The default is the same shape as `input`.
- If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
+ Tensor, the result of the `hfftn()` function.
+ If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
+ result.shape[dim[-1]] is :math:`(s[-1] - 1) * 2`, otherwise :math:`(input.shape[dim[-1]] - 1) * 2`.
  When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
  When the input is float64 or complex128, the return value type is complex128.

@@ -2970,71 +3446,634 @@ def ifftn(input, s=None, dim=None, norm=None):
  Examples:
  >>> import mindspore
  >>> from mindspore import Tensor, ops
- >>> input = ops.ones((2, 2, 2))
- >>> ops.ifftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
- Tensor(shape=[2, 2, 2], dtype=Complex64, value=
- [[[1+0j, 0+0j],
- [0+0j, 0+0j]],
- [[0+0j, 0+0j],
- [0+0j, 0+0j]]])
+ >>> input = ops.ones((4, 4))
+ >>> out = ops.hfftn(input, s=(4, 4), dim=(0, 1), norm="backward")
+ >>> print(out)
+ [[16. 0. 0. 0.]
+ [ 0. 0. 0. 0.]
+ [ 0. 0. 0. 0.]
+ [ 0. 0. 0. 0.]]
  """
- return ifftn_op(input, s, dim, norm)
+ return hfftn_op(input, s, dim, norm)

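For a 2-D input and dim=(0, 1), `hfftn` and `hfft2` describe the same transform, so their results should coincide; a minimal cross-check sketch (this equivalence is an assumption based on the two docstrings above):

    from mindspore import ops

    x = ops.ones((4, 4))
    a = ops.hfftn(x, s=(4, 4), dim=(0, 1), norm="backward")
    b = ops.hfft2(x, s=(4, 4), dim=(0, 1), norm="backward")
    print((a - b).abs().max())  # expected: 0.0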
- def ifftshift(input, dim=None):
+ def histc_ext(input, bins=100, min=0, max=0):
  r"""
- The inverse of :func:`mindspore.ops.fftshift` .
+ Computes the histogram of a tensor.

- Note:
- - `ifftshift` is currently only used in `mindscience` scientific computing scenarios and
- dose not support other usage scenarios.
- - `ifftshift` is not supported on Windows platform yet.
+ The elements are sorted into equal-width bins between `min` and `max`.
+ If `min` and `max` are both zero, the minimum and maximum values of the data are used.
+
+ Elements lower than `min` or higher than `max` are ignored.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+ If `input` is int64, valid values must fit within int32; exceeding this range may cause precision errors.

  Args:
- input (Tensor): Input tensor.
- dim (Union[int, list(int), tuple(int)], optional): The dimensions which to shift.
- Default is ``None``, which shifts all dimensions.
+ input (Tensor): the input tensor.
+ bins (int, optional): Number of histogram bins. If specified, must be positive. Default: ``100`` .
+ min (int, float, optional): the lower end of the range (inclusive). Default: ``0`` .
+ max (int, float, optional): the upper end of the range (inclusive). Default: ``0`` .

  Returns:
- output (Tensor), the shifted tensor with the same shape and dtype as `input`.
+ A 1-D Tensor, has the same type as `input` with the shape :math:`(bins, )`.

  Raises:
- TypeError: If `input` is not a tensor.
- TypeError: If the type/dtype of `dim` is not int.
- ValueError: If `dim` is out of the range of :math:`[-input.ndim, input.ndim)`.
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `input` datatype is not in the support list.
+ TypeError: If attr `min` or `max` is not float or int.
+ TypeError: If attr `bins` is not int.
+ ValueError: If attr value `min` > `max`.
+ ValueError: If attr `bins` <= 0.

  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend``

  Examples:
- >>> from mindspore.ops import fftshift, ifftshift
- >>> from mindspore import Tensor
- >>> from mindspore import dtype as mstype
- >>> input = Tensor([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], dtype=mstype.int32)
- >>> ifftshift(fftshift(input))
- Tensor(shape=[10], dtype=Int32, value= [ 0, 1, 2, 3, 4, -5, -4, -3, -2, -1])
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor([1., 2, 1])
+ >>> y = ops.histc_ext(x, bins=4, min=0, max=3)
+ >>> print(y)
+ [0 2 1 0]
  """
- return ifftshift_op(input, dim)
+ return histc_ext_op(input, bins, min, max)

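The documented result can be reproduced with NumPy, which uses the same equal-width binning over an explicit range:

    import numpy as np

    x = np.array([1., 2., 1.])
    counts, edges = np.histogram(x, bins=4, range=(0, 3))
    print(counts)  # [0 2 1 0], matching the histc_ext Example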
- def unfold_ext(input, kernel_size, dilation=1, padding=0, stride=1):
+ def hardshrink(input, lambd=0.5):
  r"""
- Extracts sliding local blocks from a batched input tensor.
+ Hard Shrink activation function. Calculates the output according to the input elements.

- Consider a batched input tensor of shape :math:`(N, C, *)`,
- where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
- and :math:`*` represent arbitrary spatial dimensions. This operation flattens
- each sliding `Kernel_size`- sized block within the spatial dimensions
- of `input` into a column (i.e., last dimension) of a 3-D output
- tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)`, where
- :math:`C \times \prod(\text{kernel_size})` is the total number of values
- within each block (a block has :math:`\prod(\text{kernel_size})` spatial
- locations each containing a `C`-channeled vector), and :math:`L` is
- the total number of such blocks:
+ The formula is defined as follows:

  .. math::
- L = \prod_d \left\lfloor\frac{\text{spatial_size}[d] + 2 \times \text{padding}[d] %
- - \text{dilation}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
+ \text{HardShrink}(x) =
+ \begin{cases}
+ x, & \text{ if } x > \lambda \\
+ x, & \text{ if } x < -\lambda \\
+ 0, & \text{ otherwise }
+ \end{cases}
+
+ HShrink Activation Function Graph:
+
+ .. image:: ../images/HShrink.png
+ :align: center
+
+ Args:
+ input (Tensor): The input of Hard Shrink. Supported dtypes:
+
+ - Ascend: float16, float32, bfloat16.
+ - CPU/GPU: float16, float32.
+ lambd (number, optional): The threshold :math:`\lambda` defined by the Hard Shrink formula.
+ Default: ``0.5`` .
+
+ Returns:
+ Tensor, has the same data type and shape as the input `input`.
+
+ Raises:
+ TypeError: If `lambd` is not a float, int or bool.
+ TypeError: If `input` is not a tensor.
+ TypeError: If dtype of `input` is not float16, float32 or bfloat16.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([[0.5, 1, 2.0], [0.0533, 0.0776, -2.1233]]), mindspore.float32)
+ >>> output = ops.hardshrink(input)
+ >>> print(output)
+ [[ 0. 1. 2. ]
+ [ 0. 0. -2.1233]]
+ """
+ return hshrink_impl(input, lambd)
+
+
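A NumPy reference implementation of the HardShrink formula above, useful for spot-checking the Example output (`hardshrink_ref` is an illustrative helper, not part of the package):

    import numpy as np

    def hardshrink_ref(x, lambd=0.5):
        # zero out values with |x| <= lambd, keep the rest
        return np.where(np.abs(x) > lambd, x, 0.0)

    x = np.array([[0.5, 1.0, 2.0], [0.0533, 0.0776, -2.1233]], dtype=np.float32)
    print(hardshrink_ref(x))  # [[ 0.  1.  2.], [ 0.  0.  -2.1233]]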
+ def hardsigmoid(input):
+ r"""
+ Hard Sigmoid activation function. Calculates the output according to the input elements.
+
+ Hard Sigmoid is defined as:
+
+ .. math::
+ \text{HardSigmoid}(input) =
+ \begin{cases}
+ 0, & \text{ if } input \leq -3, \\
+ 1, & \text{ if } input \geq +3, \\
+ input/6 + 1/2, & \text{ otherwise }
+ \end{cases}
+
+ HSigmoid Activation Function Graph:
+
+ .. image:: ../images/HSigmoid.png
+ :align: center
+
+ Args:
+ input (Tensor): The input Tensor.
+
+ Returns:
+ Tensor, with the same type and shape as the `input`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If dtype of `input` is neither int nor float.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
+ >>> output = ops.hardsigmoid(input)
+ >>> print(output)
+ [0.3333 0.1666 0.5 0.8335 0.6665]
+ """
+ return hsigmoid_op(input)
+
+
+ def hardswish(input):
+ r"""
+ Hard Swish activation function. The input is a Tensor with any valid shape.
+
+ Hard Swish is defined as:
+
+ .. math::
+ \text{Hardswish}(input) =
+ \begin{cases}
+ 0, & \text{ if } input \leq -3, \\
+ input, & \text{ if } input \geq +3, \\
+ input*(input + 3)/6, & \text{ otherwise }
+ \end{cases}
+
+ HSwish Activation Function Graph:
+
+ .. image:: ../images/HSwish.png
+ :align: center
+
+ Args:
+ input (Tensor): The input Tensor.
+
+ Returns:
+ Tensor, with the same type and shape as the `input`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If dtype of `input` is neither int nor float.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
+ >>> output = ops.hardswish(input)
+ >>> print(output)
+ [-0.3333 -0.3333 0 1.667 0.6665]
+ """
+ return hswish_op(input)
+
+
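Likewise, the Hardswish piecewise definition above collapses to a single clip expression; a NumPy sketch (`hardswish_ref` is an illustrative helper, not part of the package):

    import numpy as np

    def hardswish_ref(x):
        # equals 0 for x <= -3, x for x >= 3, and x*(x+3)/6 in between
        return x * np.clip(x + 3, 0, 6) / 6

    x = np.array([-1, -2, 0, 2, 1], dtype=np.float32)
    print(hardswish_ref(x))  # approx. [-0.3333 -0.3333  0.  1.6667  0.6667]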
+ def deepcopy(input_x):
+ r"""
+ Returns a deepcopy of input tensor.
+
+ Args:
+ input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+
+ Returns:
+ Tensor, a deepcopy of `input_x`.
+
+ Raises:
+ TypeError: If `input_x` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
+ >>> output = ops.deepcopy(input)
+ >>> print(output)
+ [[0 1]
+ [2 1]]
+ """
+ return identity_op(input_x)
+
+
+ def ifft2(input, s=None, dim=(-2, -1), norm=None):
+ r"""
+ Computes the two dimensional inverse discrete Fourier transform of `input`.
+
+ Note:
+ - `ifft2` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `ifft2` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): The input tensor.
+ Supported dtypes:
+
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
+
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifft2`.
+ Default: ``None`` , in which case `input` is not processed.
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `ifft2`.
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
+ Three modes are defined as follows, where :math:`n = prod(s)`:
+
+ - ``"backward"`` (normalize by :math:`1/n`).
+ - ``"forward"`` (no normalization).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
+
+ Returns:
+ Tensor, the result of the `ifft2()` function. The default is the same shape as `input`.
+ If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
+ When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
+ When the input is float64 or complex128, the return value type is complex128.
+
+ Raises:
+ TypeError: If the `input` type is not Tensor.
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
+ TypeError: If the type/dtype of `s` and `dim` is not int.
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
+ ValueError: If `dim` has duplicate values.
+ ValueError: If `s` is less than 1.
+ ValueError: If `s` and `dim` are given but have different shapes.
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = ops.ones((4, 4))
+ >>> out = ops.ifft2(input, s=(4, 4), dim=(0, 1), norm="backward")
+ >>> print(out)
+ [[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
+ [0.+0.j 0.+0.j 0.+0.j 0.+0.j]
+ [0.+0.j 0.+0.j 0.+0.j 0.+0.j]
+ [0.+0.j 0.+0.j 0.+0.j 0.+0.j]]
+ """
+ return ifft2_op(input, s, dim, norm)
+
+
+ def ifft(input, n=None, dim=-1, norm=None):
+ r"""
+ Calculates the inverse of `fft()`.
+
+ Note:
+ - `ifft` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `ifft` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): The input tensor.
+ Supported dtypes:
+
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
+
+ n (int, optional): Length of the transformed `dim` of the result.
+ If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `ifft`.
+ Default: ``None`` , in which case `input` is not processed.
+ dim (int, optional): The dimension along which to take the one dimensional `ifft`.
+ Default: ``-1`` , which means transform the last dimension of `input`.
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
+ Three modes are defined as follows:
+
+ - ``"backward"`` (normalize by :math:`1/n`).
+ - ``"forward"`` (no normalization).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
+
+ Returns:
+ Tensor, the result of the `ifft()` function. The default is the same shape as `input`.
+ If `n` is given, the size of the `dim` axis is changed to `n`.
+ When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
+ When the input is float64 or complex128, the return value type is complex128.
+
+ Raises:
+ TypeError: If the `input` type is not Tensor.
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
+ TypeError: If `n` or `dim` type is not int.
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
+ ValueError: If `n` is less than 1.
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
+ >>> out = ops.ifft(input, n=4, dim=-1, norm="backward")
+ >>> print(out)
+ [-0.14713785+0.j 0.5381293 +0.11530305j 0.69522464+0.j
+ 0.5381293 -0.11530305j]
+ """
+ return ifft_op(input, n, dim, norm)
+
+
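`numpy.fft.ifft` uses the same ``"backward"`` convention (divide by :math:`n`), so it reproduces the documented Example output (an assumption based on the semantics above):

    import numpy as np

    a = np.array([1.6243454, -0.6117564, -0.5281718, -1.0729686])
    print(np.fft.ifft(a))
    # [-0.14713785+0.j  0.5381293+0.11530305j  0.69522464+0.j  0.5381293-0.11530305j]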
+ def ifftn(input, s=None, dim=None, norm=None):
+ r"""
+ Computes the N dimensional inverse discrete Fourier transform of `input`.
+
+ Note:
+ - `ifftn` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `ifftn` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): The input tensor.
+ Supported dtypes:
+
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
+
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifftn`.
+ Default: ``None`` , in which case `input` is not processed.
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `ifftn`.
+ Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
+ Three modes are defined as follows, where :math:`n = prod(s)`:
+
+ - ``"backward"`` (normalize by :math:`1/n`).
+ - ``"forward"`` (no normalization).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
+
+ Returns:
+ Tensor, the result of the `ifftn()` function. The default is the same shape as `input`.
+ If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
+ When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
+ When the input is float64 or complex128, the return value type is complex128.
+
+ Raises:
+ TypeError: If the `input` type is not Tensor.
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
+ TypeError: If the type/dtype of `s` and `dim` is not int.
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
+ ValueError: If `dim` has duplicate values.
+ ValueError: If `s` is less than 1.
+ ValueError: If `s` and `dim` are given but have different shapes.
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = ops.ones((2, 2, 2))
+ >>> out = ops.ifftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
+ >>> print(out)
+ [[[1.+0.j 0.+0.j]
+ [0.+0.j 0.+0.j]]
+ [[0.+0.j 0.+0.j]
+ [0.+0.j 0.+0.j]]]
+ """
+ return ifftn_op(input, s, dim, norm)
+
+
+ def ifftshift(input, dim=None):
+ r"""
+ The inverse of :func:`mindspore.ops.fftshift` .
+
+ Note:
+ - `ifftshift` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `ifftshift` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): Input tensor.
+ dim (Union[int, list(int), tuple(int)], optional): The dimensions which to shift.
+ Default is ``None``, which shifts all dimensions.
+
+ Returns:
+ output (Tensor), the shifted tensor with the same shape and dtype as `input`.
+
+ Raises:
+ TypeError: If `input` is not a tensor.
+ TypeError: If the type/dtype of `dim` is not int.
+ ValueError: If `dim` is out of the range of :math:`[-input.ndim, input.ndim)`.
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> from mindspore.ops import fftshift, ifftshift
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> input = Tensor([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], dtype=mstype.int32)
+ >>> ifftshift(fftshift(input))
+ Tensor(shape=[10], dtype=Int32, value= [ 0, 1, 2, 3, 4, -5, -4, -3, -2, -1])
+ """
+ return ifftshift_op(input, dim)
+
+
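The round-trip property in the doctest above can be checked against NumPy, whose `ifftshift` follows the same convention:

    import numpy as np

    x = np.array([0, 1, 2, 3, 4, -5, -4, -3, -2, -1])
    print(np.array_equal(np.fft.ifftshift(np.fft.fftshift(x)), x))  # True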
+ def ihfft2(input, s=None, dim=(-2, -1), norm=None):
+ r"""
+ Computes the two dimensional inverse discrete Fourier transform of real `input`.
+
+ Note:
+ - `ihfft2` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `ihfft2` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): The input tensor.
+ Supported dtypes:
+
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
+
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ihfft2`.
+ Default: ``None`` , in which case `input` is not processed.
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `ihfft2`.
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
+ Three modes are defined as follows, where :math:`n = prod(s)`:
+
+ - ``"backward"`` (normalize by :math:`1/n`).
+ - ``"forward"`` (no normalization).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
+
+ Returns:
+ Tensor, the result of the `ihfft2()` function.
+ If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
+ result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`, otherwise :math:`input.shape[dim[-1]] // 2 + 1`.
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
+ When the input is float64, the return value type is complex128.
+
+ Raises:
+ TypeError: If the `input` type is not Tensor.
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
+ TypeError: If the type/dtype of `s` and `dim` is not int.
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
+ ValueError: If `dim` has duplicate values.
+ ValueError: If `s` is less than 1.
+ ValueError: If `s` and `dim` are given but have different shapes.
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = ops.ones((4, 4))
+ >>> out = ops.ihfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
+ >>> print(out)
+ [[1.-0.j 0.-0.j 0.-0.j]
+ [0.-0.j 0.-0.j 0.-0.j]
+ [0.-0.j 0.-0.j 0.-0.j]
+ [0.-0.j 0.-0.j 0.-0.j]]
+ """
+ return ihfft2_op(input, s, dim, norm)
+
+
+ def ihfft(input, n=None, dim=-1, norm=None):
+ r"""
+ Calculates the inverse of `hfft()`.
+
+ Note:
+ - `ihfft` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `ihfft` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): The input tensor.
+ Supported dtypes:
+
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
+
+ n (int, optional): Length of the transformed `dim` of the result.
+ If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `ihfft`.
+ Default: ``None`` , in which case `input` is not processed.
+ dim (int, optional): The dimension along which to take the one dimensional `ihfft`.
+ Default: ``-1`` , which means transform the last dimension of `input`.
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
+ Three modes are defined as follows:
+
+ - ``"backward"`` (normalize by :math:`1/n`).
+ - ``"forward"`` (no normalization).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
+
+ Returns:
+ Tensor, the result of the `ihfft()` function.
+ If `n` is given, result.shape[dim] is :math:`n // 2 + 1`, otherwise :math:`input.shape[dim] // 2 + 1`.
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
+ When the input is float64, the return value type is complex128.
+
+ Raises:
+ TypeError: If the `input` type is not Tensor.
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
+ TypeError: If `n` or `dim` type is not int.
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
+ ValueError: If `n` is less than 1.
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
+ >>> out = ops.ihfft(input, n=4, dim=-1, norm="backward")
+ >>> print(out)
+ [-0.14713785-0.j 0.5381293 +0.11530305j 0.69522464-0.j ]
+ """
+ return ihfft_op(input, n, dim, norm)
+
+
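`numpy.fft.ihfft` follows the same convention and reproduces the documented Example output (an assumption based on the semantics above):

    import numpy as np

    a = np.array([1.6243454, -0.6117564, -0.5281718, -1.0729686])
    print(np.fft.ihfft(a))
    # [-0.14713785-0.j  0.5381293+0.11530305j  0.69522464-0.j]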
+ def ihfftn(input, s=None, dim=None, norm=None):
+ r"""
+ Computes the N dimensional inverse discrete Fourier transform of real `input`.
+
+ Note:
+ - `ihfftn` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `ihfftn` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): The input tensor.
+ Supported dtypes:
+
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
+
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ihfftn`.
+ Default: ``None`` , in which case `input` is not processed.
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `ihfftn`.
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
+ Three modes are defined as follows, where :math:`n = prod(s)`:
+
+ - ``"backward"`` (normalize by :math:`1/n`).
+ - ``"forward"`` (no normalization).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
+
+ Returns:
+ Tensor, the result of the `ihfftn()` function.
+ If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
+ result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`, otherwise :math:`input.shape[dim[-1]] // 2 + 1`.
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
+ When the input is float64, the return value type is complex128.
+
+ Raises:
+ TypeError: If the `input` type is not Tensor.
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
+ TypeError: If the type/dtype of `s` and `dim` is not int.
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
+ ValueError: If `dim` has duplicate values.
+ ValueError: If `s` is less than 1.
+ ValueError: If `s` and `dim` are given but have different shapes.
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = ops.ones((4, 4))
+ >>> out = ops.ihfftn(input, s=(4, 4), dim=(0, 1), norm="backward")
+ >>> print(out)
+ [[1.-0.j 0.-0.j 0.-0.j]
+ [0.-0.j 0.-0.j 0.-0.j]
+ [0.-0.j 0.-0.j 0.-0.j]
+ [0.-0.j 0.-0.j 0.-0.j]]
+ """
+ return ihfftn_op(input, s, dim, norm)
+
+
+ def unfold_ext(input, kernel_size, dilation=1, padding=0, stride=1):
+ r"""
+ Extracts sliding local blocks from a batched input tensor.
+
+ Consider a batched input tensor of shape :math:`(N, C, *)`,
+ where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
+ and :math:`*` represents arbitrary spatial dimensions. This operation flattens
+ each sliding `kernel_size`-sized block within the spatial dimensions
+ of `input` into a column (i.e., last dimension) of a 3-D output
+ tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)`, where
+ :math:`C \times \prod(\text{kernel_size})` is the total number of values
+ within each block (a block has :math:`\prod(\text{kernel_size})` spatial
+ locations each containing a `C`-channeled vector), and :math:`L` is
+ the total number of such blocks:
+
+ .. math::
+ L = \prod_d \left\lfloor\frac{\text{spatial_size}[d] + 2 \times \text{padding}[d] %
+ - \text{dilation}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,

  where :math:`\text{spatial_size}` is formed by the spatial dimensions
  of `input` (:math:`*` above), and :math:`d` is over all spatial
@@ -3114,25 +4153,111 @@ def index_select_ext(input, dim, index):
  ValueError: If the dimension of `index` is not equal to 1.

  Supported Platforms:
- ``Ascend``
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> import numpy as np
+ >>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
+ >>> print(input)
+ [[[ 0. 1. 2. 3.]
+ [ 4. 5. 6. 7.]]
+ [[ 8. 9. 10. 11.]
+ [12. 13. 14. 15.]]]
+ >>> index = Tensor([0,], mindspore.int32)
+ >>> y = ops.auto_generate.index_select_ext(input, 1, index)
+ >>> print(y)
+ [[[ 0. 1. 2. 3.]]
+ [[ 8. 9. 10. 11.]]]
+ """
+ return index_select_op(input, dim, index)
+
+
+ def inplace_add_ext(input, other, alpha=1):
+ r"""
+
+ """
+ return inplace_add_ext_op(input, other, alpha)
+
+
+ def inplace_addmm(input, mat1, mat2, beta=1, alpha=1):
+ r"""
+
+ """
+ return inplace_addmm_op(input, mat1, mat2, beta, alpha)
+
+
+ def inplace_adds_ext(input, other, alpha=1):
+ r"""
+
+ """
+ return inplace_adds_ext_op(input, other, alpha)
+
+
+ def zero_(input):
+ r"""
+
+ """
+ return inplace_zero_op(input)
+
+
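The four wrappers above ship without docstrings in this release. Judging by the `inplace_`/trailing-underscore naming convention, they mutate `input` in place rather than returning a new Tensor; a hypothetical usage sketch under that assumption (the `ops.auto_generate` call path mirrors the `index_select_ext` Example above and is itself an assumption):

    from mindspore import Tensor, ops

    x = Tensor([1.0, 2.0, 3.0])
    y = Tensor([10.0, 10.0, 10.0])
    # Assumed semantics: x <- x + alpha * y, updating x in place.
    ops.auto_generate.inplace_add_ext(x, y, alpha=2)
    print(x)  # expected [21. 22. 23.] under the assumed semantics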
+ def irfft2(input, s=None, dim=(-2, -1), norm=None):
+ r"""
+ Calculates the inverse of `rfft2()`.
+
+ Note:
+ - `irfft2` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `irfft2` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): The input tensor.
+ Supported dtypes:
+
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
+
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `irfft2`.
+ Default: ``None`` , in which case dim[-1] of the `input` will be zero-padded to :math:`2*(input.shape[dim[-1]]-1)`.
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `irfft2`.
+ Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
+ Three modes are defined as follows, where :math:`n = prod(s)`:
+
+ - ``"backward"`` (normalize by :math:`1/n`).
+ - ``"forward"`` (no normalization).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
+
+ Returns:
+ Tensor, the result of the `irfft2()` function, where result.shape[dim[i]] is s[i].
+ When the input is int16, int32, int64, float16, float32, complex64, the return value type is float32.
+ When the input is float64 or complex128, the return value type is float64.
+
+ Raises:
+ TypeError: If the `input` type is not Tensor.
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
+ TypeError: If the type/dtype of `s` and `dim` is not int.
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
+ ValueError: If `dim` has duplicate values.
+ ValueError: If `s` is less than 1.
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``

  Examples:
  >>> import mindspore
  >>> from mindspore import Tensor, ops
- >>> import numpy as np
- >>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
- >>> print(input)
- [[[ 0. 1. 2. 3.]
- [ 4. 5. 6. 7.]]
- [[ 8. 9. 10. 11.]
- [12. 13. 14. 15.]]]
- >>> index = Tensor([0,], mindspore.int32)
- >>> y = ops.auto_generate.index_select_ext(input, 1, index)
- >>> print(y)
- [[[ 0. 1. 2. 3.]]
- [[ 8. 9. 10. 11.]]]
+ >>> input = ops.ones((4, 4))
+ >>> ops.irfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
+ Tensor(shape=[4, 4], dtype=Float32, value=
+ [[ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
+ [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
+ [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
+ [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]])
  """
- return index_select_op(input, dim, index)
+ return irfft2_op(input, s, dim, norm)

  def irfft(input, n=None, dim=-1, norm=None):
@@ -3177,14 +4302,72 @@ def irfft(input, n=None, dim=-1, norm=None):
  >>> import mindspore
  >>> from mindspore import Tensor, ops
  >>> input = Tensor([1, 2, 3, 4])
- >>> y = ops.irfft(input)
+ >>> y = ops.irfft(input, n=6, dim=-1, norm='backward')
  >>> print(y)
- [ 2.5000000e+00 -6.6666669e-01 1.2590267e-15 -1.6666667e-01
- 4.2470195e-16 -6.6666669e-01]
+ [ 2.5 -0.6666667 0. -0.16666667 0. -0.6666667 ]
  """
  return irfft_op(input, n, dim, norm)

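`numpy.fft.irfft` with the same arguments reproduces the documented output (an assumption based on the shared ``"backward"`` convention):

    import numpy as np

    a = np.array([1, 2, 3, 4])
    print(np.fft.irfft(a, n=6))
    # [ 2.5  -0.6666667  0.  -0.16666667  0.  -0.6666667]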
+ def irfftn(input, s=None, dim=None, norm=None):
+ r"""
+ Calculates the inverse of `rfftn()`.
+
+ Note:
+ - `irfftn` is currently only used in `mindscience` scientific computing scenarios and
+ does not support other usage scenarios.
+ - `irfftn` is not supported on Windows platform yet.
+
+ Args:
+ input (Tensor): The input tensor.
+ Supported dtypes:
+
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
+
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `irfftn`.
+ Default: ``None`` , in which case dim[-1] of the `input` will be zero-padded to :math:`2*(input.shape[dim[-1]]-1)`.
+ dim (tuple[int], optional): The dimension along which to take the one dimensional `irfftn`.
+ Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
+ norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
+ Three modes are defined as follows, where :math:`n = prod(s)`:
+
+ - ``"backward"`` (normalize by :math:`1/n`).
+ - ``"forward"`` (no normalization).
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
+
+ Returns:
+ Tensor, the result of the `irfftn()` function, where result.shape[dim[i]] is s[i].
+ When the input is int16, int32, int64, float16, float32, the return value type is float32.
+ When the input is float64, the return value type is float64.
+
+ Raises:
+ TypeError: If the `input` type is not Tensor.
+ TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
+ TypeError: If the type/dtype of `s` and `dim` is not int.
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
+ ValueError: If `dim` has duplicate values.
+ ValueError: If `s` is less than 1.
+ ValueError: If `s` and `dim` are given but have different shapes.
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = ops.ones((2, 2, 2))
+ >>> ops.irfftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
+ Tensor(shape=[2, 2, 2], dtype=Float32, value=
+ [[[ 1.00000000e+00, 0.00000000e+00],
+ [ 0.00000000e+00, 0.00000000e+00]],
+ [[ 0.00000000e+00, 0.00000000e+00],
+ [ 0.00000000e+00, 0.00000000e+00]]])
+ """
+ return irfftn_op(input, s, dim, norm)
+
+
  def isfinite(x):
  r"""
  Determine which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
@@ -3225,6 +4408,65 @@ def isfinite(x):
  return isfinite_op(x)

+ def l1_loss_ext(input, target, reduction='mean'):
+ r"""
+ Calculate the mean absolute error between the `input` value and the `target` value.
+
+ Assuming that :math:`x` and :math:`y` are the predicted value and target value,
+ both one-dimensional tensors of length :math:`N`, and `reduction` is set to ``'none'`` ,
+ then the loss of :math:`x` and :math:`y` is calculated without dimensionality reduction.
+
+ The formula is as follows:
+
+ .. math::
+ \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,
+
+ where :math:`N` is the batch size.
+
+ If `reduction` is ``'mean'`` or ``'sum'`` , then:
+
+ .. math::
+ \ell(x, y) =
+ \begin{cases}
+ \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
+ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
+ \end{cases}
+
+ Args:
+ input (Tensor): Predicted value, Tensor of any dimension.
+ target (Tensor): Target value, usually has the same shape as the `input`.
+ If `input` and `target` have different shapes, make sure they can broadcast to each other.
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output. Note that at least one of `input` and `target` must be of float type when `reduction` is ``'mean'`` .
+ - ``'sum'``: the output elements will be summed.
+
+ Returns:
+ Tensor or Scalar, if `reduction` is ``'none'`` , return a Tensor with the same shape and dtype as `input`.
+ Otherwise, a scalar value will be returned.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `target` is not a Tensor.
+ ValueError: If `reduction` is not one of ``'none'`` , ``'mean'`` or ``'sum'`` .
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import Tensor, ops
+ >>> from mindspore import dtype as mstype
+ >>> x = Tensor([[1, 2, 3], [4, 5, 6]], mstype.float32)
+ >>> target = Tensor([[6, 5, 4], [3, 2, 1]], mstype.float32)
+ >>> output = ops.l1_loss_ext(x, target, reduction="mean")
+ >>> print(output)
+ 3.0
+ """
+ return l1_loss_ext_op(input, target, reduction)
+
+
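The ``'mean'`` reduction above is just the average of :math:`|x_n - y_n|` over all elements, which is easy to verify by hand with NumPy:

    import numpy as np

    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    t = np.array([[6, 5, 4], [3, 2, 1]], dtype=np.float32)
    print(np.mean(np.abs(x - t)))  # 3.0, matching the l1_loss_ext Example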
  def leaky_relu_ext(input, negative_slope=0.01):
  r"""
  leaky_relu activation function. Elements of `input` less than 0 are multiplied by `negative_slope` .
@@ -3361,7 +4603,7 @@ def log1p(input):
  Returns the natural logarithm of one plus the input tensor element-wise.

  .. math::
- out_i = \{log_e}(input_i + 1)
+ out_i = \log_e(input_i + 1)

  Args:
  input (Tensor): The input tensor. The value must be greater than -1.
@@ -3422,6 +4664,48 @@ def log(input):
  return log_op(input)

+ def log_softmax_ext(input, dim=None, dtype=None):
+ r"""
+ Applies the Log Softmax function to the input tensor on the specified axis.
+ Supposing a slice :math:`x` in the given axis, for each element :math:`x_i`,
+ the Log Softmax function is shown as follows:
+
+ .. math::
+ \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
+
+ where :math:`N` is the length of the Tensor.
+
+ Args:
+ input (Tensor): The input Tensor.
+ dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
+
+ Keyword Args:
+ dtype (:class:`mindspore.dtype`, optional): The desired dtype of the returned Tensor. If not None, the input
+ Tensor is cast to `dtype` before the operation is performed. This is useful for preventing overflows.
+ If ``None``, the dtype stays the same as that of the input Tensor. Default: ``None`` . Supported data types are {float16, float32, double, bfloat16}.
+
+ Returns:
+ Tensor, with the same shape as the input.
+
+ Raises:
+ TypeError: If `dim` is not an int.
+ ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+ >>> output = ops.auto_generate.log_softmax(logits, dim=-1)
+ >>> print(output)
+ [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
+ """
+ return log_softmax_ext_op(input, dim, dtype)
+
+
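Since :math:`\log \text{softmax}(x_i) = x_i - \log \sum_j \exp(x_j)`, the documented output can be recomputed with a numerically stable NumPy logsumexp:

    import numpy as np

    logits = np.array([1, 2, 3, 4, 5], dtype=np.float32)
    shifted = logits - logits.max()  # subtract the max for numerical stability
    print(shifted - np.log(np.exp(shifted).sum()))
    # [-4.4519143 -3.4519143 -2.4519143 -1.4519143 -0.4519143]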
  def log_softmax(logits, axis=-1):
  r"""
  Applies the Log Softmax function to the input tensor on the specified axis.
@@ -3459,8 +4743,55 @@ def log_softmax(logits, axis=-1):
  >>> print(output)
  [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
  """
- log_softmax_op = _get_cache_prim(LogSoftmax)(axis)
- return log_softmax_op(logits)
+ return log_softmax_impl(logits, axis)
+
+
+ def logaddexp_ext(input, other):
+ r"""
+ Computes the logarithm of the sum of exponentiations of the inputs.
+ This function is useful in statistics, where the calculated probabilities of events may be
+ so small as to exceed the range of normal floating point numbers.
+
+ .. math::
+
+ out_i = \log(\exp(input_i) + \exp(other_i))
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): Input Tensor. The dtype of `input` must be float.
+ other (Tensor): Input Tensor. The dtype of `other` must be float.
+ If the shape of `input` is not equal to the shape of `other`,
+ they must be broadcastable to a common shape (which becomes the shape of the output).
+
+ Returns:
+ Tensor, with the same dtype as `input` and `other`.
+
+ Raises:
+ TypeError: If `input` or `other` is not a Tensor.
+ TypeError: If the dtype of `input` or `other` is not float.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> x1 = Tensor(np.array([1, 2, 3]).astype(np.float16))
+ >>> x2 = Tensor(np.array(2).astype(np.float16))
+ >>> output = ops.logaddexp_ext(x1, x2)
+ >>> print(output)
+ [2.312 2.693 3.312]
+ """
+ return logaddexp_op(input, other)
+
+
+ def logsigmoid_grad(dy, input, buffer):
+ r"""
+
+ """
+ return logsigmoid_grad_op(dy, input, buffer)

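`numpy.logaddexp` implements the same formula as `logaddexp_ext` above and reproduces its Example:

    import numpy as np

    x1 = np.array([1, 2, 3], dtype=np.float16)
    x2 = np.float16(2)
    print(np.logaddexp(x1, x2))  # [2.312 2.693 3.312]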
  def masked_fill(input_x, mask, value):
@@ -3502,6 +4833,38 @@ def masked_fill(input_x, mask, value):
  return masked_fill_op(input_x, mask, value)


+ def masked_select(input, mask):
+ r"""
+ Returns a new 1-D Tensor which indexes the `input` tensor according to the boolean `mask`.
+ The shapes of the `mask` tensor and the `input` tensor don't need to match, but they must be broadcastable.
+
+ Args:
+ input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+ mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+
+ Returns:
+ A 1-D Tensor, with the same type as `input`.
+
+ Raises:
+ TypeError: If `input` or `mask` is not a Tensor.
+ TypeError: If dtype of `mask` is not bool.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
+ >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
+ >>> output = ops.masked_select(x, mask)
+ >>> print(output)
+ [1 3]
+ """
+ return masked_select_op(input, mask)
+
+
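NumPy boolean indexing is the direct equivalent of `masked_select` for same-shape inputs:

    import numpy as np

    x = np.array([1, 2, 3, 4], dtype=np.int64)
    mask = np.array([True, False, True, False])
    print(x[mask])  # [1 3]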
  def matmul_ext(input, mat2):
  r"""

@@ -3592,10 +4955,12 @@ def maximum(input, other):
  r"""
  Computes the maximum of input tensors element-wise.

+ .. math::
+ output_i = \max(input_i, other_i)
+
  Note:
  - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
  consistent.
- - The input must be two Tensors, or a Tensor and a Scalar.
  - When the inputs are two tensors,
  dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  - When the inputs are one tensor and one scalar,
@@ -3603,8 +4968,9 @@ def maximum(input, other):
  - Broadcasting is supported.
  - If one of the elements being compared is a NaN, then that element is returned.

- .. math::
- output_i = \max(input_i, other_i)
+ .. warning::
+ If all inputs are integer scalars, in GRAPH mode the output will be a Tensor of int32, while in
+ PYNATIVE mode the output will be a Tensor of int64.

  Args:
  input (Union[Tensor, Number, bool]): The first input is a number or
@@ -3751,44 +5117,134 @@ def minimum(input, other):
  - Shapes of them are supposed to be broadcast.
  - If one of the elements being compared is a NaN, then that element is returned.

- .. math::
- output_i = \min(input_i, other_i)
+ .. math::
+ output_i = \min(input_i, other_i)
+
+ Args:
+ input (Union[Tensor, Number, bool]): The first input is a number or
+ a bool or a tensor whose data type is number or bool.
+ other (Union[Tensor, Number, bool]): The second input is a number or
+ a bool when the first input is a tensor, or a tensor whose data type is number or bool.
+
+ Returns:
+ Tensor, the shape is the same as the one after broadcasting,
+ and the data type is the one with higher precision or higher digits among the two inputs.
+
+ Raises:
+ TypeError: If `input` and `other` are not one of the following: Tensor, Number, bool.
+ ValueError: If `input` and `other` are not the same shape after broadcast.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> # case 1 : same data type
+ >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
+ >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
+ >>> output = ops.minimum(input, other)
+ >>> print(output)
+ [1. 2. 3.]
+ >>> # case 2 : different data type
+ >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
+ >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
+ >>> output = ops.minimum(input, other)
+ >>> print(output.dtype)
+ Float32
+ """
+ return minimum_op(input, other)
+
+
+ def mish_ext(input):
5161
+ r"""
5162
+ Computes MISH (A Self Regularized Non-Monotonic Neural Activation Function)
5163
+ of input tensors element-wise.
5164
+
5165
+ The formula is defined as follows:
5166
+
5167
+ .. math::
5168
+ \text{mish}(input) = input * \tanh(softplus(\text{input}))
5169
+
5170
+ See more details in `A Self Regularized Non-Monotonic Neural Activation Function
5171
+ <https://arxiv.org/abs/1908.08681>`_.
5172
+
5173
+ Mish Activation Function Graph:
5174
+
5175
+ .. image:: ../images/Mish.png
5176
+ :align: center
5177
+
5178
+ Args:
5179
+ input (Tensor): The input of MISH. Supported dtypes:
5180
+
5181
+ - Ascend: float16, float32.
5182
+
5183
+ Returns:
5184
+ Tensor, has the same type and shape as the `input`.
5185
+
5186
+ Raises:
5187
+ TypeError: If `input` is not a Tensor.
5188
+ TypeError: If dtype of `input` is not float16 or float32.
5189
+
5190
+ Supported Platforms:
5191
+ ``Ascend``
5192
+
5193
+ Examples:
5194
+ >>> import mindspore
5195
+ >>> from mindspore import Tensor, ops
5196
+ >>> import numpy as np
5197
+ >>> x = Tensor(np.array([[-1.1, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
5198
+ >>> output = ops.mish(x)
5199
+ >>> print(output)
5200
+ [[-3.0764845e-01 3.9974124e+00 -2.6832507e-03]
5201
+ [ 1.9439589e+00 -3.3576239e-02 8.9999990e+00]]
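+ >>> # Worked check of the formula: for x = 4.0, softplus(4.0) = ln(1 + exp(4)) ≈ 4.0181,
+ >>> # and 4.0 * tanh(4.0181) ≈ 3.9974, matching the second entry above.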
5202
+ """
5203
+ return mish_ext_op(input)
5204
+
5205
+
5206
+ def mse_loss_ext(input, target, reduction='mean'):
5207
+ r"""
5208
+ Calculates the mean squared error between the predicted value and the label value.
5209
+
5210
+ For detailed information, please refer to :class:`mindspore.nn.MSELoss`.
3756
5211
 
3757
5212
  Args:
3758
- input (Union[Tensor, Number, bool]): The first input is a number or
3759
- a bool or a tensor whose data type is number or bool.
3760
- other (Union[Tensor, Number, bool]): The second input is a number or
3761
- a bool when the first input is a tensor or a tensor whose data type is number or bool.
5213
+ input (Tensor): Tensor of any dimension. The data type needs to be consistent with the `target`.
5214
+ It should also be broadcastable with the `target`.
5215
+ target (Tensor): The input label. Tensor of any dimension. The data type needs to be consistent with the `input`.
5216
+ It should also be broadcastable with the `input`.
5217
+ reduction (str, optional): Apply specific reduction method to the output: ``'mean'`` , ``'none'`` ,
5218
+ ``'sum'`` . Default: ``'mean'`` .
5219
+
5220
+ - ``'none'``: no reduction will be applied.
5221
+ - ``'mean'``: compute and return the mean of elements in the output.
5222
+ - ``'sum'``: the output elements will be summed.
3762
5223
 
3763
5224
  Returns:
3764
- Tensor, the shape is the same as the one after broadcasting,
3765
- and the data type is the one with higher precision or higher digits among the two inputs.
5225
+ - Tensor. If `reduction` is ``'mean'`` or ``'sum'``, the output is a 0-D Tensor (scalar).
5226
+ - If `reduction` is ``'none'``, the shape of the output is the broadcast shape of `input` and `target` .
3766
5227
 
3767
5228
  Raises:
3768
- TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
3769
- ValueError: If `input` and `other` are not the same shape after broadcast.
5229
+ ValueError: If `reduction` is not one of ``'mean'`` , ``'sum'`` or ``'none'``.
5230
+ ValueError: If `input` and `target` are not broadcastable.
5231
+ TypeError: If `input` and `target` are in different data type.
3770
5232
 
3771
5233
  Supported Platforms:
3772
- ``Ascend`` ``GPU`` ``CPU``
5234
+ ``Ascend``
3773
5235
 
3774
5236
  Examples:
3775
5237
  >>> import mindspore
3776
5238
  >>> import numpy as np
3777
5239
  >>> from mindspore import Tensor, ops
3778
- >>> # case 1 : same data type
3779
- >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
3780
- >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
3781
- >>> output = ops.minimum(input, other)
5240
+ >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
5241
+ >>> labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)
5242
+ >>> output = ops.mse_loss_ext(logits, labels, reduction='none')
3782
5243
  >>> print(output)
3783
- [1. 2. 3.]
3784
- >>> # case 2 : different data type
3785
- >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
3786
- >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
3787
- >>> output = ops.minimum(input, other)
3788
- >>> print(output.dtype)
3789
- Float32
5244
+ [[0. 1. 4.]
5245
+ [0. 0. 1.]]
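+ >>> # With the default reduction='mean' (a sketch: the mean of the six squared
+ >>> # errors above, (0 + 1 + 4 + 0 + 0 + 1) / 6):
+ >>> print(ops.mse_loss_ext(logits, labels))
+ 1.0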
3790
5246
  """
3791
- return minimum_op(input, other)
5247
+ return mse_loss_ext_op(input, target, reduction)
3792
5248
 
3793
5249
 
3794
5250
  def mul(input, other):
@@ -3810,12 +5266,12 @@ def mul(input, other):
3810
5266
  Args:
3811
5267
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
3812
5268
  a bool or a tensor whose data type is
3813
- `number <https://www.mindspore.cn/docs/en/r2.3/api_python/mindspore.html#mindspore.dtype>`_ or
3814
- `bool_ <https://www.mindspore.cn/docs/en/r2.3/api_python/mindspore.html#mindspore.dtype>`_.
5269
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
5270
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
3815
5271
  other (Union[Tensor, number.Number, bool]): The second input, which is a number.Number or
3816
5272
  a bool or a tensor whose data type is
3817
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
3818
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
5273
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
5274
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
3819
5275
 
3820
5276
  Returns:
3821
5277
  Tensor, the shape is the same as the one after broadcasting,
@@ -3841,6 +5297,13 @@ def mul(input, other):
3841
5297
  return mul_op(input, other)
3842
5298
 
3843
5299
 
5300
+ def muls(input, other):
5301
+ r"""
5302
+
5303
+ """
5304
+ return muls_op(input, other)
5305
+
5306
+
3844
5307
  def mv(input, vec):
3845
5308
  r"""
3846
5309
 
@@ -3848,6 +5311,44 @@ def mv(input, vec):
3848
5311
  return mv_op(input, vec)
3849
5312
 
3850
5313
 
5314
+ def nan_to_num(input, nan=None, posinf=None, neginf=None):
5315
+ r"""
5316
+ Replace the `NaN`, positive infinity and negative infinity values in `input` with the
5317
+ specified values in `nan`, `posinf` and `neginf` respectively.
5318
+
5319
+ .. warning::
5320
+ For Ascend, it is only supported on Atlas A2 Training Series Products.
5321
+ This is an experimental API that is subject to change or deletion.
5322
+
5323
+ Args:
5324
+ input (Tensor): The shape of tensor is :math:`(input_1, input_2, ..., input_R)`.
5325
+ nan (number, optional): The value used to replace `NaN`. Default: ``None``, replacing `NaN` with ``0.0``.
5326
+ posinf (number, optional): the value to replace positive infinity values with. Default: ``None``,
5327
+ replacing positive infinity with the maximum value supported by the data type of `input`.
5328
+ neginf (number, optional): the value to replace negative infinity values with. Default: ``None``,
5329
+ replacing negative infinity with the minimum value supported by the data type of `input`.
5330
+
5331
+ Returns:
5332
+ Tensor, has the same shape and dtype as the `input`.
5333
+
5334
+ Raises:
5335
+ TypeError: If `input` is not a Tensor.
5336
+
5337
+ Supported Platforms:
5338
+ ``Ascend`` ``CPU``
5339
+
5340
+ Examples:
5341
+ >>> import mindspore
5342
+ >>> import numpy as np
5343
+ >>> from mindspore import Tensor, ops
5344
+ >>> input = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 5.0]), mindspore.float32)
5345
+ >>> output = ops.nan_to_num(input, 1.0, 2.0, 3.0)
5346
+ >>> print(output)
5347
+ [1. 2. 3. 5.]
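+ >>> # A sketch under the documented defaults (assumption: `nan=None` maps NaN to 0.0;
+ >>> # posinf/neginf fall back to the float32 max/min as stated above):
+ >>> print(ops.nan_to_num(input))
+ [ 0.0000000e+00  3.4028235e+38 -3.4028235e+38  5.0000000e+00]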
5348
+ """
5349
+ return nan_to_num_impl(input, nan, posinf, neginf)
5350
+
5351
+
3851
5352
  def neg(input):
3852
5353
  r"""
3853
5354
  Returns a tensor with negative values of the input tensor element-wise.
@@ -3971,101 +5472,46 @@ def ones(shape, dtype=None):
3971
5472
  return ones_op(shape, dtype)
3972
5473
 
3973
5474
 
3974
- def paged_attention_mask(query, key_cache, value_cache, block_tables, context_lens, alibi_mask, head_num, scale_value, kv_head_num):
5475
+ def outer_ext(input, vec2):
3975
5476
  r"""
3976
- The PagedAttentionMask is the fusion of block-wise KV Cache access and self-attention(with alibi-mask) computing.
3977
-
3978
- Args:
3979
- query (Tensor): The query tensor with data type of float16.
3980
- :math:`(num\_tokens, num\_head, head\_dim)`.
3981
- key_cache (Tensor): The cache tensor with data type of float16.
3982
- :math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
3983
- value_cache (Tensor): The cache tensor with data type of float16.
3984
- :math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
3985
- block_tables (Tensor): The block mapping table with data type of int32.
3986
- :math:`(num\_tokens, max_num_blocks_per_batch)`.
3987
- context_lens (Tensor): The context length of each sequence with data type of int32.
3988
- :math:`(num\_tokens,)`.
3989
- alibi_mask (Tensor): The context length of each sequence with data type of float16.
3990
- :math:`(num\_tokens, num\_head, 1, max\_context\_len)`.
3991
-
3992
- Outputs:
3993
- attention output.
5477
+ Returns the outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
5478
+ and `vec2` is a vector of size :math:`m` , then the output is a matrix of shape :math:`(n, m)` .
3994
5479
 
3995
- Notes:
3996
- No backend implementation in MindSpore, only use to export MindIr and run in MindSpore Lite.
5480
+ .. warning::
5481
+ This is an experimental API that is subject to change or deletion.
3997
5482
 
3998
- Examples:
3999
- >>> from mindspore.ops.operations import _inner_ops
4000
- >>> num_tokens = = 4
4001
- >>> num_head = 40
4002
- >>> num_kv_head = 40
4003
- >>> head_dim = 128
4004
- >>> block_size = 16
4005
- >>> num_blocks = 128
4006
- >>> max_seq = 1024
4007
- >>> max_num_blocks_per_batch = max_seq // block_size
4008
- >>> scale_value = 1.0 / math.sqrt(head_dim)
4009
- >>> query = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16))
4010
- >>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
4011
- >>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
4012
- >>> dummy_block_indice = np.random.shuffle(np.arange(num_tokens * max_num_blocks_per_batch, dtype=np.int32))
4013
- >>> block_tables = Tensor(np.reshape(dummy_block_indice, (num_tokens, max_num_blocks_per_batch)))
4014
- >>> context_lens = Tensor(np.random.randint(max_seq, size=num_tokens).astype(np.int32)))
4015
- >>> alibi_mask = Tensor(np.random.randn(num_tokens, num_head, 1, max_seq).astype(np.int32)))
4016
- >>> paged_attention_mask = _inner_ops.PagedAttentionMask()
4017
- >>> output = paged_attention_mask(query, key_cache, value_cache, block_tables, context_lens, alibi_mask)
4018
- >>> print(output)
4019
- """
4020
- paged_attention_mask_op = _get_cache_prim(PagedAttentionMask)(head_num, scale_value, kv_head_num)
4021
- return paged_attention_mask_op(query, key_cache, value_cache, block_tables, context_lens, alibi_mask)
5483
+ .. note::
5484
+ This function does not broadcast.
4022
5485
 
5486
+ Args:
5487
+ input (Tensor): 1-D input vector.
5488
+ vec2 (Tensor): 1-D input vector.
4023
5489
 
4024
- def paged_attention(query, key_cache, value_cache, block_tables, context_lens, head_num, scale_value, kv_head_num):
4025
- r"""
4026
- The PagedAttention is the fusion of block-wise KV Cache access and self-attention computing.
5490
+ Returns:
5491
+ out, 2-D matrix, the outer product of two vectors.
4027
5492
 
4028
- Args:
4029
- query (Tensor): The query tensor with data type of float16.
4030
- :math:`(num\_tokens, num\_head, head\_dim)`.
4031
- key_cache (Tensor): The cache tensor with data type of float16.
4032
- :math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
4033
- value_cache (Tensor): The cache tensor with data type of float16.
4034
- :math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
4035
- block_tables (Tensor): The block mapping table with data type of int32.
4036
- :math:`(num\_tokens, max_num_blocks_per_batch)`.
4037
- context_lens (Tensor): The context length of each sequence with data type of int32.
4038
- :math:`(num\_tokens,)`.
4039
-
4040
- Outputs:
4041
- attention output.
5493
+ Raises:
5494
+ TypeError: If `input` or `vec2` is not a Tensor.
5495
+ TypeError: If the implicitly converted data type of `input` and `vec2` is not one of: float16, float32, float64, bool, uint8, int8, int16, int32, int64, complex64, complex128, bfloat16.
5496
+ ValueError: If the dimension of `input` or `vec2` is not equal to 1.
4042
5497
 
4043
- Notes:
4044
- No backend implementation in MindSpore, only use to export MindIr and run in MindSpore Lite.
5498
+ Supported Platforms:
5499
+ ``Ascend``
4045
5500
 
4046
5501
  Examples:
4047
- >>> from mindspore.ops.operations import _inner_ops
4048
- >>> num_tokens = = 4
4049
- >>> num_head = 40
4050
- >>> num_kv_head = 40
4051
- >>> head_dim = 128
4052
- >>> block_size = 16
4053
- >>> num_blocks = 128
4054
- >>> max_seq = 1024
4055
- >>> max_num_blocks_per_batch = max_seq // block_size
4056
- >>> scale_value = 1.0 / math.sqrt(head_dim)
4057
- >>> query = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16))
4058
- >>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
4059
- >>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
4060
- >>> dummy_block_indice = np.random.shuffle(np.arange(num_tokens * max_num_blocks_per_batch, dtype=np.int32))
4061
- >>> block_tables = Tensor(np.reshape(dummy_block_indice, (num_tokens, max_num_blocks_per_batch)))
4062
- >>> context_lens = Tensor(np.random.randint(max_seq, size=num_tokens).astype(np.int32)))
4063
- >>> paged_attention = _inner_ops.PagedAttention()
4064
- >>> output = paged_attention(query, key_cache, value_cache, block_tables, context_lens)
4065
- >>> print(output)
5502
+ >>> import mindspore
5503
+ >>> import numpy as np
5504
+ >>> from mindspore import Tensor
5505
+ >>> from mindspore import ops
5506
+ >>> input = Tensor(np.array([7, 8, 9]), mindspore.int32)
5507
+ >>> vec2 = Tensor(np.array([7, 10, 11]), mindspore.int32)
5508
+ >>> out = ops.outer(input, vec2)
5509
+ >>> print(out)
5510
+ [[49 70 77]
5511
+ [56 80 88]
5512
+ [63 90 99]]
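+ >>> # Worked check: out[i][j] = input[i] * vec2[j], e.g. out[0][1] = 7 * 10 = 70.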
4066
5513
  """
4067
- paged_attention_op = _get_cache_prim(PagedAttention)(head_num, scale_value, kv_head_num)
4068
- return paged_attention_op(query, key_cache, value_cache, block_tables, context_lens)
5514
+ return outer_op(input, vec2)
4069
5515
 
4070
5516
 
4071
5517
  def pow(input, exponent):
@@ -4080,11 +5526,11 @@ def pow(input, exponent):
4080
5526
 
4081
5527
  Args:
4082
5528
  input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
4083
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
4084
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
5529
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
5530
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
4085
5531
  exponent (Union[Tensor, Number]): The second input is a Number or a tensor whose data type is
4086
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
4087
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
5532
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
5533
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
4088
5534
 
4089
5535
  Returns:
4090
5536
  Tensor, the shape is the same as the one after broadcasting,
@@ -4112,7 +5558,7 @@ def pow(input, exponent):
4112
5558
  return pow_op(input, exponent)
4113
5559
 
4114
5560
 
4115
- def prelu(x, weight):
5561
+ def prelu(input, weight):
4116
5562
  r"""
4117
5563
  Parametric Rectified Linear Unit activation function.
4118
5564
 
@@ -4124,30 +5570,26 @@ def prelu(x, weight):
4124
5570
 
4125
5571
  where :math:`x_i` is an element of a channel of the input, `w` is the weight of the channel.
4126
5572
 
4127
- Note:
4128
- Scalar or 1-D Tensor is not supported on Ascend.
4129
-
4130
5573
  PReLU Activation Function Graph:
4131
5574
 
4132
- .. image:: ../images/PReLU.png
5575
+ .. image:: ../images/PReLU2.png
4133
5576
  :align: center
4134
5577
 
5578
+ .. note::
5579
+ The channel dim is the 2nd dim of `input`. When `input` has fewer than 2 dims, there is
5580
+ no channel dim and the number of channels is 1.
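+ For instance, a `weight` of size 3 matches an `input` of shape :math:`(N, 3, H, W)`,
+ while a 1-D `input` is treated as a single channel (an illustration of the rule above).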
5581
+
4135
5582
  Args:
4136
- x (Tensor): The input Tensor of the activation function. The data type is float16 or float32.
4137
- The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4138
- weight (Tensor): Weight Tensor. The data type is float16 or float32.
4139
- The weight can only be a Tensor, and the length is the same as the number of channels C of the `input_x`.
4140
- On GPU devices, when the input is a scalar, the shape is :math:`(1,)` .
5583
+ input (Tensor): The input Tensor of the activation function.
5584
+ weight (Tensor): Weight Tensor. The size of the weight should be 1 or the number of channels of Tensor `input`.
4141
5585
 
4142
5586
  Returns:
4143
- Tensor, with the same shape and dtype as `x`.
4144
- For detailed information, please refer to :class:`mindspore.nn.PReLU`.
5587
+ Tensor, with the same shape and dtype as `input`.
5588
+ For detailed information, please refer to :class:`mindspore.mint.nn.PReLU`.
4145
5589
 
4146
5590
  Raises:
4147
- TypeError: If dtype of `x` or `weight` is neither float16 nor float32.
4148
- TypeError: If the `x` or the `weight` is not a Tensor.
4149
- ValueError: If the `x` is a 0-D or 1-D Tensor on Ascend.
4150
- ValueError: If the `weight` is not a 1-D Tensor.
5591
+ TypeError: If the `input` or the `weight` is not a Tensor.
5592
+ ValueError: If the `weight` is not a 0-D or 1-D Tensor.
4151
5593
 
4152
5594
  Supported Platforms:
4153
5595
  ``Ascend`` ``GPU`` ``CPU``
@@ -4167,7 +5609,7 @@ def prelu(x, weight):
4167
5609
  [ 2.00 3.00]
4168
5610
  [ 4.0 5.00]]]
4169
5611
  """
4170
- return prelu_op(x, weight)
5612
+ return prelu_op(input, weight)
4171
5613
 
4172
5614
 
4173
5615
  def prod_ext(input, axis=None, keep_dims=False, dtype=None):
@@ -4315,7 +5757,10 @@ def randperm(n, seed=0, offset=0, dtype=mstype.int64):
4315
5757
  that a given type can represent.
4316
5758
 
4317
5759
  .. warning::
4318
- This is an experimental API that is subject to change or deletion.
5760
+ - This is an experimental API that is subject to change or deletion.
5761
+ - The Ascend backend does not support the reproducibility of random numbers, so
5762
+ the `seed` parameter has no effect.
5763
+
4319
5764
 
4320
5765
  Args:
4321
5766
  n (Union[Tensor, int]): The input n Tensor with shape: () or (1,) and with data type of int64.
@@ -4697,6 +6142,63 @@ def flip(input, axis):
4697
6142
  return reverse_v2_impl(input, axis)
4698
6143
 
4699
6144
 
6145
+ def rfft2(input, s=None, dim=(-2, -1), norm=None):
6146
+ r"""
6147
+ Calculates the two dimensional discrete Fourier transform for real input `input`.
6148
+
6149
+ Note:
6150
+ - `rfft2` is currently only used in `mindscience` scientific computing scenarios and
6151
+ does not support other usage scenarios.
6152
+ - `rfft2` is not supported on Windows platform yet.
6153
+
6154
+ Args:
6155
+ input (Tensor): The input tensor.
6156
+ Supported dtypes:
6157
+
6158
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
6159
+
6160
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
6161
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `rfft2`.
6162
+ Default: ``None`` , which does not need to process `input`.
6163
+ dim (tuple[int], optional): The dimensions along which to compute `rfft2`.
6164
+ Default: ``(-2, -1)`` , which means transform the last two dimension of `input`.
6165
+ norm (str, optional): Normalization mode. Default: ``None`` , which means ``"backward"`` .
6166
+ Three modes are defined as follows, where :math:`n = prod(s)`:
6167
+
6168
+ - ``"backward"`` (no normalization).
6169
+ - ``"forward"`` (normalize by :math:`1/n`).
6170
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
6171
+
6172
+ Returns:
6173
+ Tensor, the result of the `rfft2()` function, where `result.shape[dim[i]]` is `s[i]` and, for the last
6174
+ transformed dim, `result.shape[dim[-1]]` is :math:`s[-1] // 2 + 1`.
6175
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
6176
+ When the input is float64, the return value type is complex128.
6177
+
6178
+ Raises:
6179
+ TypeError: If the `input` type is not Tensor.
6180
+ TypeError: If the `input` data type is not one of: int16, int32, int64, float16, float32, float64.
6181
+ TypeError: If the types of elements in `s` and `dim` are not int.
6182
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
6183
+ ValueError: If `dim` has duplicate values.
6184
+ ValueError: If `s` is less than 1.
6185
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
6186
+
6187
+ Supported Platforms:
6188
+ ``Ascend`` ``CPU``
6189
+
6190
+ Examples:
6191
+ >>> import mindspore
6192
+ >>> from mindspore import Tensor, ops
6193
+ >>> input = ops.ones((2, 2))
6194
+ >>> ops.rfft2(input, s=(2, 2), dim=(0, 1), norm="backward")
6195
+ Tensor(shape=[2, 2], dtype=Complex64, value=
6196
+ [[4+0j, 0+0j],
6197
+ [0+0j, 0+0j]])
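+ >>> # Zero-padding sketch (per the `s` description above, the transformed axes are
+ >>> # padded to 4, and the last dim becomes 4 // 2 + 1 = 3):
+ >>> ops.rfft2(input, s=(4, 4), dim=(0, 1)).shape
+ (4, 3)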
6198
+ """
6199
+ return rfft2_op(input, s, dim, norm)
6200
+
6201
+
4700
6202
  def rfft(input, n=None, dim=-1, norm=None):
4701
6203
  r"""
4702
6204
  Calculates the one dimensional discrete Fourier transform for real input `input`.
@@ -4739,20 +6241,117 @@ def rfft(input, n=None, dim=-1, norm=None):
4739
6241
  >>> import mindspore
4740
6242
  >>> from mindspore import Tensor, ops
4741
6243
  >>> input = Tensor([1, 2, 3, 4])
4742
- >>> y = ops.rfft(input)
6244
+ >>> y = ops.rfft(input, n=4, dim=-1, norm='backward')
4743
6245
  >>> print(y)
4744
6246
  [10.+0.j -2.+2.j -2.+0.j]
4745
6247
  """
4746
6248
  return rfft_op(input, n, dim, norm)
4747
6249
 
4748
6250
 
6251
+ def rfftfreq(n, d=1.0, dtype=None):
6252
+ r"""
6253
+ Computes the sample frequencies for `rfft` with a signal of size `n`.
6254
+ For instance, given a length `n` and a sample spacing `d` , the returned result `f` is:
6255
+
6256
+ .. math::
6257
+ f = [0, 1, ..., n // 2] / (d * n)
6258
+
6259
+ Note:
6260
+ - `rfftfreq` is currently only used in `mindscience` scientific computing scenarios and
6261
+ does not support other usage scenarios.
6262
+ - `rfftfreq` is not supported on Windows platform yet.
6263
+
6264
+ Args:
6265
+ n (int): Window length.
6266
+ d (float, optional): Sample spacing (inverse of the sampling rate). Default: ``1.0`` .
6267
+ dtype (mindspore.dtype, optional): The dtype of the returned frequencies. Default: ``None`` represents float32.
6268
+
6269
+ Returns:
6270
+ Tensor, array of length :math:`n // 2 + 1` containing the sample frequencies.
6271
+
6272
+ Raises:
6273
+ ValueError: If `n` is less than 1.
6274
+
6275
+ Supported Platforms:
6276
+ ``Ascend`` ``CPU``
6277
+
6278
+ Examples:
6279
+ >>> import mindspore
6280
+ >>> from mindspore import ops
6281
+ >>> out = ops.rfftfreq(n=4, d=1.0)
6282
+ >>> print(out)
6283
+ [0. 0.25 0.5 ]
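+ >>> # With sample spacing d=0.5 the frequencies double, per f = [0, ..., n // 2] / (d * n):
+ >>> print(ops.rfftfreq(n=4, d=0.5))
+ [0.  0.5 1. ]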
6284
+ """
6285
+ return rfftfreq_op(n, d, dtype)
6286
+
6287
+
6288
+ def rfftn(input, s=None, dim=None, norm=None):
6289
+ r"""
6290
+ Computes the N dimensional discrete Fourier transform for real input `input`.
6291
+
6292
+ Note:
6293
+ - `rfftn` is currently only used in `mindscience` scientific computing scenarios and
6294
+ does not support other usage scenarios.
6295
+ - `rfftn` is not supported on Windows platform yet.
6296
+
6297
+ Args:
6298
+ input (Tensor): The input tensor.
6299
+ Supported dtypes:
6300
+
6301
+ - Ascend/CPU: int16, int32, int64, float16, float32, float64.
6302
+
6303
+ s (tuple[int], optional): Length of the transformed `dim` of the result.
6304
+ If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `rfftn`.
6305
+ Default: ``None`` , which does not need to process `input`.
6306
+ dim (tuple[int], optional): The dimensions along which to compute `rfftn`.
6307
+ Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
6308
+ norm (str, optional): Normalization mode. Default: ``None`` , which means ``"backward"`` .
6309
+ Three modes are defined as follows, where :math:`n = prod(s)`:
6310
+
6311
+ - ``"backward"`` (no normalization).
6312
+ - ``"forward"`` (normalize by :math:`1/n`).
6313
+ - ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
6314
+
6315
+ Returns:
6316
+ Tensor, the result of the `rfftn()` function, where `result.shape[dim[i]]` is `s[i]` and, for the last
6317
+ transformed dim, `result.shape[dim[-1]]` is :math:`s[-1] // 2 + 1`.
6318
+ When the input is int16, int32, int64, float16, float32, the return value type is complex64.
6319
+ When the input is float64, the return value type is complex128.
6320
+
6321
+ Raises:
6322
+ TypeError: If the `input` type is not Tensor.
6323
+ TypeError: If the `input` data type is not one of: int16, int32, int64, float16, float32, float64.
6324
+ TypeError: If the types of elements in `s` and `dim` are not int.
6325
+ ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
6326
+ ValueError: If `dim` has duplicate values.
6327
+ ValueError: If `s` is less than 1.
6328
+ ValueError: If `s` and `dim` are given but have different shapes.
6329
+ ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
6330
+
6331
+ Supported Platforms:
6332
+ ``Ascend`` ``CPU``
6333
+
6334
+ Examples:
6335
+ >>> import mindspore
6336
+ >>> from mindspore import Tensor, ops
6337
+ >>> input = ops.ones((2, 2, 2))
6338
+ >>> ops.rfftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
6339
+ Tensor(shape=[2, 2, 2], dtype=Complex64, value=
6340
+ [[[8+0j, 0+0j],
6341
+ [0+0j, 0+0j]],
6342
+ [[0+0j, 0+0j],
6343
+ [0+0j, 0+0j]]])
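+ >>> # Shape sketch of the Returns rule: with s=(2, 2, 4) the last transformed dim
+ >>> # becomes 4 // 2 + 1 = 3:
+ >>> ops.rfftn(input, s=(2, 2, 4), dim=(0, 1, 2)).shape
+ (2, 2, 3)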
6344
+ """
6345
+ return rfftn_op(input, s, dim, norm)
6346
+
6347
+
4749
6348
  def rms_norm(x, gamma, epsilon=1e-6):
4750
6349
  r"""
4751
6350
  The RmsNorm(Root Mean Square Layer Normalization) operator is a normalization operation. Compared to
4752
6351
  LayerNorm, it retains scaling invariance and removes translation invariance. Its formula is:
4753
6352
 
4754
6353
  .. math::
4755
- y=\frac{x_i}{\sqrt{\frac{1}{n}}\sum_{i=1}^{n}{ x_i^2}+\varepsilon }\gamma_i
6354
+ y=\frac{x_i}{\sqrt{\frac{1}{n}\sum_{i=1}^{n}{ x_i^2}+\varepsilon}}\gamma_i
4756
6355
 
4757
6356
  .. warning::
4758
6357
  This is an experimental API that is subject to change or deletion. This API is only supported in Atlas A2
@@ -4795,36 +6394,83 @@ def rms_norm(x, gamma, epsilon=1e-6):
4795
6394
  return rms_norm_impl(x, gamma, epsilon)
4796
6395
 
4797
6396
 
4798
- def round(input):
6397
+ def rotary_position_embedding(x, cos, sin, mode=0):
4799
6398
  r"""
4800
- Returns half to even of a tensor element-wise.
4801
-
4802
- .. math::
6399
+ Implements the Rotary Position Embedding algorithm.
6400
+ Refer to paper `Enhanced Transformer with Rotary Position Embedding <https://arxiv.org/pdf/2104.09864.pdf>`_.
4803
6401
 
4804
- out_i \approx input_i
6402
+ .. warning::
6403
+ This is an experimental API that is subject to change or deletion.
4805
6404
 
4806
6405
  Args:
4807
- input (Tensor): The input tensor.
6406
+ x (Tensor): 4D tensor, with float16, bfloat16 or float32 data type.
6407
+ cos (Tensor): 4D tensor, has the same type as `x` , in range of [-1, 1].
6408
+ sin (Tensor): Same as `cos` .
6409
+ mode (int): An optional attribute. Used to select a calculation mode. 0: rotate_half(GPT-NeoX style); 1: rotate_interleaved(GPT-J style). Defaults to ``0`` .
6410
+
6411
+ .. list-table:: Config layout constraints
6412
+ :widths: 5 20 20
6413
+ :header-rows: 1
6414
+
6415
+ * - Args
6416
+ - RotateHalf(mode:0)
6417
+ - RotateInterleaved(mode:1)
6418
+ * - x
6419
+ - Supported layout:
6420
+
6421
+ 11SD, B1SD, BNSD; D < 896 and D is even. B, N < 1000;
6422
+
6423
+ B * N <= 1024 if gradient calculation of cos/sin is used.
6424
+ - Supported layout: 11SD, B1SD, BNSD;
6425
+
6426
+ D < 896 and D is even.
6427
+
6428
+ B, N < 1000;
6429
+ * - cos
6430
+ - Support layout for different values of `x`:
6431
+
6432
+ `x` is BNSD: 11SD, B1SD, BNSD;
6433
+
6434
+ `x` is BSND: 1S1D, BS1D, BSND;
6435
+
6436
+ `x` is SBND: S11D, SB1D, SBND
6437
+ - Support layout for different values of `x`:
6438
+
6439
+ `x` is BNSD: 11SD;
6440
+
6441
+ `x` is BSND: 1S1D;
6442
+
6443
+ `x` is SBND: S11D
6444
+ * - sin
6445
+ - Same as `cos` .
6446
+ - Same as `cos` .
6447
+
6448
+ .. note::
6449
+ When the layout is BNSD, B * N > 8S, and D is 32-byte aligned, the performance is poor, so this interface cannot be called in that case.
4808
6450
 
4809
6451
  Returns:
4810
- Tensor, has the same shape and type as the `input`.
6452
+ Tensor, has the same dtype and shape as the `x`.
4811
6453
 
4812
6454
  Raises:
4813
- TypeError: If `input` is not a Tensor.
6455
+ TypeError: If `x` is not a Tensor.
6456
+ TypeError: If `cos` is not a Tensor.
6457
+ TypeError: If `sin` is not a Tensor.
6458
+ TypeError: If `mode` is not an int.
4814
6459
 
4815
6460
  Supported Platforms:
4816
- ``Ascend`` ``GPU`` ``CPU``
6461
+ ``Ascend``
4817
6462
 
4818
6463
  Examples:
4819
- >>> import mindspore
4820
6464
  >>> import numpy as np
4821
6465
  >>> from mindspore import Tensor, ops
4822
- >>> input = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
4823
- >>> output = ops.round(input)
4824
- >>> print(output)
4825
- [ 1. 2. 2. 2. -4.]
6466
+ >>> x = Tensor(np.random.uniform(-2, 2, (4, 8192, 4, 128)))
6467
+ >>> cos = Tensor(np.random.uniform(-1, 1, (1, 8192, 1, 128)))
6468
+ >>> sin = Tensor(np.random.uniform(-1, 1, (1, 8192, 1, 128)))
6469
+ >>> output = ops.rotary_position_embedding(x, cos, sin, 0)
6470
+ >>> print(output.shape)
6471
+ (4, 8192, 4, 128)
4826
6472
  """
4827
- return round_op(input)
6473
+ return rotary_position_embedding_op(x, cos, sin, mode)
4828
6474
 
4829
6475
 
4830
6476
  def rsqrt(input):
@@ -4850,7 +6496,7 @@ def rsqrt(input):
4850
6496
 
4851
6497
  Examples:
4852
6498
  >>> import mindspore as ms
4853
- >>> import mindspore.ops as ops
6499
+ >>> from mindspore import ops
4854
6500
  >>> input = ms.Tensor([-0.0370, 0.2970, 1.5420, -0.9105])
4855
6501
  >>> output = ops.rsqrt(input)
4856
6502
  >>> print(output)
@@ -4999,7 +6645,41 @@ def scatter_nd(indices, updates, shape):
4999
6645
  [0. 1.1 0.]
5000
6646
  [0. 0. 0.]]
5001
6647
  """
5002
- return scatter_nd_op(indices, updates, shape)
6648
+ return scatter_nd_op(indices, updates, shape)
6649
+
6650
+
6651
+ def select_ext(input, dim, index):
6652
+ r"""
6653
+ Slices the input tensor along the selected dimension at the given index.
6654
+
6655
+ .. warning::
6656
+ This is an experimental API that is subject to change or deletion.
6657
+
6658
+ Args:
6659
+ input (Tensor): the input tensor.
6660
+ dim (int): the dimension to slice.
6661
+ index (int): the index to select with.
6662
+
6663
+ Returns:
6664
+ Tensor.
6665
+
6666
+ Raises:
6667
+ TypeError: If input is not a Tensor.
6668
+
6669
+ Supported Platforms:
6670
+ ``Ascend``
6671
+
6672
+ Examples:
6673
+ >>> import mindspore
6674
+ >>> from mindspore import Tensor, mint
6675
+ >>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
6676
+ >>> y = mint.select(input, 0, 0)
6677
+ >>> print(y)
6679
+ [2 3 4 5]
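+ >>> # Selecting along dim 1 instead (a sketch; equivalent to input[:, 1]):
+ >>> print(mint.select(input, 1, 1))
+ [3 2]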
6680
+
6681
+ """
6682
+ return select_ext_op(input, dim, index)
5003
6683
 
5004
6684
 
5005
6685
  def select(condition, input, other):
@@ -5021,12 +6701,12 @@ def select(condition, input, other):
5021
6701
  The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
5022
6702
  input (Union[Tensor, int, float]): The first Tensor to be selected.
5023
6703
If input is a Tensor, its shape should be or be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
5024
- If input is int or float, it will be casted to int32 or float32, and broadcast to the same shape as y.
5025
- There must be at least one Tensor between x and y.
6704
+ If input is int or float, it will be cast to int32 or float32, and broadcast to the same shape as other.
6705
+ There must be at least one Tensor between input and other.
5026
6706
  other (Union[Tensor, int, float]): The second Tensor to be selected.
5027
6707
If other is a Tensor, its shape should be or be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
5028
- If other is int or float, it will be casted to int32 or float32, and broadcast to the same shape as y.
5029
- There must be at least one Tensor between x and y.
6708
+ If other is int or float, it will be cast to int32 or float32, and broadcast to the same shape as input.
6709
+ There must be at least one Tensor between input and other.
5030
6710
 
5031
6711
  Returns:
5032
6712
  Tensor, has the same shape as `condition`.
@@ -5053,6 +6733,70 @@ def select(condition, input, other):
5053
6733
  return select_op(condition, input, other)
5054
6734
 
5055
6735
 
6736
+ def select_v2(condition, input, other):
6737
+ r"""
6738
+
6739
+ """
6740
+ return select_v2_op(condition, input, other)
6741
+
6742
+
6743
+ def selu_ext(input):
6744
+ r"""
6745
+ Activation function SELU (Scaled exponential Linear Unit).
6746
+
6747
+ The activation function is defined as:
6748
+
6749
+ .. math::
6750
+ E_{i} =
6751
+ scale *
6752
+ \begin{cases}
6753
+ x_{i}, &\text{if } x_{i} \geq 0; \cr
6754
+ \text{alpha} * (\exp(x_i) - 1), &\text{otherwise.}
6755
+ \end{cases}
6756
+
6757
+ where :math:`alpha` and :math:`scale` are pre-defined constants (:math:`alpha=1.67326324`
6758
+ and :math:`scale=1.05070098`).
6759
+
6760
+ See more details in `Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_.
6761
+
6762
+ SELU Activation Function Graph:
6763
+
6764
+ .. image:: ../images/SeLU.png
6765
+ :align: center
6766
+
6767
+ Args:
6768
+ input (Tensor): Tensor of any dimension.
6769
+ The data type is float16, float32, bfloat16.
6770
+
6771
+ Returns:
6772
+ Tensor, with the same type and shape as the `input`.
6773
+
6774
+ Raises:
6775
+ TypeError: If dtype of `input` is not float16, float32, bfloat16.
6776
+
6777
+ Supported Platforms:
6778
+ ``Ascend``
6779
+
6780
+ Examples:
6781
+ >>> import mindspore
6782
+ >>> from mindspore import Tensor, mint
6783
+ >>> import numpy as np
6784
+ >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
6785
+ >>> output = mint.nn.functional.selu(input)
6786
+ >>> print(output)
6787
+ [[-1.1113307 4.202804 -1.7575096]
6788
+ [ 2.101402 -1.7462534 9.456309 ]]
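+ >>> # Worked check: for x >= 0 the output is scale * x, e.g. 4.0 * 1.05070098 ≈ 4.202804.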
6789
+ """
6790
+ return selu_ext_op(input)
6791
+
6792
+
6793
+ def selu_grad(gradient, result):
6794
+ r"""
6795
+
6796
+ """
6797
+ return selu_grad_op(gradient, result)
6798
+
6799
+
5056
6800
  def sequence_concat(x, axis=0):
5057
6801
  r"""
5058
6802
  Support sequence Concat operation.
@@ -5079,6 +6823,14 @@ def sequence_concat(x, axis=0):
5079
6823
  return sequence_concat_op(x)
5080
6824
 
5081
6825
 
6826
+ def shard_identity(input):
6827
+ r"""
6828
+ An intermediate operator created only when using mindspore.shard or
6829
+ cell.shard during the parallel procedure. It will not be exposed to users.
6830
+ """
6831
+ return shard_identity_op(input)
6832
+
6833
+
5082
6834
  def sigmoid(input):
5083
6835
  r"""
5084
6836
  Computes Sigmoid of input element-wise. The Sigmoid function is defined as:
@@ -5122,7 +6874,7 @@ def sigmoid(input):
5122
6874
 
5123
6875
  def sign(input):
5124
6876
  r"""
5125
- Returns an element-wise indication of the sign of a number. Notice: When the input dtype is float64, the gradient of this operator is NaN.
6877
+ Returns an element-wise indication of the sign of a number. Notice: When the input is NaN and dtype is float64, the output of this operator is NaN.
5126
6878
 
5127
6879
  .. math::
5128
6880
  \text{out}_{i} = \begin{cases}
@@ -5285,12 +7037,20 @@ def sinh(input):
5285
7037
 
5286
7038
  Args:
5287
7039
  input (Tensor): The input tensor of hyperbolic sine function.
7040
+ Supported dtypes:
7041
+
7042
+ - GPU/CPU: float16, float32, float64, complex64 or complex128.
7043
+ - Ascend: bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
5288
7044
 
5289
7045
  Returns:
5290
- Tensor, has the same shape as `input`.
7046
+ Tensor, has the same shape as the `input`.
7047
+ The dtype of output is float32 when dtype of `input` is in
7048
+ [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as the `input`.
5291
7049
 
5292
- Raises:
5293
- TypeError: If `input` is not a Tensor.
7050
+ Raises:
7051
+ TypeError: If `input` is not a Tensor.
7052
+ TypeError: If dtype of `input` is not float16, float32, float64, complex64 or complex128 on CPU/GPU.
7053
+ TypeError: If dtype of `input` is not bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16 on Ascend.
5294
7054
 
5295
7055
  Supported Platforms:
5296
7056
  ``Ascend`` ``GPU`` ``CPU``
@@ -5349,6 +7109,88 @@ def softplus_ext(input, beta=1, threshold=20):
5349
7109
  return softplus_ext_op(input, beta, threshold)
5350
7110
 
5351
7111
 
7112
+ def softshrink_grad(input_grad, input_x, lambd=0.5):
7113
+ r"""
7114
+ Computes gradients for SoftShrinkGrad operation.
7115
+
7116
+ Args:
7117
+ input_grad (Tensor): the gradient of the loss with respect to the output of the SoftShrink function. Supported dtypes:
7118
+
7119
+ - Ascend: float16, float32, bfloat16.
7120
+ - CPU/GPU: float16, float32.
7121
+ input_x (Tensor): Must be the input `input` of the forward operator SoftShrink. Supported dtypes:
7122
+
7123
+ - Ascend: float16, float32, bfloat16.
7124
+ - CPU/GPU: float16, float32.
7125
+ lambd (float): the lambda value for the Softshrink formulation. Default: ``0.5`` .
7126
+
7127
+ Returns:
7128
+ backprops, a Tensor with the same shape and data type as `input_x`.
7129
+
7130
+ Raises:
7131
+ ValueError: If `lambd` is not a float.
7132
+ ValueError: If shape of `input_grad` is not the same as `input_x`.
7133
+ TypeError: If dtype of `input_grad` is not the same as `input_x`.
7134
+ TypeError: If dtype of `input_grad` or `input_x` is not float16, float32 or bfloat16.
7135
+
7136
+ Supported Platforms:
7137
+ ``Ascend`` ``GPU`` ``CPU``
7138
+ """
7139
+ return softshrink_grad_impl(input_grad, input_x, lambd)
7140
+
7141
+
7142
+ def softshrink(input, lambd=0.5):
7143
+ r"""
7144
+ Soft Shrink activation function. Calculates the output according to the input elements.
7145
+
7146
+ The formula is defined as follows:
7147
+
7148
+ .. math::
7149
+ \text{SoftShrink}(x) =
7150
+ \begin{cases}
7151
+ x - \lambda, & \text{ if } x > \lambda \\
7152
+ x + \lambda, & \text{ if } x < -\lambda \\
7153
+ 0, & \text{ otherwise }
7154
+ \end{cases}
7155
+
7156
+ SoftShrink Activation Function Graph:
7157
+
7158
+ .. image:: ../images/Softshrink.png
7159
+ :align: center
7160
+
7161
+ Args:
7162
+ input (Tensor): The input of Soft Shrink. Supported dtypes:
7163
+
7164
+ - Ascend: float16, float32, bfloat16.
7165
+ - CPU/GPU: float16, float32.
7166
+ lambd (number, optional): The threshold :math:`\lambda` defined by the Soft Shrink formula.
7167
+ It should be greater than or equal to 0, default: ``0.5`` .
7168
+
7169
+ Returns:
7170
+ Tensor, has the same data type and shape as the input `input`.
7171
+
7172
+ Raises:
7173
+ TypeError: If `lambd` is not a float, int or bool.
7174
+ TypeError: If `input` is not a tensor.
7175
+ TypeError: If dtype of `input` is not float16, float32 or bfloat16.
7176
+
7177
+ Supported Platforms:
7178
+ ``Ascend`` ``GPU`` ``CPU``
7179
+
7180
+ Examples:
7181
+ >>> import mindspore
7182
+ >>> from mindspore import Tensor
7183
+ >>> from mindspore import ops
7184
+ >>> import numpy as np
7185
+ >>> x = Tensor(np.array([[ 0.5297, 0.7871, 1.1754], [ 0.7836, 0.6218, -1.1542]]), mindspore.float32)
7186
+ >>> output = ops.softshrink(x)
7187
+ >>> print(output)
7188
+ [[ 0.02979 0.287 0.676 ]
7189
+ [ 0.2837 0.1216 -0.6543 ]]
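+ >>> # Worked check of the formula: 0.5297 > 0.5, so the first entry is 0.5297 - 0.5 ≈ 0.03.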
7190
+ """
7191
+ return softshrink_impl(input, lambd)
7192
+
7193
+
5352
7194
  def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False):
5353
7195
  r"""
5354
7196
  Solve the linear system :math:`a x = b` for `x`, Assuming `a` is a triangular matrix.
@@ -5694,12 +7536,12 @@ def sub_ext(input, other, alpha=1):
5694
7536
  Args:
5695
7537
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
5696
7538
  a bool or a tensor whose data type is
5697
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
5698
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
7539
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
7540
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
5699
7541
  other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
5700
7542
  a bool or a tensor whose data type is
5701
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
5702
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
7543
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
7544
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
5703
7545
  alpha (number.Number): A scaling factor applied to `other`, default 1.
5704
7546
 
5705
7547
  Returns:
@@ -5751,8 +7593,8 @@ def sub(input, other):
5751
7593
  Args:
5752
7594
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
5753
7595
  a bool or a tensor whose data type is
5754
- `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
5755
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
7596
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
7597
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
5756
7598
  other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
5757
7599
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
5758
7600
 
@@ -5779,6 +7621,83 @@ def sub(input, other):
5779
7621
  return sub_op(input, other)
5780
7622
 
5781
7623
 
7624
+ def swiglu_grad(grad_output, input, dim=-1):
7625
+ r"""
7626
+
7627
+ """
7628
+ return swiglu_grad_op(grad_output, input, dim)
7629
+
7630
+
7631
+ def swiglu(input, dim=-1):
7632
+ r"""
7633
+ Computes SwiGLU (Swish-Gated Linear Unit activation function) of input tensor.
7634
+ SwiGLU is a variant of the :class:`mindspore.ops.GLU` activation function; it is defined as:
7635
+
7636
+ .. math::
7637
+ {SwiGLU}(a, b)= Swish(a) \otimes b
7638
+
7639
+ where :math:`a` is the first half of the `input` matrices and :math:`b` is the second half,
7640
+ :math:`Swish(a) = a\sigma(a)`, :math:`\sigma` is the :func:`mindspore.ops.sigmoid` activation function
7641
+ and :math:`\otimes` is the Hadamard product.
7642
+
7643
+ Args:
7644
+ input (Tensor): Tensor to be split. It has shape :math:`(\ast_1, N, \ast_2)`
7645
+ where `*` means any number of additional dimensions. :math:`N` must be divisible by 2.
7646
+ dim (int, optional): the axis to split the input. It must be int. Default: ``-1`` , the last axis of `input`.
7647
+
7648
+ Returns:
7649
+ Tensor, the same dtype as the `input`, with the shape :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`.
7650
+
7651
+ Raises:
7652
+ TypeError: If dtype of `input` is not float16, float32 or bfloat16.
7653
+ TypeError: If `input` is not a Tensor.
7654
+ RuntimeError: If the size of `input` along the dimension specified by `dim` is not divisible by 2.
7655
+
7656
+ Supported Platforms:
7657
+ ``Ascend``
7658
+
7659
+ Examples:
7660
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
7661
+ >>> input = Tensor([[-0.12, 0.123, 31.122], [2.1223, 4.1212121217, 0.3123]], dtype=mindspore.float32)
7662
+ >>> output = ops.swiglu(input, 0)
7663
+ >>> print(output)
7664
+ [[-0.11970687 0.2690224 9.7194 ]]
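+ >>> # Equivalence sketch of the definition above: split along `dim`, then Swish(a) * b
+ >>> # (matches ops.swiglu(input, 0) up to float rounding):
+ >>> a, b = ops.split(input, 1, axis=0)
+ >>> print(a * ops.sigmoid(a) * b)
+ [[-0.11970687 0.2690224 9.7194 ]]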
7665
+ """
7666
+ return swiglu_op(input, dim)
7667
+
7668
+
7669
+ def tan(input):
7670
+ r"""
7671
+ Computes tangent of `input` element-wise.
7672
+
7673
+ .. math::
7674
+
7675
+ out_i = \tan(input_i)
7676
+
7677
+ Args:
7678
+ input (Tensor): The input Tensor, valid for any dimensions.
7679
+
7680
+ Returns:
7681
+ Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
7682
+
7683
+ Raises:
7684
+ TypeError: If `input` is not a Tensor.
7685
+
7686
+ Supported Platforms:
7687
+ ``Ascend`` ``GPU`` ``CPU``
7688
+
7689
+ Examples:
7690
+ >>> import mindspore
7691
+ >>> import numpy as np
7692
+ >>> from mindspore import Tensor, ops
7693
+ >>> input = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
7694
+ >>> output = ops.tan(input)
7695
+ >>> print(output)
7696
+ [-1.5574077 0. 1.5574077]
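+ >>> # Integer inputs are promoted to float32 (a sketch of the dtype rule above):
+ >>> print(ops.tan(Tensor([0, 1])).dtype)
+ Float32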
7697
+ """
7698
+ return tan_op(input)
7699
+
7700
+
5782
7701
  def tanh(input):
5783
7702
  r"""
5784
7703
  Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
@@ -5818,6 +7737,14 @@ def tanh(input):
5818
7737
  return tanh_op(input)
5819
7738
 
5820
7739
 
7740
+ def tensor_scatter_elements(data, indices, updates, axis=0, reduce='none'):
7741
+ r"""
7742
+
7743
+ """
7744
+ tensor_scatter_elements_op = _get_cache_prim(TensorScatterElements)(axis, reduce)
7745
+ return tensor_scatter_elements_op(data, indices, updates)
7746
+
7747
+
5821
7748
  def topk_ext(input, k, dim=-1, largest=True, sorted=True):
5822
7749
  r"""
5823
7750
  Finds values and indices of the `k` largest or smallest entries along a given dimension.
@@ -5891,7 +7818,7 @@ def topk_ext(input, k, dim=-1, largest=True, sorted=True):
5891
7818
  return topk_ext_op(input, k, dim, largest, sorted)
5892
7819
 
5893
7820
 
5894
- def topkrouter(input, capacity, expert_num):
7821
+ def topkrouter(input, capacity, expert_num, drop_type=0):
5895
7822
  r"""
5896
7823
  TopkRouter implementation in MOE.
5897
7824
 
@@ -5899,6 +7826,7 @@ def topkrouter(input, capacity, expert_num):
5899
7826
  - **x** (Tensor) - Input Tensor of 3D, Supporting types:[int32, int64]
5900
7827
  - **capacity** (Int64) - The maximum number of tokens each expert can handle
5901
7828
- **expert_num** (Int64) - The number of experts.
7829
+ - **drop_type** (Int64) - S-Drop/K-Drop, 0 means S-Drop, 1 means K-Drop, default 0.
5902
7830
 
5903
7831
  Outputs:
5904
7832
tuple(Tensor), tuple of 2 tensors, `dispatch_index` and `combine_index`.
@@ -5908,7 +7836,48 @@ def topkrouter(input, capacity, expert_num):
5908
7836
  Supported Platforms:
5909
7837
  ``Ascend``
5910
7838
  """
5911
- return topkrouter_op(input, capacity, expert_num)
7839
+ return topkrouter_op(input, capacity, expert_num, drop_type)
7840
+
7841
+
7842
+ def trace_ext(input):
7843
+ r"""
7844
+ Returns a new tensor that is the sum of the elements on the main diagonal of `input`.
7845
+
7846
+ Note:
7847
+ Input must be a tensor.
7848
+
7849
+ Args:
7850
+ input (Tensor): 2-D Tensor.
7851
+
7852
+ Returns:
7853
+ Tensor, with size 1. If the data type of `input` is integer or bool, the output data type is int64; otherwise it is the same as `input`.
7854
+
7855
+ Raises:
7856
+ TypeError: If `input` is not a Tensor.
7857
+ ValueError: If the dimension of `input` is not equal to 2.
7858
+ TypeError: If the dtype of `input` is not one of float16, float32, float64, bool, uint8, int8, int16, int32, int64, complex64, complex128, bfloat16.
7859
+
7860
+ Supported Platforms:
7861
+ ``Ascend``
7862
+
7863
+ Examples:
7864
+ >>> import mindspore
7865
+ >>> import numpy as np
7866
+ >>> from mindspore import Tensor, ops
7867
+ >>> input = Tensor(np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]]), mindspore.float32)
7868
+ >>> output = ops.trace_ext(input)
7869
+ >>> print(output)
7870
+ 42.0
7871
+ >>> input = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
7872
+ >>> output = ops.trace_ext(input)
7873
+ >>> print(output)
7874
+ 18.0
7875
+ >>> input = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
7876
+ >>> output = ops.trace_ext(input)
7877
+ >>> print(output)
7878
+ 24.0
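+ >>> # The same value via an explicit diagonal sum (a sketch of the definition):
+ >>> print(input.diagonal().sum())
+ 24.0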
7879
+ """
7880
+ return trace_ext_op(input)
5912
7881
 
5913
7882
 
5914
7883
  def trace(input):
@@ -5951,6 +7920,41 @@ def trace(input):
5951
7920
  return trace_op(input)
5952
7921
 
5953
7922
 
7923
+ def transpose_ext(input, dim0, dim1):
7924
+ r"""
7925
+ Interchange two axes of a tensor.
7926
+
7927
+ .. warning::
7928
+ This is an experimental API that is subject to change or deletion.
7929
+
7930
+ Args:
7931
+ input (Tensor): Input tensor.
7932
+ dim0 (int): First axis.
7933
+ dim1 (int): Second axis.
7934
+
7935
+ Returns:
7936
+ Transposed tensor, has the same data type as `input`.
7937
+
7938
+ Raises:
7939
+ TypeError: If argument `input` is not Tensor.
7940
+ TypeError: If `dim0` or `dim1` is not integer.
7941
+ ValueError: If `dim0` or `dim1` is not in the range of :math:`[-ndim, ndim-1]`.
7942
+
7943
+ Supported Platforms:
7944
+ ``Ascend``
7945
+
7946
+ Examples:
7947
+ >>> import numpy as np
7948
+ >>> from mindspore import mint
7949
+ >>> from mindspore import Tensor
7950
+ >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
7951
+ >>> output = mint.transpose(input, 0, 2)
7952
+ >>> print(output.shape)
7953
+ (4, 3, 2)
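+ >>> # Negative axes are also accepted (sketch, per the range noted in Raises):
+ >>> # mint.transpose(input, -3, -1) is equivalent to mint.transpose(input, 0, 2).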
7954
+ """
7955
+ return transpose_ext_op(input, dim0, dim1)
7956
+
7957
+
5954
7958
  def transpose(input, input_perm):
5955
7959
  r"""
5956
7960
  Permutes the dimensions of the input tensor according to input permutation.
@@ -6002,9 +8006,16 @@ def transpose(input, input_perm):
6002
8006
  return transpose_op(input, input_perm)
6003
8007
 
6004
8008
 
8009
+ def tril_ext(input, diagonal=0):
8010
+ r"""
8011
+
8012
+ """
8013
+ return tril_ext_impl(input, diagonal)
8014
+
8015
+
6005
8016
  def triu(input, diagonal=0):
6006
8017
  r"""
6007
- Returns the upper triangle part of 'input' (elements that contain the diagonal and below),
8018
+ Returns the upper triangle part of `input` (elements that contain the diagonal and below),
6008
8019
  and set the other elements to zeros.
6009
8020
 
6010
8021
  .. warning::
@@ -6016,7 +8027,7 @@ def triu(input, diagonal=0):
6016
8027
  indicating the main diagonal.
6017
8028
 
6018
8029
  Returns:
6019
- Tensor, a tensor has the same shape and data type as input.
8030
+ Tensor, a tensor has the same shape and data type as `input`.
6020
8031
 
6021
8032
  Raises:
6022
8033
  TypeError: If `diagonal` is not an int.
@@ -6063,6 +8074,34 @@ def triu(input, diagonal=0):
6063
8074
  return triu_impl(input, diagonal)
6064
8075
 
6065
8076
 
8077
+ def trunc(input):
8078
+ r"""
8079
+ Returns a new tensor with the truncated integer values of the elements of the input tensor.
8080
+
8081
+ Args:
8082
+ input (Tensor): The input tensor.
8083
+
8084
+ Returns:
8085
+ Tensor, the same shape and data type as the input.
8086
+
8087
+ Raises:
8088
+ TypeError: If `input` is not a Tensor.
8089
+
8090
+ Supported Platforms:
8091
+ ``Ascend`` ``GPU`` ``CPU``
8092
+
8093
+ Examples:
8094
+ >>> import mindspore
8095
+ >>> import numpy as np
8096
+ >>> from mindspore import Tensor, ops
8097
+ >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
8098
+ >>> output = ops.trunc(x)
8099
+ >>> print(output)
8100
+ [3. 0. 0. -3.]
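+ >>> # Fractional parts are dropped toward zero (sketch), unlike rounding:
+ >>> print(ops.trunc(Tensor([1.9, -1.9])))
+ [ 1. -1.]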
8101
+ """
8102
+ return trunc_op(input)
8103
+
8104
+
6066
8105
  def tuple_to_tensor(input_tuple, dtype=None):
6067
8106
  r"""
6068
8107
 
@@ -6429,11 +8468,11 @@ def moe_finalize_routing(expanded_x, x1, x2=None, bias=None, scales=None, expand
6429
8468
  return moe_finalize_routing_op(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
6430
8469
 
6431
8470
 
6432
- def quant_batch_matmul(x1, x2, scale, offset=None, bias=None, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
8471
+ def quant_batch_matmul(x1, x2, scale, offset=None, bias=None, pertokenScaleOptional=None, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
6433
8472
  r"""
6434
8473
 
6435
8474
  """
6436
- return quant_batch_matmul_impl(x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype)
8475
+ return quant_batch_matmul_impl(x1, x2, scale, offset, bias, pertokenScaleOptional, transpose_x1, transpose_x2, dtype)
6437
8476
 
6438
8477
 
6439
8478
  def weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None, transpose_x=False, transpose_weight=False, antiquant_group_size=0):