mindspore 2.4.10__cp310-cp310-win_amd64.whl → 2.6.0rc1__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic. Click here for more details.

Files changed (602)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +13 -6
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -38
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +6 -7
  15. mindspore/_extends/parse/compile_config.py +83 -0
  16. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  17. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  18. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  19. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  20. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  21. mindspore/_extends/parse/parser.py +46 -197
  22. mindspore/_extends/parse/resources.py +1 -5
  23. mindspore/_extends/parse/standard_method.py +217 -98
  24. mindspore/_extends/pijit/__init__.py +2 -2
  25. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  26. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  27. mindspore/_extends/utils.py +1 -1
  28. mindspore/amp.py +11 -5
  29. mindspore/atlprov.dll +0 -0
  30. mindspore/avcodec-59.dll +0 -0
  31. mindspore/avdevice-59.dll +0 -0
  32. mindspore/avfilter-8.dll +0 -0
  33. mindspore/avformat-59.dll +0 -0
  34. mindspore/avutil-57.dll +0 -0
  35. mindspore/boost/__init__.py +2 -2
  36. mindspore/boost/base.py +3 -7
  37. mindspore/boost/boost_cell_wrapper.py +138 -43
  38. mindspore/c1.dll +0 -0
  39. mindspore/c1xx.dll +0 -0
  40. mindspore/c2.dll +0 -0
  41. mindspore/common/__init__.py +6 -3
  42. mindspore/common/_grad_function.py +56 -0
  43. mindspore/common/_pijit_context.py +14 -5
  44. mindspore/common/_register_for_tensor.py +1 -2
  45. mindspore/common/_stub_tensor.py +30 -14
  46. mindspore/common/_tensor_cpp_method.py +17 -0
  47. mindspore/common/_tensor_docs.py +4760 -0
  48. mindspore/common/api.py +435 -371
  49. mindspore/common/auto_dynamic_shape.py +41 -44
  50. mindspore/common/dtype.py +39 -36
  51. mindspore/common/dump.py +9 -6
  52. mindspore/common/file_system.py +9 -1
  53. mindspore/common/generator.py +2 -0
  54. mindspore/common/hook_handle.py +6 -2
  55. mindspore/common/initializer.py +13 -10
  56. mindspore/common/jit_begin_end.py +94 -0
  57. mindspore/common/jit_config.py +6 -1
  58. mindspore/common/jit_context.py +76 -0
  59. mindspore/common/jit_trace.py +378 -0
  60. mindspore/common/lazy_inline.py +9 -3
  61. mindspore/common/mindir_util.py +10 -2
  62. mindspore/common/mutable.py +5 -4
  63. mindspore/common/parameter.py +135 -52
  64. mindspore/common/seed.py +2 -2
  65. mindspore/common/sparse_tensor.py +23 -17
  66. mindspore/common/tensor.py +951 -1992
  67. mindspore/communication/__init__.py +7 -5
  68. mindspore/communication/_comm_helper.py +52 -2
  69. mindspore/communication/comm_func.py +240 -181
  70. mindspore/communication/management.py +95 -26
  71. mindspore/context.py +314 -566
  72. mindspore/dataset/__init__.py +65 -37
  73. mindspore/dataset/audio/__init__.py +2 -8
  74. mindspore/dataset/audio/transforms.py +3 -17
  75. mindspore/dataset/callback/ds_callback.py +2 -1
  76. mindspore/dataset/core/config.py +87 -6
  77. mindspore/dataset/engine/cache_admin.py +3 -3
  78. mindspore/dataset/engine/cache_client.py +6 -5
  79. mindspore/dataset/engine/datasets.py +292 -267
  80. mindspore/dataset/engine/datasets_audio.py +22 -8
  81. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  82. mindspore/dataset/engine/datasets_text.py +78 -48
  83. mindspore/dataset/engine/datasets_user_defined.py +182 -116
  84. mindspore/dataset/engine/datasets_vision.py +120 -44
  85. mindspore/dataset/engine/iterators.py +283 -63
  86. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  87. mindspore/dataset/engine/obs/util.py +8 -0
  88. mindspore/dataset/engine/queue.py +40 -0
  89. mindspore/dataset/engine/samplers.py +289 -43
  90. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  91. mindspore/dataset/engine/validators.py +53 -11
  92. mindspore/dataset/text/__init__.py +7 -6
  93. mindspore/dataset/text/transforms.py +6 -5
  94. mindspore/dataset/text/utils.py +3 -3
  95. mindspore/dataset/transforms/__init__.py +0 -9
  96. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  97. mindspore/dataset/transforms/transforms.py +31 -14
  98. mindspore/dataset/utils/browse_dataset.py +1 -1
  99. mindspore/dataset/vision/__init__.py +2 -9
  100. mindspore/dataset/vision/transforms.py +202 -158
  101. mindspore/dataset/vision/utils.py +7 -5
  102. mindspore/dataset/vision/validators.py +1 -2
  103. mindspore/device_context/__init__.py +21 -0
  104. mindspore/device_context/ascend/__init__.py +25 -0
  105. mindspore/device_context/ascend/device.py +72 -0
  106. mindspore/device_context/ascend/op_debug.py +153 -0
  107. mindspore/device_context/ascend/op_precision.py +193 -0
  108. mindspore/device_context/ascend/op_tuning.py +123 -0
  109. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  110. mindspore/device_context/cpu/device.py +62 -0
  111. mindspore/device_context/cpu/op_tuning.py +43 -0
  112. mindspore/device_context/gpu/__init__.py +21 -0
  113. mindspore/device_context/gpu/device.py +70 -0
  114. mindspore/device_context/gpu/op_precision.py +67 -0
  115. mindspore/device_context/gpu/op_tuning.py +175 -0
  116. mindspore/device_manager.py +170 -0
  117. mindspore/dnnl.dll +0 -0
  118. mindspore/dpcmi.dll +0 -0
  119. mindspore/experimental/es/embedding_service.py +35 -27
  120. mindspore/experimental/llm_boost/__init__.py +1 -0
  121. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  122. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  123. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  124. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  125. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  126. mindspore/experimental/llm_boost/register.py +1 -0
  127. mindspore/experimental/map_parameter.py +4 -4
  128. mindspore/experimental/optim/adadelta.py +6 -6
  129. mindspore/experimental/optim/adagrad.py +4 -4
  130. mindspore/experimental/optim/adam.py +7 -0
  131. mindspore/experimental/optim/adamax.py +4 -4
  132. mindspore/experimental/optim/adamw.py +4 -0
  133. mindspore/experimental/optim/asgd.py +1 -1
  134. mindspore/experimental/optim/lr_scheduler.py +73 -46
  135. mindspore/experimental/optim/radam.py +34 -31
  136. mindspore/experimental/optim/rprop.py +1 -1
  137. mindspore/experimental/optim/sgd.py +1 -1
  138. mindspore/hal/contiguous_tensors_handle.py +6 -10
  139. mindspore/hal/device.py +55 -53
  140. mindspore/hal/event.py +52 -52
  141. mindspore/hal/memory.py +157 -117
  142. mindspore/hal/stream.py +150 -109
  143. mindspore/include/api/context.h +0 -1
  144. mindspore/include/dataset/constants.h +7 -4
  145. mindspore/include/dataset/execute.h +2 -2
  146. mindspore/jpeg62.dll +0 -0
  147. mindspore/log.py +50 -0
  148. mindspore/mindrecord/__init__.py +21 -8
  149. mindspore/mindrecord/config.py +17 -316
  150. mindspore/mindrecord/filereader.py +1 -9
  151. mindspore/mindrecord/filewriter.py +5 -15
  152. mindspore/mindrecord/mindpage.py +1 -9
  153. mindspore/mindspore_backend_common.dll +0 -0
  154. mindspore/mindspore_backend_manager.dll +0 -0
  155. mindspore/mindspore_common.dll +0 -0
  156. mindspore/mindspore_core.dll +0 -0
  157. mindspore/mindspore_dump.dll +0 -0
  158. mindspore/mindspore_frontend.dll +0 -0
  159. mindspore/mindspore_glog.dll +0 -0
  160. mindspore/mindspore_memory_pool.dll +0 -0
  161. mindspore/mindspore_ms_backend.dll +0 -0
  162. mindspore/mindspore_ops.dll +0 -0
  163. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  164. mindspore/mindspore_ops_kernel_common.dll +0 -0
  165. mindspore/mindspore_profiler.dll +0 -0
  166. mindspore/mindspore_pyboost.dll +0 -0
  167. mindspore/mindspore_pynative.dll +0 -0
  168. mindspore/mindspore_res_manager.dll +0 -0
  169. mindspore/mindspore_runtime_pipeline.dll +0 -0
  170. mindspore/mint/__init__.py +796 -759
  171. mindspore/mint/distributed/__init__.py +70 -4
  172. mindspore/mint/distributed/distributed.py +2679 -44
  173. mindspore/mint/linalg/__init__.py +8 -0
  174. mindspore/mint/nn/__init__.py +743 -22
  175. mindspore/mint/nn/functional.py +716 -23
  176. mindspore/mint/nn/layer/__init__.py +21 -4
  177. mindspore/mint/nn/layer/_functions.py +334 -0
  178. mindspore/mint/nn/layer/activation.py +276 -1
  179. mindspore/mint/nn/layer/basic.py +123 -0
  180. mindspore/mint/nn/layer/conv.py +921 -0
  181. mindspore/mint/nn/layer/normalization.py +223 -28
  182. mindspore/mint/nn/layer/padding.py +797 -0
  183. mindspore/mint/nn/layer/pooling.py +235 -0
  184. mindspore/mint/optim/__init__.py +3 -1
  185. mindspore/mint/optim/adam.py +223 -0
  186. mindspore/mint/optim/adamw.py +26 -19
  187. mindspore/mint/optim/sgd.py +171 -0
  188. mindspore/mint/special/__init__.py +2 -1
  189. mindspore/msobj140.dll +0 -0
  190. mindspore/mspdb140.dll +0 -0
  191. mindspore/mspdbcore.dll +0 -0
  192. mindspore/mspdbst.dll +0 -0
  193. mindspore/mspft140.dll +0 -0
  194. mindspore/msvcdis140.dll +0 -0
  195. mindspore/msvcp140_1.dll +0 -0
  196. mindspore/msvcp140_2.dll +0 -0
  197. mindspore/msvcp140_atomic_wait.dll +0 -0
  198. mindspore/msvcp140_codecvt_ids.dll +0 -0
  199. mindspore/multiprocessing/__init__.py +5 -0
  200. mindspore/nn/__init__.py +4 -1
  201. mindspore/nn/cell.py +1370 -189
  202. mindspore/nn/dynamic_lr.py +2 -1
  203. mindspore/nn/layer/activation.py +29 -27
  204. mindspore/nn/layer/basic.py +51 -35
  205. mindspore/nn/layer/channel_shuffle.py +3 -3
  206. mindspore/nn/layer/container.py +1 -1
  207. mindspore/nn/layer/conv.py +22 -17
  208. mindspore/nn/layer/embedding.py +12 -11
  209. mindspore/nn/layer/normalization.py +56 -49
  210. mindspore/nn/layer/padding.py +4 -3
  211. mindspore/nn/layer/pooling.py +120 -42
  212. mindspore/nn/layer/rnn_cells.py +1 -1
  213. mindspore/nn/layer/rnns.py +2 -1
  214. mindspore/nn/layer/timedistributed.py +5 -5
  215. mindspore/nn/layer/transformer.py +59 -36
  216. mindspore/nn/learning_rate_schedule.py +8 -4
  217. mindspore/nn/loss/loss.py +58 -55
  218. mindspore/nn/optim/ada_grad.py +7 -5
  219. mindspore/nn/optim/adadelta.py +11 -9
  220. mindspore/nn/optim/adafactor.py +1 -1
  221. mindspore/nn/optim/adam.py +17 -13
  222. mindspore/nn/optim/adamax.py +8 -7
  223. mindspore/nn/optim/adasum.py +5 -5
  224. mindspore/nn/optim/asgd.py +1 -1
  225. mindspore/nn/optim/ftrl.py +11 -9
  226. mindspore/nn/optim/lamb.py +1 -1
  227. mindspore/nn/optim/lars.py +1 -4
  228. mindspore/nn/optim/lazyadam.py +12 -10
  229. mindspore/nn/optim/momentum.py +7 -6
  230. mindspore/nn/optim/optimizer.py +3 -3
  231. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  232. mindspore/nn/optim/rmsprop.py +13 -12
  233. mindspore/nn/optim/rprop.py +11 -9
  234. mindspore/nn/optim/sgd.py +9 -6
  235. mindspore/nn/optim/tft_wrapper.py +5 -2
  236. mindspore/nn/optim/thor.py +2 -1
  237. mindspore/nn/probability/bijector/bijector.py +17 -11
  238. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  239. mindspore/nn/probability/bijector/invert.py +2 -2
  240. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  241. mindspore/nn/probability/bijector/softplus.py +3 -2
  242. mindspore/nn/probability/distribution/beta.py +3 -3
  243. mindspore/nn/probability/distribution/categorical.py +1 -1
  244. mindspore/nn/probability/distribution/cauchy.py +4 -2
  245. mindspore/nn/probability/distribution/exponential.py +6 -7
  246. mindspore/nn/probability/distribution/gamma.py +2 -2
  247. mindspore/nn/probability/distribution/gumbel.py +2 -2
  248. mindspore/nn/probability/distribution/half_normal.py +5 -3
  249. mindspore/nn/probability/distribution/logistic.py +5 -3
  250. mindspore/nn/probability/distribution/poisson.py +1 -1
  251. mindspore/nn/probability/distribution/uniform.py +5 -3
  252. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  253. mindspore/nn/reinforcement/tensor_array.py +1 -1
  254. mindspore/nn/utils/init.py +13 -11
  255. mindspore/nn/wrap/__init__.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +181 -122
  257. mindspore/nn/wrap/grad_reducer.py +45 -36
  258. mindspore/nn/wrap/loss_scale.py +6 -7
  259. mindspore/numpy/array_creations.py +63 -65
  260. mindspore/numpy/array_ops.py +149 -144
  261. mindspore/numpy/logic_ops.py +41 -42
  262. mindspore/numpy/math_ops.py +365 -363
  263. mindspore/numpy/utils.py +17 -18
  264. mindspore/numpy/utils_const.py +5 -6
  265. mindspore/opencv_core452.dll +0 -0
  266. mindspore/opencv_imgcodecs452.dll +0 -0
  267. mindspore/opencv_imgproc452.dll +0 -0
  268. mindspore/ops/__init__.py +5 -3
  269. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  270. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  271. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  272. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  273. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  274. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  275. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  276. mindspore/ops/_register_for_op.py +0 -11
  277. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  278. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  279. mindspore/ops/_vmap/vmap_array_ops.py +27 -25
  280. mindspore/ops/_vmap/vmap_base.py +0 -2
  281. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  282. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  283. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  284. mindspore/ops/auto_generate/__init__.py +4 -3
  285. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +236 -46
  286. mindspore/ops/auto_generate/gen_extend_func.py +764 -124
  287. mindspore/ops/auto_generate/gen_ops_def.py +4018 -2264
  288. mindspore/ops/auto_generate/gen_ops_prim.py +15463 -5037
  289. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  290. mindspore/ops/composite/__init__.py +2 -1
  291. mindspore/ops/composite/base.py +20 -25
  292. mindspore/ops/composite/math_ops.py +6 -16
  293. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  294. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  295. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  296. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  299. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  301. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  302. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  303. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  304. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  305. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  306. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  307. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  308. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  309. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  310. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  311. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  312. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  313. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  314. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  315. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  316. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  317. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  318. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  319. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  320. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  321. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  324. mindspore/ops/function/__init__.py +40 -2
  325. mindspore/ops/function/_add_attr_func.py +58 -0
  326. mindspore/ops/function/array_func.py +2089 -2403
  327. mindspore/ops/function/clip_func.py +80 -23
  328. mindspore/ops/function/debug_func.py +57 -57
  329. mindspore/ops/function/grad/__init__.py +1 -0
  330. mindspore/ops/function/grad/grad_func.py +104 -71
  331. mindspore/ops/function/image_func.py +2 -2
  332. mindspore/ops/function/linalg_func.py +47 -78
  333. mindspore/ops/function/math_func.py +4501 -3802
  334. mindspore/ops/function/nn_func.py +1726 -620
  335. mindspore/ops/function/other_func.py +159 -1
  336. mindspore/ops/function/parameter_func.py +18 -84
  337. mindspore/ops/function/random_func.py +440 -387
  338. mindspore/ops/function/reshard_func.py +4 -70
  339. mindspore/ops/function/sparse_func.py +3 -3
  340. mindspore/ops/function/sparse_unary_func.py +6 -6
  341. mindspore/ops/function/spectral_func.py +25 -58
  342. mindspore/ops/function/vmap_func.py +24 -17
  343. mindspore/ops/functional.py +22 -7
  344. mindspore/ops/functional_overload.py +1440 -0
  345. mindspore/ops/op_info_register.py +32 -244
  346. mindspore/ops/operations/__init__.py +13 -7
  347. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  348. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  349. mindspore/ops/operations/_grad_ops.py +2 -43
  350. mindspore/ops/operations/_infer_ops.py +2 -1
  351. mindspore/ops/operations/_inner_ops.py +43 -84
  352. mindspore/ops/operations/_ms_kernel.py +4 -10
  353. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  354. mindspore/ops/operations/_scalar_ops.py +3 -2
  355. mindspore/ops/operations/_sequence_ops.py +1 -1
  356. mindspore/ops/operations/_tensor_array.py +1 -1
  357. mindspore/ops/operations/array_ops.py +81 -324
  358. mindspore/ops/operations/comm_ops.py +154 -108
  359. mindspore/ops/operations/custom_ops.py +232 -78
  360. mindspore/ops/operations/debug_ops.py +153 -59
  361. mindspore/ops/operations/inner_ops.py +7 -5
  362. mindspore/ops/operations/linalg_ops.py +1 -57
  363. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  364. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  365. mindspore/ops/operations/math_ops.py +32 -234
  366. mindspore/ops/operations/nn_ops.py +210 -498
  367. mindspore/ops/operations/other_ops.py +62 -9
  368. mindspore/ops/operations/random_ops.py +13 -7
  369. mindspore/ops/operations/reshard_ops.py +1 -1
  370. mindspore/ops/operations/sparse_ops.py +2 -2
  371. mindspore/ops/primitive.py +66 -53
  372. mindspore/ops/tensor_method.py +1888 -0
  373. mindspore/ops_generate/__init__.py +0 -5
  374. mindspore/ops_generate/aclnn/__init__.py +0 -0
  375. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  376. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  377. mindspore/ops_generate/api/__init__.py +0 -0
  378. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  379. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  380. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  381. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  382. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  383. mindspore/ops_generate/api/gen_api.py +103 -0
  384. mindspore/ops_generate/api/op_api_proto.py +235 -0
  385. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  386. mindspore/ops_generate/common/__init__.py +0 -0
  387. mindspore/ops_generate/common/base_generator.py +11 -0
  388. mindspore/ops_generate/common/gen_constants.py +91 -0
  389. mindspore/ops_generate/common/gen_utils.py +348 -0
  390. mindspore/ops_generate/common/op_proto.py +473 -0
  391. mindspore/ops_generate/common/template.py +523 -0
  392. mindspore/ops_generate/gen_ops.py +22 -1069
  393. mindspore/ops_generate/op_def/__init__.py +0 -0
  394. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  395. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  396. mindspore/ops_generate/op_def/ops_def_cc_generator.py +299 -0
  397. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  398. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  399. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  400. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  401. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  402. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  403. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  404. mindspore/ops_generate/pyboost/__init__.py +0 -0
  405. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  406. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  407. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  408. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  409. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  410. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  411. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  412. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  413. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  414. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  415. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  416. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  417. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  418. mindspore/ops_generate/resources/__init__.py +0 -0
  419. mindspore/ops_generate/resources/resource_list.py +30 -0
  420. mindspore/ops_generate/resources/resource_loader.py +36 -0
  421. mindspore/ops_generate/resources/resource_manager.py +64 -0
  422. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  423. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  424. mindspore/parallel/__init__.py +7 -3
  425. mindspore/parallel/_auto_parallel_context.py +152 -34
  426. mindspore/parallel/_cell_wrapper.py +130 -15
  427. mindspore/parallel/_parallel_serialization.py +107 -5
  428. mindspore/parallel/_ps_context.py +1 -1
  429. mindspore/parallel/_recovery_context.py +7 -2
  430. mindspore/parallel/_tensor.py +142 -18
  431. mindspore/parallel/_utils.py +199 -23
  432. mindspore/parallel/algo_parameter_config.py +4 -4
  433. mindspore/parallel/auto_parallel.py +732 -0
  434. mindspore/parallel/checkpoint_convert.py +159 -0
  435. mindspore/parallel/checkpoint_transform.py +698 -35
  436. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  437. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  438. mindspore/parallel/cluster/run.py +21 -4
  439. mindspore/parallel/function/__init__.py +24 -0
  440. mindspore/parallel/function/reshard_func.py +259 -0
  441. mindspore/parallel/nn/__init__.py +25 -0
  442. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  443. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  444. mindspore/parallel/parameter_broadcast.py +25 -14
  445. mindspore/parallel/shard.py +137 -58
  446. mindspore/parallel/transform_safetensors.py +363 -305
  447. mindspore/pgodb140.dll +0 -0
  448. mindspore/pgort140.dll +0 -0
  449. mindspore/profiler/__init__.py +22 -5
  450. mindspore/profiler/analysis/__init__.py +0 -0
  451. mindspore/profiler/analysis/parser/__init__.py +0 -0
  452. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  453. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  454. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  455. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  456. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  457. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  458. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  459. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  460. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +106 -0
  461. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  462. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  463. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  464. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  465. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  466. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  467. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  468. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  469. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  470. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  471. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  472. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  473. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  474. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  475. mindspore/profiler/analysis/task_manager.py +131 -0
  476. mindspore/profiler/analysis/time_converter.py +84 -0
  477. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  478. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  479. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  480. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  481. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  482. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  483. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  484. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  485. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  486. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  487. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  488. mindspore/profiler/analysis/work_flow.py +73 -0
  489. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  490. mindspore/profiler/common/command_executor.py +90 -0
  491. mindspore/profiler/common/constant.py +186 -3
  492. mindspore/profiler/common/file_manager.py +208 -0
  493. mindspore/profiler/common/log.py +130 -0
  494. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  495. mindspore/profiler/common/path_manager.py +395 -0
  496. mindspore/profiler/common/process_bar.py +168 -0
  497. mindspore/profiler/common/process_pool.py +9 -3
  498. mindspore/profiler/common/profiler_context.py +500 -0
  499. mindspore/profiler/common/profiler_info.py +304 -0
  500. mindspore/profiler/common/profiler_meta_data.py +74 -0
  501. mindspore/profiler/common/profiler_output_path.py +284 -0
  502. mindspore/profiler/common/profiler_parameters.py +251 -0
  503. mindspore/profiler/common/profiler_path_manager.py +179 -0
  504. mindspore/profiler/common/record_function.py +76 -0
  505. mindspore/profiler/common/tlv_decoder.py +76 -0
  506. mindspore/profiler/common/util.py +75 -2
  507. mindspore/profiler/dynamic_profiler.py +341 -75
  508. mindspore/profiler/envprofiler.py +163 -0
  509. mindspore/profiler/experimental_config.py +197 -0
  510. mindspore/profiler/mstx.py +242 -0
  511. mindspore/profiler/platform/__init__.py +21 -0
  512. mindspore/profiler/platform/base_profiler.py +40 -0
  513. mindspore/profiler/platform/cpu_profiler.py +124 -0
  514. mindspore/profiler/platform/gpu_profiler.py +74 -0
  515. mindspore/profiler/platform/npu_profiler.py +335 -0
  516. mindspore/profiler/profiler.py +1073 -90
  517. mindspore/profiler/profiler_action_controller.py +187 -0
  518. mindspore/profiler/profiler_interface.py +118 -0
  519. mindspore/profiler/schedule.py +243 -0
  520. mindspore/rewrite/api/node.py +15 -13
  521. mindspore/rewrite/api/symbol_tree.py +2 -3
  522. mindspore/run_check/_check_version.py +27 -20
  523. mindspore/run_check/run_check.py +1 -1
  524. mindspore/runtime/__init__.py +37 -0
  525. mindspore/runtime/device.py +27 -0
  526. mindspore/runtime/event.py +209 -0
  527. mindspore/runtime/executor.py +177 -0
  528. mindspore/runtime/memory.py +409 -0
  529. mindspore/runtime/stream.py +460 -0
  530. mindspore/runtime/thread_bind_core.py +401 -0
  531. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  532. mindspore/swresample-4.dll +0 -0
  533. mindspore/swscale-6.dll +0 -0
  534. mindspore/tbbmalloc.dll +0 -0
  535. mindspore/tinyxml2.dll +0 -0
  536. mindspore/train/__init__.py +8 -8
  537. mindspore/train/_utils.py +88 -25
  538. mindspore/train/amp.py +9 -5
  539. mindspore/train/callback/__init__.py +2 -2
  540. mindspore/train/callback/_callback.py +2 -16
  541. mindspore/train/callback/_checkpoint.py +53 -55
  542. mindspore/train/callback/_cluster_monitor.py +14 -18
  543. mindspore/train/callback/_early_stop.py +1 -1
  544. mindspore/train/callback/_flops_collector.py +103 -68
  545. mindspore/train/callback/_history.py +8 -5
  546. mindspore/train/callback/_lambda_callback.py +2 -2
  547. mindspore/train/callback/_landscape.py +0 -3
  548. mindspore/train/callback/_loss_monitor.py +2 -1
  549. mindspore/train/callback/_on_request_exit.py +6 -5
  550. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  551. mindspore/train/callback/_summary_collector.py +52 -19
  552. mindspore/train/callback/_time_monitor.py +2 -1
  553. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -107
  554. mindspore/train/data_sink.py +25 -2
  555. mindspore/train/dataset_helper.py +15 -16
  556. mindspore/train/loss_scale_manager.py +8 -7
  557. mindspore/train/metrics/accuracy.py +3 -3
  558. mindspore/train/metrics/confusion_matrix.py +9 -9
  559. mindspore/train/metrics/error.py +3 -3
  560. mindspore/train/metrics/hausdorff_distance.py +4 -4
  561. mindspore/train/metrics/mean_surface_distance.py +3 -3
  562. mindspore/train/metrics/metric.py +0 -12
  563. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  564. mindspore/train/metrics/precision.py +11 -10
  565. mindspore/train/metrics/recall.py +9 -9
  566. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  567. mindspore/train/mind_ir_pb2.py +174 -46
  568. mindspore/train/model.py +184 -113
  569. mindspore/train/serialization.py +622 -978
  570. mindspore/train/summary/_summary_adapter.py +2 -2
  571. mindspore/train/summary/summary_record.py +2 -3
  572. mindspore/train/train_thor/model_thor.py +1 -1
  573. mindspore/turbojpeg.dll +0 -0
  574. mindspore/utils/__init__.py +6 -3
  575. mindspore/utils/dryrun.py +140 -0
  576. mindspore/utils/hooks.py +81 -0
  577. mindspore/utils/runtime_execution_order_check.py +550 -0
  578. mindspore/utils/utils.py +138 -4
  579. mindspore/vcmeta.dll +0 -0
  580. mindspore/vcruntime140.dll +0 -0
  581. mindspore/vcruntime140_1.dll +0 -0
  582. mindspore/version.py +1 -1
  583. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +3 -3
  584. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +587 -418
  585. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +1 -1
  586. mindspore/_install_custom.py +0 -43
  587. mindspore/common/_register_for_adapter.py +0 -74
  588. mindspore/common/_tensor_overload.py +0 -139
  589. mindspore/mindspore_np_dtype.dll +0 -0
  590. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  591. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  592. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  593. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  594. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  595. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  596. mindspore/ops_generate/gen_utils.py +0 -209
  597. mindspore/ops_generate/op_proto.py +0 -145
  598. mindspore/ops_generate/template.py +0 -261
  599. mindspore/profiler/envprofiling.py +0 -254
  600. mindspore/profiler/profiling.py +0 -1926
  601. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  602. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
@@ -17,17 +17,20 @@ from mindspore.common import dtype as mstype
17
17
  from mindspore.ops.auto_generate.pyboost_inner_prim import *
18
18
 
19
19
 
20
- def acos(input):
20
+ def acosh(input):
21
21
  r"""
22
- Computes arccosine of input tensors element-wise.
22
+ Computes inverse hyperbolic cosine of the inputs element-wise.
23
23
 
24
24
  .. math::
25
25
 
26
- out_i = \cos^{-1}(input_i)
26
+ out_i = \cosh^{-1}(input_i)
27
+
28
+ .. note::
29
+ Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
30
+ Input range is [1, inf].
27
31
 
28
32
  Args:
29
- input (Tensor): The shape of tensor is
30
- :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
33
+ input (Tensor): The input tensor of inverse hyperbolic cosine function.
31
34
 
32
35
  Returns:
33
36
  Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
@@ -42,28 +45,25 @@ def acos(input):
42
45
  >>> import mindspore
43
46
  >>> import numpy as np
44
47
  >>> from mindspore import Tensor, ops
45
- >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
46
- >>> output = ops.acos_ext(input)
48
+ >>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
49
+ >>> output = ops.acosh_ext(input)
47
50
  >>> print(output)
48
- [0.7377037 1.5307857 1.2661037 0.9764114]
51
+ [0. 0.9624236 1.7627472 5.298292 ]
49
52
  """
50
- return acos_impl(input)
53
+ return acosh_impl(input)
51
54
 
52
55
 
53
- def acosh(input):
56
+ def acos(input):
54
57
  r"""
55
- Computes inverse hyperbolic cosine of the inputs element-wise.
58
+ Computes arccosine of input tensors element-wise.
56
59
 
57
60
  .. math::
58
61
 
59
- out_i = \cosh^{-1}(input_i)
60
-
61
- .. note::
62
- Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
63
- Input range is [1, inf].
62
+ out_i = \cos^{-1}(input_i)
64
63
 
65
64
  Args:
66
- input (Tensor): The input tensor of inverse hyperbolic cosine function.
65
+ input (Tensor): The shape of tensor is
66
+ :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
67
67
 
68
68
  Returns:
69
69
  Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
@@ -78,12 +78,12 @@ def acosh(input):
78
78
  >>> import mindspore
79
79
  >>> import numpy as np
80
80
  >>> from mindspore import Tensor, ops
81
- >>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
82
- >>> output = ops.acosh_ext(input)
81
+ >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
82
+ >>> output = ops.acos_ext(input)
83
83
  >>> print(output)
84
- [0. 0.9624236 1.7627472 5.298292 ]
84
+ [0.7377037 1.5307857 1.2661037 0.9764114]
85
85
  """
86
- return acosh_impl(input)
86
+ return acos_impl(input)
87
87
 
88
88
 
89
89
  def adaptive_avg_pool2d_grad(grad_output, x):
@@ -93,6 +93,13 @@ def adaptive_avg_pool2d_grad(grad_output, x):
93
93
  return adaptive_avg_pool2d_grad_impl(grad_output, x)
94
94
 
95
95
 
96
+ def adaptive_avg_pool3d(input, output_size):
97
+ r"""
98
+ None
99
+ """
100
+ return adaptive_avg_pool3d_impl(input, output_size)
101
+
102
+
96
103
  def add(input, other, alpha=1):
97
104
  r"""
98
105
  Adds scaled other value to input Tensor.
@@ -151,14 +158,38 @@ def add(input, other, alpha=1):
151
158
 
152
159
  def argmax(input, dim=None, keepdim=False):
153
160
  r"""
161
+ argmax(input) -> Tensor
162
+
163
+ Return the indices of the maximum values of a tensor.
164
+
165
+ Args:
166
+ input (Tensor): Input tensor.
167
+
168
+ Returns:
169
+ Tensor.
170
+
171
+ Supported Platforms:
172
+ ``Ascend``
173
+
174
+ Examples:
175
+ >>> import numpy as np
176
+ >>> from mindspore import Tensor
177
+ >>> from mindspore import ops
178
+ >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
179
+ >>> output = ops.auto_generate.argmax_ext(x)
180
+ >>> print(output)
181
+ 6
182
+
183
+ .. function:: argmax(input, dim, keepdim=False) -> Tensor
184
+ :noindex:
185
+
154
186
  Return the indices of the maximum values of a tensor across a dimension.
155
187
 
156
188
  Args:
157
189
  input (Tensor): Input tensor.
158
- dim (Union[int, None], optional): The dimension to reduce. If `dim` is ``None`` , the indices of the maximum
159
- value within the flattened input will be returned. Default: ``None`` .
190
+ dim (int): The dimension to reduce.
160
191
  keepdim (bool, optional): Whether the output tensor retains the specified
161
- dimension. Ignored if `dim` is None. Default: ``False`` .
192
+ dimension. Default: ``False`` .
162
193
 
163
194
  Returns:
164
195
  Tensor, indices of the maximum values across a dimension.
@@ -215,37 +246,46 @@ def argmin(input, dim=None, keepdim=False):
215
246
  return argmin_impl(input, dim, keepdim)
216
247
 
217
248
 
218
- def asin(input):
249
+ def argsort(input, dim=-1, descending=False, stable=False):
219
250
  r"""
220
- Computes arcsine of input tensors element-wise.
221
-
222
- .. math::
251
+ Sorts the input tensor along the given dimension in specified order and return the sorted indices.
223
252
 
224
- out_i = \sin^{-1}(input_i)
253
+ .. warning::
254
+ This is an experimental optimizer API that is subject to change.
225
255
 
226
256
  Args:
227
- input (Tensor): The shape of tensor is
228
- :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
257
+ input(Tensor): The input tensor to sort.
258
+ dim (int, optional): The dim to sort along. Default: ``-1`` , means the last dimension.
259
+ The Ascend backend only supports sorting the last dimension.
260
+ descending (bool, optional): The sort order. If `descending` is ``True`` then the elements
261
+ are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
262
+ stable (bool, optional): Whether to use stable sorting algorithm. Default: ``False``.
229
263
 
230
264
  Returns:
231
- Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
265
+ Tensor, the indices of sorted input tensor. Data type is int64.
232
266
 
233
267
  Raises:
234
- TypeError: If `input` is not a Tensor.
268
+ ValueError: If `dim` is out of range.
269
+ TypeError: If dtype of `dim` is not int32.
270
+ TypeError: If dtype of `descending` is not bool.
271
+ TypeError: If dtype of `stable` is not bool.
235
272
 
236
273
  Supported Platforms:
237
- ``Ascend`` ``GPU`` ``CPU``
274
+ ``Ascend``
238
275
 
239
276
  Examples:
240
277
  >>> import mindspore
241
278
  >>> import numpy as np
242
- >>> from mindspore import Tensor, ops
243
- >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
244
- >>> output = ops.asin_ext(input)
245
- >>> print(output)
246
- [0.8330927 0.04001068 0.30469266 0.59438497 ]
279
+ >>> from mindspore import Tensor
280
+ >>> import mindspore.mint as mint
281
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
282
+ >>> sort = mint.argsort(x)
283
+ >>> print(sort)
284
+ [[2 1 0]
285
+ [2 0 1]
286
+ [0 1 2]]
247
287
  """
248
- return asin_impl(input)
288
+ return argsort_impl(input, dim, descending, stable)
249
289
 
250
290
 
251
291
  def asinh(input):
@@ -280,6 +320,39 @@ def asinh(input):
280
320
  return asinh_impl(input)
281
321
 
282
322
 
323
+ def asin(input):
324
+ r"""
325
+ Computes arcsine of input tensors element-wise.
326
+
327
+ .. math::
328
+
329
+ out_i = \sin^{-1}(input_i)
330
+
331
+ Args:
332
+ input (Tensor): The shape of tensor is
333
+ :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
334
+
335
+ Returns:
336
+ Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
337
+
338
+ Raises:
339
+ TypeError: If `input` is not a Tensor.
340
+
341
+ Supported Platforms:
342
+ ``Ascend`` ``GPU`` ``CPU``
343
+
344
+ Examples:
345
+ >>> import mindspore
346
+ >>> import numpy as np
347
+ >>> from mindspore import Tensor, ops
348
+ >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
349
+ >>> output = ops.asin_ext(input)
350
+ >>> print(output)
351
+ [0.8330927 0.04001068 0.30469266 0.59438497 ]
352
+ """
353
+ return asin_impl(input)
354
+
355
+
283
356
  def atan2(input, other):
284
357
  r"""
285
358
  Returns arctangent of input/other element-wise.
@@ -356,6 +429,102 @@ def atan(input):
356
429
  return atan_impl(input)
357
430
 
358
431
 
432
+ def avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
433
+ r"""
434
+ Applies a 1D average pooling over an input Tensor which can be regarded as a composition of 1D input planes.
435
+
436
+ Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, avg_pool1d outputs regional average in the
437
+ :math:`(L_{in})`-dimension. Given kernel size as :math:`ks = l_{ker}` and `stride` as :math:`s = s_0`, the
438
+ operation is as follows.
439
+
440
+ .. math::
441
+ \text{output}(N_i, C_j, l) = \frac{1}{l_{ker}} \sum_{n=0}^{l_{ker}-1}
442
+ \text{input}(N_i, C_j, s_0 \times l + n)
443
+
444
+ .. warning::
445
+ This is an experimental API that is subject to change or deletion.
446
+
447
+ Args:
448
+ input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})`.
449
+ kernel_size (Union(int, tuple[int])): The size of kernel window used to take the average value.
450
+ stride (Union(int, tuple[int]), optional): The distance of kernel moving. `stride` can either be an int
451
+ number or a tuple of one int number. Default: ``None``, the same value as `kernel_size`.
452
+ padding (Union(int, tuple[int]), optional): The pad length to be filled. `padding` can either be an integer
453
+ or a tuple of one integer. Default: ``0`` .
454
+ ceil_mode (bool, optional): If True, apply ceil instead of floor to compute the output shape. Default: ``False``.
455
+ count_include_pad (bool, optional): If True, include the zero-padding in the averaging calculation. Default: ``True`` .
456
+
457
+ Returns:
458
+ Tensor of shape :math:`(N, C_{in}, L_{out})`.
459
+
460
+ Raises:
461
+ TypeError: If `input` is not a Tensor.
462
+ TypeError: If `kernel_size` or `stride` is not an int.
463
+ TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
464
+ ValueError: If `kernel_size` or `stride` is less than `1`.
465
+ ValueError: If `kernel_size` or `stride` or `padding` is not int nor a tuple whose length is greater than `1`.
466
+
467
+ Supported Platforms:
468
+ ``Ascend`` ``GPU`` ``CPU``
469
+
470
+ Examples:
471
+ >>> import mindspore
472
+ >>> import numpy as np
473
+ >>> from mindspore import Tensor, mint
474
+ >>> input_x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
475
+ >>> output = mint.nn.functional.avg_pool1d(input_x, kernel_size=6, stride=1)
476
+ >>> print(output.shape)
477
+ (1, 3, 1)
478
+ """
479
+ return avg_pool1d_impl(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
480
+
481
+
482
+ def bincount(input, weights=None, minlength=0):
483
+ r"""
484
+ Count the occurrences of each value in the input.
485
+
486
+ If `minlength` is not specified, the length of the output Tensor is the maximum value in the input plus one.
487
+ If `minlength` is specified, the length of the output Tensor is the maximum value between `minlength` or
488
+ the maximum value in the input plus one.
489
+
490
+ Each value in the output Tensor represents the number of occurrences of that index value in the input.
491
+ If `weights` is specified, the output results are weighted,
492
+ i.e., :math:`out[n] += weight[i]` instead of :math:`out[n] += 1`.
493
+
494
+ .. warning::
495
+ This is an experimental API that is subject to change or deletion.
496
+
497
+ Args:
498
+ input (Tensor): A one-dimensional Tensor.
499
+ weights (Tensor, optional): Weights with the same shape as the input. Default: ``None``.
500
+ minlength (int, optional): The minimum length of output Tensor. Should be non-negative. Default: ``0``.
501
+
502
+ Returns:
503
+ Tensor, If input is non-empty, the output shape is :math:`(max(max(input)+1, minlength), )`,
504
+ otherwise the shape is :math:`(0, )`.
505
+
506
+ Raises:
507
+ TypeError: If `input` or `weights` is not a Tensor.
508
+ ValueError: If `input` contains negative values.
509
+ ValueError: If `input` is not one-dimensional or `input` and `weights` do not have the same shape.
510
+
511
+ Supported Platforms:
512
+ ``Ascend``
513
+
514
+ Examples:
515
+ >>> from mindspore import mint, Tensor
516
+ >>> print(mint.bincount(Tensor(np.arange(5))))
517
+ [1 1 1 1 1]
518
+ >>> print(mint.bincount(Tensor(np.array([0, 1, 1, 3, 2, 1, 7]))))
519
+ [1 3 1 1 0 0 0 1]
520
+ >>> w = Tensor(np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6])) # weights
521
+ >>> x = Tensor(np.array([0, 1, 1, 2, 2, 2]))
522
+ >>> print(mint.bincount(x, weights=w, minlength=5))
523
+ [0.3 0.7 1.1 0. 0. ]
524
+ """
525
+ return bincount_impl(input, weights, minlength)
526
+
527
+
359
528
  def bmm(input, mat2):
360
529
  r"""
361
530
  Performs batch matrix-matrix multiplication of two three-dimensional tensors.
@@ -463,13 +632,6 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
463
632
  return fold_impl(input, converted_output_size, converted_kernel_size, converted_dilation, converted_padding, converted_stride)
464
633
 
465
634
 
466
- def copy(variable, value):
467
- r"""
468
- None
469
- """
470
- return copy_impl(variable, value)
471
-
472
-
473
635
  def cummin(input, dim):
474
636
  r"""
475
637
  Returns a tuple (values, indices) where `values` is the cumulative minimum value of input Tensor `input`
@@ -480,6 +642,9 @@ def cummin(input, dim):
480
642
  y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
481
643
  \end{array}
482
644
 
645
+ .. note::
646
+ O2 mode is not supported in Ascend.
647
+
483
648
  Args:
484
649
  input (Tensor): The input Tensor, The dimension must be greater than 0.
485
650
  dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
@@ -494,9 +659,6 @@ def cummin(input, dim):
494
659
  TypeError: If `dim` is not an int.
495
660
  ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
496
661
 
497
- .. note::
498
- O2 mode is not supported in Ascend.
499
-
500
662
  Supported Platforms:
501
663
  ``Ascend``
502
664
 
@@ -561,6 +723,54 @@ def cumsum(input, dim, dtype=None):
561
723
  return cumsum_impl(input, dim, dtype)
562
724
 
563
725
 
726
+ def diag(input, diagonal=0):
727
+ r"""
728
+ If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal.
729
+
730
+ If input is a matrix (2-D tensor), then returns a 1-D tensor with the diagonal elements of input.
731
+
732
+ The argument diagonal controls which diagonal to consider:
733
+
734
+ - If `diagonal` = 0, it is the main diagonal.
735
+
736
+ - If `diagonal` > 0, it is above the main diagonal.
737
+
738
+ - If `diagonal` < 0, it is below the main diagonal.
739
+
740
+ .. warning::
741
+ This is an experimental API that is subject to change or deletion.
742
+
743
+ Args:
744
+ input (Tensor): The input tensor.
745
+ diagonal (int, optional): the diagonal to consider. Defaults: ``0``.
746
+
747
+ Returns:
748
+ Tensor, has the same dtype as the `input`, its shape is up to `diagonal`.
749
+
750
+ - If `input` shape is :math:`(x_0)` : then output shape is :math:`(x_0 + \left | diagonal \right | , x_0 + \left | diagonal \right | )` 2-D Tensor.
751
+
752
+ - If `input` shape is :math:`(x_0, x_1)` : then output shape is main diagonal to move :math:`(\left | diagonal \right |)` elements remains elements' length 1-D Tensor.
753
+
754
+ Raises:
755
+ TypeError: If `input` is not a Tensor.
756
+ ValueError: If shape of `input` is not 1-D and 2-D.
757
+
758
+ Supported Platforms:
759
+ ``Ascend``
760
+
761
+ Examples:
762
+ >>> from mindspore import Tensor, mint
763
+ >>> input = Tensor([1, 2, 3, 4]).astype('int32')
764
+ >>> output = mint.diag(input)
765
+ >>> print(output)
766
+ [[1 0 0 0]
767
+ [0 2 0 0]
768
+ [0 0 3 0]
769
+ [0 0 0 4]]
770
+ """
771
+ return diag_impl(input, diagonal)
772
+
773
+
564
774
  def elu(input, alpha=1.0):
565
775
  r"""
566
776
  Exponential Linear Unit activation function.
@@ -652,6 +862,40 @@ def flatten(input, start_dim=0, end_dim=-1):
652
862
  return flatten_impl(input, start_dim, end_dim)
653
863
 
654
864
 
865
+ def frac(input):
866
+ r"""
867
+ Calculates the fractional part of each element in the input.
868
+
869
+ .. math::
870
+ out_i = input_i - \lfloor |input_i| \rfloor * sgn(input_i)
871
+
872
+ .. warning::
873
+ This is an experimental API that is subject to change or deletion.
874
+
875
+ Args:
876
+ input (Tensor): The input Tensor.
877
+
878
+ Returns:
879
+ Tensor, has the same shape and type as input.
880
+
881
+ Raises:
882
+ TypeError: If `input` is not a Tensor.
883
+
884
+ Supported Platforms:
885
+ ``Ascend``
886
+
887
+ Examples:
888
+ >>> import mindspore
889
+ >>> import numpy as np
890
+ >>> from mindspore import Tensor, ops
891
+ >>> x = Tensor([2, 4.2, -2.5], mindspore.float16)
892
+ >>> output = ops.frac_ext(x)
893
+ >>> print(output)
894
+ [ 0. 0.1992 -0.5 ]
895
+ """
896
+ return frac_impl(input)
897
+
898
+
655
899
  def histc(input, bins=100, min=0, max=0):
656
900
  r"""
657
901
  Computes the histogram of a tensor.
@@ -767,6 +1011,56 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
767
1011
  return unfold_impl(input, converted_kernel_size, converted_dilation, converted_padding, converted_stride)
768
1012
 
769
1013
 
1014
+ def index_add(input, dim, index, source, alpha=1):
1015
+ r"""
1016
+ Accumulate the elements of `alpha` times `source` into the `input` by adding to the index in the order given in `index`. For example, if ``dim == 0`` , ``index[i] == j`` , and ``alpha = -1`` , then the `i` th row of `source` is subtracted from the `j` th row of `input` . The `dim` th dimension of `source` must have the same size as the length of `index` , and all other dimensions must match `input`, or an error will be raised. For a 3-D tensor, the output is defined as follows:
1017
+
1018
+ .. math::
1019
+ \begin{array}{ll}
1020
+ input[index[i],\ :,\ :]\ +=\ alpha * source[i,\ :,\ :] \qquad \#if\ dim == 0 \\
1021
+ input[:,\ \ index[i],\ :]\ +=\ alpha * source[:,\ \ i,\ :] \qquad \#if\ dim == 1 \\
1022
+ input[:,\ :,\ \ index[i]]\ +=\ alpha * source[:,\ :,\ \ i] \qquad\#if\ dim == 2 \\
1023
+ \end{array}
1024
+
1025
+ .. warning::
1026
+ This is an experimental API that is subject to change or deletion.
1027
+
1028
+ Args:
1029
+ input (Tensor): The input Tensor.
1030
+ dim (int): The dimension along which to index.
1031
+ index (Tensor): Add the value of "input Tensor" and `source` along the dimension of the `dim` according to the specified index value, with data type int32. The `index` must be 1D with the same size as the size of `source` in the `dim` dimension. The values of `index` should be in [0, b), where the b is the size of "input Tensor" in the `dim` dimension.
1032
+ source (Tensor): The input tensor with the value to add. Must have same data type as "input Tensor". The shape must be the same as "input Tensor" except the `dim` th dimension.
1033
+ alpha (number, optional): The scalar multiplier for source. Default: ``1``.
1034
+
1035
+ Returns:
1036
+ Tensor, has the same shape and dtype as `input`.
1037
+
1038
+ Raises:
1039
+ TypeError: If neither `index` nor `source` is a Tensor.
1040
+ ValueError: If the value of `dim` is out of the dimension range of `source` shape.
1041
+ ValueError: If `index` rank is not the same as `source` rank.
1042
+ ValueError: If shape of `index` is not 1D or size of `index` is not equal to dimension of source[dim].
1043
+ ValueError: If the shape of `source` is not the same as that of `input` except the `dim` axis.
1044
+
1045
+ Supported Platforms:
1046
+ ``Ascend``
1047
+
1048
+ Examples:
1049
+ >>> import numpy as np
1050
+ >>> import mindspore
1051
+ >>> from mindspore import Tensor, ops
1052
+ >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
1053
+ >>> index = Tensor(np.array([0, 2]), mindspore.int32)
1054
+ >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
1055
+ >>> output = ops.auto_generate.index_add_ext(x, 1, index, y, alpha=1)
1056
+ >>> print(output)
1057
+ [[ 1.5 2. 4. ]
1058
+ [ 5. 5. 7.5]
1059
+ [ 9. 8. 11.5]]
1060
+ """
1061
+ return index_add_impl(input, dim, index, source, alpha)
1062
+
1063
+
770
1064
  def index_select(input, dim, index):
771
1065
  r"""
772
1066
  Generates a new Tensor that accesses the values of `input` along the specified `dim` dimension
@@ -813,6 +1107,13 @@ def index_select(input, dim, index):
813
1107
  return index_select_impl(input, dim, index)
814
1108
 
815
1109
 
1110
+ def inplace_adds(input, other, alpha=1):
1111
+ r"""
1112
+ None
1113
+ """
1114
+ return inplace_adds_impl(input, other, alpha)
1115
+
1116
+
816
1117
  def inplace_add(input, other, alpha=1):
817
1118
  r"""
818
1119
  None
@@ -820,11 +1121,41 @@ def inplace_add(input, other, alpha=1):
820
1121
  return inplace_add_impl(input, other, alpha)
821
1122
 
822
1123
 
823
- def inplace_adds(input, other, alpha=1):
1124
+ def sub_tensor_(input, other, alpha=1):
824
1125
  r"""
825
1126
  None
826
1127
  """
827
- return inplace_adds_impl(input, other, alpha)
1128
+ return sub_tensor_impl(input, other, alpha)
1129
+
1130
+
1131
+ def isneginf(input):
1132
+ r"""
1133
+ Determines which elements are -inf for each position.
1134
+
1135
+ .. warning::
1136
+ - This API can be used only on the Atlas A2 training series.
1137
+
1138
+ Args:
1139
+ input (Tensor): Input Tensor.
1140
+
1141
+ Returns:
1142
+ Tensor with the same shape as the input, where elements are `True` if the corresponding element in the `input` is negative infinity, and `False` otherwise.
1143
+
1144
+ Raises:
1145
+ TypeError: If the input is not a tensor.
1146
+
1147
+ Supported Platforms:
1148
+ ``Ascend`` ``GPU`` ``CPU``
1149
+
1150
+ Examples:
1151
+ >>> from mindspore import ops, Tensor
1152
+ >>> from mindspore import dtype as mstype
1153
+ >>> output = ops.isneginf(Tensor([[-float("inf"), float("inf")], [1, -float("inf")]], mstype.float32))
1154
+ >>> print(output)
1155
+ [[ True False]
1156
+ [False True]]
1157
+ """
1158
+ return isneginf_impl(input)
828
1159
 
829
1160
 
830
1161
  def l1_loss(input, target, reduction='mean'):
@@ -908,7 +1239,7 @@ def leaky_relu(input, negative_slope=0.01):
908
1239
 
909
1240
  Args:
910
1241
  input (Tensor): The input of leaky_relu is a Tensor of any dimension.
911
- negative_slope (Union[int, float]): Slope of the activation function when the element of `input` is less than 0.
1242
+ negative_slope (Union[int, float], optional): Slope of the activation function when the element of `input` is less than 0.
912
1243
  Default: ``0.01`` .
913
1244
 
914
1245
  Returns:
@@ -933,32 +1264,65 @@ def leaky_relu(input, negative_slope=0.01):
933
1264
  return leaky_relu_impl(input, negative_slope)
934
1265
 
935
1266
 
936
- def log_softmax(input, dim=None, dtype=None):
1267
+ def log10(input):
937
1268
  r"""
938
- Applies the Log Softmax function to the input tensor on the specified axis.
939
- Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
940
- the Log Softmax function is shown as follows:
1269
+ Returns the logarithm to the base 10 of a tensor element-wise.
941
1270
 
942
1271
  .. math::
943
- \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
1272
+ y_i = \log_{10}(x_i)
944
1273
 
945
- where :math:`N` is the length of the Tensor.
1274
+ .. warning::
1275
+ - This is an experimental API that is subject to change or deletion.
1276
+ - If the input value of operator Log10 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
1277
+ may be affacted.
946
1278
 
947
1279
  Args:
948
- input (Tensor): The input Tensor.
949
- dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
1280
+ input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
950
1281
 
951
- Keyword Args:
952
- dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If not set to None, the input
953
- Tensor will be cast to `dtype` before the operation is performed. This is useful for preventing overflows.
954
- If set to None, stay the same as original Tensor. Default: ``None`` . Supported data type is {float16, float32, double, bfloat16}.
1282
+ Returns:
1283
+ Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
1284
+
1285
+ - if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
1286
+ - if `input.dtype` is integer or boolean type, the output dtype is float32.
1287
+
1288
+ Raises:
1289
+ TypeError: If `input` is not a Tensor.
1290
+
1291
+ Supported Platforms:
1292
+ ``Ascend``
1293
+
1294
+ Examples:
1295
+ >>> import mindspore
1296
+ >>> import numpy as np
1297
+ >>> from mindspore import Tensor, mint
1298
+ >>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
1299
+ >>> output = mint.log10(x)
1300
+ >>> print(output)
1301
+ [0.47712136 0.69897 0.845098 ]
1302
+ """
1303
+ return log10_impl(input)
1304
+
1305
+
1306
+ def log2(input):
1307
+ r"""
1308
+ Returns the logarithm to the base 2 of a tensor element-wise.
1309
+
1310
+ .. math::
1311
+ y_i = \log_2(x_i)
1312
+
1313
+ .. warning::
1314
+ - If the input value of operator Log2 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
1315
+ may be affacted.
1316
+
1317
+ Args:
1318
+ input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
955
1319
 
956
1320
  Returns:
957
- Tensor, with the same shape as the input.
1321
+ Tensor, has the same shape as the `input`. If `input.dtype` is of integer or boolean type, the output dtype
1322
+ will be float32. Otherwise, the output dtype will be the same as `input.dtype`.
958
1323
 
959
1324
  Raises:
960
- TypeError: If `dim` is not an int.
961
- ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
1325
+ TypeError: If `input` is not a Tensor.
962
1326
 
963
1327
  Supported Platforms:
964
1328
  ``Ascend``
@@ -966,13 +1330,13 @@ def log_softmax(input, dim=None, dtype=None):
966
1330
  Examples:
967
1331
  >>> import mindspore
968
1332
  >>> import numpy as np
969
- >>> from mindspore import Tensor, ops
970
- >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
971
- >>> output = ops.auto_generate.log_softmax(logits, dim=-1)
1333
+ >>> from mindspore import Tensor, mint
1334
+ >>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
1335
+ >>> output = mint.log2(x)
972
1336
  >>> print(output)
973
- [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
1337
+ [1.5849625 2.321928 2.807355 ]
974
1338
  """
975
- return log_softmax_impl(input, dim, dtype)
1339
+ return log2_impl(input)
976
1340
 
977
1341
 
978
1342
  def logaddexp(input, other):
@@ -992,7 +1356,7 @@ def logaddexp(input, other):
992
1356
  input (Tensor): Input Tensor. The dtype of `input` must be float.
993
1357
  other (Tensor): Input Tensor. The dtype of `other` must be float.
994
1358
  If the shape of `input` is not equal to the shape of `other`,
995
- they must be broadcastable to a common shape (which becomes the shape of the output).
1359
+ they must be broadcastable to a common shape.
996
1360
 
997
1361
  Returns:
998
1362
  Tensor, with the same dtype as `input` and `other`.
@@ -1016,11 +1380,100 @@ def logaddexp(input, other):
1016
1380
  return logaddexp_impl(input, other)
1017
1381
 
1018
1382
 
1019
- def matmul(input, mat2):
1383
+ def logsumexp(input, dim, keepdim=False):
1384
+ r"""
1385
+ Computes the logarithm of the sum of exponentiations of all elements along the specified `dim` dimension of the `input` (with numerical stabilization), and retains the dimension based on the `keepdim` parameter.
1386
+
1387
+ .. math::
1388
+
1389
+ logsumexp(input) = \log(\sum(e^{input-input_{max}})) + input_{max}
1390
+
1391
+ .. warning::
1392
+ This is an experimental API that is subject to change or deletion.
1393
+
1394
+ Args:
1395
+ input (Tensor): Input Tensor.
1396
+ dim (Union[int, tuple(int), list(int)], optional): The dimension to be reduced (the value should be within `[0, len(input.shape) - 1]`), when the `dim` is `()`, all dimensions are reduced.
1397
+ keepdim (bool, optional): Whether the output tensor retains the dimension `dim`, default: `False`.
1398
+
1399
+ Returns:
1400
+ Tensor, the dtype changes according to the `input.dtype`, and the shape changes according to the values of `dim` and `keepdim`.
1401
+
1402
+ - If `input.dtype` is in [float16, float32, bfloat16], the output dtype is the same as the `input.dtype`.
1403
+ - If `input.dtype` is an integer or boolean type, the output dtype is float32.
1404
+ - If `dim` is (), and `keepdim` is False, the output is a 0-D tensor representing the logarithm of the sum of exponentiations of all elements in the `input` tensor.
1405
+ - If `dim` is `1`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[2], ..., input.shape[n])`.
1406
+ - If `dim` is `(1, 2)`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[3], ..., input.shape[n])`.
1407
+
1408
+ Raises:
1409
+ TypeError: If `input` is not a Tensor.
1410
+ TypeError: If dtype of `input` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, bfloat16.
1411
+ TypeError: If `dim` is not an int or tuple(int) or list(list).
1412
+ TypeError: If `keepdim` is not a bool.
1413
+ ValueError: If the value of any elements of `dim` is not in the range `[0, len(input.shape) - 1]`.
1414
+ RuntimeError: If any element of `dim` is repeated.
1415
+
1416
+ Supported Platforms:
1417
+ ``Ascend``
1418
+
1419
+ Examples:
1420
+ >>> import numpy as np
1421
+ >>> from mindspore import Tensor, ops
1422
+ >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
1423
+ >>> output = ops.auto_generate.logsumexp_ext(x, 1, keepdim=True)
1424
+ >>> print(output.shape)
1425
+ (3, 1, 5, 6)
1426
+ """
1427
+ return logsumexp_impl(input, dim, keepdim)
1428
+
1429
+
1430
+ def log_softmax(input, dim=None, dtype=None):
1431
+ r"""
1432
+ Applies the Log Softmax function to the input tensor on the specified axis.
1433
+ Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
1434
+ the Log Softmax function is shown as follows:
1435
+
1436
+ .. math::
1437
+ \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
1438
+
1439
+ where :math:`N` is the length of the Tensor.
1440
+
1441
+ Args:
1442
+ input (Tensor): The input Tensor.
1443
+ dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
1444
+
1445
+ Keyword Args:
1446
+ dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If not set to None, the input
1447
+ Tensor will be cast to `dtype` before the operation is performed. This is useful for preventing overflows.
1448
+ If set to None, stay the same as original Tensor. Default: ``None`` . Supported data type is {float16, float32, double, bfloat16}.
1449
+
1450
+ Returns:
1451
+ Tensor, with the same shape as the input.
1452
+
1453
+ Raises:
1454
+ TypeError: If `dim` is not an int.
1455
+ ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
1456
+
1457
+ Supported Platforms:
1458
+ ``Ascend``
1459
+
1460
+ Examples:
1461
+ >>> import mindspore
1462
+ >>> import numpy as np
1463
+ >>> from mindspore import Tensor, ops
1464
+ >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
1465
+ >>> output = ops.auto_generate.log_softmax(logits, dim=-1)
1466
+ >>> print(output)
1467
+ [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
1468
+ """
1469
+ return log_softmax_impl(input, dim, dtype)
1470
+
1471
+
1472
+ def matmul(input, other):
1020
1473
  r"""
1021
1474
  None
1022
1475
  """
1023
- return matmul_impl(input, mat2)
1476
+ return matmul_impl(input, other)
1024
1477
 
1025
1478
 
1026
1479
  def matrix_inverse(input):
@@ -1053,42 +1506,93 @@ def matrix_inverse(input):
1053
1506
  return matrix_inverse_impl(input)
1054
1507
 
1055
1508
 
1056
- def mean(input, axis=None, keep_dims=False, dtype=None):
1509
def max_unpool2d(input, indices, kernel_size, stride=None, padding=0, output_size=None):
    r"""
    Computes the inverse of `max_pool2d`.

    `max_unpool2d` restores the maximal values at the positions given by
    `indices` and sets every other position to zero. Typically `input` has
    shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`, and
    the output has shape :math:`(N, C, H_{out}, W_{out})` or
    :math:`(C, H_{out}, W_{out})`, where

    .. math::
        \begin{array}{ll} \\
            H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
            W_{out} = (W_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
        \end{array}

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Args:
        input (Tensor): The input Tensor to invert, of shape
            :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        indices (Tensor): Indices of the maximal values. Its shape must be the
            same as `input`, every value must lie in
            :math:`[0, H_{in} \times W_{in} - 1]`, and the data type must be
            int32 or int64.
        kernel_size (Union[int, tuple[int]]): The size of the kernel used to
            take the maximum value: a single int for both height and width, or
            a tuple of two ints (height, width).
        stride (Union[int, tuple[int]], optional): The distance the kernel
            moves: a single int for both directions, or a tuple of two ints
            (height, width). Default: ``None`` , which means the moving step is
            `kernel_size` .
        padding (Union[int, tuple[int]], optional): The pad value to be filled.
            Default: ``0`` . A single int applies to both height and width; a
            tuple of two ints gives the height and width paddings as padding[0]
            and padding[1] respectively.
        output_size (tuple[int], optional): The target output size. Default:
            ``None`` . If output_size == (), the output shape is computed from
            `kernel_size`, `stride` and `padding`. Otherwise output_size must be
            :math:`(N, C, H, W)` , :math:`(C, H, W)` or :math:`(H, W)` and must
            belong to :math:`[(N, C, H_{out} - stride[0], W_{out} - stride[1]),
            (N, C, H_{out} + stride[0], W_{out} + stride[1])]`.

    Returns:
        Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or
        :math:`(C, H_{out}, W_{out})`, with the same data type as `input`.

    Raises:
        TypeError: If data type of `input` or `indices` is not supported.
        TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
        ValueError: If numbers in `stride`, `padding` or `kernel_size` are not positive.
        ValueError: If the shapes of `input` and `indices` are different.
        ValueError: If the length of `input` is not 3 or 4.
        ValueError: If the type of `output_size` is not tuple.
        ValueError: If `output_size` is not close to output size computed by attr `kernel_size`, `stride`, `padding`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input = Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
        >>> indices = Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
        >>> output = ops.max_unpool2d_ext(input, indices, 1, stride=1, padding=0)
        >>> print(output.asnumpy())
        [[[[0. 1.]
           [8. 9.]]]]
    """
    # Forward every argument unchanged to the generated backend kernel.
    unpooled = max_unpool2d_impl(input, indices, kernel_size, stride, padding, output_size)
    return unpooled
1558
+
1559
+
1560
+ def mean(input, dim=None, keepdim=False, dtype=None):
1057
1561
  r"""
1058
1562
  Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
1059
- And reduce a dimension of `input` along the specified `axis`. `keep_dims`
1563
+ And reduce a dimension of `input` along the specified `dim`. `keepdim`
1060
1564
  determines whether the dimensions of the output and input are the same.
1061
1565
 
1062
1566
  Note:
1063
- The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
1567
+ The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
1064
1568
 
1065
1569
  Args:
1066
1570
  input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
1067
1571
  :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
1068
- axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
1572
+ dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
1069
1573
  reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
1070
1574
  and the value range is [-r,r).
1071
- keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
1575
+ keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
1072
1576
  If ``False`` , don't keep these dimensions. Default: ``False`` .
1073
1577
  dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
1074
1578
 
1075
1579
  Returns:
1076
1580
  Tensor, has the same data type as input tensor.
1077
1581
 
1078
- - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
1582
+ - If `dim` is ``None`` , and `keepdim` is ``False`` ,
1079
1583
  the output is a 0-D tensor representing the product of all elements in the input tensor.
1080
- - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
1584
+ - If `dim` is int, set as 1, and `keepdim` is ``False`` ,
1081
1585
  the shape of output is :math:`(x_0, x_2, ..., x_R)`.
1082
- - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
1586
+ - If `dim` is tuple(int), set as (1, 2), and `keepdim` is ``False`` ,
1083
1587
  the shape of output is :math:`(x_0, x_3, ..., x_R)`.
1084
- - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
1588
+ - If `dim` is 1-D Tensor, set as [1, 2], and `keepdim` is ``False`` ,
1085
1589
  the shape of output is :math:`(x_0, x_3, ..., x_R)`.
1086
1590
 
1087
1591
  Raises:
1088
1592
  TypeError: If `x` is not a Tensor.
1089
- TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
1090
- TypeError: If `keep_dims` is not a bool.
1091
- ValueError: If `axis` is out of range.
1593
+ TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
1594
+ TypeError: If `keepdim` is not a bool.
1595
+ ValueError: If `dim` is out of range.
1092
1596
 
1093
1597
  Supported Platforms:
1094
1598
  ``Ascend`` ``GPU`` ``CPU``
@@ -1098,7 +1602,7 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
1098
1602
  >>> import numpy as np
1099
1603
  >>> from mindspore import Tensor, ops
1100
1604
  >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
1101
- >>> output = ops.mean(x, 1, keep_dims=True)
1605
+ >>> output = ops.mean_ext(x, 1, keepdim=True)
1102
1606
  >>> result = output.shape
1103
1607
  >>> print(result)
1104
1608
  (3, 1, 5, 6)
@@ -1107,25 +1611,25 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
1107
1611
  ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
1108
1612
  ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
1109
1613
  ... mindspore.float32)
1110
- >>> output = ops.mean(x)
1614
+ >>> output = ops.mean_ext(x)
1111
1615
  >>> print(output)
1112
1616
  5.0
1113
1617
  >>> print(output.shape)
1114
1618
  ()
1115
- >>> # case 2: Reduces a dimension along the axis 0
1116
- >>> output = ops.mean(x, 0, True)
1619
+ >>> # case 2: Reduces a dimension along the dim 0
1620
+ >>> output = ops.mean_ext(x, 0, True)
1117
1621
  >>> print(output)
1118
1622
  [[[4. 4. 4. 4. 4. 4.]
1119
1623
  [5. 5. 5. 5. 5. 5.]
1120
1624
  [6. 6. 6. 6. 6. 6.]]]
1121
- >>> # case 3: Reduces a dimension along the axis 1
1122
- >>> output = ops.mean(x, 1, True)
1625
+ >>> # case 3: Reduces a dimension along the dim 1
1626
+ >>> output = ops.mean_ext(x, 1, True)
1123
1627
  >>> print(output)
1124
1628
  [[[2. 2. 2. 2. 2. 2.]]
1125
1629
  [[5. 5. 5. 5. 5. 5.]]
1126
1630
  [[8. 8. 8. 8. 8. 8.]]]
1127
- >>> # case 4: Reduces a dimension along the axis 2
1128
- >>> output = ops.mean(x, 2, True)
1631
+ >>> # case 4: Reduces a dimension along the dim 2
1632
+ >>> output = ops.mean_ext(x, 2, True)
1129
1633
  >>> print(output)
1130
1634
  [[[ 2.]
1131
1635
  [ 2.]
@@ -1137,7 +1641,7 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
1137
1641
  [ 8.]
1138
1642
  [10.]]]
1139
1643
  """
1140
- return mean_impl(input, axis, keep_dims, dtype)
1644
+ return mean_impl(input, dim, keepdim, dtype)
1141
1645
 
1142
1646
 
1143
1647
  def mish(input):
@@ -1186,6 +1690,50 @@ def mish(input):
1186
1690
  return mish_impl(input)
1187
1691
 
1188
1692
 
1693
def mm(input, mat2):
    r"""
    Returns the matrix product of two arrays.
    If `input` is a :math:`(n \times m)` Tensor, `mat2` is a
    :math:`(m \times p)` Tensor, `out` will be a :math:`(n \times p)` Tensor.

    Note:
        This function cannot support broadcasting.
        Refer to :func:`mindspore.ops.matmul` instead if you need a broadcastable function.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Args:
        input (Tensor): The first matrix of matrix multiplication.
            The last dimension of `input` must be the same size as the first dimension of `mat2`.
        mat2 (Tensor): The second matrix of matrix multiplication.
            The first dimension of `mat2` must be the same size as the last dimension of `input`.

    Returns:
        Tensor, the matrix product of the inputs.

    Raises:
        ValueError: If the last dimension of `input` is not the same size as the
            first dimension of `mat2`.
        TypeError: If `input` or `mat2` is not a Tensor.
        TypeError: If dtype of `input` or `mat2` is not float16, float32 or bfloat16.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import ops
        >>> import numpy as np
        >>> x1 = ms.Tensor(np.random.rand(2, 3), ms.float32)
        >>> x2 = ms.Tensor(np.random.rand(3, 4), ms.float32)
        >>> out = ops.mm_ext(x1, x2)
        >>> print(out.shape)
        (2, 4)
    """
    # Shape/dtype checks are performed by the generated backend implementation.
    return mm_impl(input, mat2)
1735
+
1736
+
1189
1737
  def mse_loss(input, target, reduction='mean'):
1190
1738
  r"""
1191
1739
  Calculates the mean squared error between the predicted value and the label value.
@@ -1272,34 +1820,34 @@ def outer(input, vec2):
1272
1820
  return outer_impl(input, vec2)
1273
1821
 
1274
1822
 
1275
- def prod(input, axis=None, keep_dims=False, dtype=None):
1823
+ def prod(input, dim=None, keepdim=False, dtype=None):
1276
1824
  r"""
1277
1825
  Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
1278
- reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
1279
- same by controlling `keep_dims`.
1826
+ reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
1827
+ same by controlling `keepdim`.
1280
1828
 
1281
1829
  Args:
1282
1830
  input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
1283
1831
  :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
1284
- axis (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
1832
+ dim (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
1285
1833
  Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
1286
- keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
1834
+ keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
1287
1835
  If ``False`` , don't keep these dimensions. Default: ``False`` .
1288
1836
  dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
1289
1837
 
1290
1838
  Returns:
1291
1839
  Tensor, has the same data type as input tensor.
1292
1840
 
1293
- - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
1841
+ - If `dim` is ``None`` , and `keepdim` is ``False`` ,
1294
1842
  the output is a 0-D tensor representing the product of all elements in the input tensor.
1295
- - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
1843
+ - If `dim` is int, set as 1, and `keepdim` is ``False`` ,
1296
1844
  the shape of output is :math:`(input_0, input_2, ..., input_R)`.
1297
1845
 
1298
1846
  Raises:
1299
1847
  TypeError: If `input` is not a Tensor.
1300
- TypeError: If `axis` is not one of the following: int or None.
1301
- TypeError: If `keep_dims` is not a bool.
1302
- ValueError: If `axis` is out of range.
1848
+ TypeError: If `dim` is not one of the following: int or None.
1849
+ TypeError: If `keepdim` is not a bool.
1850
+ ValueError: If `dim` is out of range.
1303
1851
 
1304
1852
  Supported Platforms:
1305
1853
  ``Ascend`` ``GPU`` ``CPU``
@@ -1309,7 +1857,7 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
1309
1857
  >>> import numpy as np
1310
1858
  >>> from mindspore import Tensor, ops
1311
1859
  >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
1312
- >>> output = ops.ProdExt()(x, 1, keep_dims=True)
1860
+ >>> output = ops.prod_ext(x, 1, keepdim=True)
1313
1861
  >>> result = output.shape
1314
1862
  >>> print(result)
1315
1863
  (3, 1, 5, 6)
@@ -1317,25 +1865,25 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
1317
1865
  >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
1318
1866
  ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
1319
1867
  ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
1320
- >>> output = ops.ProdExt()(x)
1868
+ >>> output = ops.prod_ext(x)
1321
1869
  >>> print(output)
1322
1870
  2.2833798e+33
1323
1871
  >>> print(output.shape)
1324
1872
  ()
1325
- >>> # case 2: Reduces a dimension along axis 0.
1326
- >>> output = ops.ProdExt()(x, 0, True)
1873
+ >>> # case 2: Reduces a dimension along dim 0.
1874
+ >>> output = ops.prod_ext(x, 0, True)
1327
1875
  >>> print(output)
1328
1876
  [[[ 28. 28. 28. 28. 28. 28.]
1329
1877
  [ 80. 80. 80. 80. 80. 80.]
1330
1878
  [162. 162. 162. 162. 162. 162.]]]
1331
- >>> # case 3: Reduces a dimension along axis 1.
1332
- >>> output = ops.ProdExt()(x, 1, True)
1879
+ >>> # case 3: Reduces a dimension along dim 1.
1880
+ >>> output = ops.prod_ext(x, 1, True)
1333
1881
  >>> print(output)
1334
1882
  [[[ 6. 6. 6. 6. 6. 6.]]
1335
1883
  [[120. 120. 120. 120. 120. 120.]]
1336
1884
  [[504. 504. 504. 504. 504. 504.]]]
1337
- >>> # case 4: Reduces a dimension along axis 2.
1338
- >>> output = ops.ProdExt()(x, 2, True)
1885
+ >>> # case 4: Reduces a dimension along dim 2.
1886
+ >>> output = ops.prod_ext(x, 2, True)
1339
1887
  >>> print(output)
1340
1888
  [[[1.00000e+00]
1341
1889
  [6.40000e+01]
@@ -1347,7 +1895,7 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
1347
1895
  [2.62144e+05]
1348
1896
  [5.31441e+05]]]
1349
1897
  """
1350
- return prod_impl(input, axis, keep_dims, dtype)
1898
+ return prod_impl(input, dim, keepdim, dtype)
1351
1899
 
1352
1900
 
1353
1901
  def select(input, dim, index):
@@ -1376,7 +1924,6 @@ def select(input, dim, index):
1376
1924
  >>> from mindspore import Tensor, mint
1377
1925
  >>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
1378
1926
  >>> y = mint.select(input, 0, 0)
1379
- >>> y = Tensor([1,2], mindspore.float32)
1380
1927
  >>> print(y)
1381
1928
  [2 3 4 5]
1382
1929
  """
@@ -1494,7 +2041,7 @@ def stack(tensors, dim=0):
1494
2041
 
1495
2042
  Args:
1496
2043
  tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
1497
- dim (int): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
2044
+ dim (int, optional): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
1498
2045
 
1499
2046
  Returns:
1500
2047
  Tensor. A stacked Tensor with the same type as `tensors`.
@@ -1502,7 +2049,7 @@ def stack(tensors, dim=0):
1502
2049
  Raises:
1503
2050
  TypeError: If the data types of elements in `tensors` are not the same.
1504
2051
  ValueError: If `dim` is out of the range [-(R+1), R+1);
1505
- or if the shapes of elements in tensors are not the same.
2052
+ or if the shapes of elements in `tensors` are not the same.
1506
2053
 
1507
2054
  Supported Platforms:
1508
2055
  ``Ascend``
@@ -1577,6 +2124,67 @@ def sub(input, other, alpha=1):
1577
2124
  return sub_impl(input, other, alpha)
1578
2125
 
1579
2126
 
2127
def sum(input, dim=None, keepdim=False, dtype=None):
    r"""
    Calculate sum of Tensor elements over a given dim.

    Note:
        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.

    Args:
        input (Tensor): The input tensor.
        dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
            If ``None`` , sum all the elements of the input tensor.
            If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
        keepdim (bool): Whether the output tensor has `dim` retained or not.
            If ``True`` , keep these reduced dimensions and the length is 1.
            If ``False`` , don't keep these dimensions. Default: ``False`` .
        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .

    Returns:
        A Tensor, sum of elements over a given `dim` in `input`.

    Raises:
        TypeError: If `input` is not a Tensor.
        TypeError: If `dim` is not an int, tuple(int), list(int), Tensor or None.
        ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
        TypeError: If `keepdim` is not a bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
        >>> out = ops.sum_ext(x)
        >>> print(out)
        270.0
        >>> out = ops.sum_ext(x, dim=2)
        >>> print(out)
        [[ 6. 12. 18.]
         [24. 30. 36.]
         [42. 48. 54.]]
        >>> out = ops.sum_ext(x, dim=2, keepdim=True)
        >>> print(out)
        [[[ 6.]
          [12.]
          [18.]]
         [[24.]
          [30.]
          [36.]]
         [[42.]
          [48.]
          [54.]]]
    """
    # Thin wrapper: validation and the actual reduction happen in the backend impl.
    return sum_impl(input, dim, keepdim, dtype)
2186
+
2187
+
1580
2188
  def topk(input, k, dim=-1, largest=True, sorted=True):
1581
2189
  r"""
1582
2190
  Finds values and indices of the `k` largest or smallest entries along a given dimension.
@@ -1633,7 +2241,7 @@ def topk(input, k, dim=-1, largest=True, sorted=True):
1633
2241
  (Tensor(shape=[3, 2], dtype=Float32, value=
1634
2242
  [[ 9.67299998e-01, 5.36800027e-01],
1635
2243
  [ 6.52499974e-01, 4.68499988e-01],
1636
- [ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
2244
+ [ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int64, value=
1637
2245
  [[3, 0],
1638
2246
  [1, 2],
1639
2247
  [2, 3]]))
@@ -1642,7 +2250,7 @@ def topk(input, k, dim=-1, largest=True, sorted=True):
1642
2250
  (Tensor(shape=[3, 2], dtype=Float32, value=
1643
2251
  [[ 2.44700000e-01, 4.30200011e-01],
1644
2252
  [ 1.86800003e-01, 4.38800007e-01],
1645
- [ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
2253
+ [ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int64, value=
1646
2254
  [[1, 2],
1647
2255
  [3, 0],
1648
2256
  [0, 1]]))
@@ -1654,9 +2262,6 @@ def trace(input):
1654
2262
  r"""
1655
2263
  Returns a new tensor that is the sum of the `input` main trace.
1656
2264
 
1657
- Note:
1658
- Input must be tensor.
1659
-
1660
2265
  Args:
1661
2266
  input (Tensor): 2-D Tensor.
1662
2267
 
@@ -1732,3 +2337,38 @@ def tril(input, diagonal=0):
1732
2337
  """
1733
2338
  return tril_impl(input, diagonal)
1734
2339
 
2340
+
2341
def t(input):
    r"""
    Transpose the input tensor.

    A 2-D tensor is transposed; a 1-D tensor is returned as it is.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Args:
        input (Tensor): The input tensor.

    Returns:
        Tensor, the transposed 2-D tensor, or the 1-D tensor unchanged.

    Raises:
        ValueError: If the dimension of `input` is greater than 2.
        ValueError: If `input` is empty.
        TypeError: If `input` is not a tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
        >>> output = ops.t_ext(input)
        >>> print(output)
        [[ 1. 4.]
        [ 2. 5.]
        [ 3. 6.]]
    """
    # Delegate directly to the generated backend implementation.
    transposed = t_impl(input)
    return transposed
2374
+