mindspore 2.5.0__cp311-cp311-win_amd64.whl → 2.6.0__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (493)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +25 -194
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +109 -75
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +2014 -3386
  46. mindspore/common/api.py +386 -355
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/generator.py +3 -0
  52. mindspore/common/hook_handle.py +5 -3
  53. mindspore/common/initializer.py +10 -6
  54. mindspore/common/jit_begin_end.py +94 -0
  55. mindspore/common/jit_config.py +6 -1
  56. mindspore/common/jit_context.py +76 -0
  57. mindspore/common/jit_trace.py +378 -0
  58. mindspore/common/lazy_inline.py +2 -2
  59. mindspore/common/mutable.py +5 -4
  60. mindspore/common/parameter.py +106 -39
  61. mindspore/common/seed.py +2 -2
  62. mindspore/common/sparse_tensor.py +23 -17
  63. mindspore/common/tensor.py +332 -714
  64. mindspore/communication/__init__.py +7 -5
  65. mindspore/communication/_comm_helper.py +47 -2
  66. mindspore/communication/comm_func.py +70 -53
  67. mindspore/communication/management.py +83 -17
  68. mindspore/context.py +228 -571
  69. mindspore/dataset/__init__.py +44 -20
  70. mindspore/dataset/audio/__init__.py +2 -8
  71. mindspore/dataset/audio/transforms.py +3 -17
  72. mindspore/dataset/core/config.py +3 -3
  73. mindspore/dataset/engine/cache_client.py +1 -1
  74. mindspore/dataset/engine/datasets.py +102 -120
  75. mindspore/dataset/engine/datasets_audio.py +22 -22
  76. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  77. mindspore/dataset/engine/datasets_text.py +78 -85
  78. mindspore/dataset/engine/datasets_user_defined.py +109 -77
  79. mindspore/dataset/engine/datasets_vision.py +111 -108
  80. mindspore/dataset/engine/iterators.py +5 -3
  81. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  82. mindspore/dataset/engine/samplers.py +279 -57
  83. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  84. mindspore/dataset/engine/validators.py +10 -0
  85. mindspore/dataset/text/__init__.py +7 -6
  86. mindspore/dataset/text/transforms.py +6 -5
  87. mindspore/dataset/text/utils.py +3 -3
  88. mindspore/dataset/transforms/__init__.py +0 -9
  89. mindspore/dataset/transforms/transforms.py +3 -3
  90. mindspore/dataset/utils/browse_dataset.py +1 -1
  91. mindspore/dataset/vision/__init__.py +2 -9
  92. mindspore/dataset/vision/transforms.py +202 -158
  93. mindspore/dataset/vision/utils.py +7 -5
  94. mindspore/device_context/ascend/op_debug.py +60 -1
  95. mindspore/device_context/ascend/op_tuning.py +0 -4
  96. mindspore/device_manager.py +39 -3
  97. mindspore/dnnl.dll +0 -0
  98. mindspore/dpcmi.dll +0 -0
  99. mindspore/experimental/es/embedding_service.py +35 -27
  100. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
  101. mindspore/experimental/map_parameter.py +4 -4
  102. mindspore/experimental/optim/adadelta.py +22 -26
  103. mindspore/experimental/optim/adagrad.py +4 -4
  104. mindspore/experimental/optim/adam.py +4 -0
  105. mindspore/experimental/optim/adamax.py +4 -4
  106. mindspore/experimental/optim/adamw.py +4 -0
  107. mindspore/experimental/optim/asgd.py +1 -1
  108. mindspore/experimental/optim/lr_scheduler.py +40 -22
  109. mindspore/experimental/optim/radam.py +5 -5
  110. mindspore/experimental/optim/rprop.py +1 -1
  111. mindspore/experimental/optim/sgd.py +1 -1
  112. mindspore/hal/contiguous_tensors_handle.py +6 -10
  113. mindspore/hal/device.py +55 -81
  114. mindspore/hal/event.py +38 -55
  115. mindspore/hal/memory.py +115 -147
  116. mindspore/hal/stream.py +81 -125
  117. mindspore/include/dataset/constants.h +7 -4
  118. mindspore/include/dataset/execute.h +2 -2
  119. mindspore/jpeg62.dll +0 -0
  120. mindspore/log.py +40 -2
  121. mindspore/mindrecord/__init__.py +20 -7
  122. mindspore/mindspore_backend_common.dll +0 -0
  123. mindspore/mindspore_backend_manager.dll +0 -0
  124. mindspore/mindspore_common.dll +0 -0
  125. mindspore/mindspore_core.dll +0 -0
  126. mindspore/mindspore_dump.dll +0 -0
  127. mindspore/mindspore_frontend.dll +0 -0
  128. mindspore/mindspore_glog.dll +0 -0
  129. mindspore/mindspore_memory_pool.dll +0 -0
  130. mindspore/mindspore_ms_backend.dll +0 -0
  131. mindspore/mindspore_ops.dll +0 -0
  132. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  133. mindspore/mindspore_ops_kernel_common.dll +0 -0
  134. mindspore/mindspore_profiler.dll +0 -0
  135. mindspore/mindspore_pyboost.dll +0 -0
  136. mindspore/mindspore_pynative.dll +0 -0
  137. mindspore/mindspore_res_manager.dll +0 -0
  138. mindspore/mindspore_runtime_pipeline.dll +0 -0
  139. mindspore/mint/__init__.py +133 -702
  140. mindspore/mint/distributed/__init__.py +5 -1
  141. mindspore/mint/distributed/distributed.py +198 -113
  142. mindspore/mint/linalg/__init__.py +2 -0
  143. mindspore/mint/nn/__init__.py +280 -18
  144. mindspore/mint/nn/functional.py +282 -64
  145. mindspore/mint/nn/layer/__init__.py +4 -0
  146. mindspore/mint/nn/layer/_functions.py +7 -3
  147. mindspore/mint/nn/layer/activation.py +120 -13
  148. mindspore/mint/nn/layer/conv.py +234 -28
  149. mindspore/mint/nn/layer/normalization.py +15 -16
  150. mindspore/mint/nn/layer/padding.py +1 -1
  151. mindspore/mint/nn/layer/pooling.py +66 -1
  152. mindspore/mint/optim/__init__.py +2 -1
  153. mindspore/mint/optim/sgd.py +171 -0
  154. mindspore/msobj140.dll +0 -0
  155. mindspore/mspdb140.dll +0 -0
  156. mindspore/mspdbcore.dll +0 -0
  157. mindspore/mspdbst.dll +0 -0
  158. mindspore/mspft140.dll +0 -0
  159. mindspore/msvcdis140.dll +0 -0
  160. mindspore/msvcp140_1.dll +0 -0
  161. mindspore/msvcp140_2.dll +0 -0
  162. mindspore/msvcp140_atomic_wait.dll +0 -0
  163. mindspore/msvcp140_codecvt_ids.dll +0 -0
  164. mindspore/nn/__init__.py +4 -1
  165. mindspore/nn/cell.py +1253 -179
  166. mindspore/nn/layer/activation.py +23 -21
  167. mindspore/nn/layer/basic.py +22 -16
  168. mindspore/nn/layer/container.py +1 -1
  169. mindspore/nn/layer/conv.py +53 -42
  170. mindspore/nn/layer/embedding.py +9 -8
  171. mindspore/nn/layer/normalization.py +48 -42
  172. mindspore/nn/layer/pooling.py +75 -31
  173. mindspore/nn/layer/transformer.py +11 -10
  174. mindspore/nn/learning_rate_schedule.py +4 -2
  175. mindspore/nn/loss/loss.py +27 -19
  176. mindspore/nn/optim/ada_grad.py +6 -5
  177. mindspore/nn/optim/adadelta.py +9 -7
  178. mindspore/nn/optim/adafactor.py +1 -1
  179. mindspore/nn/optim/adam.py +18 -14
  180. mindspore/nn/optim/adamax.py +8 -7
  181. mindspore/nn/optim/adasum.py +5 -5
  182. mindspore/nn/optim/asgd.py +3 -1
  183. mindspore/nn/optim/ftrl.py +11 -9
  184. mindspore/nn/optim/lamb.py +1 -1
  185. mindspore/nn/optim/lazyadam.py +12 -10
  186. mindspore/nn/optim/momentum.py +7 -6
  187. mindspore/nn/optim/optimizer.py +2 -2
  188. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  189. mindspore/nn/optim/rmsprop.py +13 -12
  190. mindspore/nn/optim/rprop.py +9 -7
  191. mindspore/nn/optim/sgd.py +9 -6
  192. mindspore/nn/optim/tft_wrapper.py +5 -2
  193. mindspore/nn/probability/bijector/bijector.py +17 -11
  194. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  195. mindspore/nn/probability/bijector/invert.py +2 -2
  196. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  197. mindspore/nn/probability/bijector/softplus.py +3 -2
  198. mindspore/nn/probability/distribution/beta.py +3 -3
  199. mindspore/nn/probability/distribution/categorical.py +1 -1
  200. mindspore/nn/probability/distribution/cauchy.py +4 -2
  201. mindspore/nn/probability/distribution/exponential.py +6 -7
  202. mindspore/nn/probability/distribution/gamma.py +2 -2
  203. mindspore/nn/probability/distribution/gumbel.py +2 -2
  204. mindspore/nn/probability/distribution/half_normal.py +5 -3
  205. mindspore/nn/probability/distribution/logistic.py +5 -3
  206. mindspore/nn/probability/distribution/poisson.py +1 -1
  207. mindspore/nn/probability/distribution/uniform.py +5 -3
  208. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  209. mindspore/nn/reinforcement/tensor_array.py +1 -1
  210. mindspore/nn/wrap/__init__.py +6 -6
  211. mindspore/nn/wrap/cell_wrapper.py +178 -117
  212. mindspore/nn/wrap/grad_reducer.py +45 -36
  213. mindspore/nn/wrap/loss_scale.py +3 -3
  214. mindspore/numpy/array_creations.py +3 -3
  215. mindspore/numpy/array_ops.py +1 -1
  216. mindspore/numpy/utils.py +1 -2
  217. mindspore/numpy/utils_const.py +1 -2
  218. mindspore/opencv_core452.dll +0 -0
  219. mindspore/opencv_imgcodecs452.dll +0 -0
  220. mindspore/opencv_imgproc452.dll +0 -0
  221. mindspore/ops/__init__.py +3 -2
  222. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  223. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  224. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  225. mindspore/ops/_register_for_op.py +0 -11
  226. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  227. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  228. mindspore/ops/_vmap/vmap_array_ops.py +32 -6
  229. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  230. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  231. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  232. mindspore/ops/auto_generate/__init__.py +4 -3
  233. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
  234. mindspore/ops/auto_generate/gen_extend_func.py +286 -208
  235. mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
  236. mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
  237. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  238. mindspore/ops/composite/__init__.py +2 -1
  239. mindspore/ops/composite/base.py +19 -24
  240. mindspore/ops/composite/math_ops.py +6 -16
  241. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  242. mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
  243. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  244. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  248. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  249. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  250. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  251. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  252. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  254. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  255. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  256. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  257. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  259. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  260. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  263. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  264. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  267. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  268. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  271. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  272. mindspore/ops/function/__init__.py +28 -2
  273. mindspore/ops/function/_add_attr_func.py +58 -0
  274. mindspore/ops/function/array_func.py +1631 -2347
  275. mindspore/ops/function/clip_func.py +38 -45
  276. mindspore/ops/function/debug_func.py +36 -44
  277. mindspore/ops/function/grad/__init__.py +1 -0
  278. mindspore/ops/function/grad/grad_func.py +104 -71
  279. mindspore/ops/function/image_func.py +1 -1
  280. mindspore/ops/function/linalg_func.py +46 -78
  281. mindspore/ops/function/math_func.py +3024 -3855
  282. mindspore/ops/function/nn_func.py +678 -274
  283. mindspore/ops/function/other_func.py +159 -1
  284. mindspore/ops/function/parameter_func.py +17 -30
  285. mindspore/ops/function/random_func.py +216 -361
  286. mindspore/ops/function/reshard_func.py +4 -70
  287. mindspore/ops/function/sparse_func.py +3 -3
  288. mindspore/ops/function/sparse_unary_func.py +5 -5
  289. mindspore/ops/function/spectral_func.py +25 -58
  290. mindspore/ops/function/vmap_func.py +26 -18
  291. mindspore/ops/functional.py +8 -5
  292. mindspore/ops/functional_overload.py +655 -4
  293. mindspore/ops/op_info_register.py +32 -244
  294. mindspore/ops/operations/__init__.py +21 -14
  295. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  296. mindspore/ops/operations/_grad_ops.py +1 -10
  297. mindspore/ops/operations/_inner_ops.py +5 -76
  298. mindspore/ops/operations/_ms_kernel.py +4 -10
  299. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  300. mindspore/ops/operations/_scalar_ops.py +3 -2
  301. mindspore/ops/operations/_sequence_ops.py +1 -1
  302. mindspore/ops/operations/_tensor_array.py +1 -1
  303. mindspore/ops/operations/array_ops.py +39 -24
  304. mindspore/ops/operations/comm_ops.py +150 -107
  305. mindspore/ops/operations/custom_ops.py +287 -32
  306. mindspore/ops/operations/debug_ops.py +119 -16
  307. mindspore/ops/operations/inner_ops.py +1 -1
  308. mindspore/ops/operations/linalg_ops.py +1 -58
  309. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  310. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  311. mindspore/ops/operations/math_ops.py +21 -18
  312. mindspore/ops/operations/nn_ops.py +67 -224
  313. mindspore/ops/operations/other_ops.py +62 -9
  314. mindspore/ops/operations/random_ops.py +13 -7
  315. mindspore/ops/operations/reshard_ops.py +1 -1
  316. mindspore/ops/operations/sparse_ops.py +2 -2
  317. mindspore/ops/primitive.py +43 -32
  318. mindspore/ops/tensor_method.py +243 -17
  319. mindspore/ops_generate/__init__.py +0 -5
  320. mindspore/ops_generate/aclnn/__init__.py +0 -0
  321. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  322. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  323. mindspore/ops_generate/api/__init__.py +0 -0
  324. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  325. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  326. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  327. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  328. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  329. mindspore/ops_generate/api/gen_api.py +103 -0
  330. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  331. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  332. mindspore/ops_generate/common/__init__.py +0 -0
  333. mindspore/ops_generate/common/gen_constants.py +91 -0
  334. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  335. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  336. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  337. mindspore/ops_generate/gen_ops.py +23 -325
  338. mindspore/ops_generate/op_def/__init__.py +0 -0
  339. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  340. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  341. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
  342. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  343. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  344. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  345. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  346. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  347. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  348. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  349. mindspore/ops_generate/pyboost/__init__.py +0 -0
  350. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  351. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  352. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  353. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  354. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  355. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  356. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  357. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  358. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  359. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  360. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  361. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  362. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  363. mindspore/ops_generate/resources/__init__.py +0 -0
  364. mindspore/ops_generate/resources/resource_list.py +30 -0
  365. mindspore/ops_generate/resources/resource_loader.py +36 -0
  366. mindspore/ops_generate/resources/resource_manager.py +64 -0
  367. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  368. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  369. mindspore/parallel/__init__.py +6 -2
  370. mindspore/parallel/_auto_parallel_context.py +140 -12
  371. mindspore/parallel/_cell_wrapper.py +132 -15
  372. mindspore/parallel/_parallel_serialization.py +95 -4
  373. mindspore/parallel/_ps_context.py +1 -1
  374. mindspore/parallel/_recovery_context.py +7 -2
  375. mindspore/parallel/_tensor.py +142 -18
  376. mindspore/parallel/_utils.py +198 -25
  377. mindspore/parallel/algo_parameter_config.py +3 -3
  378. mindspore/parallel/auto_parallel.py +732 -0
  379. mindspore/parallel/checkpoint_convert.py +159 -0
  380. mindspore/parallel/checkpoint_transform.py +658 -37
  381. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  382. mindspore/parallel/cluster/run.py +1 -1
  383. mindspore/parallel/function/__init__.py +24 -0
  384. mindspore/parallel/function/reshard_func.py +258 -0
  385. mindspore/parallel/nn/__init__.py +25 -0
  386. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  387. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  388. mindspore/parallel/parameter_broadcast.py +24 -13
  389. mindspore/parallel/shard.py +137 -62
  390. mindspore/parallel/transform_safetensors.py +288 -95
  391. mindspore/pgodb140.dll +0 -0
  392. mindspore/pgort140.dll +0 -0
  393. mindspore/profiler/__init__.py +9 -5
  394. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  395. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  397. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
  398. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  399. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  400. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  401. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  402. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  403. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  404. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  405. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  406. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  407. mindspore/profiler/common/constant.py +12 -0
  408. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  409. mindspore/profiler/common/path_manager.py +24 -0
  410. mindspore/profiler/common/profiler_context.py +26 -2
  411. mindspore/profiler/common/profiler_meta_data.py +74 -0
  412. mindspore/profiler/common/profiler_parameters.py +59 -18
  413. mindspore/profiler/common/profiler_path_manager.py +66 -7
  414. mindspore/profiler/dynamic_profiler.py +112 -79
  415. mindspore/profiler/envprofiler.py +26 -1
  416. mindspore/profiler/experimental_config.py +197 -0
  417. mindspore/profiler/mstx.py +57 -14
  418. mindspore/profiler/platform/npu_profiler.py +33 -7
  419. mindspore/profiler/profiler.py +541 -45
  420. mindspore/profiler/profiler_action_controller.py +1 -1
  421. mindspore/profiler/profiler_interface.py +4 -0
  422. mindspore/profiler/schedule.py +57 -22
  423. mindspore/rewrite/api/node.py +15 -13
  424. mindspore/rewrite/api/symbol_tree.py +1 -1
  425. mindspore/run_check/_check_version.py +25 -14
  426. mindspore/run_check/run_check.py +1 -1
  427. mindspore/runtime/__init__.py +2 -2
  428. mindspore/runtime/executor.py +40 -11
  429. mindspore/runtime/memory.py +37 -13
  430. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  431. mindspore/swresample-4.dll +0 -0
  432. mindspore/swscale-6.dll +0 -0
  433. mindspore/tbbmalloc.dll +0 -0
  434. mindspore/tinyxml2.dll +0 -0
  435. mindspore/train/__init__.py +8 -8
  436. mindspore/train/_utils.py +43 -9
  437. mindspore/train/amp.py +1 -1
  438. mindspore/train/callback/__init__.py +2 -2
  439. mindspore/train/callback/_callback.py +2 -16
  440. mindspore/train/callback/_checkpoint.py +24 -40
  441. mindspore/train/callback/_cluster_monitor.py +14 -18
  442. mindspore/train/callback/_flops_collector.py +2 -3
  443. mindspore/train/callback/_history.py +7 -4
  444. mindspore/train/callback/_lambda_callback.py +2 -2
  445. mindspore/train/callback/_landscape.py +0 -3
  446. mindspore/train/callback/_loss_monitor.py +2 -1
  447. mindspore/train/callback/_on_request_exit.py +6 -5
  448. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  449. mindspore/train/callback/_summary_collector.py +8 -13
  450. mindspore/train/callback/_time_monitor.py +2 -1
  451. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
  452. mindspore/train/data_sink.py +25 -2
  453. mindspore/train/dataset_helper.py +4 -5
  454. mindspore/train/loss_scale_manager.py +8 -7
  455. mindspore/train/metrics/accuracy.py +3 -3
  456. mindspore/train/metrics/confusion_matrix.py +9 -9
  457. mindspore/train/metrics/error.py +3 -3
  458. mindspore/train/metrics/hausdorff_distance.py +4 -4
  459. mindspore/train/metrics/mean_surface_distance.py +3 -3
  460. mindspore/train/metrics/metric.py +0 -12
  461. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  462. mindspore/train/metrics/precision.py +8 -6
  463. mindspore/train/metrics/recall.py +9 -9
  464. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  465. mindspore/train/mind_ir_pb2.py +19 -12
  466. mindspore/train/model.py +262 -127
  467. mindspore/train/serialization.py +246 -988
  468. mindspore/train/summary/_summary_adapter.py +2 -2
  469. mindspore/train/summary/summary_record.py +1 -1
  470. mindspore/turbojpeg.dll +0 -0
  471. mindspore/utils/__init__.py +3 -2
  472. mindspore/utils/dryrun.py +4 -2
  473. mindspore/utils/hooks.py +81 -0
  474. mindspore/utils/runtime_execution_order_check.py +2 -0
  475. mindspore/utils/utils.py +138 -4
  476. mindspore/vcmeta.dll +0 -0
  477. mindspore/vcruntime140.dll +0 -0
  478. mindspore/vcruntime140_1.dll +0 -0
  479. mindspore/version.py +1 -1
  480. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
  481. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
  482. mindspore/_install_custom.py +0 -43
  483. mindspore/common/_register_for_adapter.py +0 -74
  484. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  485. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  486. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  487. mindspore/ops_generate/gen_constants.py +0 -190
  488. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  489. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  490. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  492. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
  493. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2024 Huawei Technologies Co., Ltd
+ # Copyright 2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -17,17 +17,20 @@ from mindspore.common import dtype as mstype
  from mindspore.ops.auto_generate.pyboost_inner_prim import *


- def acos(input):
+ def acosh(input):
  r"""
- Computes arccosine of input tensors element-wise.
+ Computes inverse hyperbolic cosine of the inputs element-wise.

  .. math::

- out_i = \cos^{-1}(input_i)
+ out_i = \cosh^{-1}(input_i)
+
+ .. note::
+ Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
+ Input range is [1, inf].

  Args:
- input (Tensor): The shape of tensor is
- :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+ input (Tensor): The input tensor of inverse hyperbolic cosine function.

  Returns:
  Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
@@ -42,28 +45,25 @@ def acos(input):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
- >>> output = ops.acos_ext(input)
+ >>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
+ >>> output = ops.acosh_ext(input)
  >>> print(output)
- [0.7377037 1.5307857 1.2661037 0.9764114]
+ [0. 0.9624236 1.7627472 5.298292 ]
  """
- return acos_impl(input)
+ return acosh_impl(input)


- def acosh(input):
+ def acos(input):
  r"""
- Computes inverse hyperbolic cosine of the inputs element-wise.
+ Computes arccosine of input tensors element-wise.

  .. math::

- out_i = \cosh^{-1}(input_i)
-
- .. note::
- Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
- Input range is [1, inf].
+ out_i = \cos^{-1}(input_i)

  Args:
- input (Tensor): The input tensor of inverse hyperbolic cosine function.
+ input (Tensor): The shape of tensor is
+ :math:`(N,*)`, where :math:`*` means any number of additional dimensions.

  Returns:
  Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
@@ -78,12 +78,12 @@ def acosh(input):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
- >>> output = ops.acosh_ext(input)
+ >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+ >>> output = ops.acos_ext(input)
  >>> print(output)
- [0. 0.9624236 1.7627472 5.298292 ]
+ [0.7377037 1.5307857 1.2661037 0.9764114]
  """
- return acosh_impl(input)
+ return acos_impl(input)


  def adaptive_avg_pool2d_grad(grad_output, x):
@@ -237,16 +237,16 @@ def argmin(input, dim=None, keepdim=False):
  Examples:
  >>> import numpy as np
  >>> from mindspore import Tensor
- >>> from mindspore import mint
+ >>> from mindspore import ops
  >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
- >>> output = mint.argmin(x, dim=-1)
+ >>> output = ops.auto_generate.argmin_ext(x, dim=-1)
  >>> print(output)
  [0 1 2]
  """
  return argmin_impl(input, dim, keepdim)


- def argsort(input, dim=-1, descending=False):
+ def argsort(input, dim=-1, descending=False, stable=False):
  r"""
  Sorts the input tensor along the given dimension in specified order and return the sorted indices.

@@ -259,39 +259,44 @@ def argsort(input, dim=-1, descending=False):
  The Ascend backend only supports sorting the last dimension.
  descending (bool, optional): The sort order. If `descending` is ``True`` then the elements
  are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
+ stable (bool, optional): Whether to use stable sorting algorithm. Default: ``False``.

  Returns:
  Tensor, the indices of sorted input tensor. Data type is int64.

+ Raises:
+ ValueError: If `dim` is out of range.
+ TypeError: If dtype of `dim` is not int32.
+ TypeError: If dtype of `descending` is not bool.
+ TypeError: If dtype of `stable` is not bool.
+
  Supported Platforms:
  ``Ascend``

  Examples:
  >>> import mindspore
  >>> import numpy as np
- >>> from mindspore import Tensor
- >>> import mindspore.mint as mint
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
- >>> sort = mint.argsort(x)
+ >>> sort = ops.auto_generate.argsort_ext(x)
  >>> print(sort)
  [[2 1 0]
- [2 0 1]
- [0 1 2]]
+ [2 0 1]
+ [0 1 2]]
  """
- return argsort_impl(input, dim, descending)
+ return argsort_impl(input, dim, descending, stable)


- def asin(input):
+ def asinh(input):
  r"""
- Computes arcsine of input tensors element-wise.
+ Computes inverse hyperbolic sine of the input element-wise.

  .. math::

- out_i = \sin^{-1}(input_i)
+ out_i = \sinh^{-1}(input_i)

  Args:
- input (Tensor): The shape of tensor is
- :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+ input (Tensor): The input tensor of inverse hyperbolic sine function.

  Returns:
  Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
@@ -306,24 +311,25 @@ def asin(input):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
- >>> output = ops.asin_ext(input)
+ >>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
+ >>> output = ops.asinh_ext(input)
  >>> print(output)
- [0.8330927 0.04001068 0.30469266 0.59438497 ]
+ [-2.3124385 1.1947632 1.8184465 5.298342 ]
  """
- return asin_impl(input)
+ return asinh_impl(input)


- def asinh(input):
+ def asin(input):
  r"""
- Computes inverse hyperbolic sine of the input element-wise.
+ Computes arcsine of input tensors element-wise.

  .. math::

- out_i = \sinh^{-1}(input_i)
+ out_i = \sin^{-1}(input_i)

  Args:
- input (Tensor): The input tensor of inverse hyperbolic sine function.
+ input (Tensor): The shape of tensor is
+ :math:`(N,*)`, where :math:`*` means any number of additional dimensions.

  Returns:
  Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
@@ -338,12 +344,12 @@ def asinh(input):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
- >>> output = ops.asinh_ext(input)
+ >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+ >>> output = ops.asin_ext(input)
  >>> print(output)
- [-2.3124385 1.1947632 1.8184465 5.298342 ]
+ [0.8330927 0.04001068 0.30469266 0.59438497 ]
  """
- return asinh_impl(input)
+ return asin_impl(input)


  def atan2(input, other):
@@ -382,7 +388,7 @@ def atan2(input, other):
  >>> from mindspore import Tensor, ops
  >>> input = Tensor(np.array([0, 1]), mindspore.float32)
  >>> other = Tensor(np.array([1, 1]), mindspore.float32)
- >>> output = mint.atan2(input, other)
+ >>> output = ops.auto_generate.atan2_ext(input, other)
  >>> print(output)
  [0. 0.7853982]
  """
@@ -463,9 +469,9 @@ def avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, coun
  Examples:
  >>> import mindspore
  >>> import numpy as np
- >>> from mindspore import Tensor, mint
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
- >>> output = mint.nn.functional.avg_pool1d(input_x, kernel_size=6, stride=1)
+ >>> output = ops.auto_generate.avg_pool1d_ext(input_x, kernel_size=6, stride=1)
  >>> print(output.shape)
  (1, 3, 1)
  """
@@ -505,15 +511,15 @@ def bincount(input, weights=None, minlength=0):
  ``Ascend``

  Examples:
- >>> from mindspore import mint
- >>> print(mint.bincount(np.arange(5)))
- [1. 1. 1. 1. 1.]
- >>> print(mint.bincount(np.array([0, 1, 1, 3, 2, 1, 7])))
- [1. 3. 1. 1. 0. 0. 0. 1.]
- >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
- >>> x = np.array([0, 1, 1, 2, 2, 2])
- >>> print(mint.bincount(x, weights=w, minlength=5))
- [0.3 0.7 1.1 0.0 0.0]
+ >>> from mindspore import ops, Tensor
+ >>> print(ops.auto_generate.bincount_ext(Tensor(np.arange(5))))
+ [1 1 1 1 1]
+ >>> print(ops.auto_generate.bincount_ext(Tensor(np.array([0, 1, 1, 3, 2, 1, 7]))))
+ [1 3 1 1 0 0 0 1]
+ >>> w = Tensor(np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6])) # weights
+ >>> x = Tensor(np.array([0, 1, 1, 2, 2, 2]))
+ >>> print(ops.auto_generate.bincount_ext(x, weights=w, minlength=5))
+ [0.3 0.7 1.1 0. 0. ]
  """
  return bincount_impl(input, weights, minlength)

@@ -716,6 +722,54 @@ def cumsum(input, dim, dtype=None):
  return cumsum_impl(input, dim, dtype)


+ def diag(input, diagonal=0):
+ r"""
+ If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal.
+
+ If input is a matrix (2-D tensor), then returns a 1-D tensor with the diagonal elements of input.
+
+ The argument diagonal controls which diagonal to consider:
+
+ - If `diagonal` = 0, it is the main diagonal.
+
+ - If `diagonal` > 0, it is above the main diagonal.
+
+ - If `diagonal` < 0, it is below the main diagonal.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The input tensor.
+ diagonal (int, optional): the diagonal to consider. Defaults: ``0``.
+
+ Returns:
+ Tensor, has the same dtype as the `input`, its shape is up to `diagonal`.
+
+ - If `input` shape is :math:`(x_0)` : then output shape is :math:`(x_0 + \left | diagonal \right | , x_0 + \left | diagonal \right | )` 2-D Tensor.
+
+ - If `input` shape is :math:`(x_0, x_1)` : then output shape is main diagonal to move :math:`(\left | diagonal \right |)` elements remains elements' length 1-D Tensor.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ ValueError: If shape of `input` is not 1-D and 2-D.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor([1, 2, 3, 4]).astype('int32')
+ >>> output = ops.auto_generate.diag_ext(input)
+ >>> print(output)
+ [[1 0 0 0]
+ [0 2 0 0]
+ [0 0 3 0]
+ [0 0 0 4]]
+ """
+ return diag_impl(input, diagonal)
+
+
  def elu(input, alpha=1.0):
  r"""
  Exponential Linear Unit activation function.
@@ -956,6 +1010,56 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
  return unfold_impl(input, converted_kernel_size, converted_dilation, converted_padding, converted_stride)


+ def index_add(input, dim, index, source, alpha=1):
+ r"""
+ Accumulate the elements of `alpha` times `source` into the `input` by adding to the index in the order given in `index`. For example, if ``dim == 0`` , ``index[i] == j`` , and ``alpha = -1`` , then the `i` th row of `source` is subtracted from the `j` th row of `input` . The `dim` th dimension of `source` must have the same size as the length of `index` , and all other dimensions must match `input`, or an error will be raised. For a 3-D tensor, the output is defined as follows:
+
+ .. math::
+ \begin{array}{ll}
+ input[index[i],\ :,\ :]\ +=\ alpha * source[i,\ :,\ :] \qquad \#if\ dim == 0 \\
+ input[:,\ \ index[i],\ :]\ +=\ alpha * source[:,\ \ i,\ :] \qquad \#if\ dim == 1 \\
+ input[:,\ :,\ \ index[i]]\ +=\ alpha * source[:,\ :,\ \ i] \qquad\#if\ dim == 2 \\
+ \end{array}
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The input Tensor.
+ dim (int): The dimension along which to index.
+ index (Tensor): Add the value of "input Tensor" and `source` along the dimension of the `dim` according to the specified index value, with data type int32. The `index` must be 1D with the same size as the size of `source` in the `dim` dimension. The values of `index` should be in [0, b), where the b is the size of "input Tensor" in the `dim` dimension.
+ source (Tensor): The input tensor with the value to add. Must have same data type as "input Tensor". The shape must be the same as "input Tensor" except the `dim` th dimension.
+ alpha (number, optional): The scalar multiplier for source. Default: ``1``.
+
+ Returns:
+ Tensor, has the same shape and dtype as `input`.
+
+ Raises:
+ TypeError: If neither `index` nor `source` is a Tensor.
+ ValueError: If the value of `dim` is out of the dimension range of `source` shape.
+ ValueError: If `index` rank is not the same as `source` rank.
+ ValueError: If shape of `index` is not 1D or size of `index` is not equal to dimension of source[dim].
+ ValueError: If the shape of `source` is not the same as that of `input` except the `dim` axis.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
+ >>> index = Tensor(np.array([0, 2]), mindspore.int32)
+ >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
+ >>> output = ops.auto_generate.index_add_ext(x, 1, index, y, alpha=1)
+ >>> print(output)
+ [[ 1.5 2. 4. ]
+ [ 5. 5. 7.5]
+ [ 9. 8. 11.5]]
+ """
+ return index_add_impl(input, dim, index, source, alpha)
+
+
  def index_select(input, dim, index):
  r"""
  Generates a new Tensor that accesses the values of `input` along the specified `dim` dimension
@@ -1002,18 +1106,18 @@ def index_select(input, dim, index):
  return index_select_impl(input, dim, index)


- def inplace_add(input, other, alpha=1):
+ def inplace_adds(input, other, alpha=1):
  r"""
  None
  """
- return inplace_add_impl(input, other, alpha)
+ return inplace_adds_impl(input, other, alpha)


- def inplace_adds(input, other, alpha=1):
+ def inplace_add(input, other, alpha=1):
  r"""
  None
  """
- return inplace_adds_impl(input, other, alpha)
+ return inplace_add_impl(input, other, alpha)


  def sub_tensor_(input, other, alpha=1):
@@ -1028,7 +1132,6 @@ def isneginf(input):
  Determines which elements are -inf for each position.

  .. warning::
- - This is an experimental API that is subject to change.
  - This API can be used only on the Atlas A2 training series.

  Args:
@@ -1190,9 +1293,9 @@ def log10(input):
  Examples:
  >>> import mindspore
  >>> import numpy as np
- >>> from mindspore import Tensor, mint
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
- >>> output = mint.log10(x)
+ >>> output = ops.auto_generate.log10_ext(x)
  >>> print(output)
  [0.47712136 0.69897 0.845098 ]
  """
@@ -1207,7 +1310,6 @@ def log2(input):
  y_i = \log_2(x_i)

  .. warning::
- - This is an experimental API that is subject to change or deletion.
  - If the input value of operator Log2 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
  may be affacted.

@@ -1215,10 +1317,8 @@
  input (Tensor): Input Tensor of any dimension. The value must be greater than 0.

  Returns:
- Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
-
- - if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
- - if `input.dtype` is integer or boolean type, the output dtype is float32.
+ Tensor, has the same shape as the `input`. If `input.dtype` is of integer or boolean type, the output dtype
+ will be float32. Otherwise, the output dtype will be the same as `input.dtype`.

  Raises:
  TypeError: If `input` is not a Tensor.
@@ -1229,57 +1329,15 @@ def log2(input):
  Examples:
  >>> import mindspore
  >>> import numpy as np
- >>> from mindspore import Tensor, mint
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
- >>> output = mint.log2(x)
+ >>> output = ops.auto_generate.log2_ext(x)
  >>> print(output)
  [1.5849625 2.321928 2.807355 ]
  """
  return log2_impl(input)


- def log_softmax(input, dim=None, dtype=None):
- r"""
- Applies the Log Softmax function to the input tensor on the specified axis.
- Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
- the Log Softmax function is shown as follows:
-
- .. math::
- \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
-
- where :math:`N` is the length of the Tensor.
-
- Args:
- input (Tensor): The input Tensor.
- dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
-
- Keyword Args:
- dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If not set to None, the input
- Tensor will be cast to `dtype` before the operation is performed. This is useful for preventing overflows.
- If set to None, stay the same as original Tensor. Default: ``None`` . Supported data type is {float16, float32, double, bfloat16}.
-
- Returns:
- Tensor, with the same shape as the input.
-
- Raises:
- TypeError: If `dim` is not an int.
- ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
-
- Supported Platforms:
- ``Ascend``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
- >>> output = ops.auto_generate.log_softmax(logits, dim=-1)
- >>> print(output)
- [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
- """
- return log_softmax_impl(input, dim, dtype)
-
-
  def logaddexp(input, other):
  r"""
  Computes the logarithm of the sum of exponentiations of the inputs.
@@ -1297,7 +1355,7 @@ def logaddexp(input, other):
  input (Tensor): Input Tensor. The dtype of `input` must be float.
  other (Tensor): Input Tensor. The dtype of `other` must be float.
  If the shape of `input` is not equal to the shape of `other`,
- they must be broadcastable to a common shape (which becomes the shape of the output).
+ they must be broadcastable to a common shape.

  Returns:
  Tensor, with the same dtype as `input` and `other`.
@@ -1368,6 +1426,48 @@ def logsumexp(input, dim, keepdim=False):
  return logsumexp_impl(input, dim, keepdim)


+ def log_softmax(input, dim=None, dtype=None):
+ r"""
+ Applies the Log Softmax function to the input tensor on the specified axis.
+ Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
+ the Log Softmax function is shown as follows:
+
+ .. math::
+ \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
+
+ where :math:`N` is the length of the Tensor.
+
+ Args:
+ input (Tensor): The input Tensor.
+ dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
+
+ Keyword Args:
+ dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If not set to None, the input
+ Tensor will be cast to `dtype` before the operation is performed. This is useful for preventing overflows.
+ If set to None, stay the same as original Tensor. Default: ``None`` . Supported data type is {float16, float32, double, bfloat16}.
+
+ Returns:
+ Tensor, with the same shape as the input.
+
+ Raises:
+ TypeError: If `dim` is not an int.
+ ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+ >>> output = ops.auto_generate.log_softmax(logits, dim=-1)
+ >>> print(output)
+ [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
+ """
+ return log_softmax_impl(input, dim, dtype)
+
+
  def matmul(input, other):
  r"""
  None
@@ -1797,38 +1897,6 @@ def prod(input, dim=None, keepdim=False, dtype=None):
  return prod_impl(input, dim, keepdim, dtype)


- def select(input, dim, index):
- r"""
- Slices the input tensor along the selected dimension at the given index.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to slice.
- index (int): the index to select with.
-
- Returns:
- Tensor.
-
- Raises:
- TypeError: If input is not a Tensor.
-
- Supported Platforms:
- ``Ascend``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import Tensor, mint
- >>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
- >>> y = mint.select(input, 0, 0)
- >>> print(y)
- [2 3 4 5]
- """
- return select_impl(input, dim, index)
-
-
  def selu(input):
  r"""
  Activation function SELU (Scaled exponential Linear Unit).
@@ -1868,13 +1936,13 @@ def selu(input):

  Examples:
  >>> import mindspore
- >>> from mindspore import Tensor, mint
+ >>> from mindspore import Tensor, ops
  >>> import numpy as np
  >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
- >>> output = mint.nn.functional.selu(input)
+ >>> output = ops.auto_generate.selu_ext(input)
  >>> print(output)
  [[-1.1113307 4.202804 -1.7575096]
- [ 2.101402 -1.7462534 9.456309 ]]
+ [ 2.101402 -1.7462534 9.456309 ]]
  """
  return selu_impl(input)

@@ -2025,50 +2093,63 @@ def sub(input, other, alpha=1):

  def sum(input, dim=None, keepdim=False, dtype=None):
  r"""
- Alias for :func:`mindspore.mint.transpose` . The `input` corresponds to the `input` in the reference interface,
- and the parameters `axis0` and `axis1` correspond to `dim0` and `dim1` in the reference interface respectively.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Refer to :func:`mindspore.mint.transpose` for more details.
- """
- return sum_impl(input, dim, keepdim, dtype)
-
-
- def t(input):
- r"""
- Transpose the input tensor.
+ Calculate sum of Tensor elements over a given dim.

- .. warning::
- This is an experimental API that is subject to change or deletion.
+ Note:
+ The `dim` with tensor type is only used for compatibility with older versions and is not recommended.

  Args:
  input (Tensor): The input tensor.
+ dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
+ If ``None`` , sum all the elements of the input tensor.
+ If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
+ Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
+ keepdim (bool): Whether the output tensor has `dim` retained or not.
+ If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
+ dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .

  Returns:
- Tensor, transpose 2D tensor, return 1D tensor as it is.
+ A Tensor, sum of elements over a given `dim` in `input`.

  Raises:
- ValueError: If the dimension of `input` is greater than 2.
- ValueError: If `input` is empty.
- TypeError: If `input` is not a tensor.
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `dim` is not an int, tulpe(int), list(int), Tensor or None.
+ ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
+ TypeError: If `keepdim` is not a bool.

  Supported Platforms:
- ``Ascend``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
- >>> output = ops.t_ext(input)
- >>> print(output)
- [[ 1. 4.]
- [ 2. 5.]
- [ 3. 6.]]
+ >>> from mindspore import dtype as mstype
+ >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
+ ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
+ >>> out = ops.sum_ext(x)
+ >>> print(out)
+ 270.0
+ >>> out = ops.sum_ext(x, dim=2)
+ >>> print(out)
+ [[ 6. 12. 18.]
+ [24. 30. 36.]
+ [42. 48. 54.]]
+ >>> out = ops.sum_ext(x, dim=2, keepdim=True)
+ >>> print(out)
+ [[[ 6.]
+ [12.]
+ [18.]]
+ [[24.]
+ [30.]
+ [36.]]
+ [[42.]
+ [48.]
+ [54.]]]
  """
- return t_impl(input)
+ return sum_impl(input, dim, keepdim, dtype)


  def topk(input, k, dim=-1, largest=True, sorted=True):
@@ -2127,7 +2208,7 @@ def topk(input, k, dim=-1, largest=True, sorted=True):
  (Tensor(shape=[3, 2], dtype=Float32, value=
  [[ 9.67299998e-01, 5.36800027e-01],
  [ 6.52499974e-01, 4.68499988e-01],
- [ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
+ [ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int64, value=
  [[3, 0],
  [1, 2],
  [2, 3]]))
@@ -2136,7 +2217,7 @@ def topk(input, k, dim=-1, largest=True, sorted=True):
  (Tensor(shape=[3, 2], dtype=Float32, value=
  [[ 2.44700000e-01, 4.30200011e-01],
  [ 1.86800003e-01, 4.38800007e-01],
- [ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
+ [ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int64, value=
  [[1, 2],
  [3, 0],
  [0, 1]]))
@@ -2148,9 +2229,6 @@ def trace(input):
  r"""
  Returns a new tensor that is the sum of the `input` main trace.

- Note:
- Input must be tensor.
-
  Args:
  input (Tensor): 2-D Tensor.

@@ -2185,44 +2263,44 @@ def trace(input):
  return trace_impl(input)


- def transpose(input, dim0, dim1):
+ def tril(input, diagonal=0):
  r"""
- Interchange two axes of a tensor.
+ None
+ """
+ return tril_impl(input, diagonal)
+
+
+ def t(input):
+ r"""
+ Transpose the input tensor.

  .. warning::
  This is an experimental API that is subject to change or deletion.

  Args:
- input(Tensor): Input tensor.
- dim0 (int): First axis.
- dim1 (int): Second axis.
+ input (Tensor): The input tensor.

  Returns:
- Transposed tensor, has the same data type as `input`.
+ Tensor, transpose 2D tensor, return 1D tensor as it is.

  Raises:
- TypeError: If argument `input` is not Tensor.
- TypeError: If `dim0` or `dim1` is not integer.
- ValueError: If `dim0` or `dim1` is not in the range of :math:`[-ndim, ndim-1]`.
+ ValueError: If the dimension of `input` is greater than 2.
+ ValueError: If `input` is empty.
+ TypeError: If `input` is not a tensor.

  Supported Platforms:
  ``Ascend``

  Examples:
+ >>> import mindspore
  >>> import numpy as np
- >>> from mindspore import mint
- >>> from mindspore import Tensor
- >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
- >>> output = mint.transpose(input, 0, 2)
- >>> print(output.shape)
- (4, 3, 2)
- """
- return transpose_impl(input, dim0, dim1)
-
-
- def tril(input, diagonal=0):
- r"""
- None
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
+ >>> output = ops.t_ext(input)
+ >>> print(output)
+ [[ 1. 4.]
+ [ 2. 5.]
+ [ 3. 6.]]
  """
- return tril_impl(input, diagonal)
+ return t_impl(input)