mindspore 2.5.0-cp311-cp311-win_amd64.whl → 2.6.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (493)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +25 -194
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +109 -75
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +2014 -3386
  46. mindspore/common/api.py +386 -355
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/generator.py +3 -0
  52. mindspore/common/hook_handle.py +5 -3
  53. mindspore/common/initializer.py +10 -6
  54. mindspore/common/jit_begin_end.py +94 -0
  55. mindspore/common/jit_config.py +6 -1
  56. mindspore/common/jit_context.py +76 -0
  57. mindspore/common/jit_trace.py +378 -0
  58. mindspore/common/lazy_inline.py +2 -2
  59. mindspore/common/mutable.py +5 -4
  60. mindspore/common/parameter.py +106 -39
  61. mindspore/common/seed.py +2 -2
  62. mindspore/common/sparse_tensor.py +23 -17
  63. mindspore/common/tensor.py +332 -714
  64. mindspore/communication/__init__.py +7 -5
  65. mindspore/communication/_comm_helper.py +47 -2
  66. mindspore/communication/comm_func.py +70 -53
  67. mindspore/communication/management.py +83 -17
  68. mindspore/context.py +228 -571
  69. mindspore/dataset/__init__.py +44 -20
  70. mindspore/dataset/audio/__init__.py +2 -8
  71. mindspore/dataset/audio/transforms.py +3 -17
  72. mindspore/dataset/core/config.py +3 -3
  73. mindspore/dataset/engine/cache_client.py +1 -1
  74. mindspore/dataset/engine/datasets.py +102 -120
  75. mindspore/dataset/engine/datasets_audio.py +22 -22
  76. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  77. mindspore/dataset/engine/datasets_text.py +78 -85
  78. mindspore/dataset/engine/datasets_user_defined.py +109 -77
  79. mindspore/dataset/engine/datasets_vision.py +111 -108
  80. mindspore/dataset/engine/iterators.py +5 -3
  81. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  82. mindspore/dataset/engine/samplers.py +279 -57
  83. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  84. mindspore/dataset/engine/validators.py +10 -0
  85. mindspore/dataset/text/__init__.py +7 -6
  86. mindspore/dataset/text/transforms.py +6 -5
  87. mindspore/dataset/text/utils.py +3 -3
  88. mindspore/dataset/transforms/__init__.py +0 -9
  89. mindspore/dataset/transforms/transforms.py +3 -3
  90. mindspore/dataset/utils/browse_dataset.py +1 -1
  91. mindspore/dataset/vision/__init__.py +2 -9
  92. mindspore/dataset/vision/transforms.py +202 -158
  93. mindspore/dataset/vision/utils.py +7 -5
  94. mindspore/device_context/ascend/op_debug.py +60 -1
  95. mindspore/device_context/ascend/op_tuning.py +0 -4
  96. mindspore/device_manager.py +39 -3
  97. mindspore/dnnl.dll +0 -0
  98. mindspore/dpcmi.dll +0 -0
  99. mindspore/experimental/es/embedding_service.py +35 -27
  100. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
  101. mindspore/experimental/map_parameter.py +4 -4
  102. mindspore/experimental/optim/adadelta.py +22 -26
  103. mindspore/experimental/optim/adagrad.py +4 -4
  104. mindspore/experimental/optim/adam.py +4 -0
  105. mindspore/experimental/optim/adamax.py +4 -4
  106. mindspore/experimental/optim/adamw.py +4 -0
  107. mindspore/experimental/optim/asgd.py +1 -1
  108. mindspore/experimental/optim/lr_scheduler.py +40 -22
  109. mindspore/experimental/optim/radam.py +5 -5
  110. mindspore/experimental/optim/rprop.py +1 -1
  111. mindspore/experimental/optim/sgd.py +1 -1
  112. mindspore/hal/contiguous_tensors_handle.py +6 -10
  113. mindspore/hal/device.py +55 -81
  114. mindspore/hal/event.py +38 -55
  115. mindspore/hal/memory.py +115 -147
  116. mindspore/hal/stream.py +81 -125
  117. mindspore/include/dataset/constants.h +7 -4
  118. mindspore/include/dataset/execute.h +2 -2
  119. mindspore/jpeg62.dll +0 -0
  120. mindspore/log.py +40 -2
  121. mindspore/mindrecord/__init__.py +20 -7
  122. mindspore/mindspore_backend_common.dll +0 -0
  123. mindspore/mindspore_backend_manager.dll +0 -0
  124. mindspore/mindspore_common.dll +0 -0
  125. mindspore/mindspore_core.dll +0 -0
  126. mindspore/mindspore_dump.dll +0 -0
  127. mindspore/mindspore_frontend.dll +0 -0
  128. mindspore/mindspore_glog.dll +0 -0
  129. mindspore/mindspore_memory_pool.dll +0 -0
  130. mindspore/mindspore_ms_backend.dll +0 -0
  131. mindspore/mindspore_ops.dll +0 -0
  132. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  133. mindspore/mindspore_ops_kernel_common.dll +0 -0
  134. mindspore/mindspore_profiler.dll +0 -0
  135. mindspore/mindspore_pyboost.dll +0 -0
  136. mindspore/mindspore_pynative.dll +0 -0
  137. mindspore/mindspore_res_manager.dll +0 -0
  138. mindspore/mindspore_runtime_pipeline.dll +0 -0
  139. mindspore/mint/__init__.py +133 -702
  140. mindspore/mint/distributed/__init__.py +5 -1
  141. mindspore/mint/distributed/distributed.py +198 -113
  142. mindspore/mint/linalg/__init__.py +2 -0
  143. mindspore/mint/nn/__init__.py +280 -18
  144. mindspore/mint/nn/functional.py +282 -64
  145. mindspore/mint/nn/layer/__init__.py +4 -0
  146. mindspore/mint/nn/layer/_functions.py +7 -3
  147. mindspore/mint/nn/layer/activation.py +120 -13
  148. mindspore/mint/nn/layer/conv.py +234 -28
  149. mindspore/mint/nn/layer/normalization.py +15 -16
  150. mindspore/mint/nn/layer/padding.py +1 -1
  151. mindspore/mint/nn/layer/pooling.py +66 -1
  152. mindspore/mint/optim/__init__.py +2 -1
  153. mindspore/mint/optim/sgd.py +171 -0
  154. mindspore/msobj140.dll +0 -0
  155. mindspore/mspdb140.dll +0 -0
  156. mindspore/mspdbcore.dll +0 -0
  157. mindspore/mspdbst.dll +0 -0
  158. mindspore/mspft140.dll +0 -0
  159. mindspore/msvcdis140.dll +0 -0
  160. mindspore/msvcp140_1.dll +0 -0
  161. mindspore/msvcp140_2.dll +0 -0
  162. mindspore/msvcp140_atomic_wait.dll +0 -0
  163. mindspore/msvcp140_codecvt_ids.dll +0 -0
  164. mindspore/nn/__init__.py +4 -1
  165. mindspore/nn/cell.py +1253 -179
  166. mindspore/nn/layer/activation.py +23 -21
  167. mindspore/nn/layer/basic.py +22 -16
  168. mindspore/nn/layer/container.py +1 -1
  169. mindspore/nn/layer/conv.py +53 -42
  170. mindspore/nn/layer/embedding.py +9 -8
  171. mindspore/nn/layer/normalization.py +48 -42
  172. mindspore/nn/layer/pooling.py +75 -31
  173. mindspore/nn/layer/transformer.py +11 -10
  174. mindspore/nn/learning_rate_schedule.py +4 -2
  175. mindspore/nn/loss/loss.py +27 -19
  176. mindspore/nn/optim/ada_grad.py +6 -5
  177. mindspore/nn/optim/adadelta.py +9 -7
  178. mindspore/nn/optim/adafactor.py +1 -1
  179. mindspore/nn/optim/adam.py +18 -14
  180. mindspore/nn/optim/adamax.py +8 -7
  181. mindspore/nn/optim/adasum.py +5 -5
  182. mindspore/nn/optim/asgd.py +3 -1
  183. mindspore/nn/optim/ftrl.py +11 -9
  184. mindspore/nn/optim/lamb.py +1 -1
  185. mindspore/nn/optim/lazyadam.py +12 -10
  186. mindspore/nn/optim/momentum.py +7 -6
  187. mindspore/nn/optim/optimizer.py +2 -2
  188. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  189. mindspore/nn/optim/rmsprop.py +13 -12
  190. mindspore/nn/optim/rprop.py +9 -7
  191. mindspore/nn/optim/sgd.py +9 -6
  192. mindspore/nn/optim/tft_wrapper.py +5 -2
  193. mindspore/nn/probability/bijector/bijector.py +17 -11
  194. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  195. mindspore/nn/probability/bijector/invert.py +2 -2
  196. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  197. mindspore/nn/probability/bijector/softplus.py +3 -2
  198. mindspore/nn/probability/distribution/beta.py +3 -3
  199. mindspore/nn/probability/distribution/categorical.py +1 -1
  200. mindspore/nn/probability/distribution/cauchy.py +4 -2
  201. mindspore/nn/probability/distribution/exponential.py +6 -7
  202. mindspore/nn/probability/distribution/gamma.py +2 -2
  203. mindspore/nn/probability/distribution/gumbel.py +2 -2
  204. mindspore/nn/probability/distribution/half_normal.py +5 -3
  205. mindspore/nn/probability/distribution/logistic.py +5 -3
  206. mindspore/nn/probability/distribution/poisson.py +1 -1
  207. mindspore/nn/probability/distribution/uniform.py +5 -3
  208. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  209. mindspore/nn/reinforcement/tensor_array.py +1 -1
  210. mindspore/nn/wrap/__init__.py +6 -6
  211. mindspore/nn/wrap/cell_wrapper.py +178 -117
  212. mindspore/nn/wrap/grad_reducer.py +45 -36
  213. mindspore/nn/wrap/loss_scale.py +3 -3
  214. mindspore/numpy/array_creations.py +3 -3
  215. mindspore/numpy/array_ops.py +1 -1
  216. mindspore/numpy/utils.py +1 -2
  217. mindspore/numpy/utils_const.py +1 -2
  218. mindspore/opencv_core452.dll +0 -0
  219. mindspore/opencv_imgcodecs452.dll +0 -0
  220. mindspore/opencv_imgproc452.dll +0 -0
  221. mindspore/ops/__init__.py +3 -2
  222. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  223. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  224. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  225. mindspore/ops/_register_for_op.py +0 -11
  226. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  227. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  228. mindspore/ops/_vmap/vmap_array_ops.py +32 -6
  229. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  230. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  231. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  232. mindspore/ops/auto_generate/__init__.py +4 -3
  233. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
  234. mindspore/ops/auto_generate/gen_extend_func.py +286 -208
  235. mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
  236. mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
  237. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  238. mindspore/ops/composite/__init__.py +2 -1
  239. mindspore/ops/composite/base.py +19 -24
  240. mindspore/ops/composite/math_ops.py +6 -16
  241. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  242. mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
  243. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  244. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  248. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  249. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  250. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  251. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  252. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  254. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  255. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  256. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  257. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  259. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  260. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  263. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  264. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  267. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  268. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  271. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  272. mindspore/ops/function/__init__.py +28 -2
  273. mindspore/ops/function/_add_attr_func.py +58 -0
  274. mindspore/ops/function/array_func.py +1631 -2347
  275. mindspore/ops/function/clip_func.py +38 -45
  276. mindspore/ops/function/debug_func.py +36 -44
  277. mindspore/ops/function/grad/__init__.py +1 -0
  278. mindspore/ops/function/grad/grad_func.py +104 -71
  279. mindspore/ops/function/image_func.py +1 -1
  280. mindspore/ops/function/linalg_func.py +46 -78
  281. mindspore/ops/function/math_func.py +3024 -3855
  282. mindspore/ops/function/nn_func.py +678 -274
  283. mindspore/ops/function/other_func.py +159 -1
  284. mindspore/ops/function/parameter_func.py +17 -30
  285. mindspore/ops/function/random_func.py +216 -361
  286. mindspore/ops/function/reshard_func.py +4 -70
  287. mindspore/ops/function/sparse_func.py +3 -3
  288. mindspore/ops/function/sparse_unary_func.py +5 -5
  289. mindspore/ops/function/spectral_func.py +25 -58
  290. mindspore/ops/function/vmap_func.py +26 -18
  291. mindspore/ops/functional.py +8 -5
  292. mindspore/ops/functional_overload.py +655 -4
  293. mindspore/ops/op_info_register.py +32 -244
  294. mindspore/ops/operations/__init__.py +21 -14
  295. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  296. mindspore/ops/operations/_grad_ops.py +1 -10
  297. mindspore/ops/operations/_inner_ops.py +5 -76
  298. mindspore/ops/operations/_ms_kernel.py +4 -10
  299. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  300. mindspore/ops/operations/_scalar_ops.py +3 -2
  301. mindspore/ops/operations/_sequence_ops.py +1 -1
  302. mindspore/ops/operations/_tensor_array.py +1 -1
  303. mindspore/ops/operations/array_ops.py +39 -24
  304. mindspore/ops/operations/comm_ops.py +150 -107
  305. mindspore/ops/operations/custom_ops.py +287 -32
  306. mindspore/ops/operations/debug_ops.py +119 -16
  307. mindspore/ops/operations/inner_ops.py +1 -1
  308. mindspore/ops/operations/linalg_ops.py +1 -58
  309. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  310. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  311. mindspore/ops/operations/math_ops.py +21 -18
  312. mindspore/ops/operations/nn_ops.py +67 -224
  313. mindspore/ops/operations/other_ops.py +62 -9
  314. mindspore/ops/operations/random_ops.py +13 -7
  315. mindspore/ops/operations/reshard_ops.py +1 -1
  316. mindspore/ops/operations/sparse_ops.py +2 -2
  317. mindspore/ops/primitive.py +43 -32
  318. mindspore/ops/tensor_method.py +243 -17
  319. mindspore/ops_generate/__init__.py +0 -5
  320. mindspore/ops_generate/aclnn/__init__.py +0 -0
  321. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  322. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  323. mindspore/ops_generate/api/__init__.py +0 -0
  324. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  325. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  326. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  327. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  328. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  329. mindspore/ops_generate/api/gen_api.py +103 -0
  330. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  331. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  332. mindspore/ops_generate/common/__init__.py +0 -0
  333. mindspore/ops_generate/common/gen_constants.py +91 -0
  334. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  335. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  336. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  337. mindspore/ops_generate/gen_ops.py +23 -325
  338. mindspore/ops_generate/op_def/__init__.py +0 -0
  339. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  340. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  341. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
  342. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  343. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  344. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  345. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  346. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  347. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  348. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  349. mindspore/ops_generate/pyboost/__init__.py +0 -0
  350. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  351. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  352. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  353. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  354. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  355. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  356. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  357. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  358. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  359. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  360. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  361. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  362. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  363. mindspore/ops_generate/resources/__init__.py +0 -0
  364. mindspore/ops_generate/resources/resource_list.py +30 -0
  365. mindspore/ops_generate/resources/resource_loader.py +36 -0
  366. mindspore/ops_generate/resources/resource_manager.py +64 -0
  367. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  368. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  369. mindspore/parallel/__init__.py +6 -2
  370. mindspore/parallel/_auto_parallel_context.py +140 -12
  371. mindspore/parallel/_cell_wrapper.py +132 -15
  372. mindspore/parallel/_parallel_serialization.py +95 -4
  373. mindspore/parallel/_ps_context.py +1 -1
  374. mindspore/parallel/_recovery_context.py +7 -2
  375. mindspore/parallel/_tensor.py +142 -18
  376. mindspore/parallel/_utils.py +198 -25
  377. mindspore/parallel/algo_parameter_config.py +3 -3
  378. mindspore/parallel/auto_parallel.py +732 -0
  379. mindspore/parallel/checkpoint_convert.py +159 -0
  380. mindspore/parallel/checkpoint_transform.py +658 -37
  381. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  382. mindspore/parallel/cluster/run.py +1 -1
  383. mindspore/parallel/function/__init__.py +24 -0
  384. mindspore/parallel/function/reshard_func.py +258 -0
  385. mindspore/parallel/nn/__init__.py +25 -0
  386. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  387. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  388. mindspore/parallel/parameter_broadcast.py +24 -13
  389. mindspore/parallel/shard.py +137 -62
  390. mindspore/parallel/transform_safetensors.py +288 -95
  391. mindspore/pgodb140.dll +0 -0
  392. mindspore/pgort140.dll +0 -0
  393. mindspore/profiler/__init__.py +9 -5
  394. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  395. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  397. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
  398. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  399. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  400. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  401. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  402. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  403. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  404. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  405. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  406. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  407. mindspore/profiler/common/constant.py +12 -0
  408. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  409. mindspore/profiler/common/path_manager.py +24 -0
  410. mindspore/profiler/common/profiler_context.py +26 -2
  411. mindspore/profiler/common/profiler_meta_data.py +74 -0
  412. mindspore/profiler/common/profiler_parameters.py +59 -18
  413. mindspore/profiler/common/profiler_path_manager.py +66 -7
  414. mindspore/profiler/dynamic_profiler.py +112 -79
  415. mindspore/profiler/envprofiler.py +26 -1
  416. mindspore/profiler/experimental_config.py +197 -0
  417. mindspore/profiler/mstx.py +57 -14
  418. mindspore/profiler/platform/npu_profiler.py +33 -7
  419. mindspore/profiler/profiler.py +541 -45
  420. mindspore/profiler/profiler_action_controller.py +1 -1
  421. mindspore/profiler/profiler_interface.py +4 -0
  422. mindspore/profiler/schedule.py +57 -22
  423. mindspore/rewrite/api/node.py +15 -13
  424. mindspore/rewrite/api/symbol_tree.py +1 -1
  425. mindspore/run_check/_check_version.py +25 -14
  426. mindspore/run_check/run_check.py +1 -1
  427. mindspore/runtime/__init__.py +2 -2
  428. mindspore/runtime/executor.py +40 -11
  429. mindspore/runtime/memory.py +37 -13
  430. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  431. mindspore/swresample-4.dll +0 -0
  432. mindspore/swscale-6.dll +0 -0
  433. mindspore/tbbmalloc.dll +0 -0
  434. mindspore/tinyxml2.dll +0 -0
  435. mindspore/train/__init__.py +8 -8
  436. mindspore/train/_utils.py +43 -9
  437. mindspore/train/amp.py +1 -1
  438. mindspore/train/callback/__init__.py +2 -2
  439. mindspore/train/callback/_callback.py +2 -16
  440. mindspore/train/callback/_checkpoint.py +24 -40
  441. mindspore/train/callback/_cluster_monitor.py +14 -18
  442. mindspore/train/callback/_flops_collector.py +2 -3
  443. mindspore/train/callback/_history.py +7 -4
  444. mindspore/train/callback/_lambda_callback.py +2 -2
  445. mindspore/train/callback/_landscape.py +0 -3
  446. mindspore/train/callback/_loss_monitor.py +2 -1
  447. mindspore/train/callback/_on_request_exit.py +6 -5
  448. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  449. mindspore/train/callback/_summary_collector.py +8 -13
  450. mindspore/train/callback/_time_monitor.py +2 -1
  451. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
  452. mindspore/train/data_sink.py +25 -2
  453. mindspore/train/dataset_helper.py +4 -5
  454. mindspore/train/loss_scale_manager.py +8 -7
  455. mindspore/train/metrics/accuracy.py +3 -3
  456. mindspore/train/metrics/confusion_matrix.py +9 -9
  457. mindspore/train/metrics/error.py +3 -3
  458. mindspore/train/metrics/hausdorff_distance.py +4 -4
  459. mindspore/train/metrics/mean_surface_distance.py +3 -3
  460. mindspore/train/metrics/metric.py +0 -12
  461. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  462. mindspore/train/metrics/precision.py +8 -6
  463. mindspore/train/metrics/recall.py +9 -9
  464. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  465. mindspore/train/mind_ir_pb2.py +19 -12
  466. mindspore/train/model.py +262 -127
  467. mindspore/train/serialization.py +246 -988
  468. mindspore/train/summary/_summary_adapter.py +2 -2
  469. mindspore/train/summary/summary_record.py +1 -1
  470. mindspore/turbojpeg.dll +0 -0
  471. mindspore/utils/__init__.py +3 -2
  472. mindspore/utils/dryrun.py +4 -2
  473. mindspore/utils/hooks.py +81 -0
  474. mindspore/utils/runtime_execution_order_check.py +2 -0
  475. mindspore/utils/utils.py +138 -4
  476. mindspore/vcmeta.dll +0 -0
  477. mindspore/vcruntime140.dll +0 -0
  478. mindspore/vcruntime140_1.dll +0 -0
  479. mindspore/version.py +1 -1
  480. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
  481. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
  482. mindspore/_install_custom.py +0 -43
  483. mindspore/common/_register_for_adapter.py +0 -74
  484. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  485. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  486. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  487. mindspore/ops_generate/gen_constants.py +0 -190
  488. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  489. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  490. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  492. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
  493. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
@@ -13,20 +13,150 @@
 # limitations under the License.
 # ============================================================================
 """Holding mint APIs"""
+from mindspore._c_expression import _add_instance
+from mindspore._c_expression import _addcdiv_instance
 from mindspore._c_expression import _all_gather_matmul_instance
 from mindspore._c_expression import _bitwise_not_instance
 from mindspore._c_expression import _clamp_instance
 from mindspore._c_expression import _div_instance
 from mindspore._c_expression import _empty_instance
+from mindspore._c_expression import _floor_divide_instance
 from mindspore._c_expression import _fmod_instance
+from mindspore._c_expression import _gelu_instance
+from mindspore._c_expression import _gmm_instance
+from mindspore._c_expression import _gmm_backward_instance
+from mindspore._c_expression import _gmm_backward_fusion_instance
+from mindspore._c_expression import _greater_equal_instance
+from mindspore._c_expression import _kthvalue_instance
 from mindspore._c_expression import _lerp_instance
 from mindspore._c_expression import _matmul_reduce_scatter_instance
 from mindspore._c_expression import _max_instance
 from mindspore._c_expression import _min_instance
 from mindspore._c_expression import _nansum_instance
+from mindspore._c_expression import _pixel_shuffle_instance
 from mindspore._c_expression import _remainder_instance
 from mindspore._c_expression import _repeat_interleave_instance
+from mindspore._c_expression import _sub_instance
 from mindspore._c_expression import _where_instance
+from mindspore._c_expression import _xlogy_instance
+
+def add(*args, **kwargs):
+    r"""
+    add(input, other, *, alpha=1) -> Tensor
+
+    Adds scaled `other` to `input`.
+
+    .. math::
+
+        out_{i} = input_{i} + alpha \times other_{i}
+
+    Note:
+        - When `input` and `other` have different shapes,
+          they must be able to broadcast to a common shape.
+        - `input`, `other` and `alpha` comply with the implicit type conversion rules to make the data types
+          consistent.
+
+    Args:
+        input (Union[Tensor, number.Number, bool]): `input` is a number.Number, a bool, or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+        other (Union[Tensor, number.Number, bool]): `other` is a number.Number, a bool, or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+    Keyword Args:
+        alpha (number.Number, optional): A scaling factor applied to `other`. Default: ``1``.
+
+    Returns:
+        Tensor with a shape that is the same as the broadcasted shape of `input` and `other`,
+        and a data type that is the one with higher precision or more digits among `input`, `other` and `alpha`.
+
+    Raises:
+        TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+        TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
+        TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(1, mindspore.int32)
+        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+        >>> alpha = 0.5
+        >>> output = mint.add(x, y, alpha=alpha)  # x.add(y, alpha=alpha)
+        >>> print(output)
+        [3. 3.5 4.]
+        >>> # the data type of x is int32, the data type of y is float32,
+        >>> # alpha is a float, and the output takes the higher-precision type float32.
+        >>> print(output.dtype)
+        Float32
+    """
+    return _add_instance(*args, **kwargs)
+
+
+def __add__(*args, **kwargs):
+    r"""
+    __add__(input, other, *, alpha=1) -> Tensor
+
+    Alias for :func:`mindspore.mint.add`.
+
+    .. method:: mint.__add__(input, other, *, alpha=1) -> Tensor
+        :noindex:
+
+    Alias for the overload function of :func:`mindspore.mint.add`.
+    """
+    return _add_instance(*args, **kwargs)
+
+
+def addcdiv(*args, **kwargs):
+    r"""
+    addcdiv(input, tensor1, tensor2, *, value=1) -> Tensor
+
+    Performs the element-wise division of tensor `tensor1` by tensor `tensor2`,
+    multiplies the result by the scalar `value` and adds it to `input`.
+
+    .. math::
+        y[i] = input[i] + value * (tensor1[i] / tensor2[i])
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The tensor to be added.
+        tensor1 (Tensor): The numerator tensor.
+        tensor2 (Tensor): The denominator tensor.
+
+    Keyword Args:
+        value (Number, optional): The multiplier for `tensor1 / tensor2`. Default: ``1`` .
+
+    Returns:
+        Tensor, has the same shape and dtype as `tensor1 / tensor2`.
+
+    Raises:
+        TypeError: If `tensor1`, `tensor2` or `input` is not a Tensor.
+        ValueError: If `tensor1` could not be broadcast to a tensor with the shape of `tensor2`.
+        ValueError: If `value` could not be broadcast to tensors with the shape of `tensor1 / tensor2`.
+        ValueError: If `input` could not be broadcast to tensors with the shape of `value * (tensor1 / tensor2)`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
+        >>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
+        >>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
+        >>> y = mint.addcdiv(input_data, x1, x2, value=1)
+        >>> print(y)
+        [1.25 1.6666667 2.5 5. ]
+    """
+    return _addcdiv_instance(*args, **kwargs)
+
 
 def all_gather_matmul(*args, **kwargs):
     r"""
@@ -99,7 +229,7 @@ def all_gather_matmul(*args, **kwargs):
     Before running the following examples, you need to configure the communication environment variables.
 
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
-    configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+    configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.
 
     This example should be run with 2 devices.
@@ -351,6 +481,60 @@ def empty(*args, **kwargs):
     return _empty_instance(*args, **kwargs)
 
 
+def floor_divide(*args, **kwargs):
+    r"""
+    Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.
+
+    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+    The inputs must be two tensors or one tensor and one scalar.
+    When the inputs are two tensors,
+    their dtypes cannot both be bool, and their shapes can be broadcast.
+    When the inputs are one tensor and one scalar,
+    the scalar can only be a constant.
+
+    .. math::
+        out_{i} = \text{floor}(\frac{input_i}{other_i})
+
+    where :math:`floor` indicates the Floor operator. For more details,
+    please refer to the :class:`mindspore.mint.floor` operator.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Union[Tensor, Number, bool]): The first input is a number or
+            a bool or a tensor whose data type is number or bool.
+        other (Union[Tensor, Number, bool]): The second input is a number or
+            a bool or a tensor whose data type is number or bool.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting,
+        and the data type is the one with higher precision or more digits among the two inputs.
+
+    Raises:
+        TypeError: If `input` and `other` are not one of the following: Tensor, number.Number or bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> input = Tensor(np.array([2, 4, -1]), mindspore.int32)
+        >>> other = Tensor(np.array([3, 3, 3]), mindspore.int32)
+        >>> output = mint.floor_divide(input, other)
+        >>> print(output)
+        [ 0  1 -1]
+        >>> input = Tensor(2.0, mindspore.float32)
+        >>> other = Tensor(2.0, mindspore.float32)
+        >>> output = mint.floor_divide(input, other)
+        >>> print(output)
+        1.0
+    """
+    return _floor_divide_instance(*args, **kwargs)
+
+
 def fmod(*args, **kwargs):
     r"""
     fmod(input, other) -> Tensor
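Editor's note: floor division rounds toward negative infinity rather than toward zero, which is visible in the -1 of the docstring example above. A quick NumPy check of the same inputs (NumPy shares this rounding convention; this is not MindSpore code):

    import numpy as np

    x = np.array([2, 4, -1], dtype=np.int32)
    y = np.array([3, 3, 3], dtype=np.int32)
    print(np.floor_divide(x, y))  # [ 0  1 -1], matching the docstring
    # Truncating division would give 0 for -1/3 instead of -1:
    print(np.trunc(x / y).astype(np.int32))  # [0 1 0]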
@@ -393,6 +577,294 @@ def fmod(*args, **kwargs):
     return _fmod_instance(*args, **kwargs)
 
 
+def gelu(*args, **kwargs):
+    r"""
+    gelu(input, *, approximate='none') -> Tensor
+
+    Gaussian Error Linear Units activation function.
+
+    GELU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_,
+    and is also used in `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
+    <https://arxiv.org/abs/1810.04805>`_.
+
+    When the `approximate` argument is ``'none'``, GELU is defined as:
+
+    .. math::
+        GELU(x_i) = x_i*P(X < x_i),
+
+    where :math:`P` is the cumulative distribution function of the standard Gaussian distribution and
+    :math:`x_i` is the input element.
+
+    When the `approximate` argument is ``'tanh'``, GELU is estimated with:
+
+    .. math::
+        GELU(x_i) = 0.5 * x_i * (1 + \tanh(\sqrt{2 / \pi} * (x_i + 0.044715 * x_i^3)))
+
+    GELU Activation Function Graph:
+
+    .. image:: ../images/GELU.png
+        :align: center
+
+    .. note::
+        On the Ascend platform, when `input` is -inf, its gradient is 0,
+        and when `input` is inf, its gradient is `dout`.
+
+    Args:
+        input (Tensor): The input of the activation function GELU; the data type is float16, float32 or float64.
+
+    Keyword Args:
+        approximate (str, optional): The GELU approximation algorithm to use. Acceptable values are ``'none'``
+            and ``'tanh'`` . Default: ``'none'`` .
+
+    Returns:
+        Tensor, with the same type and shape as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not bfloat16, float16, float32 or float64.
+        ValueError: If `approximate` is neither ``'none'`` nor ``'tanh'``.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+        >>> result = mint.nn.functional.gelu(input)
+        >>> print(result)
+        [[-1.58655241e-01 3.99987316e+00 -0.00000000e+00]
+         [ 1.95449972e+00 -1.41860323e-06 9.0000000e+00]]
+        >>> result = mint.nn.functional.gelu(input, approximate="tanh")
+        >>> print(result)
+        [[-1.58808023e-01 3.99992990e+00 -3.10779147e-21]
+         [ 1.95459759e+00 -2.29180174e-07 9.0000000e+00]]
+    """
+    return _gelu_instance(*args, **kwargs)
+
+
+def gmm(*args, **kwargs):
+    r"""
+    gmm(x, weight, bias=None, group_list=None, group_type=0, group_list_type=0) -> tuple[Tensor]
+
+    Grouped matrix multiplication.
+
+    .. warning::
+        - This is an experimental API that is subject to change or deletion.
+        - `group_type` must be a constant.
+        - Only supported on Atlas A2 training series.
+        - When the type of `group_list` is tuple[int] or list[int], it should be a non-negative,
+          non-decreasing sequence indicating the indexes of each group along the split axis.
+          In this scenario, the arg `group_list_type` is ignored.
+
+    .. note::
+        - When `group_type` is 2, the tensors in `x` must be non-contiguous tensors that have
+          been transposed.
+        - Only when `group_type` is 0 and `bias` is None is the reverse derivative supported,
+          which is implemented by ops.function.math_func.gmm_backward or through automatic differentiation.
+
+    Args:
+        x (tuple[Tensor]): The first tensors to be multiplied; the tuple should contain exactly one tensor.
+        weight (tuple[Tensor]): The second tensors to be multiplied; the tuple should contain exactly one tensor.
+        bias (tuple[Tensor], optional): Biases added to the outputs; the tuple should contain exactly one tensor.
+            The shape of each tensor in `bias` should be :math:`(group\_list.shape[0], n)`
+            or :math:`(len(group\_list), n)`. In the training scenario, `bias` only supports None.
+            Default: ``None`` .
+        group_list (Union[Tensor, list[int], tuple[int]], optional): 1-D Tensor, list[int]
+            or tuple[int], indicating the indexes or sizes of each group along the split axis.
+            When `group_list` is list[int] or tuple[int], its length should be less than or equal to 128.
+            When `group_list` is a Tensor, its size should be less than or equal to 1024.
+            Supported dtypes: int64.
+            Default: ``None`` .
+
+            - If `group_list_type` is 0, it must be a non-negative, non-decreasing sequence.
+              When `group_type` is 0, the last element in `group_list` should be equal to
+              the first dimension of the tensor in `x` . When `group_type` is 2, the last element
+              in `group_list` should be equal to the second dimension of the tensor in `x` .
+
+            - If `group_list_type` is 1, the values in `group_list` are the sizes of each group.
+        group_type (int, optional): Represents the axis that needs to be grouped. For example,
+            :math:`C[m,n] = A[m,k] \times B[k,n]`. Default: ``0`` .
+
+            - If `group_type` is 0, the m-axis is grouped, meaning that the shape
+              of each tensor in `x` should be :math:`(m, k)` , the shape of each tensor in `weight`
+              should be :math:`(group\_list.shape[0], k, n)` or :math:`(len(group\_list), k, n)`,
+              and the shape of each tensor in the result would be :math:`(m, n)` .
+
+            - If `group_type` is 2, the k-axis is grouped, meaning that
+              the shape of each tensor in `x` should be :math:`(m, k)`, the shape of each
+              tensor in `weight` should be :math:`(k, n)`, and the shape of each tensor
+              in the result would be :math:`(group\_list.shape[0], m, n)` or :math:`(len(group\_list), m, n)`.
+        group_list_type (int, optional): If 0, the values in `group_list` are the cumulative sums
+            of the group sizes. If 1, the values in `group_list` are the sizes of each group.
+            Default: ``0`` .
+
+        `x` , `weight` and `bias` only support the following 3 dtype combinations:
+
+        - x: float16, weight: float16, bias: float16
+        - x: bfloat16, weight: bfloat16, bias: float32
+        - x: float32, weight: float32, bias: float32
+
+    Returns:
+        tuple[Tensor], the results of the grouped matrix multiplication.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.random.uniform(0, 1, (10, 20)).astype(np.float32))
+        >>> weight = Tensor(np.random.uniform(0, 1, (4, 20, 8)).astype(np.float32))
+        >>> group_list = Tensor([2, 4, 2, 2])
+        >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list, group_list_type=1)
+        >>> print(y[0].shape)
+        (10, 8)
+        >>> group_list = [2, 6, 8, 10]
+        >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list, group_list_type=0)
+        >>> print(y[0].shape)
+        (10, 8)
+    """
+    return _gmm_instance(*args, **kwargs)
+
+
+def gmm_backward(*args, **kwargs):
+    r"""
+    gmm_backward(grad, x, weight, *, group_list=None, group_list_type=0) -> tuple[tuple[Tensor]]
+
+    The gradient of ops.function.math_func.gmm.
+    """
+    return _gmm_backward_instance(*args, **kwargs)
+
+
+def gmm_backward_fusion(*args, **kwargs):
+    r"""
+    gmm_backward_fusion(grad, weight, *, group_list=None, group_list_type=0) -> tuple[tuple[Tensor]]
+
+    The gradient of ops.function.math_func.gmm, computing only dx.
+    """
+    return _gmm_backward_fusion_instance(*args, **kwargs)
+
+
+def greater_equal(*args, **kwargs):
+    r"""
+    greater_equal(input, other) -> Tensor
+
+    Computes the boolean value of :math:`input >= other` element-wise.
+
+    .. math::
+
+        out_{i} =\begin{cases}
+        & \text{True, if } input_{i}>=other_{i} \\
+        & \text{False, if } input_{i}<other_{i}
+        \end{cases}
+
+    Note:
+        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
+          consistent.
+        - The inputs must be two tensors or one tensor and one scalar.
+        - When the inputs are two tensors, their dtypes cannot both be bool,
+          and their shapes can be broadcast.
+        - When the inputs are one tensor and one scalar, the scalar can only be a constant.
+        - Broadcasting is supported.
+        - If the input Tensor can be broadcast, its lower dimensions are extended to the corresponding
+          higher dimensions of the other input by copying the values along those dimensions.
+
+    Args:
+        input (Union[Tensor, Number]): The first input is a number
+            or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
+        other (Union[Tensor, Number]): The second input. When the first input is a Tensor, the second input should
+            be a Number, or a Tensor of the number or bool_ data type. When the first input is a Scalar,
+            the second input must be a Tensor of the number or bool_ data type.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
+
+    Raises:
+        TypeError: If neither `input` nor `other` is a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
+        >>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
+        >>> output = mint.greater_equal(input, other)
+        >>> print(output)
+        [ True  True False]
+        >>> y = 2.1
+        >>> output = mint.greater_equal(input, y)
+        >>> print(output)
+        [False False  True]
+    """
+    return _greater_equal_instance(*args, **kwargs)
+
+
+def ge(*args, **kwargs):
+    r"""
+    ge(input, other) -> Tensor
+
+    Alias for :func:`mindspore.mint.greater_equal`.
+    """
+    return _greater_equal_instance(*args, **kwargs)
+
+
+def kthvalue(*args, **kwargs):
+    r"""
+    Calculates the k-th smallest value along the dimension specified by `dim` of the input
+    tensor, and returns a tuple (`values`, `indices`), where `values` contains the k-th smallest elements
+    and `indices` provides the index of each corresponding element.
+
+    Args:
+        input (Tensor): The input tensor, which can be of any dimension. Denote its shape as
+            :math:`(input_1, input_2, ..., input_N)` .
+        k (int): Specifies the k-th smallest element to retrieve.
+        dim (int, optional): The dimension along which to find the k-th smallest value. Default: ``-1`` .
+        keepdim (bool, optional): Whether to keep the reduced dimension; if ``True`` , the output keeps the same
+            dimension as the input, and if ``False`` , the dimension is reduced. Default: ``False`` .
+
+    Returns:
+        A tuple consisting of `values` and `indices`.
+
+        - **values** (Tensor) - The k-th smallest value of the input tensor, with the same dtype as `input`.
+
+          - If `keepdim` is ``True`` , the shape of the output tensors is
+            :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)` .
+          - If `keepdim` is ``False`` , the shape is
+            :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
+
+        - **indices** (Tensor) - The indices of the k-th smallest values of the input tensor, with the same shape
+          as `values` and dtype int64.
+
+    Raises:
+        TypeError: If `k` or `dim` is not an int.
+        TypeError: If `keepdim` is not a bool.
+        TypeError: If the dtype of `input` is not supported.
+        ValueError: If `input` is an empty Tensor.
+        RuntimeError: If `k` is not in the proper range.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input_x = Tensor(np.array([[1.01, 2.02, 3.03], [1.04, 2.05, 3.06]]), mindspore.float32)
+        >>> out = ops.auto_generate.kthvalue(input_x, 2, 1, False)
+        >>> print(out)
+        (Tensor(shape=[2], dtype=Float32, value= [ 2.01999998e+00, 2.04999995e+00]), Tensor(shape=[2], dtype=Int64, value= [1, 1]))
+        >>> out1 = ops.auto_generate.kthvalue(input_x, 2, 1, True)
+        >>> print(out1)
+        (Tensor(shape=[2, 1], dtype=Float32, value=
+        [[ 2.01999998e+00],
+         [ 2.04999995e+00]]), Tensor(shape=[2, 1], dtype=Int64, value=
+        [[1],
+         [1]]))
+    """
+    return _kthvalue_instance(*args, **kwargs)
+
+
 def lerp(*args, **kwargs):
     r"""
     lerp(input, end, weight) -> Tensor
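Editor's note: the exact and 'tanh' GELU formulas documented above can be compared numerically. A small sketch using only NumPy and math.erf (an accuracy check under the docstring's formulas, not MindSpore code):

    import numpy as np
    from math import erf, sqrt, pi

    x = np.array([-1.0, 0.5, 2.0])

    # Exact form: x * P(X < x) with X ~ N(0, 1), via the error function.
    exact = 0.5 * x * (1.0 + np.vectorize(erf)(x / sqrt(2.0)))

    # 'tanh' approximation from the docstring.
    approx = 0.5 * x * (1.0 + np.tanh(sqrt(2.0 / pi) * (x + 0.044715 * x**3)))

    print(np.max(np.abs(exact - approx)))  # ~1e-4 on this range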
@@ -510,7 +982,7 @@ def matmul_reduce_scatter(*args, **kwargs):
     Before running the following examples, you need to configure the communication environment variables.
 
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
-    configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+    configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.
 
     This example should be run with 2 devices.
@@ -578,7 +1050,7 @@ def max(*args, **kwargs):
             :math:`(input_1, input_2, ..., input_N)` , Complex tensor is not supported.
         dim (int): The dimension to reduce.
         keepdim (bool, optional): Whether to reduce dimension, if ``True`` the output will keep the same dimension as the
-            `input` , the output will reduce dimension if ``false``. Default: ``False``.
+            `input` , the output will reduce dimension if ``False``. Default: ``False``.
 
     Returns:
         tuple (Tensor), tuple of 2 tensors, containing the maximum value of the self tensor along the given
@@ -649,7 +1121,7 @@ def min(*args, **kwargs):
             :math:`(input_1, input_2, ..., input_N)` , Complex tensor is not supported.
         dim (int): The dimension to reduce.
         keepdim (bool, optional): Whether to reduce dimension, if ``True`` the output will keep the same dimension as the
-            input, the output will reduce dimension if ``false``. Default: ``False``.
+            input, the output will reduce dimension if ``False``. Default: ``False``.
 
     Returns:
         tuple (Tensor), tuple of 2 tensors, containing the minimum value of the self tensor along the given
@@ -739,6 +1211,51 @@ def nansum(*args, **kwargs):
     return _nansum_instance(*args, **kwargs)
 
 
+def pixel_shuffle(*args, **kwargs):
+    r"""
+    pixel_shuffle(input, upscale_factor) -> Tensor
+
+    Rearranges elements in a tensor according to an upscaling factor.
+
+    Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
+    to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is the upscale factor.
+
+    This is useful for implementing efficient sub-pixel convolution
+    with a stride of :math:`1/r`.
+
+    For a detailed introduction to the pixel_shuffle algorithm, refer to
+    `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ .
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(*, C \times r^2, H, W)` . The dimension of `input` is larger
+            than 2, and the length of the third-to-last dimension must be divisible by the square
+            of `upscale_factor`.
+        upscale_factor (int): The factor by which to shuffle the input Tensor; a positive integer.
+            `upscale_factor` is the :math:`r` mentioned above.
+
+    Returns:
+        - **output** (Tensor) - Tensor of shape :math:`(*, C, H \times r, W \times r)` .
+
+    Raises:
+        ValueError: If `upscale_factor` is not a positive integer.
+        ValueError: If the length of the third-to-last dimension is not divisible by the square of `upscale_factor`.
+        ValueError: If the dimension of `input` is less than 3.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import mint
+        >>> input = mint.randn(1, 9, 4, 4)
+        >>> output = mint.nn.functional.pixel_shuffle(input, 3)
+        >>> print(output.shape)
+        (1, 1, 12, 12)
+    """
+    return _pixel_shuffle_instance(*args, **kwargs)
+
+
 def remainder(*args, **kwargs):
     r"""
     remainder(input, other) -> Tensor
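Editor's note: the (*, C*r^2, H, W) -> (*, C, H*r, W*r) rearrangement performed by pixel_shuffle is the standard reshape/transpose formulation from the cited paper. A plain-NumPy sketch for 4-D inputs (not MindSpore's kernel):

    import numpy as np

    def pixel_shuffle_np(x, r):
        """(N, C*r*r, H, W) -> (N, C, H*r, W*r)"""
        n, crr, h, w = x.shape
        c = crr // (r * r)
        x = x.reshape(n, c, r, r, h, w)    # split channels into (C, r, r)
        x = x.transpose(0, 1, 4, 2, 5, 3)  # -> (N, C, H, r, W, r)
        return x.reshape(n, c, h * r, w * r)

    x = np.random.randn(1, 9, 4, 4).astype(np.float32)
    print(pixel_shuffle_np(x, 3).shape)  # (1, 1, 12, 12)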
@@ -830,6 +1347,77 @@ def repeat_interleave(*args, **kwargs):
     return _repeat_interleave_instance(*args, **kwargs)
 
 
+def sub(*args, **kwargs):
+    r"""
+    sub(input, other, *, alpha=1) -> Tensor
+
+    Subtracts scaled `other` from `input`.
+
+    .. math::
+
+        out_{i} = input_{i} - alpha \times other_{i}
+
+    Note:
+        - When the two inputs have different shapes,
+          they must be able to broadcast to a common shape.
+        - The two inputs and `alpha` comply with the implicit type conversion rules to make the data types
+          consistent.
+
+    Args:
+        input (Union[Tensor, number.Number, bool]): `input` is a number.Number, a bool, or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+        other (Union[Tensor, number.Number, bool]): `other` is a number.Number, a bool, or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+    Keyword Args:
+        alpha (number.Number, optional): A scaling factor applied to `other`. Default: ``1``.
+
+    Returns:
+        Tensor with a shape that is the same as the broadcasted shape of `input` and `other`,
+        and a data type that is the one with higher precision or more digits among the two inputs and `alpha`.
+
+    Raises:
+        TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+        TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
+        TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
+        >>> y = Tensor(1, mindspore.int32)
+        >>> alpha = 0.5
+        >>> output = mint.sub(x, y, alpha=alpha)
+        >>> print(output)
+        [3.5 4.5 5.5]
+        >>> # the data type of x is float32, the data type of y is int32,
+        >>> # alpha is a float, and the output takes the higher-precision type float32.
+        >>> print(output.dtype)
+        Float32
+    """
+    return _sub_instance(*args, **kwargs)
+
+
+def __sub__(*args, **kwargs):
+    r"""
+    __sub__(input, other, *, alpha=1) -> Tensor
+
+    Alias for :func:`mindspore.mint.sub`.
+
+    .. method:: mint.__sub__(input, other, *, alpha=1) -> Tensor
+        :noindex:
+
+    Alias for the overload function of :func:`mindspore.mint.sub`.
+    """
+    return _sub_instance(*args, **kwargs)
+
+
 def where(*args, **kwargs):
     r"""
     where(condition, input, other) -> Tensor
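Editor's note: sub(input, other, alpha=a) is element-wise input - a * other; the NumPy analogue of the docstring example (not MindSpore code, and NumPy's type-promotion rules differ in detail):

    import numpy as np

    x = np.array([4, 5, 6], dtype=np.float32)
    y = 1
    print(x - 0.5 * y)  # [3.5 4.5 5.5], matching the documented output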
@@ -877,7 +1465,58 @@ def where(*args, **kwargs):
     """
     return _where_instance(*args, **kwargs)
 
+
+def xlogy(*args, **kwargs):
+    r"""
+    xlogy(input, other) -> Tensor
+
+    Computes the first input multiplied by the logarithm of the second input element-wise.
+    Returns zero when `input` is zero.
+
+    .. math::
+
+        out_i = input_{i}\log{other_{i}}
+
+    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+    The inputs must be two tensors or one tensor and one scalar.
+    When the inputs are two tensors, their shapes can be broadcast.
+
+    Args:
+        input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
+            a bool or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+        other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
+            a bool or a tensor whose data type is number or bool when the first input is a tensor.
+            When the first input is a Scalar, the second input must be a Tensor whose data type is number or bool.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting,
+        and the data type is the one with higher precision or more digits among the two inputs.
+
+    Raises:
+        TypeError: If `input` and `other` are not a numbers.Number, a bool or a Tensor.
+        ValueError: If `input` could not be broadcast to a tensor with the shape of `other`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
+        >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
+        >>> output = ops.xlogy(input, other)
+        >>> print(output)
+        [-3.465736 0. 2.7725887]
+    """
+    return _xlogy_instance(*args, **kwargs)
+
 __all__ = [
+    "add",
+    "__add__",
+    "addcdiv",
     "all_gather_matmul",
     "bitwise_not",
     "clamp",
@@ -885,13 +1524,25 @@ __all__ = [
     "div",
     "divide",
     "empty",
+    "floor_divide",
     "fmod",
+    "gelu",
+    "gmm",
+    "gmm_backward",
+    "gmm_backward_fusion",
+    "greater_equal",
+    "ge",
+    "kthvalue",
     "lerp",
     "matmul_reduce_scatter",
     "max",
     "min",
     "nansum",
+    "pixel_shuffle",
     "remainder",
     "repeat_interleave",
+    "sub",
+    "__sub__",
     "where",
+    "xlogy",
 ]
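Editor's note: the practical point of xlogy is its input == 0 convention — the result is 0 even where log(other) would be -inf or nan. A NumPy sketch of that convention (not MindSpore code):

    import numpy as np

    x = np.array([-5.0, 0.0, 4.0])
    y = np.array([2.0, 0.0, 2.0])  # log(0) = -inf, but x == 0 there

    with np.errstate(divide="ignore", invalid="ignore"):
        out = np.where(x == 0, 0.0, x * np.log(y))
    print(out)  # [-3.4657359  0.         2.7725887]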