mindspore 2.5.0__cp311-cp311-win_amd64.whl → 2.6.0rc1__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (491)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +24 -193
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +97 -74
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +1915 -3287
  46. mindspore/common/api.py +341 -354
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/hook_handle.py +5 -3
  52. mindspore/common/initializer.py +10 -6
  53. mindspore/common/jit_begin_end.py +94 -0
  54. mindspore/common/jit_config.py +6 -1
  55. mindspore/common/jit_context.py +76 -0
  56. mindspore/common/jit_trace.py +378 -0
  57. mindspore/common/lazy_inline.py +2 -2
  58. mindspore/common/mutable.py +5 -4
  59. mindspore/common/parameter.py +106 -39
  60. mindspore/common/seed.py +2 -2
  61. mindspore/common/sparse_tensor.py +23 -17
  62. mindspore/common/tensor.py +297 -714
  63. mindspore/communication/__init__.py +7 -5
  64. mindspore/communication/_comm_helper.py +47 -2
  65. mindspore/communication/comm_func.py +70 -53
  66. mindspore/communication/management.py +83 -17
  67. mindspore/context.py +214 -560
  68. mindspore/dataset/__init__.py +44 -20
  69. mindspore/dataset/audio/__init__.py +2 -8
  70. mindspore/dataset/audio/transforms.py +3 -17
  71. mindspore/dataset/core/config.py +3 -3
  72. mindspore/dataset/engine/cache_client.py +1 -1
  73. mindspore/dataset/engine/datasets.py +102 -120
  74. mindspore/dataset/engine/datasets_audio.py +22 -22
  75. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  76. mindspore/dataset/engine/datasets_text.py +78 -85
  77. mindspore/dataset/engine/datasets_user_defined.py +108 -76
  78. mindspore/dataset/engine/datasets_vision.py +111 -108
  79. mindspore/dataset/engine/iterators.py +5 -3
  80. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  81. mindspore/dataset/engine/samplers.py +279 -57
  82. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  83. mindspore/dataset/engine/validators.py +10 -0
  84. mindspore/dataset/text/__init__.py +7 -6
  85. mindspore/dataset/text/transforms.py +6 -5
  86. mindspore/dataset/text/utils.py +3 -3
  87. mindspore/dataset/transforms/__init__.py +0 -9
  88. mindspore/dataset/transforms/transforms.py +3 -3
  89. mindspore/dataset/utils/browse_dataset.py +1 -1
  90. mindspore/dataset/vision/__init__.py +2 -9
  91. mindspore/dataset/vision/transforms.py +202 -158
  92. mindspore/dataset/vision/utils.py +7 -5
  93. mindspore/device_context/ascend/op_debug.py +60 -1
  94. mindspore/device_context/ascend/op_tuning.py +0 -4
  95. mindspore/device_manager.py +39 -3
  96. mindspore/dnnl.dll +0 -0
  97. mindspore/dpcmi.dll +0 -0
  98. mindspore/experimental/es/embedding_service.py +35 -27
  99. mindspore/experimental/map_parameter.py +4 -4
  100. mindspore/experimental/optim/adadelta.py +22 -26
  101. mindspore/experimental/optim/adagrad.py +4 -4
  102. mindspore/experimental/optim/adam.py +4 -0
  103. mindspore/experimental/optim/adamax.py +4 -4
  104. mindspore/experimental/optim/adamw.py +4 -0
  105. mindspore/experimental/optim/asgd.py +1 -1
  106. mindspore/experimental/optim/lr_scheduler.py +40 -22
  107. mindspore/experimental/optim/radam.py +5 -5
  108. mindspore/experimental/optim/rprop.py +1 -1
  109. mindspore/experimental/optim/sgd.py +1 -1
  110. mindspore/hal/contiguous_tensors_handle.py +6 -10
  111. mindspore/hal/device.py +55 -81
  112. mindspore/hal/event.py +38 -55
  113. mindspore/hal/memory.py +93 -144
  114. mindspore/hal/stream.py +81 -125
  115. mindspore/include/dataset/constants.h +7 -4
  116. mindspore/include/dataset/execute.h +2 -2
  117. mindspore/jpeg62.dll +0 -0
  118. mindspore/log.py +40 -2
  119. mindspore/mindrecord/__init__.py +20 -7
  120. mindspore/mindspore_backend_common.dll +0 -0
  121. mindspore/mindspore_backend_manager.dll +0 -0
  122. mindspore/mindspore_common.dll +0 -0
  123. mindspore/mindspore_core.dll +0 -0
  124. mindspore/mindspore_dump.dll +0 -0
  125. mindspore/mindspore_frontend.dll +0 -0
  126. mindspore/mindspore_glog.dll +0 -0
  127. mindspore/mindspore_memory_pool.dll +0 -0
  128. mindspore/mindspore_ms_backend.dll +0 -0
  129. mindspore/mindspore_ops.dll +0 -0
  130. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  131. mindspore/mindspore_ops_kernel_common.dll +0 -0
  132. mindspore/mindspore_profiler.dll +0 -0
  133. mindspore/mindspore_pyboost.dll +0 -0
  134. mindspore/mindspore_pynative.dll +0 -0
  135. mindspore/mindspore_res_manager.dll +0 -0
  136. mindspore/mindspore_runtime_pipeline.dll +0 -0
  137. mindspore/mint/__init__.py +131 -700
  138. mindspore/mint/distributed/__init__.py +5 -1
  139. mindspore/mint/distributed/distributed.py +194 -109
  140. mindspore/mint/linalg/__init__.py +2 -0
  141. mindspore/mint/nn/__init__.py +280 -18
  142. mindspore/mint/nn/functional.py +282 -64
  143. mindspore/mint/nn/layer/__init__.py +4 -0
  144. mindspore/mint/nn/layer/_functions.py +7 -3
  145. mindspore/mint/nn/layer/activation.py +120 -13
  146. mindspore/mint/nn/layer/conv.py +218 -24
  147. mindspore/mint/nn/layer/normalization.py +15 -16
  148. mindspore/mint/nn/layer/padding.py +1 -1
  149. mindspore/mint/nn/layer/pooling.py +66 -1
  150. mindspore/mint/optim/__init__.py +2 -1
  151. mindspore/mint/optim/sgd.py +171 -0
  152. mindspore/msobj140.dll +0 -0
  153. mindspore/mspdb140.dll +0 -0
  154. mindspore/mspdbcore.dll +0 -0
  155. mindspore/mspdbst.dll +0 -0
  156. mindspore/mspft140.dll +0 -0
  157. mindspore/msvcdis140.dll +0 -0
  158. mindspore/msvcp140_1.dll +0 -0
  159. mindspore/msvcp140_2.dll +0 -0
  160. mindspore/msvcp140_atomic_wait.dll +0 -0
  161. mindspore/msvcp140_codecvt_ids.dll +0 -0
  162. mindspore/nn/__init__.py +4 -1
  163. mindspore/nn/cell.py +1250 -176
  164. mindspore/nn/layer/activation.py +23 -21
  165. mindspore/nn/layer/basic.py +22 -16
  166. mindspore/nn/layer/container.py +1 -1
  167. mindspore/nn/layer/conv.py +22 -17
  168. mindspore/nn/layer/embedding.py +9 -8
  169. mindspore/nn/layer/normalization.py +48 -42
  170. mindspore/nn/layer/pooling.py +75 -31
  171. mindspore/nn/layer/transformer.py +11 -10
  172. mindspore/nn/learning_rate_schedule.py +4 -2
  173. mindspore/nn/loss/loss.py +27 -19
  174. mindspore/nn/optim/ada_grad.py +6 -5
  175. mindspore/nn/optim/adadelta.py +9 -7
  176. mindspore/nn/optim/adafactor.py +1 -1
  177. mindspore/nn/optim/adam.py +16 -12
  178. mindspore/nn/optim/adamax.py +8 -7
  179. mindspore/nn/optim/adasum.py +5 -5
  180. mindspore/nn/optim/asgd.py +1 -1
  181. mindspore/nn/optim/ftrl.py +11 -9
  182. mindspore/nn/optim/lamb.py +1 -1
  183. mindspore/nn/optim/lazyadam.py +12 -10
  184. mindspore/nn/optim/momentum.py +7 -6
  185. mindspore/nn/optim/optimizer.py +2 -2
  186. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  187. mindspore/nn/optim/rmsprop.py +13 -12
  188. mindspore/nn/optim/rprop.py +9 -7
  189. mindspore/nn/optim/sgd.py +9 -6
  190. mindspore/nn/optim/tft_wrapper.py +5 -2
  191. mindspore/nn/probability/bijector/bijector.py +17 -11
  192. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  193. mindspore/nn/probability/bijector/invert.py +2 -2
  194. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  195. mindspore/nn/probability/bijector/softplus.py +3 -2
  196. mindspore/nn/probability/distribution/beta.py +3 -3
  197. mindspore/nn/probability/distribution/categorical.py +1 -1
  198. mindspore/nn/probability/distribution/cauchy.py +4 -2
  199. mindspore/nn/probability/distribution/exponential.py +6 -7
  200. mindspore/nn/probability/distribution/gamma.py +2 -2
  201. mindspore/nn/probability/distribution/gumbel.py +2 -2
  202. mindspore/nn/probability/distribution/half_normal.py +5 -3
  203. mindspore/nn/probability/distribution/logistic.py +5 -3
  204. mindspore/nn/probability/distribution/poisson.py +1 -1
  205. mindspore/nn/probability/distribution/uniform.py +5 -3
  206. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  207. mindspore/nn/reinforcement/tensor_array.py +1 -1
  208. mindspore/nn/wrap/__init__.py +6 -6
  209. mindspore/nn/wrap/cell_wrapper.py +178 -117
  210. mindspore/nn/wrap/grad_reducer.py +45 -36
  211. mindspore/nn/wrap/loss_scale.py +3 -3
  212. mindspore/numpy/array_creations.py +3 -3
  213. mindspore/numpy/array_ops.py +1 -1
  214. mindspore/numpy/math_ops.py +4 -4
  215. mindspore/numpy/utils.py +1 -2
  216. mindspore/numpy/utils_const.py +1 -2
  217. mindspore/opencv_core452.dll +0 -0
  218. mindspore/opencv_imgcodecs452.dll +0 -0
  219. mindspore/opencv_imgproc452.dll +0 -0
  220. mindspore/ops/__init__.py +3 -2
  221. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  222. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  223. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  224. mindspore/ops/_register_for_op.py +0 -11
  225. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  226. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  227. mindspore/ops/_vmap/vmap_array_ops.py +7 -6
  228. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  229. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  230. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  231. mindspore/ops/auto_generate/__init__.py +4 -3
  232. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
  233. mindspore/ops/auto_generate/gen_extend_func.py +281 -135
  234. mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
  235. mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
  236. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  237. mindspore/ops/composite/__init__.py +2 -1
  238. mindspore/ops/composite/base.py +19 -24
  239. mindspore/ops/composite/math_ops.py +6 -16
  240. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  241. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
  242. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  243. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  244. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  248. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  249. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  250. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  251. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  252. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  254. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  255. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  256. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  257. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  259. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  260. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  263. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  264. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  267. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  268. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  271. mindspore/ops/function/__init__.py +28 -2
  272. mindspore/ops/function/_add_attr_func.py +58 -0
  273. mindspore/ops/function/array_func.py +1629 -2345
  274. mindspore/ops/function/clip_func.py +38 -45
  275. mindspore/ops/function/debug_func.py +36 -44
  276. mindspore/ops/function/grad/__init__.py +1 -0
  277. mindspore/ops/function/grad/grad_func.py +104 -71
  278. mindspore/ops/function/image_func.py +1 -1
  279. mindspore/ops/function/linalg_func.py +46 -78
  280. mindspore/ops/function/math_func.py +3035 -3705
  281. mindspore/ops/function/nn_func.py +676 -241
  282. mindspore/ops/function/other_func.py +159 -1
  283. mindspore/ops/function/parameter_func.py +17 -30
  284. mindspore/ops/function/random_func.py +204 -361
  285. mindspore/ops/function/reshard_func.py +4 -70
  286. mindspore/ops/function/sparse_func.py +3 -3
  287. mindspore/ops/function/sparse_unary_func.py +5 -5
  288. mindspore/ops/function/spectral_func.py +25 -58
  289. mindspore/ops/function/vmap_func.py +24 -17
  290. mindspore/ops/functional.py +6 -4
  291. mindspore/ops/functional_overload.py +547 -4
  292. mindspore/ops/op_info_register.py +32 -244
  293. mindspore/ops/operations/__init__.py +10 -5
  294. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  295. mindspore/ops/operations/_grad_ops.py +1 -10
  296. mindspore/ops/operations/_inner_ops.py +5 -76
  297. mindspore/ops/operations/_ms_kernel.py +4 -10
  298. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  299. mindspore/ops/operations/_scalar_ops.py +3 -2
  300. mindspore/ops/operations/_sequence_ops.py +1 -1
  301. mindspore/ops/operations/_tensor_array.py +1 -1
  302. mindspore/ops/operations/array_ops.py +37 -22
  303. mindspore/ops/operations/comm_ops.py +150 -107
  304. mindspore/ops/operations/custom_ops.py +221 -23
  305. mindspore/ops/operations/debug_ops.py +115 -16
  306. mindspore/ops/operations/inner_ops.py +1 -1
  307. mindspore/ops/operations/linalg_ops.py +1 -58
  308. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  309. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  310. mindspore/ops/operations/math_ops.py +21 -18
  311. mindspore/ops/operations/nn_ops.py +65 -191
  312. mindspore/ops/operations/other_ops.py +62 -9
  313. mindspore/ops/operations/random_ops.py +13 -7
  314. mindspore/ops/operations/reshard_ops.py +1 -1
  315. mindspore/ops/operations/sparse_ops.py +2 -2
  316. mindspore/ops/primitive.py +43 -32
  317. mindspore/ops/tensor_method.py +232 -13
  318. mindspore/ops_generate/__init__.py +0 -5
  319. mindspore/ops_generate/aclnn/__init__.py +0 -0
  320. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  321. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  322. mindspore/ops_generate/api/__init__.py +0 -0
  323. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  324. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  325. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  326. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  327. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  328. mindspore/ops_generate/api/gen_api.py +103 -0
  329. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  330. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  331. mindspore/ops_generate/common/__init__.py +0 -0
  332. mindspore/ops_generate/common/gen_constants.py +91 -0
  333. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  334. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  335. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  336. mindspore/ops_generate/gen_ops.py +23 -325
  337. mindspore/ops_generate/op_def/__init__.py +0 -0
  338. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  339. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  340. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
  341. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  342. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  343. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  344. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  345. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  346. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  347. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  348. mindspore/ops_generate/pyboost/__init__.py +0 -0
  349. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  350. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  351. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  352. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  353. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  354. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  355. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  356. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  357. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  358. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  359. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  360. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  361. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  362. mindspore/ops_generate/resources/__init__.py +0 -0
  363. mindspore/ops_generate/resources/resource_list.py +30 -0
  364. mindspore/ops_generate/resources/resource_loader.py +36 -0
  365. mindspore/ops_generate/resources/resource_manager.py +64 -0
  366. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  367. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  368. mindspore/parallel/__init__.py +6 -2
  369. mindspore/parallel/_auto_parallel_context.py +133 -6
  370. mindspore/parallel/_cell_wrapper.py +130 -15
  371. mindspore/parallel/_parallel_serialization.py +95 -4
  372. mindspore/parallel/_ps_context.py +1 -1
  373. mindspore/parallel/_recovery_context.py +7 -2
  374. mindspore/parallel/_tensor.py +142 -18
  375. mindspore/parallel/_utils.py +198 -25
  376. mindspore/parallel/algo_parameter_config.py +3 -3
  377. mindspore/parallel/auto_parallel.py +732 -0
  378. mindspore/parallel/checkpoint_convert.py +159 -0
  379. mindspore/parallel/checkpoint_transform.py +656 -37
  380. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  381. mindspore/parallel/cluster/run.py +1 -1
  382. mindspore/parallel/function/__init__.py +24 -0
  383. mindspore/parallel/function/reshard_func.py +259 -0
  384. mindspore/parallel/nn/__init__.py +25 -0
  385. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  386. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  387. mindspore/parallel/parameter_broadcast.py +24 -13
  388. mindspore/parallel/shard.py +137 -61
  389. mindspore/parallel/transform_safetensors.py +287 -95
  390. mindspore/pgodb140.dll +0 -0
  391. mindspore/pgort140.dll +0 -0
  392. mindspore/profiler/__init__.py +9 -5
  393. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  394. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  395. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
  397. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  398. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  399. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  400. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  401. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  402. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  403. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  404. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  405. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  406. mindspore/profiler/common/constant.py +12 -0
  407. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  408. mindspore/profiler/common/path_manager.py +24 -0
  409. mindspore/profiler/common/profiler_context.py +26 -2
  410. mindspore/profiler/common/profiler_meta_data.py +74 -0
  411. mindspore/profiler/common/profiler_parameters.py +59 -18
  412. mindspore/profiler/common/profiler_path_manager.py +66 -7
  413. mindspore/profiler/dynamic_profiler.py +112 -79
  414. mindspore/profiler/envprofiler.py +26 -1
  415. mindspore/profiler/experimental_config.py +197 -0
  416. mindspore/profiler/mstx.py +57 -14
  417. mindspore/profiler/platform/npu_profiler.py +33 -7
  418. mindspore/profiler/profiler.py +541 -45
  419. mindspore/profiler/profiler_action_controller.py +1 -1
  420. mindspore/profiler/profiler_interface.py +4 -0
  421. mindspore/profiler/schedule.py +57 -22
  422. mindspore/rewrite/api/node.py +15 -13
  423. mindspore/rewrite/api/symbol_tree.py +1 -1
  424. mindspore/run_check/_check_version.py +25 -14
  425. mindspore/run_check/run_check.py +1 -1
  426. mindspore/runtime/__init__.py +2 -2
  427. mindspore/runtime/executor.py +40 -11
  428. mindspore/runtime/memory.py +25 -8
  429. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  430. mindspore/swresample-4.dll +0 -0
  431. mindspore/swscale-6.dll +0 -0
  432. mindspore/tbbmalloc.dll +0 -0
  433. mindspore/tinyxml2.dll +0 -0
  434. mindspore/train/__init__.py +8 -8
  435. mindspore/train/_utils.py +35 -7
  436. mindspore/train/amp.py +1 -1
  437. mindspore/train/callback/__init__.py +2 -2
  438. mindspore/train/callback/_callback.py +2 -16
  439. mindspore/train/callback/_checkpoint.py +24 -40
  440. mindspore/train/callback/_cluster_monitor.py +14 -18
  441. mindspore/train/callback/_flops_collector.py +2 -3
  442. mindspore/train/callback/_history.py +7 -4
  443. mindspore/train/callback/_lambda_callback.py +2 -2
  444. mindspore/train/callback/_landscape.py +0 -3
  445. mindspore/train/callback/_loss_monitor.py +2 -1
  446. mindspore/train/callback/_on_request_exit.py +6 -5
  447. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  448. mindspore/train/callback/_summary_collector.py +8 -13
  449. mindspore/train/callback/_time_monitor.py +2 -1
  450. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
  451. mindspore/train/data_sink.py +25 -2
  452. mindspore/train/dataset_helper.py +4 -5
  453. mindspore/train/loss_scale_manager.py +8 -7
  454. mindspore/train/metrics/accuracy.py +3 -3
  455. mindspore/train/metrics/confusion_matrix.py +9 -9
  456. mindspore/train/metrics/error.py +3 -3
  457. mindspore/train/metrics/hausdorff_distance.py +4 -4
  458. mindspore/train/metrics/mean_surface_distance.py +3 -3
  459. mindspore/train/metrics/metric.py +0 -12
  460. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  461. mindspore/train/metrics/precision.py +8 -6
  462. mindspore/train/metrics/recall.py +9 -9
  463. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  464. mindspore/train/mind_ir_pb2.py +19 -12
  465. mindspore/train/model.py +176 -103
  466. mindspore/train/serialization.py +246 -988
  467. mindspore/train/summary/_summary_adapter.py +2 -2
  468. mindspore/train/summary/summary_record.py +1 -1
  469. mindspore/turbojpeg.dll +0 -0
  470. mindspore/utils/__init__.py +3 -2
  471. mindspore/utils/dryrun.py +4 -2
  472. mindspore/utils/hooks.py +81 -0
  473. mindspore/utils/utils.py +138 -4
  474. mindspore/vcmeta.dll +0 -0
  475. mindspore/vcruntime140.dll +0 -0
  476. mindspore/vcruntime140_1.dll +0 -0
  477. mindspore/version.py +1 -1
  478. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
  479. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
  480. mindspore/_install_custom.py +0 -43
  481. mindspore/common/_register_for_adapter.py +0 -74
  482. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  483. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  484. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  485. mindspore/ops_generate/gen_constants.py +0 -190
  486. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  487. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  488. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  489. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  490. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
@@ -13,20 +13,147 @@
 # limitations under the License.
 # ============================================================================
 """Holding mint APIs"""
+from mindspore._c_expression import _add_instance
+from mindspore._c_expression import _addcdiv_instance
 from mindspore._c_expression import _all_gather_matmul_instance
 from mindspore._c_expression import _bitwise_not_instance
 from mindspore._c_expression import _clamp_instance
 from mindspore._c_expression import _div_instance
 from mindspore._c_expression import _empty_instance
+from mindspore._c_expression import _floor_divide_instance
 from mindspore._c_expression import _fmod_instance
+from mindspore._c_expression import _gelu_instance
+from mindspore._c_expression import _greater_equal_instance
+from mindspore._c_expression import _kthvalue_instance
 from mindspore._c_expression import _lerp_instance
 from mindspore._c_expression import _matmul_reduce_scatter_instance
 from mindspore._c_expression import _max_instance
 from mindspore._c_expression import _min_instance
 from mindspore._c_expression import _nansum_instance
+from mindspore._c_expression import _pixel_shuffle_instance
 from mindspore._c_expression import _remainder_instance
 from mindspore._c_expression import _repeat_interleave_instance
+from mindspore._c_expression import _sub_instance
 from mindspore._c_expression import _where_instance
+from mindspore._c_expression import _xlogy_instance
+
+def add(*args, **kwargs):
+    r"""
+    add(input, other, *, alpha=1) -> Tensor
+
+    Adds scaled other value to `self`.
+
+    .. math::
+
+        out_{i} = self_{i} + alpha \times other_{i}
+
+    Note:
+        - When `self` and `other` have different shapes,
+          they must be able to broadcast to a common shape.
+        - `self`, `other` and `alpha` comply with the implicit type conversion rules to make the data types
+          consistent.
+
+    Args:
+        input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+        other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+    Keyword Args:
+        alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
+
+    Returns:
+        Tensor with a shape that is the same as the broadcasted shape of the `self` and `other`,
+        and the data type is the one with higher precision or higher digits among `self`, `other` and `alpha`.
+
+    Raises:
+        TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+        TypeError: If `alpha` is of type float but `self` and `other` are not of type float.
+        TypeError: If `alpha` is of type bool but `self` and `other` are not of type bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(1, mindspore.int32)
+        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+        >>> alpha = 0.5
+        >>> output = mint.add(x, y, alpha=alpha) # x.add(y, alpha=alpha)
+        >>> print(output)
+        [3. 3.5 4.]
+        >>> # the data type of x is int32, the data type of y is float32,
+        >>> # alpha is a float, and the output is the data format of higher precision float32.
+        >>> print(output.dtype)
+        Float32
+    """
+    return _add_instance(*args, **kwargs)
+
+
+def __add__(*args, **kwargs):
+    r"""
+    __add__(input, other, *, alpha=1) -> Tensor
+
+    Alias for :func:`mindspore.mint.add`.
+
+    .. method:: mint.__add__(input, other, *, alpha=1) -> Tensor
+        :noindex:
+
+    Alias for overload function of :func:`mindspore.mint.add`.
+    """
+    return _add_instance(*args, **kwargs)
+
+
+def addcdiv(*args, **kwargs):
+    r"""
+    addcdiv_ext(input, tensor1, tensor2, *, value=1) -> Tensor
+
+    Performs the element-wise division of tensor tensor1 by tensor tensor2,
+    multiply the result by the scalar value and add it to input data.
+
+    .. math::
+        y[i] = input[i] + value * (tensor1[i] / tensor2[i])
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The tensor to be added.
+        tensor1 (Tensor): The numerator tensor.
+        tensor2 (Tensor): The denominator tensor.
+
+    Keyword Args:
+        value (Number, optional): The multiplier for tensor1/tensor2. Default: ``1`` .
+
+    Returns:
+        Tensor, has the same shape and dtype as tensor1/tensor2.
+
+    Raises:
+        TypeError: If dtype of `tensor1`, `tensor2`, or `input` is not tensor.
+        ValueError: If `tensor1` could not be broadcast to a tensor with shape of `tensor2`.
+        ValueError: If `value` could not be broadcast to tensors with shapes of `tensor1/tensor2`.
+        ValueError: If `input` could not be broadcast to tensors with shapes of `value*(tensor1/tensor2)`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
+        >>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
+        >>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
+        >>> y = ops.addcdiv_ext(input_data, x1, x2, value=1)
+        >>> print(y)
+        [1.25 1.6666667 2.5 5. ]
+    """
+    return _addcdiv_instance(*args, **kwargs)
+
 
 def all_gather_matmul(*args, **kwargs):
     r"""
@@ -99,7 +226,7 @@ def all_gather_matmul(*args, **kwargs):
         Before running the following examples, you need to configure the communication environment variables.
 
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
-        configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -351,6 +478,60 @@ def empty(*args, **kwargs):
     return _empty_instance(*args, **kwargs)
 
 
+def floor_divide(*args, **kwargs):
+    r"""
+    Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
+
+    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+    Inputs must be two tensors or one tensor and one scalar.
+    When the inputs are two tensors,
+    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
+    When the inputs are one tensor and one scalar,
+    the scalar could only be a constant.
+
+    .. math::
+        out_{i} = \text{floor}( \frac{input_i}{other_i})
+
+    where the :math:`floor` indicates the Floor operator. For more details,
+    please refer to the :class:`mindspore.mint.floor` operator.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Union[Tensor, Number, bool]): The first input is a number or
+            a bool or a tensor whose data type is number or bool.
+        other (Union[Tensor, Number, bool]): The second input is a number or
+            a bool or a tensor whose data type is number or bool.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting,
+        and the data type is the one with higher precision or higher digits among the two inputs.
+
+    Raises:
+        TypeError: If `input` and `other` are not the following: Tensor, number.Number or bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> input = Tensor(np.array([2, 4, -1]), mindspore.int32)
+        >>> other = Tensor(np.array([3, 3, 3]), mindspore.int32)
+        >>> output = mint.floor_divide(input, other)
+        >>> print(output)
+        [ 0 1 -1]
+        >>> input = Tensor(2.0, mindspore.float32)
+        >>> other = Tensor(2.0, mindspore.float32)
+        >>> output = mint.floor_divide(input, other)
+        >>> print(output)
+        1.0
+    """
+    return _floor_divide_instance(*args, **kwargs)
+
+
 def fmod(*args, **kwargs):
     r"""
     fmod(input, other) -> Tensor
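`floor_divide` rounds toward negative infinity, which differs from truncation only when the operands have opposite signs; `fmod` (the context function below) instead keeps the sign of the dividend. A short comparison in the same doctest style as the surrounding examples, assuming the two mint wrappers behave like their `ops` counterparts:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> a = Tensor(np.array([7, -7]), mindspore.int32)
>>> b = Tensor(np.array([3, 3]), mindspore.int32)
>>> print(mint.floor_divide(a, b))  # -7 / 3 = -2.33... floors to -3
[ 2 -3]
>>> print(mint.fmod(a, b))          # the remainder keeps the dividend's sign
[ 1 -1]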
@@ -393,6 +574,192 @@ def fmod(*args, **kwargs):
     return _fmod_instance(*args, **kwargs)
 
 
+def gelu(*args, **kwargs):
+    r"""
+    gelu(input, *, approximate='none') -> Tensor
+
+    Gaussian Error Linear Units activation function.
+
+    GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
+    And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
+    <https://arxiv.org/abs/1810.04805>`_.
+
+    When `approximate` argument is `none`, GELU is defined as follows:
+
+    .. math::
+        GELU(x_i) = x_i*P(X < x_i),
+
+    where :math:`P` is the cumulative distribution function of the standard Gaussian distribution,
+    :math:`x_i` is the input element.
+
+    When `approximate` argument is `tanh`, GELU is estimated with:
+
+    .. math::
+        GELU(x_i) = 0.5 * x_i * (1 + \tanh(\sqrt(2 / \pi) * (x_i + 0.044715 * x_i^3)))
+
+    GELU Activation Function Graph:
+
+    .. image:: ../images/GELU.png
+        :align: center
+
+    .. note::
+        On the Ascend platform, when `input` is -inf, its gradient is 0,
+        and when `input` is inf, its gradient is `dout`.
+
+    Args:
+        input (Tensor): The input of the activation function GeLU, the data type is float16, float32 or float64.
+
+    Keyword Args:
+        approximate (str, optional): the gelu approximation algorithm to use. Acceptable values are ``'none'`` and ``'tanh'`` .
+            Default: ``'none'`` .
+
+    Returns:
+        Tensor, with the same type and shape as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not bfloat16, float16, float32 or float64.
+        ValueError: If `approximate` value is neither `none` nor `tanh`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+        >>> result = mint.nn.functional.gelu(input)
+        >>> print(result)
+        [[-1.58655241e-01 3.99987316e+00 -0.00000000e+00]
+         [ 1.95449972e+00 -1.41860323e-06 9.0000000e+00]]
+        >>> result = mint.nn.functional.gelu(input, approximate="tanh")
+        >>> print(result)
+        [[-1.58808023e-01 3.99992990e+00 -3.10779147e-21]
+         [ 1.95459759e+00 -2.29180174e-07 9.0000000e+00]]
+    """
+    return _gelu_instance(*args, **kwargs)
+
+
+def greater_equal(*args, **kwargs):
+    r"""
+    greater_equal(input, other) -> Tensor
+
+    Computes the boolean value of :math:`input >= other` element-wise.
+
+    .. math::
+
+        out_{i} =\begin{cases}
+            & \text{True, if } input_{i}>=other_{i} \\
+            & \text{False, if } input_{i}<other_{i}
+            \end{cases}
+
+    Note:
+        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
+          consistent.
+        - The inputs must be two tensors or one tensor and one scalar.
+        - When the inputs are two tensors, dtypes of them cannot be bool at the same time,
+          and the shapes of them can be broadcast.
+        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
+        - Broadcasting is supported.
+        - If the input Tensor can be broadcast, the low dimension will be extended to the corresponding high dimension
+          in another input by copying the value of the dimension.
+
+    Args:
+        input (Union[Tensor, Number]): The first input is a number
+            or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
+        other (Union[Tensor, Number]): Second input. When the first input is a Tensor, the second input should be a Number,
+            or a Tensor of the number or bool_ data type. When the first input is a Scalar,
+            the second input must be a Tensor of number or bool_ data type.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
+
+    Raises:
+        TypeError: If neither `input` nor `other` is a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
+        >>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
+        >>> output = mint.greater_equal(input, other)
+        >>> print(output)
+        [True True False]
+        >>> y = 2.1
+        >>> output = mint.greater_equal(input, y)
+        >>> print(output)
+        [False False True]
+    """
+    return _greater_equal_instance(*args, **kwargs)
+
+
+def ge(*args, **kwargs):
+    r"""
+    ge(input, other) -> Tensor
+
+    Alias for :func:`mindspore.mint.greater_equal`.
+    """
+    return _greater_equal_instance(*args, **kwargs)
+
+
+def kthvalue(*args, **kwargs):
+    r"""
+    Calculates the kth smallest value along given dim specified by `dim` of the input
+    tensor, and returns a tuple of (`values`, `indices`) where `values` contains the k-th smallest element
+    and `indices` provides the index of each corresponding element.
+
+    Args:
+        input (Tensor): The input tensor, can be any dimension. Set the shape of input tensor as
+            :math:`(input_1, input_2, ..., input_N)`.
+        k (int): Specifies the k-th smallest element to retrieve.
+        dim (int, optional): The dimension along which to find the k-th smallest value. Default: ``-1`` .
+        keepdim (bool, optional): Whether to reduce dimension, if ``True`` , the output will keep same dimension with the
+            input, the output will reduce dimension if ``False`` . Default: ``False`` .
+
+    Returns:
+        A tuple consisting of `values` and `indices`.
+
+        - **values** (Tensor) - The k-th smallest value of input tensor, with the same dtype as `input`.
+
+          - If `keepdim` is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
+          - If `keepdim` is ``False`` , the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
+
+        - **indices** (Tensor) - The `indices` for the k-th smallest value of the input tensor, it has the same shape as `values` with dtype of int64.
+
+    Raises:
+        TypeError: If `k` or `dim` is not an int.
+        TypeError: If `keepdim` is not a bool.
+        TypeError: If dtype of `input` is not supported.
+        ValueError: If `input` is an empty Tensor.
+        RuntimeError: If `k` is not in the proper range.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input_x = Tensor(np.array([[1.01, 2.02, 3.03], [1.04, 2.05, 3.06]]), mindspore.float32)
+        >>> out = ops.auto_generate.kthvalue(input_x, 2, 1, False)
+        >>> print(out)
+        (Tensor(shape=[2], dtype=Float32, value= [ 2.01999998e+00, 2.04999995e+00]), Tensor(shape=[2], dtype=Int64, value= [1, 1]))
+        >>> out1 = ops.auto_generate.kthvalue(input_x, 2, 1, True)
+        >>> print(out1)
+        (Tensor(shape=[2, 1], dtype=Float32, value=
+        [[ 2.01999998e+00],
+         [ 2.04999995e+00]]), Tensor(shape=[2, 1], dtype=Int64, value=
+        [[1],
+         [1]]))
+    """
+    return _kthvalue_instance(*args, **kwargs)
+
+
 def lerp(*args, **kwargs):
     r"""
     lerp(input, end, weight) -> Tensor
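The kthvalue examples above call `ops.auto_generate.kthvalue`; the wrapper added in this hunk exposes the same `(input, k, dim, keepdim)` signature under `mint`, so the call below should be equivalent (a sketch under that assumption):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> x = Tensor(np.array([[1.01, 2.02, 3.03], [1.04, 2.05, 3.06]]), mindspore.float32)
>>> values, indices = mint.kthvalue(x, 2, dim=1)  # 2nd smallest along dim 1
>>> print(values.shape, indices.shape)
(2,) (2,)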
@@ -510,7 +877,7 @@ def matmul_reduce_scatter(*args, **kwargs):
         Before running the following examples, you need to configure the communication environment variables.
 
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
-        configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -578,7 +945,7 @@ def max(*args, **kwargs):
             :math:`(input_1, input_2, ..., input_N)` , Complex tensor is not supported.
         dim (int): The dimension to reduce.
         keepdim (bool, optional): Whether to reduce dimension, if ``True`` the output will keep the same dimension as the
-            `input` , the output will reduce dimension if ``false``. Default: ``False``.
+            `input` , the output will reduce dimension if ``False``. Default: ``False``.
 
     Returns:
         tuple (Tensor), tuple of 2 tensors, containing the maximum value of the self tensor along the given
@@ -649,7 +1016,7 @@ def min(*args, **kwargs):
             :math:`(input_1, input_2, ..., input_N)` , Complex tensor is not supported.
         dim (int): The dimension to reduce.
         keepdim (bool, optional): Whether to reduce dimension, if ``True`` the output will keep the same dimension as the
-            input, the output will reduce dimension if ``false``. Default: ``False``.
+            input, the output will reduce dimension if ``False``. Default: ``False``.
 
     Returns:
         tuple (Tensor), tuple of 2 tensors, containing the minimum value of the self tensor along the given
@@ -739,6 +1106,51 @@ def nansum(*args, **kwargs):
     return _nansum_instance(*args, **kwargs)
 
 
+def pixel_shuffle(*args, **kwargs):
+    r"""
+    pixel_shuffle(input, upscale_factor) -> Tensor
+
+    Rearrange elements in a tensor according to an upscaling factor.
+
+    Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
+    to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
+
+    This is useful for implementing efficient sub-pixel convolution
+    with a stride of :math:`1/r`.
+
+    For detailed introduction to the pixel_shuffle algorithm, refer to
+    `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ .
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(*, C \times r^2, H, W)` . The dimension of `input` is larger than 2,
+            and the length of third to last dimension can be divisible by the square of `upscale_factor`.
+        upscale_factor (int): factor to shuffle the input Tensor, and is a positive integer.
+            `upscale_factor` is the above-mentioned :math:`r`.
+
+    Returns:
+        - **output** (Tensor) - Tensor of shape :math:`(*, C, H \times r, W \times r)` .
+
+    Raises:
+        ValueError: If `upscale_factor` is not a positive integer.
+        ValueError: If the length of third to last dimension is not divisible by the square of `upscale_factor`.
+        ValueError: If the dimension of `input` is less than 3.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import mint
+        >>> input = mint.randn(1, 9, 4, 4)
+        >>> output = mint.nn.functional.pixel_shuffle(input, 3)
+        >>> print(output.shape)
+        (1, 1, 12, 12)
+    """
+    return _pixel_shuffle_instance(*args, **kwargs)
+
+
 def remainder(*args, **kwargs):
     r"""
     remainder(input, other) -> Tensor
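The (*, C*r^2, H, W) to (*, C, H*r, W*r) rearrangement described above can be reproduced with a plain reshape/transpose; the NumPy reference below only illustrates the index mapping and is not MindSpore's implementation:

import numpy as np

def pixel_shuffle_ref(x, r):
    # Reference rearrangement: (N, C*r*r, H, W) -> (N, C, H*r, W*r).
    n, crr, h, w = x.shape
    c = crr // (r * r)
    x = x.reshape(n, c, r, r, h, w)
    x = x.transpose(0, 1, 4, 2, 5, 3)  # -> (N, C, H, r, W, r)
    return x.reshape(n, c, h * r, w * r)

print(pixel_shuffle_ref(np.random.randn(1, 9, 4, 4), 3).shape)  # (1, 1, 12, 12)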
@@ -830,6 +1242,77 @@ def repeat_interleave(*args, **kwargs):
     return _repeat_interleave_instance(*args, **kwargs)
 
 
+def sub(*args, **kwargs):
+    r"""
+    sub(input, other, *, alpha=1) -> Tensor
+
+    Subtracts scaled other value from self Tensor.
+
+    .. math::
+
+        out_{i} = self_{i} - alpha \times other_{i}
+
+    Note:
+        - When the two inputs have different shapes,
+          they must be able to broadcast to a common shape.
+        - The two inputs and alpha comply with the implicit type conversion rules to make the data types
+          consistent.
+
+    Args:
+        input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+        other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+    Keyword Args:
+        alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
+
+    Returns:
+        Tensor with a shape that is the same as the broadcasted shape of `self` and `other`,
+        and the data type is the one with higher precision or higher digits among the two inputs and alpha.
+
+    Raises:
+        TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+        TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
+        TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
+        >>> y = Tensor(1, mindspore.int32)
+        >>> alpha = 0.5
+        >>> output = mint.sub(x, y, alpha=alpha)
+        >>> print(output)
+        [3.5 4.5 5.5]
+        >>> # the data type of x is float32, the data type of y is int32,
+        >>> # alpha is a float, and the output is the data format of higher precision float32.
+        >>> print(output.dtype)
+        Float32
+    """
+    return _sub_instance(*args, **kwargs)
+
+
+def __sub__(*args, **kwargs):
+    r"""
+    __sub__(input, other, *, alpha=1) -> Tensor
+
+    Alias for :func:`mindspore.mint.sub`.
+
+    .. method:: mint.__sub__(input, other, *, alpha=1) -> Tensor
+        :noindex:
+
+    Alias for overload function of :func:`mindspore.mint.sub`.
+    """
+    return _sub_instance(*args, **kwargs)
+
+
 def where(*args, **kwargs):
     r"""
     where(condition, input, other) -> Tensor
@@ -877,7 +1360,58 @@ def where(*args, **kwargs):
     """
     return _where_instance(*args, **kwargs)
 
+
+def xlogy(*args, **kwargs):
+    r"""
+    xlogy(input, other) -> Tensor
+
+    Computes the first input multiplied by the logarithm of second input element-wise.
+    Returns zero when `input` is zero.
+
+    .. math::
+
+        out_i = input_{i}\log{other_{i}}
+
+    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+    The inputs must be two tensors or one tensor and one scalar.
+    When the inputs are two tensors, the shapes of them could be broadcast.
+
+    Args:
+        input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
+            a bool or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+        other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
+            a bool or a tensor whose data type is number or bool when the first input is a tensor.
+            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting,
+        and the data type is the one with higher precision or higher digits among the two inputs.
+
+    Raises:
+        TypeError: If `input` and `other` is not a numbers.Number or a bool or a Tensor.
+        ValueError: If `input` could not be broadcast to a tensor with shape of `other`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
+        >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
+        >>> output = ops.xlogy(input, other)
+        >>> print(output)
+        [-3.465736 0. 2.7725887]
+    """
+    return _xlogy_instance(*args, **kwargs)
+
 __all__ = [
+    "add",
+    "__add__",
+    "addcdiv",
     "all_gather_matmul",
     "bitwise_not",
     "clamp",
@@ -885,13 +1419,22 @@ __all__ = [
     "div",
     "divide",
     "empty",
+    "floor_divide",
     "fmod",
+    "gelu",
+    "greater_equal",
+    "ge",
+    "kthvalue",
     "lerp",
     "matmul_reduce_scatter",
     "max",
     "min",
     "nansum",
+    "pixel_shuffle",
     "remainder",
     "repeat_interleave",
+    "sub",
+    "__sub__",
     "where",
+    "xlogy",
 ]
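One behaviour worth noting in the new `xlogy` wrapper: as its docstring says, the `input == 0` case is defined as 0 rather than propagating the nan that a literal `0 * log(0)` would produce. A quick check in the same style as the docstring examples, assuming `mint.xlogy` matches the `ops.xlogy` behaviour shown above:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> x = Tensor(np.array([0.0, 1.0]), mindspore.float32)
>>> y = Tensor(np.array([0.0, np.e]), mindspore.float32)
>>> print(mint.xlogy(x, y))  # xlogy(0, 0) is defined as 0, not nan
[0. 1.]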