mindspore 2.4.10__cp310-cp310-win_amd64.whl → 2.6.0__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore has been flagged as potentially problematic; see the registry listing for details.

Files changed (602)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +13 -6
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -38
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +6 -7
  15. mindspore/_extends/parse/compile_config.py +83 -0
  16. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  17. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  18. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  19. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  20. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  21. mindspore/_extends/parse/parser.py +47 -198
  22. mindspore/_extends/parse/resources.py +1 -5
  23. mindspore/_extends/parse/standard_method.py +229 -99
  24. mindspore/_extends/pijit/__init__.py +2 -2
  25. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  26. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  27. mindspore/_extends/utils.py +1 -1
  28. mindspore/amp.py +11 -5
  29. mindspore/atlprov.dll +0 -0
  30. mindspore/avcodec-59.dll +0 -0
  31. mindspore/avdevice-59.dll +0 -0
  32. mindspore/avfilter-8.dll +0 -0
  33. mindspore/avformat-59.dll +0 -0
  34. mindspore/avutil-57.dll +0 -0
  35. mindspore/boost/__init__.py +2 -2
  36. mindspore/boost/base.py +3 -7
  37. mindspore/boost/boost_cell_wrapper.py +138 -43
  38. mindspore/c1.dll +0 -0
  39. mindspore/c1xx.dll +0 -0
  40. mindspore/c2.dll +0 -0
  41. mindspore/common/__init__.py +6 -3
  42. mindspore/common/_grad_function.py +56 -0
  43. mindspore/common/_pijit_context.py +14 -5
  44. mindspore/common/_register_for_tensor.py +1 -2
  45. mindspore/common/_stub_tensor.py +30 -14
  46. mindspore/common/_tensor_cpp_method.py +17 -0
  47. mindspore/common/_tensor_docs.py +4760 -0
  48. mindspore/common/api.py +480 -372
  49. mindspore/common/auto_dynamic_shape.py +41 -44
  50. mindspore/common/dtype.py +39 -36
  51. mindspore/common/dump.py +9 -6
  52. mindspore/common/file_system.py +9 -1
  53. mindspore/common/generator.py +5 -0
  54. mindspore/common/hook_handle.py +6 -2
  55. mindspore/common/initializer.py +13 -10
  56. mindspore/common/jit_begin_end.py +94 -0
  57. mindspore/common/jit_config.py +6 -1
  58. mindspore/common/jit_context.py +76 -0
  59. mindspore/common/jit_trace.py +378 -0
  60. mindspore/common/lazy_inline.py +9 -3
  61. mindspore/common/mindir_util.py +10 -2
  62. mindspore/common/mutable.py +5 -4
  63. mindspore/common/parameter.py +135 -52
  64. mindspore/common/seed.py +2 -2
  65. mindspore/common/sparse_tensor.py +23 -17
  66. mindspore/common/tensor.py +975 -1981
  67. mindspore/communication/__init__.py +7 -5
  68. mindspore/communication/_comm_helper.py +52 -2
  69. mindspore/communication/comm_func.py +240 -181
  70. mindspore/communication/management.py +95 -26
  71. mindspore/context.py +324 -573
  72. mindspore/dataset/__init__.py +65 -37
  73. mindspore/dataset/audio/__init__.py +2 -8
  74. mindspore/dataset/audio/transforms.py +3 -17
  75. mindspore/dataset/callback/ds_callback.py +2 -1
  76. mindspore/dataset/core/config.py +87 -6
  77. mindspore/dataset/engine/cache_admin.py +3 -3
  78. mindspore/dataset/engine/cache_client.py +6 -5
  79. mindspore/dataset/engine/datasets.py +292 -267
  80. mindspore/dataset/engine/datasets_audio.py +22 -8
  81. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  82. mindspore/dataset/engine/datasets_text.py +78 -48
  83. mindspore/dataset/engine/datasets_user_defined.py +183 -117
  84. mindspore/dataset/engine/datasets_vision.py +120 -44
  85. mindspore/dataset/engine/iterators.py +283 -63
  86. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  87. mindspore/dataset/engine/obs/util.py +8 -0
  88. mindspore/dataset/engine/queue.py +40 -0
  89. mindspore/dataset/engine/samplers.py +289 -43
  90. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  91. mindspore/dataset/engine/validators.py +53 -11
  92. mindspore/dataset/text/__init__.py +7 -6
  93. mindspore/dataset/text/transforms.py +6 -5
  94. mindspore/dataset/text/utils.py +3 -3
  95. mindspore/dataset/transforms/__init__.py +0 -9
  96. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  97. mindspore/dataset/transforms/transforms.py +31 -14
  98. mindspore/dataset/utils/browse_dataset.py +1 -1
  99. mindspore/dataset/vision/__init__.py +2 -9
  100. mindspore/dataset/vision/transforms.py +202 -158
  101. mindspore/dataset/vision/utils.py +7 -5
  102. mindspore/dataset/vision/validators.py +1 -2
  103. mindspore/device_context/__init__.py +21 -0
  104. mindspore/device_context/ascend/__init__.py +25 -0
  105. mindspore/device_context/ascend/device.py +72 -0
  106. mindspore/device_context/ascend/op_debug.py +153 -0
  107. mindspore/device_context/ascend/op_precision.py +193 -0
  108. mindspore/device_context/ascend/op_tuning.py +123 -0
  109. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  110. mindspore/device_context/cpu/device.py +62 -0
  111. mindspore/device_context/cpu/op_tuning.py +43 -0
  112. mindspore/device_context/gpu/__init__.py +21 -0
  113. mindspore/device_context/gpu/device.py +70 -0
  114. mindspore/device_context/gpu/op_precision.py +67 -0
  115. mindspore/device_context/gpu/op_tuning.py +175 -0
  116. mindspore/device_manager.py +170 -0
  117. mindspore/dnnl.dll +0 -0
  118. mindspore/dpcmi.dll +0 -0
  119. mindspore/experimental/es/embedding_service.py +35 -27
  120. mindspore/experimental/llm_boost/__init__.py +1 -0
  121. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  122. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +209 -0
  123. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  124. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  125. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  126. mindspore/experimental/llm_boost/register.py +1 -0
  127. mindspore/experimental/map_parameter.py +4 -4
  128. mindspore/experimental/optim/adadelta.py +6 -6
  129. mindspore/experimental/optim/adagrad.py +4 -4
  130. mindspore/experimental/optim/adam.py +7 -0
  131. mindspore/experimental/optim/adamax.py +4 -4
  132. mindspore/experimental/optim/adamw.py +4 -0
  133. mindspore/experimental/optim/asgd.py +1 -1
  134. mindspore/experimental/optim/lr_scheduler.py +73 -46
  135. mindspore/experimental/optim/radam.py +34 -31
  136. mindspore/experimental/optim/rprop.py +1 -1
  137. mindspore/experimental/optim/sgd.py +1 -1
  138. mindspore/hal/contiguous_tensors_handle.py +6 -10
  139. mindspore/hal/device.py +55 -53
  140. mindspore/hal/event.py +52 -52
  141. mindspore/hal/memory.py +179 -120
  142. mindspore/hal/stream.py +150 -109
  143. mindspore/include/api/context.h +0 -1
  144. mindspore/include/dataset/constants.h +7 -4
  145. mindspore/include/dataset/execute.h +2 -2
  146. mindspore/jpeg62.dll +0 -0
  147. mindspore/log.py +50 -0
  148. mindspore/mindrecord/__init__.py +21 -8
  149. mindspore/mindrecord/config.py +17 -316
  150. mindspore/mindrecord/filereader.py +1 -9
  151. mindspore/mindrecord/filewriter.py +5 -15
  152. mindspore/mindrecord/mindpage.py +1 -9
  153. mindspore/mindspore_backend_common.dll +0 -0
  154. mindspore/mindspore_backend_manager.dll +0 -0
  155. mindspore/mindspore_common.dll +0 -0
  156. mindspore/mindspore_core.dll +0 -0
  157. mindspore/mindspore_dump.dll +0 -0
  158. mindspore/mindspore_frontend.dll +0 -0
  159. mindspore/mindspore_glog.dll +0 -0
  160. mindspore/mindspore_memory_pool.dll +0 -0
  161. mindspore/mindspore_ms_backend.dll +0 -0
  162. mindspore/mindspore_ops.dll +0 -0
  163. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  164. mindspore/mindspore_ops_kernel_common.dll +0 -0
  165. mindspore/mindspore_profiler.dll +0 -0
  166. mindspore/mindspore_pyboost.dll +0 -0
  167. mindspore/mindspore_pynative.dll +0 -0
  168. mindspore/mindspore_res_manager.dll +0 -0
  169. mindspore/mindspore_runtime_pipeline.dll +0 -0
  170. mindspore/mint/__init__.py +798 -761
  171. mindspore/mint/distributed/__init__.py +70 -4
  172. mindspore/mint/distributed/distributed.py +2679 -44
  173. mindspore/mint/linalg/__init__.py +8 -0
  174. mindspore/mint/nn/__init__.py +743 -22
  175. mindspore/mint/nn/functional.py +716 -23
  176. mindspore/mint/nn/layer/__init__.py +21 -4
  177. mindspore/mint/nn/layer/_functions.py +334 -0
  178. mindspore/mint/nn/layer/activation.py +276 -1
  179. mindspore/mint/nn/layer/basic.py +123 -0
  180. mindspore/mint/nn/layer/conv.py +933 -0
  181. mindspore/mint/nn/layer/normalization.py +223 -28
  182. mindspore/mint/nn/layer/padding.py +797 -0
  183. mindspore/mint/nn/layer/pooling.py +235 -0
  184. mindspore/mint/optim/__init__.py +3 -1
  185. mindspore/mint/optim/adam.py +223 -0
  186. mindspore/mint/optim/adamw.py +26 -19
  187. mindspore/mint/optim/sgd.py +171 -0
  188. mindspore/mint/special/__init__.py +2 -1
  189. mindspore/msobj140.dll +0 -0
  190. mindspore/mspdb140.dll +0 -0
  191. mindspore/mspdbcore.dll +0 -0
  192. mindspore/mspdbst.dll +0 -0
  193. mindspore/mspft140.dll +0 -0
  194. mindspore/msvcdis140.dll +0 -0
  195. mindspore/msvcp140_1.dll +0 -0
  196. mindspore/msvcp140_2.dll +0 -0
  197. mindspore/msvcp140_atomic_wait.dll +0 -0
  198. mindspore/msvcp140_codecvt_ids.dll +0 -0
  199. mindspore/multiprocessing/__init__.py +5 -0
  200. mindspore/nn/__init__.py +4 -1
  201. mindspore/nn/cell.py +1373 -192
  202. mindspore/nn/dynamic_lr.py +2 -1
  203. mindspore/nn/layer/activation.py +29 -27
  204. mindspore/nn/layer/basic.py +51 -35
  205. mindspore/nn/layer/channel_shuffle.py +3 -3
  206. mindspore/nn/layer/container.py +1 -1
  207. mindspore/nn/layer/conv.py +53 -42
  208. mindspore/nn/layer/embedding.py +12 -11
  209. mindspore/nn/layer/normalization.py +56 -49
  210. mindspore/nn/layer/padding.py +4 -3
  211. mindspore/nn/layer/pooling.py +120 -42
  212. mindspore/nn/layer/rnn_cells.py +1 -1
  213. mindspore/nn/layer/rnns.py +2 -1
  214. mindspore/nn/layer/timedistributed.py +5 -5
  215. mindspore/nn/layer/transformer.py +59 -36
  216. mindspore/nn/learning_rate_schedule.py +8 -4
  217. mindspore/nn/loss/loss.py +58 -55
  218. mindspore/nn/optim/ada_grad.py +7 -5
  219. mindspore/nn/optim/adadelta.py +11 -9
  220. mindspore/nn/optim/adafactor.py +1 -1
  221. mindspore/nn/optim/adam.py +19 -15
  222. mindspore/nn/optim/adamax.py +8 -7
  223. mindspore/nn/optim/adasum.py +5 -5
  224. mindspore/nn/optim/asgd.py +3 -1
  225. mindspore/nn/optim/ftrl.py +11 -9
  226. mindspore/nn/optim/lamb.py +1 -1
  227. mindspore/nn/optim/lars.py +1 -4
  228. mindspore/nn/optim/lazyadam.py +12 -10
  229. mindspore/nn/optim/momentum.py +7 -6
  230. mindspore/nn/optim/optimizer.py +3 -3
  231. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  232. mindspore/nn/optim/rmsprop.py +13 -12
  233. mindspore/nn/optim/rprop.py +11 -9
  234. mindspore/nn/optim/sgd.py +9 -6
  235. mindspore/nn/optim/tft_wrapper.py +5 -2
  236. mindspore/nn/optim/thor.py +2 -1
  237. mindspore/nn/probability/bijector/bijector.py +17 -11
  238. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  239. mindspore/nn/probability/bijector/invert.py +2 -2
  240. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  241. mindspore/nn/probability/bijector/softplus.py +3 -2
  242. mindspore/nn/probability/distribution/beta.py +3 -3
  243. mindspore/nn/probability/distribution/categorical.py +1 -1
  244. mindspore/nn/probability/distribution/cauchy.py +4 -2
  245. mindspore/nn/probability/distribution/exponential.py +6 -7
  246. mindspore/nn/probability/distribution/gamma.py +2 -2
  247. mindspore/nn/probability/distribution/gumbel.py +2 -2
  248. mindspore/nn/probability/distribution/half_normal.py +5 -3
  249. mindspore/nn/probability/distribution/logistic.py +5 -3
  250. mindspore/nn/probability/distribution/poisson.py +1 -1
  251. mindspore/nn/probability/distribution/uniform.py +5 -3
  252. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  253. mindspore/nn/reinforcement/tensor_array.py +1 -1
  254. mindspore/nn/utils/init.py +13 -11
  255. mindspore/nn/wrap/__init__.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +181 -122
  257. mindspore/nn/wrap/grad_reducer.py +45 -36
  258. mindspore/nn/wrap/loss_scale.py +6 -7
  259. mindspore/numpy/array_creations.py +63 -65
  260. mindspore/numpy/array_ops.py +149 -144
  261. mindspore/numpy/logic_ops.py +41 -42
  262. mindspore/numpy/math_ops.py +361 -359
  263. mindspore/numpy/utils.py +17 -18
  264. mindspore/numpy/utils_const.py +5 -6
  265. mindspore/opencv_core452.dll +0 -0
  266. mindspore/opencv_imgcodecs452.dll +0 -0
  267. mindspore/opencv_imgproc452.dll +0 -0
  268. mindspore/ops/__init__.py +5 -3
  269. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  270. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  271. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  272. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  273. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  274. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  275. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  276. mindspore/ops/_register_for_op.py +0 -11
  277. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  278. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  279. mindspore/ops/_vmap/vmap_array_ops.py +52 -25
  280. mindspore/ops/_vmap/vmap_base.py +0 -2
  281. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  282. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  283. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  284. mindspore/ops/auto_generate/__init__.py +4 -3
  285. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +258 -46
  286. mindspore/ops/auto_generate/gen_extend_func.py +757 -185
  287. mindspore/ops/auto_generate/gen_ops_def.py +4197 -2243
  288. mindspore/ops/auto_generate/gen_ops_prim.py +16976 -6055
  289. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  290. mindspore/ops/composite/__init__.py +2 -1
  291. mindspore/ops/composite/base.py +20 -25
  292. mindspore/ops/composite/math_ops.py +6 -16
  293. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  294. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  295. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  296. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  299. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  301. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  302. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  303. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  304. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  305. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  306. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  307. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  308. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  309. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  310. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  311. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  312. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  313. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  314. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  315. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  316. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  317. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  318. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  319. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  320. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  321. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  324. mindspore/ops/function/__init__.py +40 -2
  325. mindspore/ops/function/_add_attr_func.py +58 -0
  326. mindspore/ops/function/array_func.py +2089 -2403
  327. mindspore/ops/function/clip_func.py +80 -23
  328. mindspore/ops/function/debug_func.py +57 -57
  329. mindspore/ops/function/grad/__init__.py +1 -0
  330. mindspore/ops/function/grad/grad_func.py +104 -71
  331. mindspore/ops/function/image_func.py +2 -2
  332. mindspore/ops/function/linalg_func.py +47 -78
  333. mindspore/ops/function/math_func.py +4351 -3813
  334. mindspore/ops/function/nn_func.py +1712 -637
  335. mindspore/ops/function/other_func.py +159 -1
  336. mindspore/ops/function/parameter_func.py +18 -84
  337. mindspore/ops/function/random_func.py +452 -387
  338. mindspore/ops/function/reshard_func.py +4 -70
  339. mindspore/ops/function/sparse_func.py +3 -3
  340. mindspore/ops/function/sparse_unary_func.py +6 -6
  341. mindspore/ops/function/spectral_func.py +25 -58
  342. mindspore/ops/function/vmap_func.py +26 -18
  343. mindspore/ops/functional.py +23 -7
  344. mindspore/ops/functional_overload.py +1548 -0
  345. mindspore/ops/op_info_register.py +32 -244
  346. mindspore/ops/operations/__init__.py +23 -15
  347. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  348. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  349. mindspore/ops/operations/_grad_ops.py +2 -43
  350. mindspore/ops/operations/_infer_ops.py +2 -1
  351. mindspore/ops/operations/_inner_ops.py +43 -84
  352. mindspore/ops/operations/_ms_kernel.py +4 -10
  353. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  354. mindspore/ops/operations/_scalar_ops.py +3 -2
  355. mindspore/ops/operations/_sequence_ops.py +1 -1
  356. mindspore/ops/operations/_tensor_array.py +1 -1
  357. mindspore/ops/operations/array_ops.py +81 -324
  358. mindspore/ops/operations/comm_ops.py +154 -108
  359. mindspore/ops/operations/custom_ops.py +298 -87
  360. mindspore/ops/operations/debug_ops.py +157 -59
  361. mindspore/ops/operations/inner_ops.py +7 -5
  362. mindspore/ops/operations/linalg_ops.py +1 -57
  363. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  364. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  365. mindspore/ops/operations/math_ops.py +32 -234
  366. mindspore/ops/operations/nn_ops.py +212 -531
  367. mindspore/ops/operations/other_ops.py +62 -9
  368. mindspore/ops/operations/random_ops.py +13 -7
  369. mindspore/ops/operations/reshard_ops.py +1 -1
  370. mindspore/ops/operations/sparse_ops.py +2 -2
  371. mindspore/ops/primitive.py +66 -53
  372. mindspore/ops/tensor_method.py +1895 -0
  373. mindspore/ops_generate/__init__.py +0 -5
  374. mindspore/ops_generate/aclnn/__init__.py +0 -0
  375. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  376. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  377. mindspore/ops_generate/api/__init__.py +0 -0
  378. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  379. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  380. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  381. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  382. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  383. mindspore/ops_generate/api/gen_api.py +103 -0
  384. mindspore/ops_generate/api/op_api_proto.py +235 -0
  385. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  386. mindspore/ops_generate/common/__init__.py +0 -0
  387. mindspore/ops_generate/common/base_generator.py +11 -0
  388. mindspore/ops_generate/common/gen_constants.py +91 -0
  389. mindspore/ops_generate/common/gen_utils.py +348 -0
  390. mindspore/ops_generate/common/op_proto.py +473 -0
  391. mindspore/ops_generate/common/template.py +523 -0
  392. mindspore/ops_generate/gen_ops.py +22 -1069
  393. mindspore/ops_generate/op_def/__init__.py +0 -0
  394. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  395. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  396. mindspore/ops_generate/op_def/ops_def_cc_generator.py +296 -0
  397. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  398. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  399. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  400. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  401. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  402. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  403. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  404. mindspore/ops_generate/pyboost/__init__.py +0 -0
  405. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  406. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  407. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  408. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  409. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  410. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  411. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  412. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  413. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  414. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  415. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  416. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  417. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  418. mindspore/ops_generate/resources/__init__.py +0 -0
  419. mindspore/ops_generate/resources/resource_list.py +30 -0
  420. mindspore/ops_generate/resources/resource_loader.py +36 -0
  421. mindspore/ops_generate/resources/resource_manager.py +64 -0
  422. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  423. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  424. mindspore/parallel/__init__.py +7 -3
  425. mindspore/parallel/_auto_parallel_context.py +159 -40
  426. mindspore/parallel/_cell_wrapper.py +132 -15
  427. mindspore/parallel/_parallel_serialization.py +107 -5
  428. mindspore/parallel/_ps_context.py +1 -1
  429. mindspore/parallel/_recovery_context.py +7 -2
  430. mindspore/parallel/_tensor.py +142 -18
  431. mindspore/parallel/_utils.py +199 -23
  432. mindspore/parallel/algo_parameter_config.py +4 -4
  433. mindspore/parallel/auto_parallel.py +732 -0
  434. mindspore/parallel/checkpoint_convert.py +159 -0
  435. mindspore/parallel/checkpoint_transform.py +700 -35
  436. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  437. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  438. mindspore/parallel/cluster/run.py +21 -4
  439. mindspore/parallel/function/__init__.py +24 -0
  440. mindspore/parallel/function/reshard_func.py +258 -0
  441. mindspore/parallel/nn/__init__.py +25 -0
  442. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  443. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  444. mindspore/parallel/parameter_broadcast.py +25 -14
  445. mindspore/parallel/shard.py +137 -59
  446. mindspore/parallel/transform_safetensors.py +364 -305
  447. mindspore/pgodb140.dll +0 -0
  448. mindspore/pgort140.dll +0 -0
  449. mindspore/profiler/__init__.py +22 -5
  450. mindspore/profiler/analysis/__init__.py +0 -0
  451. mindspore/profiler/analysis/parser/__init__.py +0 -0
  452. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  453. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  454. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  455. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  456. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  457. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  458. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  459. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  460. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +109 -0
  461. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  462. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  463. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  464. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  465. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  466. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  467. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  468. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  469. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  470. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  471. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  472. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  473. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  474. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  475. mindspore/profiler/analysis/task_manager.py +131 -0
  476. mindspore/profiler/analysis/time_converter.py +84 -0
  477. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  478. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  479. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  480. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  481. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  482. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  483. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  484. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  485. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  486. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  487. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  488. mindspore/profiler/analysis/work_flow.py +73 -0
  489. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  490. mindspore/profiler/common/command_executor.py +90 -0
  491. mindspore/profiler/common/constant.py +186 -3
  492. mindspore/profiler/common/file_manager.py +208 -0
  493. mindspore/profiler/common/log.py +130 -0
  494. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  495. mindspore/profiler/common/path_manager.py +395 -0
  496. mindspore/profiler/common/process_bar.py +168 -0
  497. mindspore/profiler/common/process_pool.py +9 -3
  498. mindspore/profiler/common/profiler_context.py +500 -0
  499. mindspore/profiler/common/profiler_info.py +304 -0
  500. mindspore/profiler/common/profiler_meta_data.py +74 -0
  501. mindspore/profiler/common/profiler_output_path.py +284 -0
  502. mindspore/profiler/common/profiler_parameters.py +251 -0
  503. mindspore/profiler/common/profiler_path_manager.py +179 -0
  504. mindspore/profiler/common/record_function.py +76 -0
  505. mindspore/profiler/common/tlv_decoder.py +76 -0
  506. mindspore/profiler/common/util.py +75 -2
  507. mindspore/profiler/dynamic_profiler.py +341 -75
  508. mindspore/profiler/envprofiler.py +163 -0
  509. mindspore/profiler/experimental_config.py +197 -0
  510. mindspore/profiler/mstx.py +242 -0
  511. mindspore/profiler/platform/__init__.py +21 -0
  512. mindspore/profiler/platform/base_profiler.py +40 -0
  513. mindspore/profiler/platform/cpu_profiler.py +124 -0
  514. mindspore/profiler/platform/gpu_profiler.py +74 -0
  515. mindspore/profiler/platform/npu_profiler.py +335 -0
  516. mindspore/profiler/profiler.py +1073 -90
  517. mindspore/profiler/profiler_action_controller.py +187 -0
  518. mindspore/profiler/profiler_interface.py +118 -0
  519. mindspore/profiler/schedule.py +243 -0
  520. mindspore/rewrite/api/node.py +15 -13
  521. mindspore/rewrite/api/symbol_tree.py +2 -3
  522. mindspore/run_check/_check_version.py +27 -20
  523. mindspore/run_check/run_check.py +1 -1
  524. mindspore/runtime/__init__.py +37 -0
  525. mindspore/runtime/device.py +27 -0
  526. mindspore/runtime/event.py +209 -0
  527. mindspore/runtime/executor.py +177 -0
  528. mindspore/runtime/memory.py +416 -0
  529. mindspore/runtime/stream.py +460 -0
  530. mindspore/runtime/thread_bind_core.py +401 -0
  531. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  532. mindspore/swresample-4.dll +0 -0
  533. mindspore/swscale-6.dll +0 -0
  534. mindspore/tbbmalloc.dll +0 -0
  535. mindspore/tinyxml2.dll +0 -0
  536. mindspore/train/__init__.py +8 -8
  537. mindspore/train/_utils.py +96 -27
  538. mindspore/train/amp.py +9 -5
  539. mindspore/train/callback/__init__.py +2 -2
  540. mindspore/train/callback/_callback.py +2 -16
  541. mindspore/train/callback/_checkpoint.py +53 -55
  542. mindspore/train/callback/_cluster_monitor.py +14 -18
  543. mindspore/train/callback/_early_stop.py +1 -1
  544. mindspore/train/callback/_flops_collector.py +103 -68
  545. mindspore/train/callback/_history.py +8 -5
  546. mindspore/train/callback/_lambda_callback.py +2 -2
  547. mindspore/train/callback/_landscape.py +0 -3
  548. mindspore/train/callback/_loss_monitor.py +2 -1
  549. mindspore/train/callback/_on_request_exit.py +6 -5
  550. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  551. mindspore/train/callback/_summary_collector.py +52 -19
  552. mindspore/train/callback/_time_monitor.py +2 -1
  553. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +228 -108
  554. mindspore/train/data_sink.py +25 -2
  555. mindspore/train/dataset_helper.py +15 -16
  556. mindspore/train/loss_scale_manager.py +8 -7
  557. mindspore/train/metrics/accuracy.py +3 -3
  558. mindspore/train/metrics/confusion_matrix.py +9 -9
  559. mindspore/train/metrics/error.py +3 -3
  560. mindspore/train/metrics/hausdorff_distance.py +4 -4
  561. mindspore/train/metrics/mean_surface_distance.py +3 -3
  562. mindspore/train/metrics/metric.py +0 -12
  563. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  564. mindspore/train/metrics/precision.py +11 -10
  565. mindspore/train/metrics/recall.py +9 -9
  566. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  567. mindspore/train/mind_ir_pb2.py +174 -46
  568. mindspore/train/model.py +269 -136
  569. mindspore/train/serialization.py +622 -978
  570. mindspore/train/summary/_summary_adapter.py +2 -2
  571. mindspore/train/summary/summary_record.py +2 -3
  572. mindspore/train/train_thor/model_thor.py +1 -1
  573. mindspore/turbojpeg.dll +0 -0
  574. mindspore/utils/__init__.py +6 -3
  575. mindspore/utils/dryrun.py +140 -0
  576. mindspore/utils/hooks.py +81 -0
  577. mindspore/utils/runtime_execution_order_check.py +552 -0
  578. mindspore/utils/utils.py +138 -4
  579. mindspore/vcmeta.dll +0 -0
  580. mindspore/vcruntime140.dll +0 -0
  581. mindspore/vcruntime140_1.dll +0 -0
  582. mindspore/version.py +1 -1
  583. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/METADATA +3 -3
  584. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/RECORD +587 -418
  585. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +1 -1
  586. mindspore/_install_custom.py +0 -43
  587. mindspore/common/_register_for_adapter.py +0 -74
  588. mindspore/common/_tensor_overload.py +0 -139
  589. mindspore/mindspore_np_dtype.dll +0 -0
  590. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  591. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  592. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  593. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  594. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  595. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  596. mindspore/ops_generate/gen_utils.py +0 -209
  597. mindspore/ops_generate/op_proto.py +0 -145
  598. mindspore/ops_generate/template.py +0 -261
  599. mindspore/profiler/envprofiling.py +0 -254
  600. mindspore/profiler/profiling.py +0 -1926
  601. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  602. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/ops/functional_overload.py (new file)
@@ -0,0 +1,1548 @@
+ # Copyright 2024 Huawei Technologies Co., Ltd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ============================================================================
+ """Holding mint APIs"""
+ from mindspore._c_expression import _add_instance
+ from mindspore._c_expression import _addcdiv_instance
+ from mindspore._c_expression import _all_gather_matmul_instance
+ from mindspore._c_expression import _bitwise_not_instance
+ from mindspore._c_expression import _clamp_instance
+ from mindspore._c_expression import _div_instance
+ from mindspore._c_expression import _empty_instance
+ from mindspore._c_expression import _floor_divide_instance
+ from mindspore._c_expression import _fmod_instance
+ from mindspore._c_expression import _gelu_instance
+ from mindspore._c_expression import _gmm_instance
+ from mindspore._c_expression import _gmm_backward_instance
+ from mindspore._c_expression import _gmm_backward_fusion_instance
+ from mindspore._c_expression import _greater_equal_instance
+ from mindspore._c_expression import _kthvalue_instance
+ from mindspore._c_expression import _lerp_instance
+ from mindspore._c_expression import _matmul_reduce_scatter_instance
+ from mindspore._c_expression import _max_instance
+ from mindspore._c_expression import _min_instance
+ from mindspore._c_expression import _nansum_instance
+ from mindspore._c_expression import _pixel_shuffle_instance
+ from mindspore._c_expression import _remainder_instance
+ from mindspore._c_expression import _repeat_interleave_instance
+ from mindspore._c_expression import _sub_instance
+ from mindspore._c_expression import _where_instance
+ from mindspore._c_expression import _xlogy_instance
+
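
Every definition below follows one pattern: a Python wrapper that forwards `*args, **kwargs` untouched to a pyboost instance imported from `_c_expression`, so argument parsing and overload resolution happen in C++ while the docstring carries the Python-visible signature. A minimal pure-Python analogue of that pattern (illustrative only; `_make_wrapper` is a hypothetical name, and the real instances are compiled callables):

    def _make_wrapper(instance, name, doc):
        """Build a thin forwarding wrapper around a compiled callable."""
        def wrapper(*args, **kwargs):
            # No Python-side validation: the instance resolves overloads itself.
            return instance(*args, **kwargs)
        wrapper.__name__ = name
        wrapper.__doc__ = doc
        return wrapper

    # e.g. add = _make_wrapper(_add_instance, "add", ADD_DOC)
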
+ def add(*args, **kwargs):
+     r"""
+     add(input, other, *, alpha=1) -> Tensor
+
+     Adds the scaled `other` value to `input`.
+
+     .. math::
+
+         out_{i} = input_{i} + alpha \times other_{i}
+
+     Note:
+         - When `input` and `other` have different shapes,
+           they must be able to broadcast to a common shape.
+         - `input`, `other` and `alpha` comply with the implicit type conversion rules to make the data types
+           consistent.
+
+     Args:
+         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
+             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
+             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+     Keyword Args:
+         alpha (number.Number, optional): A scaling factor applied to `other`. Default: ``1`` .
+
+     Returns:
+         Tensor with a shape that is the same as the broadcasted shape of `input` and `other`,
+         and the data type is the one with higher precision or higher digits among `input`, `other` and `alpha`.
+
+     Raises:
+         TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+         TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
+         TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import numpy as np
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(1, mindspore.int32)
+         >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+         >>> alpha = 0.5
+         >>> output = mint.add(x, y, alpha=alpha) # x.add(y, alpha=alpha)
+         >>> print(output)
+         [3. 3.5 4.]
+         >>> # the data type of x is int32, the data type of y is float32,
+         >>> # alpha is a float, and the output is the data format of higher precision float32.
+         >>> print(output.dtype)
+         Float32
+     """
+     return _add_instance(*args, **kwargs)
+
+
+ def __add__(*args, **kwargs):
+     r"""
+     __add__(input, other, *, alpha=1) -> Tensor
+
+     Alias for :func:`mindspore.mint.add`.
+
+     .. method:: mint.__add__(input, other, *, alpha=1) -> Tensor
+         :noindex:
+
+     Alias for overload function of :func:`mindspore.mint.add`.
+     """
+     return _add_instance(*args, **kwargs)
+
+
+ def addcdiv(*args, **kwargs):
+     r"""
+     addcdiv(input, tensor1, tensor2, *, value=1) -> Tensor
+
+     Performs the element-wise division of `tensor1` by `tensor2`, multiplies the result
+     by the scalar `value`, and adds it to `input`.
+
+     .. math::
+         y[i] = input[i] + value * (tensor1[i] / tensor2[i])
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): The tensor to be added.
+         tensor1 (Tensor): The numerator tensor.
+         tensor2 (Tensor): The denominator tensor.
+
+     Keyword Args:
+         value (Number, optional): The multiplier for tensor1/tensor2. Default: ``1`` .
+
+     Returns:
+         Tensor, has the same shape and dtype as tensor1/tensor2.
+
+     Raises:
+         TypeError: If `tensor1`, `tensor2`, or `input` is not a Tensor.
+         ValueError: If `tensor1` could not be broadcast to a tensor with shape of `tensor2`.
+         ValueError: If `value` could not be broadcast to tensors with shapes of `tensor1/tensor2`.
+         ValueError: If `input` could not be broadcast to tensors with shapes of `value*(tensor1/tensor2)`.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, ops
+         >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
+         >>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
+         >>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
+         >>> y = ops.addcdiv_ext(input_data, x1, x2, value=1)
+         >>> print(y)
+         [1.25 1.6666667 2.5 5. ]
+     """
+     return _addcdiv_instance(*args, **kwargs)
+
+
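
Note the naming mismatch above: the docstring example calls `ops.addcdiv_ext`, while this module exposes the wrapper as `addcdiv` (reachable as `mint.addcdiv`). A quick hand-check of the formula `y[i] = input[i] + value * (tensor1[i] / tensor2[i])`, assuming the `mint.addcdiv` entry point reaches this wrapper:

    import numpy as np
    import mindspore
    from mindspore import Tensor, mint

    inp = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
    t1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
    t2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
    # 1 + 1 * [1/4, 2/3, 3/2, 4/1] -> [1.25 1.6666667 2.5 5. ]
    print(mint.addcdiv(inp, t1, t2, value=1))
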
+ def all_gather_matmul(*args, **kwargs):
+     r"""
+     all_gather_matmul(input, x2, group, world_size, *, bias=None, gather_index=0, gather_output=True, comm_turn=0, trans_input=False, trans_x2=False) -> Tensor
+
+     In the TP segmentation scenario, allgather and matmul are fused, and communication and computational pipelines
+     are parallelized within the fusion operator.
+
+     .. math::
+         output = allgather(input)@x2
+
+         gather\_out = allgather(input)
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): The left matrix of matmul, the dtype supports float16 and bfloat16, the shape supports 2
+             dimensions, and the data format supports ND.
+         x2 (Tensor): The right matrix of matmul, the dtype needs to be consistent with ``input`` , the shape
+             supports 2 dimensions, and the data format supports ND.
+         group (str): Communication group name, can be created by ``create_group`` method, or use the default group
+             ``mindspore.communication.GlobalComm.WORLD_COMM_GROUP``.
+         world_size (int): The total number of ranks in the communication group, should be consistent with the number
+             of devices actually running, supporting ``2`` , ``4`` , and ``8`` .
+
+     Keyword Args:
+         bias (Tensor, optional): Currently only ``None`` is supported. Default: ``None`` .
+         gather_index (int, optional): Indicates the allgather operation object, ``0`` means gather ``input`` ,
+             ``1`` means gather ``x2`` . Currently only ``0`` is supported. Default: ``0`` .
+         gather_output (bool, optional): Indicates whether gather output is required. Default: ``True`` .
+         comm_turn (int, optional): Indicates the granularity of communication between ranks. Currently only ``0``
+             is supported. Default: ``0`` .
+         trans_input (bool, optional): Indicates whether ``input`` is transposed. Currently only ``False`` is
+             supported. Default: ``False`` .
+         trans_x2 (bool, optional): Indicates whether ``x2`` is transposed. Default: ``False`` .
+
+     Returns:
+         - output (Tensor) - The result of allgather and matmul fusion calculations.
+         - gather_out (Tensor) - The result of allgather. If gather_output is ``False`` , ``gather_out`` returns a
+           tensor with shape 0.
+
+     Note:
+         - When using this interface, please ensure that the driver firmware package and CANN package are both the
+           matching 8.0.RC2 version or a higher version, otherwise an error will be reported, such as BUS ERROR.
+         - The shape of ``input`` is (m, k), the shape of ``x2`` is (k, n), k is required to be equal, and the value
+           range of k is [256, 65535). The shape of ``output`` is (m * world_size, n), and the shape of
+           ``gather_out`` is (m * world_size, k).
+         - The common fusion operators in a model only support the same communication group.
+
+     Raises:
+         TypeError: Any arg is of wrong type.
+         RuntimeError: The dtype of ``input`` or ``x2`` is neither float16 nor bfloat16.
+         RuntimeError: The dtypes of ``input`` and ``x2`` are different.
+         RuntimeError: The shape of ``input`` or ``x2`` is not two-dimensional.
+         RuntimeError: The k axis of ``input`` shape and ``x2`` shape are not equal.
+         RuntimeError: k is less than ``256`` or greater than or equal to ``65535`` .
+         RuntimeError: ``bias`` is not None.
+         RuntimeError: ``group`` does not exist.
+         RuntimeError: ``world_size`` is inconsistent with the actual number of running cards.
+         RuntimeError: ``world_size`` is not equal to ``2`` , ``4`` , or ``8`` .
+         RuntimeError: ``gather_index`` is not ``0`` .
+         RuntimeError: ``trans_input`` is ``True`` .
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         .. note::
+             Before running the following examples, you need to configure the communication environment variables.
+
+             For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
+             configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
+             for more details.
+
+             This example should be run with 2 devices.
+
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> from mindspore import ops
+         >>> ms.communication.init()
+         >>> rank = ms.communication.get_rank()
+         >>> np.random.seed(rank)
+         >>> input = ms.Tensor(np.random.randn(128, 256).astype(np.float32), dtype=ms.float16)
+         >>> x2 = ms.Tensor(np.random.randn(256, 512).astype(np.float32), dtype=ms.float16)
+         >>> group = ms.communication.GlobalComm.WORLD_COMM_GROUP
+         >>> world_size = ms.communication.get_group_size()
+         >>> output, gather_out = ops.all_gather_matmul(
+         ...     input,
+         ...     x2,
+         ...     group,
+         ...     world_size,
+         ...     bias=None,
+         ...     gather_index=0,
+         ...     gather_output=True,
+         ...     comm_turn=0,
+         ...     trans_input=False,
+         ...     trans_x2=False,
+         ... )
+         >>> print(output.shape)
+         (256, 512)
+         >>> print(gather_out.shape)
+         (256, 256)
+     """
+     return _all_gather_matmul_instance(*args, **kwargs)
+
+
+ def bitwise_not(*args, **kwargs):
+     r"""
+     bitwise_not(input) -> Tensor
+
+     Returns bitwise `not` of the input tensor.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): The input tensor must be of integral or Boolean types.
+
+     Returns:
+         Tensor, has the same shape and type as `input`.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         RuntimeError: If dtype of `input` is not int or bool.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([True, False, True, False]))
+         >>> y = mint.bitwise_not(x)
+         >>> print(y)
+         [False True False True]
+     """
+     return _bitwise_not_instance(*args, **kwargs)
+
+
+ def clamp(*args, **kwargs):
+     r"""
+     clamp(input, min=None, max=None) -> Tensor
+
+     Clamps tensor values between the specified minimum value and maximum value.
+
+     Limits the value of :math:`input` to a range, whose lower limit is `min` and upper limit is `max` .
+
+     .. math::
+
+         out_i= \left\{
+         \begin{array}{align}
+             max & \text{ if } input_i\ge max \\
+             input_i & \text{ if } min \lt input_i \lt max \\
+             min & \text{ if } input_i \le min \\
+         \end{array}\right.
+
+     Note:
+         - `min` and `max` cannot be None at the same time;
+         - When `min` is None and `max` is not None, the elements in Tensor larger than `max` will become `max`;
+         - When `min` is not None and `max` is None, the elements in Tensor smaller than `min` will become `min`;
+         - If `min` is greater than `max`, the value of all elements in Tensor will be set to `max`;
+         - The data type of `input`, `min` and `max` should support implicit type conversion and cannot be bool type.
+
+     Args:
+         input (Tensor): Input data. Tensors of arbitrary dimensions are supported.
+         min (Union(Tensor, float, int), optional): The minimum value. Default: ``None`` .
+         max (Union(Tensor, float, int), optional): The maximum value. Default: ``None`` .
+
+     Returns:
+         Tensor, a clipped Tensor.
+         The data type and shape are the same as input.
+
+     Raises:
+         ValueError: If both `min` and `max` are None.
+         TypeError: If the type of `input` is not Tensor.
+         TypeError: If the type of `min` is not one of None, Tensor, float or int.
+         TypeError: If the type of `max` is not one of None, Tensor, float or int.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> # case 1: min and max are Tensors
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> import numpy as np
+         >>> min_value = Tensor(5, mindspore.float32)
+         >>> max_value = Tensor(20, mindspore.float32)
+         >>> input = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32)
+         >>> output = mint.clamp(input, min_value, max_value)
+         >>> print(output)
+         [[ 5. 20. 5. 7.]
+          [ 5. 11. 6. 20.]]
+         >>> # case 2: min and max are numbers
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> import numpy as np
+         >>> min_value = 5
+         >>> max_value = 20
+         >>> input = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32)
+         >>> output = mint.clamp(input, min_value, max_value)
+         >>> print(output)
+         [[ 5. 20. 5. 7.]
+          [ 5. 11. 6. 20.]]
+     """
+     return _clamp_instance(*args, **kwargs)
+
+
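
One behavior from the Note above is easy to miss: when `min` is greater than `max`, every element is set to `max`. A small sketch of that edge case (the expected output follows the documented rule):

    import mindspore
    from mindspore import Tensor, mint

    x = Tensor([1., 2., 3.], mindspore.float32)
    # min (2.5) > max (1.5): per the docstring, all elements become max.
    print(mint.clamp(x, 2.5, 1.5))  # expected: [1.5 1.5 1.5]
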
+ def clip(*args, **kwargs):
+     r"""
+     clip(input, min=None, max=None) -> Tensor
+
+     Alias for :func:`mindspore.mint.clamp`.
+     """
+     return _clamp_instance(*args, **kwargs)
+
+
+ def div(*args, **kwargs):
+     r"""
+     div(input, other, *, rounding_mode=None) -> Tensor
+
+     Divides each element of `input` by the corresponding element of `other` .
+
+     .. math::
+
+         out_{i} = input_{i} / other_{i}
+
+     .. note::
+         - When the two inputs have different shapes, they must be able to broadcast to a common shape.
+         - The two inputs cannot be bool type at the same time,
+           [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
+         - The two inputs comply with the implicit type conversion rules to make the data types
+           consistent.
+
+     Args:
+         input (Union[Tensor, Number, bool]): The dividend.
+         other (Union[Tensor, Number, bool]): The divisor.
+
+     Keyword Args:
+         rounding_mode (str, optional): Type of rounding applied to the result. Default: ``None`` .
+             Three types are defined as,
+
+             - None: Default behavior, which is the same as true division in Python or `true_divide` in NumPy.
+
+             - "floor": Rounds the division of the inputs down, which is the same as floor division in Python
+               or `floor_divide` in NumPy.
+
+             - "trunc": Rounds the division of the inputs towards zero, which is the same as C-style integer division.
+
+     Returns:
+         Tensor, the shape is the same as the one after broadcasting,
+         and the data type is the one with higher precision or higher digits among the two inputs.
+
+     Raises:
+         TypeError: If `input` and `other` are not one of the following: Tensor, Number, bool.
+         ValueError: If `rounding_mode` value is not None, "floor" or "trunc".
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
+         >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
+         >>> output = mint.div(x, y)
+         >>> print(output)
+         [0.25 0.4 0.5]
+     """
+     return _div_instance(*args, **kwargs)
+
+
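
The docstring example only exercises the default true division. The difference between the two explicit modes shows up with negative operands, since "floor" rounds toward negative infinity while "trunc" rounds toward zero; a sketch with expected values derived from those definitions:

    import mindspore
    from mindspore import Tensor, mint

    x = Tensor([-7.0, 7.0], mindspore.float32)
    y = Tensor([2.0, 2.0], mindspore.float32)
    print(mint.div(x, y))                         # true division: [-3.5  3.5]
    print(mint.div(x, y, rounding_mode="floor"))  # toward -inf:   [-4.  3.]
    print(mint.div(x, y, rounding_mode="trunc"))  # toward zero:   [-3.  3.]
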
+ def divide(*args, **kwargs):
+     r"""
+     divide(input, other, *, rounding_mode=None) -> Tensor
+
+     Alias for :func:`mindspore.mint.div`.
+     """
+     return _div_instance(*args, **kwargs)
+
+
+ def empty(*args, **kwargs):
+     r"""
+     empty(*size, dtype=None, device=None) -> Tensor
+
+     Creates a tensor with uninitialized data, whose shape, dtype and device are described by the argument `size`,
+     `dtype` and `device` respectively.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Can be a variable number of
+             positive integers, or a tuple or list containing positive integers.
+
+     Keyword Args:
+         dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
+             `mindspore.float32` will be used. Default: ``None`` .
+         device (string, optional): The specified device of the output tensor. Support ``CPU`` and ``Ascend``. If
+             `device` is ``None``, `mindspore.context.device_target` will be used. Default: ``None``.
+
+     Returns:
+         Tensor, whose dtype and size are defined by input.
+
+     Raises:
+         TypeError: If `size` is neither an int nor a tuple or list of int.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> from mindspore import ops
+         >>> output = ops.empty((2, 3), dtype=mindspore.float32)
+         >>> print(output)
+         [[0. 0. 0.]
+          [0. 0. 0.]]
+     """
+     return _empty_instance(*args, **kwargs)
+
+
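
Because the data is uninitialized, the zeros printed in the example above are just one possible outcome; only the shape and dtype are guaranteed. A safer check (hedged: assumes the variadic `*size` form and the `mint.empty` entry point dispatch to this wrapper):

    import mindspore
    from mindspore import mint

    t = mint.empty(2, 3, dtype=mindspore.float16)
    # Contents are whatever was in memory; assert only on metadata.
    print(t.shape, t.dtype)  # (2, 3) Float16
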
+ def floor_divide(*args, **kwargs):
+     r"""
+     Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.
+
+     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+     Inputs must be two tensors or one tensor and one scalar.
+     When the inputs are two tensors,
+     dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
+     When the inputs are one tensor and one scalar,
+     the scalar could only be a constant.
+
+     .. math::
+         out_{i} = \text{floor}\left(\frac{input_i}{other_i}\right)
+
+     where :math:`floor` indicates the Floor operator. For more details,
+     please refer to the :class:`mindspore.mint.floor` operator.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Union[Tensor, Number, bool]): The first input is a number or
+             a bool or a tensor whose data type is number or bool.
+         other (Union[Tensor, Number, bool]): The second input is a number or
+             a bool or a tensor whose data type is number or bool.
+
+     Returns:
+         Tensor, the shape is the same as the one after broadcasting,
+         and the data type is the one with higher precision or higher digits among the two inputs.
+
+     Raises:
+         TypeError: If `input` and `other` are not one of the following: Tensor, number.Number or bool.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> import numpy as np
+         >>> input = Tensor(np.array([2, 4, -1]), mindspore.int32)
+         >>> other = Tensor(np.array([3, 3, 3]), mindspore.int32)
+         >>> output = mint.floor_divide(input, other)
+         >>> print(output)
+         [ 0 1 -1]
+         >>> input = Tensor(2.0, mindspore.float32)
+         >>> other = Tensor(2.0, mindspore.float32)
+         >>> output = mint.floor_divide(input, other)
+         >>> print(output)
+         1.0
+     """
+     return _floor_divide_instance(*args, **kwargs)
+
+
+ def fmod(*args, **kwargs):
+     r"""
+     fmod(input, other) -> Tensor
+
+     Computes the floating-point remainder of the division operation input/other.
+
+     .. math::
+
+         out = input - n * other
+
+     Where :math:`n` is :math:`input/other` with its fractional part truncated.
+     The returned value has the same sign as `input` and is less than `other` in magnitude.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): the dividend.
+         other (Union[Tensor, Number]): the divisor.
+
+     Returns:
+         Tensor, the shape is the same as the one after broadcasting,
+         and the data type is the one with higher precision or higher digits among the two inputs.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> input = Tensor(np.array([-4., -3.5, 0, 3.5, 4]), mindspore.float32)
+         >>> output = mint.fmod(input, 2.5)
+         >>> print(output)
+         [-1.5 -1. 0. 1. 1.5]
+     """
+     return _fmod_instance(*args, **kwargs)
+
+
580
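+ # Editor's note: a hedged numpy cross-check, not part of the released module, of
+ # the documented identity out = input - n * other, where n is input/other with
+ # its fractional part truncated. The helper name is hypothetical.
+ def _fmod_identity_sketch():
+     import numpy as np
+     a = np.array([-4.0, -3.5, 0.0, 3.5, 4.0], dtype=np.float32)
+     b = 2.5
+     n = np.trunc(a / b)        # fractional part truncated, keeps the sign of a/b
+     expected = a - n * b       # the formula from the docstring above
+     assert np.allclose(expected, np.fmod(a, b))
+
+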
+ def gelu(*args, **kwargs):
+     r"""
+     gelu(input, *, approximate='none') -> Tensor
+
+     Gaussian Error Linear Units activation function.
+
+     GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
+     Also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
+     <https://arxiv.org/abs/1810.04805>`_.
+
+     When the `approximate` argument is `none`, GELU is defined as follows:
+
+     .. math::
+         GELU(x_i) = x_i * P(X < x_i),
+
+     where :math:`P` is the cumulative distribution function of the standard Gaussian distribution,
+     and :math:`x_i` is the input element.
+
+     When the `approximate` argument is `tanh`, GELU is estimated with:
+
+     .. math::
+         GELU(x_i) = 0.5 * x_i * (1 + \tanh(\sqrt{2 / \pi} * (x_i + 0.044715 * x_i^3)))
+
+     GELU Activation Function Graph:
+
+     .. image:: ../images/GELU.png
+         :align: center
+
+     .. note::
+         On the Ascend platform, when `input` is -inf, its gradient is 0,
+         and when `input` is inf, its gradient is `dout`.
+
+     Args:
+         input (Tensor): The input of the activation function GeLU; the data type is bfloat16, float16, float32 or float64.
+
+     Keyword Args:
+         approximate (str, optional): The gelu approximation algorithm to use. Acceptable values are ``'none'`` and ``'tanh'`` .
+             Default: ``'none'`` .
+
+     Returns:
+         Tensor, with the same type and shape as `input`.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         TypeError: If dtype of `input` is not bfloat16, float16, float32 or float64.
+         ValueError: If `approximate` value is neither `none` nor `tanh`.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+         >>> result = mint.nn.functional.gelu(input)
+         >>> print(result)
+         [[-1.58655241e-01  3.99987316e+00 -0.00000000e+00]
+          [ 1.95449972e+00 -1.41860323e-06  9.00000000e+00]]
+         >>> result = mint.nn.functional.gelu(input, approximate="tanh")
+         >>> print(result)
+         [[-1.58808023e-01  3.99992990e+00 -3.10779147e-21]
+          [ 1.95459759e+00 -2.29180174e-07  9.00000000e+00]]
+     """
+     return _gelu_instance(*args, **kwargs)
+
+
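+ # Editor's note: a hedged numpy sketch, not part of the released module, that
+ # evaluates the documented tanh approximation directly, so the formula above can
+ # be sanity-checked without a device. The helper name is hypothetical.
+ def _gelu_tanh_sketch():
+     import numpy as np
+     x = np.array([-1.0, 0.0, 2.0], dtype=np.float32)
+     # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
+     return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))
+
+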
+ def gmm(*args, **kwargs):
+     r"""
+     gmm(x, weight, bias=None, group_list=None, group_type=0, group_list_type=0) -> tuple[Tensor]
+
+     Grouping matrix multiplication.
+
+     .. warning::
+         - This is an experimental API that is subject to change or deletion.
+         - `group_type` must be a constant.
+         - Only supported on Atlas A2 training series.
+         - When the type of `group_list` is tuple[int] or list[int], it should be a non-negative non-decreasing sequence,
+           indicating indexes of each group along the split axis. In this scenario, the arg `group_list_type` is ignored.
+
+     .. note::
+         - When `group_type` is 2, the tensors in `x` must be non-contiguous tensors that have
+           been transposed.
+         - Only when `group_type` is 0 and `bias` is None is the backward derivative supported;
+           it is implemented by ops.function.math_func.gmm_backward or through automatic differentiation.
+
+     Args:
+         x (tuple[Tensor]): The first tensors to be multiplied, which should contain exactly one tensor.
+         weight (tuple[Tensor]): The second tensors to be multiplied, which should contain exactly one tensor.
+         bias (tuple[Tensor], optional): Biases added to outputs, which should contain exactly one tensor.
+             The shape of each tensor in `bias` should be :math:`(group\_list.shape[0], n)`
+             or :math:`(len(group\_list), n)`. In the training scenario, the bias only supports None.
+             Default: ``None`` .
+         group_list (Union[Tensor, list[int], tuple[int]], optional): 1-D Tensor, list[int]
+             or tuple[int], indicating indexes or sizes of each group along the split axis.
+             When `group_list` is list[int] or tuple[int], its length should be less than or equal to 128.
+             When `group_list` is a Tensor, its size should be less than or equal to 1024.
+             Supported dtypes: int64.
+             Default: ``None`` .
+
+             - If `group_list_type` is 0, it must be a non-negative non-decreasing sequence.
+               And when `group_type` is 0, the last element in `group_list` should be equal to
+               the first dimension of the tensor in `x` . When `group_type` is 2, the last element
+               in `group_list` should be equal to the second dimension of the tensor in `x` .
+
+             - If `group_list_type` is 1, the values in `group_list` are the sizes of each group.
+         group_type (int, optional): Represents the axes that need to be grouped. For example,
+             :math:`C[m,n] = A[m,k] \times B[k,n]`. Default: ``0`` .
+
+             - If `group_type` is 0, the m-axis is grouped, meaning that the shape
+               of each tensor in `x` should be :math:`(m, k)` , the shape of each tensor in `weight`
+               should be :math:`(group\_list.shape[0], k, n)` or :math:`(len(group\_list), k, n)`,
+               and the shape of each tensor in the result would be :math:`(m, n)` .
+
+             - If `group_type` is 2, the k-axis is grouped, meaning that
+               the shape of each tensor in `x` should be :math:`(m, k)`, the shape of each
+               tensor in `weight` should be :math:`(k, n)`, and the shape of each tensor
+               in the result would be :math:`(group\_list.shape[0], m, n)` or :math:`(len(group\_list), m, n)`.
+         group_list_type (int, optional): If it's 0, the values in `group_list` are the cumsum
+             result of the sizes of each group. If it's 1, the values in `group_list` are the sizes
+             of each group. Default: ``0`` .
+
+     `x` , `weight` and `bias` only support the following 3 type combinations:
+
+     - x: float16, weight: float16, bias: float16
+     - x: bfloat16, weight: bfloat16, bias: float32
+     - x: float32, weight: float32, bias: float32
+
+     Returns:
+         tuple[Tensor], the results of grouping matrix multiplication.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import numpy as np
+         >>> from mindspore import Tensor, ops
+         >>> x = Tensor(np.random.uniform(0, 1, (10, 20)).astype(np.float32))
+         >>> weight = Tensor(np.random.uniform(0, 1, (4, 20, 8)).astype(np.float32))
+         >>> group_list = Tensor([2, 4, 2, 2])
+         >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list, group_list_type=1)
+         >>> print(y[0].shape)
+         (10, 8)
+         >>> group_list = [2, 6, 8, 10]
+         >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list, group_list_type=0)
+         >>> print(y[0].shape)
+         (10, 8)
+     """
+     return _gmm_instance(*args, **kwargs)
+
+
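+ # Editor's note: a hedged numpy reference, not part of the released module, for
+ # the group_type=0 semantics documented above: the rows of x are split into
+ # groups by `sizes` (group_list with group_list_type=1), and group i is
+ # multiplied by weight[i]. Names are hypothetical.
+ def _gmm_reference_sketch():
+     import numpy as np
+     x = np.random.uniform(0, 1, (10, 20)).astype(np.float32)
+     weight = np.random.uniform(0, 1, (4, 20, 8)).astype(np.float32)
+     sizes = [2, 4, 2, 2]                     # sums to x.shape[0]
+     out, start = [], 0
+     for i, size in enumerate(sizes):
+         out.append(x[start:start + size] @ weight[i])  # (size, 20) @ (20, 8)
+         start += size
+     return np.concatenate(out, axis=0)       # (10, 8), matching gmm's output
+
+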
+ def gmm_backward(*args, **kwargs):
+     r"""
+     gmm_backward(grad, x, weight, *, group_list=None, group_list_type=0) -> tuple[tuple[Tensor]]
+
+     The gradient of ops.function.math_func.gmm.
+     """
+     return _gmm_backward_instance(*args, **kwargs)
+
+
+ def gmm_backward_fusion(*args, **kwargs):
+     r"""
+     gmm_backward_fusion(grad, weight, *, group_list=None, group_list_type=0) -> tuple[tuple[Tensor]]
+
+     The gradient of ops.function.math_func.gmm, computing only dx.
+     """
+     return _gmm_backward_fusion_instance(*args, **kwargs)
+
+
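+ # Editor's note: a hedged sketch, not part of the released module, of the
+ # automatic-differentiation route mentioned in the gmm docstring, using
+ # mindspore.grad; it assumes group_type=0 and bias=None, the only configuration
+ # documented as supporting the backward pass. The helper name is hypothetical.
+ def _gmm_autodiff_sketch(x, weight, group_list):
+     import mindspore
+     from mindspore import ops
+
+     def forward(x_, w_):
+         y = ops.function.math_func.gmm([x_], [w_], group_list=group_list,
+                                        group_list_type=1)
+         return y[0].sum()
+
+     # Gradients with respect to both positional inputs (dx, dw).
+     return mindspore.grad(forward, grad_position=(0, 1))(x, weight)
+
+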
+ def greater_equal(*args, **kwargs):
+     r"""
+     greater_equal(input, other) -> Tensor
+
+     Computes the boolean value of :math:`input >= other` element-wise.
+
+     .. math::
+
+         out_{i} =\begin{cases}
+             & \text{True, if } input_{i}>=other_{i} \\
+             & \text{False, if } input_{i}<other_{i}
+             \end{cases}
+
+     Note:
+         - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
+           consistent.
+         - The inputs must be two tensors or one tensor and one scalar.
+         - When the inputs are two tensors, their dtypes cannot both be bool,
+           and their shapes can be broadcast.
+         - When the inputs are one tensor and one scalar, the scalar can only be a constant.
+         - Broadcasting is supported.
+         - If one input can be broadcast, its lower dimensions are extended to the corresponding higher dimensions
+           of the other input by copying the value of the dimension.
+
+     Args:
+         input (Union[Tensor, Number]): The first input is a number
+             or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
+         other (Union[Tensor, Number]): The second input. When the first input is a Tensor, the second input should be a Number,
+             or a Tensor of the number or bool_ data type. When the first input is a Scalar,
+             the second input must be a Tensor of the number or bool_ data type.
+
+     Returns:
+         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
+
+     Raises:
+         TypeError: If neither `input` nor `other` is a Tensor.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
+         >>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
+         >>> output = mint.greater_equal(input, other)
+         >>> print(output)
+         [ True  True False]
+         >>> y = 2.1
+         >>> output = mint.greater_equal(input, y)
+         >>> print(output)
+         [False False  True]
+     """
+     return _greater_equal_instance(*args, **kwargs)
+
+
+ def ge(*args, **kwargs):
+     r"""
+     ge(input, other) -> Tensor
+
+     Alias for :func:`mindspore.mint.greater_equal`.
+     """
+     return _greater_equal_instance(*args, **kwargs)
+
+
+ def kthvalue(*args, **kwargs):
+     r"""
+     Calculates the k-th smallest value along the dimension specified by `dim` of the input
+     tensor, and returns a tuple of (`values`, `indices`), where `values` contains the k-th smallest element
+     and `indices` provides the index of each corresponding element.
+
+     Args:
+         input (Tensor): The input tensor, which can be of any dimension. Set the shape of the input tensor as
+             :math:`(input_1, input_2, ..., input_N)`.
+         k (int): Specifies the k-th smallest element to retrieve.
+         dim (int, optional): The dimension along which to find the k-th smallest value. Default: ``-1`` .
+         keepdim (bool, optional): Whether to reduce the dimension; if ``True`` , the output keeps the same dimension as the
+             input, and if ``False`` , the output reduces the dimension. Default: ``False`` .
+
+     Returns:
+         A tuple consisting of `values` and `indices`.
+
+         - **values** (Tensor) - The k-th smallest value of the input tensor, with the same dtype as `input`.
+
+           - If `keepdim` is ``True`` , the shape of the output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
+           - If `keepdim` is ``False`` , the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
+
+         - **indices** (Tensor) - The `indices` for the k-th smallest value of the input tensor; it has the same shape as `values` with dtype int64.
+
+     Raises:
+         TypeError: If `k` or `dim` is not an int.
+         TypeError: If `keepdim` is not a bool.
+         TypeError: If dtype of `input` is not supported.
+         ValueError: If `input` is an empty Tensor.
+         RuntimeError: If `k` is not in the proper range.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, ops
+         >>> input_x = Tensor(np.array([[1.01, 2.02, 3.03], [1.04, 2.05, 3.06]]), mindspore.float32)
+         >>> out = ops.auto_generate.kthvalue(input_x, 2, 1, False)
+         >>> print(out)
+         (Tensor(shape=[2], dtype=Float32, value= [ 2.01999998e+00, 2.04999995e+00]), Tensor(shape=[2], dtype=Int64, value= [1, 1]))
+         >>> out1 = ops.auto_generate.kthvalue(input_x, 2, 1, True)
+         >>> print(out1)
+         (Tensor(shape=[2, 1], dtype=Float32, value=
+         [[ 2.01999998e+00],
+          [ 2.04999995e+00]]), Tensor(shape=[2, 1], dtype=Int64, value=
+         [[1],
+          [1]]))
+     """
+     return _kthvalue_instance(*args, **kwargs)
+
+
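+ # Editor's note: a hedged numpy cross-check, not part of the released module:
+ # kthvalue(input, k, dim) should agree with sorting along `dim` and taking the
+ # (k-1)-th entry. The helper name is hypothetical.
+ def _kthvalue_reference_sketch():
+     import numpy as np
+     x = np.array([[1.01, 2.02, 3.03], [1.04, 2.05, 3.06]], dtype=np.float32)
+     k = 2
+     values = np.sort(x, axis=1)[:, k - 1]       # k-th smallest along dim=1
+     indices = np.argsort(x, axis=1)[:, k - 1]   # its position in the input
+     return values, indices                      # ([2.02, 2.05], [1, 1])
+
+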
+ def lerp(*args, **kwargs):
+     r"""
+     lerp(input, end, weight) -> Tensor
+
+     Performs a linear interpolation of two tensors `input` and `end` based on a float or tensor weight.
+
+     If `weight` is a tensor, the shapes of the three inputs must be broadcastable;
+     if `weight` is a float, the shapes of `input` and `end` must be broadcastable.
+     If `weight` is a float and the platform is Ascend, the types of `input` and `end` need to be float32.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     .. math::
+         output_{i} = input_{i} + weight_{i} * (end_{i} - input_{i})
+
+     Args:
+         input (Tensor): The tensor with the starting points. Data type must be float16 or float32.
+         end (Tensor): The tensor with the ending points. Data type must be the same as `input`.
+         weight (Union[float, Tensor]): The weight for the interpolation formula. Must be a float scalar
+             or a tensor with float16 or float32 data type.
+
+     Returns:
+         Tensor, has the same type and shape as `input`.
+
+     Raises:
+         TypeError: If `input` or `end` is not a tensor.
+         TypeError: If `weight` is neither scalar(float) nor tensor.
+         TypeError: If dtype of `input` or `end` is neither float16 nor float32.
+         TypeError: If dtype of `weight` is neither float16 nor float32 when it is a tensor.
+         TypeError: If `input` and `end` have different data types.
+         TypeError: If `input`, `end` and `weight` have different data types when `weight` is a tensor.
+         ValueError: If `end` could not be broadcast to a tensor with the shape of `input`.
+         ValueError: If `weight` could not be broadcast to tensors with the shapes of `input` and `end` when it is a tensor.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
+         >>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
+         >>> output = mint.lerp(start, end, 0.5)
+         >>> print(output)
+         [5.5 6.  6.5 7. ]
+     """
+     return _lerp_instance(*args, **kwargs)
+
+
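+ # Editor's note: a hedged numpy sketch, not part of the released module, of the
+ # interpolation formula above, output = input + weight * (end - input); at
+ # weight=0 it returns `input`, at weight=1 it returns `end`. Hypothetical helper.
+ def _lerp_reference_sketch():
+     import numpy as np
+     start = np.array([1., 2., 3., 4.], dtype=np.float32)
+     end = np.array([10., 10., 10., 10.], dtype=np.float32)
+     w = 0.5
+     return start + w * (end - start)   # [5.5, 6.0, 6.5, 7.0]
+
+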
+ def matmul_reduce_scatter(*args, **kwargs):
+     r"""
+     matmul_reduce_scatter(input, x2, group, world_size, *, reduce_op='sum', bias=None, comm_turn=0, trans_input=False, trans_x2=False) -> Tensor
+
+     In the TP segmentation scenario, matmul and reducescatter are fused, and the communication and computation
+     pipelines are parallelized within the fusion operator.
+
+     .. math::
+         output = reducescatter(input@x2)
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): The left matrix of matmul; the dtype supports float16 and bfloat16, the shape supports 2
+             dimensions, and the data format supports ND.
+         x2 (Tensor): The right matrix of matmul; the dtype needs to be consistent with ``input`` , the shape
+             supports 2 dimensions, and the data format supports ND.
+         group (str): Communication group name; can be created by the ``create_group`` method, or use the default group
+             ``mindspore.communication.GlobalComm.WORLD_COMM_GROUP``.
+         world_size (int): The total number of ranks in the communication group; should be consistent with the number
+             of devices actually running, supporting ``2`` , ``4`` , and ``8`` .
+
+     Keyword Args:
+         reduce_op (str, optional): The reduce operation type. Currently only ``'sum'`` is supported. Default:
+             ``'sum'`` .
+         bias (Tensor, optional): Currently only ``None`` is supported. Default: ``None`` .
+         comm_turn (int, optional): Indicates the granularity of communication between ranks. Currently only ``0``
+             is supported. Default: ``0`` .
+         trans_input (bool, optional): Indicates whether ``input`` is transposed. Currently only ``False`` is
+             supported. Default: ``False`` .
+         trans_x2 (bool, optional): Indicates whether ``x2`` is transposed. Default: ``False`` .
+
+     Returns:
+         - output (Tensor) - The result of the matmul and reducescatter fusion calculation.
+
+     Note:
+         - When using this interface, please ensure that the driver firmware package and CANN package are both the
+           matching 8.0.RC2 version or a higher version, otherwise an error will be reported, such as BUS ERROR.
+         - The shape of ``input`` is (m, k) and the shape of ``x2`` is (k, n); the two k values are required to be
+           equal, the value range of k is [256, 65535), and m is required to be an integer multiple of
+           ``world_size`` . The shape of ``output`` is (m / world_size, n).
+         - All such fusion operators in a model must use the same communication group.
+
+     Raises:
+         TypeError: Any arg is of the wrong type.
+         RuntimeError: The dtype of ``input`` or ``x2`` is neither float16 nor bfloat16.
+         RuntimeError: The dtypes of ``input`` and ``x2`` are different.
+         RuntimeError: The shape of ``input`` or ``x2`` is not two-dimensional.
+         RuntimeError: The k axis of the ``input`` shape and the ``x2`` shape are not equal.
+         RuntimeError: k is less than ``256`` or greater than or equal to ``65535`` .
+         RuntimeError: ``bias`` is not None.
+         RuntimeError: ``group`` does not exist.
+         RuntimeError: ``world_size`` is inconsistent with the actual number of running cards.
+         RuntimeError: ``world_size`` is not equal to ``2`` , ``4`` , or ``8`` .
+         RuntimeError: ``reduce_op`` is not ``'sum'`` .
+         RuntimeError: ``trans_input`` is ``True`` .
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         .. note::
+             Before running the following examples, you need to configure the communication environment variables.
+
+             For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
+             configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
+             for more details.
+
+             This example should be run with 2 devices.
+
+         >>> import mindspore as ms
+         >>> from mindspore import ops
+         >>> import numpy as np
+         >>> ms.communication.init()
+         >>> rank = ms.communication.get_rank()
+         >>> np.random.seed(rank)
+         >>> input = ms.Tensor(np.random.randn(1024, 256).astype(np.float32), dtype=ms.float16)
+         >>> x2 = ms.Tensor(np.random.randn(256, 512).astype(np.float32), dtype=ms.float16)
+         >>> group = ms.communication.GlobalComm.WORLD_COMM_GROUP
+         >>> world_size = ms.communication.get_group_size()
+         >>> reduce_op = ops.ReduceOp.SUM
+         >>> output = ops.matmul_reduce_scatter(
+         ...     input,
+         ...     x2,
+         ...     group,
+         ...     world_size,
+         ...     reduce_op=reduce_op,
+         ...     bias=None,
+         ...     comm_turn=0,
+         ...     trans_input=False,
+         ...     trans_x2=False,
+         ... )
+         >>> print(output.shape)
+         (512, 512)
+     """
+     return _matmul_reduce_scatter_instance(*args, **kwargs)
+
+
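+ # Editor's note: a hedged shape-arithmetic sketch, not part of the released
+ # module, for the note above: matmul produces (m, n), and reducescatter then
+ # splits the m axis across the ranks, giving (m / world_size, n) per rank,
+ # which matches the (512, 512) output of the 2-device example.
+ def _matmul_reduce_scatter_shape_sketch(m=1024, k=256, n=512, world_size=2):
+     assert m % world_size == 0, "m must be an integer multiple of world_size"
+     matmul_shape = (m, n)                 # (m, k) @ (k, n)
+     output_shape = (m // world_size, n)   # after reducescatter over the ranks
+     return matmul_shape, output_shape     # ((1024, 512), (512, 512))
+
+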
+ def max(*args, **kwargs):
+     r"""
+     max(input) -> Tensor
+
+     Returns the maximum value of the input tensor.
+
+     Args:
+         input (Tensor): The input tensor.
+
+     Returns:
+         Scalar Tensor with the same dtype as `input`, the maximum value of the input.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+         >>> output = mint.max(x)
+         >>> print(output)
+         0.7
+
+     .. function:: max(input, dim, keepdim=False) -> tuple(Tensor)
+         :noindex:
+
+     Calculates the maximum value along the given dim for the input tensor, and returns the maximum values and
+     indices.
+
+     Args:
+         input (Tensor): The input tensor, which can be of any dimension. Set the shape of the input tensor as
+             :math:`(input_1, input_2, ..., input_N)` . Complex tensors are not supported.
+         dim (int): The dimension to reduce.
+         keepdim (bool, optional): Whether to reduce the dimension; if ``True`` the output keeps the same dimension as the
+             `input` , and if ``False`` the output reduces the dimension. Default: ``False``.
+
+     Returns:
+         tuple (Tensor), tuple of 2 tensors, containing the maximum value of the input tensor along the given
+         dimension `dim` and the corresponding index.
+
+         - **values** (Tensor) - The maximum value of the input tensor, with the same shape as `index` and the same dtype as `input`.
+         - **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdim`
+           is ``True`` , the shape of the output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
+           Otherwise, the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         TypeError: If `keepdim` is not a bool.
+         TypeError: If `dim` is not an int.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+         >>> output, index = mint.max(x, 0, keepdim=True)
+         >>> print(output, index)
+         [0.7] [3]
+
+     .. function:: max(input, other) -> Tensor
+         :noindex:
+
+     For details, please refer to :func:`mindspore.mint.maximum`.
+     """
+     return _max_instance(*args, **kwargs)
+
+
+ def min(*args, **kwargs):
+     r"""
+     min(input) -> Tensor
+
+     Returns the minimum value of the input tensor.
+
+     Args:
+         input (Tensor): The input tensor.
+
+     Returns:
+         Scalar Tensor with the same dtype as `input`, the minimum value of the input.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+         >>> output = mint.min(x)
+         >>> print(output)
+         0.0
+
+     .. function:: min(input, dim, keepdim=False) -> tuple(Tensor)
+         :noindex:
+
+     Calculates the minimum value along the given dim for the input tensor, and returns the minimum values and
+     indices.
+
+     Args:
+         input (Tensor): The input tensor, which can be of any dimension. Set the shape of the input tensor as
+             :math:`(input_1, input_2, ..., input_N)` . Complex tensors are not supported.
+         dim (int): The dimension to reduce.
+         keepdim (bool, optional): Whether to reduce the dimension; if ``True`` the output keeps the same dimension as the
+             input, and if ``False`` the output reduces the dimension. Default: ``False``.
+
+     Returns:
+         tuple (Tensor), tuple of 2 tensors, containing the minimum value of the input tensor along the given
+         dimension `dim` and the corresponding index.
+
+         - **values** (Tensor) - The minimum value of the input tensor, with the same shape as `index` and the same dtype as `input`.
+         - **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int64. If `keepdim`
+           is ``True`` , the shape of the output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
+           Otherwise, the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         TypeError: If `keepdim` is not a bool.
+         TypeError: If `dim` is not an int.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+         >>> output, index = mint.min(x, 0, keepdim=True)
+         >>> print(output, index)
+         [0.0] [0]
+
+     .. function:: min(input, other) -> Tensor
+         :noindex:
+
+     For details, please refer to :func:`mindspore.mint.minimum`.
+     """
+     return _min_instance(*args, **kwargs)
+
+
+ def nansum(*args, **kwargs):
+     r"""
+     nansum(input, dim=None, keepdim=False, *, dtype=None) -> Tensor
+
+     Computes the sum of `input` over a given dimension, treating NaNs as zero.
+
+     .. warning::
+         It is only supported on Atlas A2 Training Series Products.
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): The input Tensor.
+         dim (Union[int, tuple(int)], optional): The dimensions to sum.
+             `dim` must be in the range [-rank(input), rank(input)). Default: ``None``, which indicates the sum of all
+             elements in a tensor.
+         keepdim (bool, optional): Whether the output Tensor keeps dimensions or not. Default: ``False``, indicating that no dimension is kept.
+
+     Keyword Args:
+         dtype (:class:`mindspore.dtype`, optional): The dtype of the output Tensor. Default: ``None``.
+
+     Returns:
+         Tensor, the sum of `input` in the given dimension `dim`, treating NaNs as zero.
+
+         - If `dim` is None and `keepdim` is False,
+           the output is a 0-D Tensor representing the sum of all elements in the input Tensor.
+         - If `dim` is an int, set as 2, and `keepdim` is False,
+           the shape of the output is :math:`(input_1, input_3, ..., input_R)`.
+         - If `dim` is tuple(int) or list(int), set as (2, 3), and `keepdim` is False,
+           the shape of the output is :math:`(input_1, input_4, ..., input_R)`.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         TypeError: If `keepdim` is not a bool.
+         TypeError: If the dtype of `input` or `dtype` is a complex type.
+         ValueError: If `dim` is not in [-rank(input), rank(input)).
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([[float("nan"), 2, 3], [1, 2, float("nan")]]), mindspore.float32)
+         >>> output1 = mint.nansum(x, dim=0, keepdim=False, dtype=mindspore.float32)
+         >>> output2 = mint.nansum(x, dim=0, keepdim=True, dtype=mindspore.float32)
+         >>> print(output1)
+         [1. 4. 3.]
+         >>> print(output2)
+         [[1. 4. 3.]]
+     """
+     return _nansum_instance(*args, **kwargs)
+
+
+ def pixel_shuffle(*args, **kwargs):
+     r"""
+     pixel_shuffle(input, upscale_factor) -> Tensor
+
+     Rearranges elements in a tensor according to an upscaling factor.
+
+     Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
+     to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
+
+     This is useful for implementing efficient sub-pixel convolution
+     with a stride of :math:`1/r`.
+
+     For a detailed introduction to the pixel_shuffle algorithm, refer to
+     `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ .
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): Tensor of shape :math:`(*, C \times r^2, H, W)` . The dimension of `input` is larger than 2,
+             and the length of the third-to-last dimension must be divisible by the square of `upscale_factor`.
+         upscale_factor (int): The factor by which to shuffle the input Tensor; it must be a positive integer.
+             `upscale_factor` is the above-mentioned :math:`r`.
+
+     Returns:
+         - **output** (Tensor) - Tensor of shape :math:`(*, C, H \times r, W \times r)` .
+
+     Raises:
+         ValueError: If `upscale_factor` is not a positive integer.
+         ValueError: If the length of the third-to-last dimension is not divisible by the square of `upscale_factor`.
+         ValueError: If the dimension of `input` is less than 3.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> from mindspore import mint
+         >>> input = mint.randn(1, 9, 4, 4)
+         >>> output = mint.nn.functional.pixel_shuffle(input, 3)
+         >>> print(output.shape)
+         (1, 1, 12, 12)
+     """
+     return _pixel_shuffle_instance(*args, **kwargs)
+
+
+ def remainder(*args, **kwargs):
+     r"""
+     remainder(input, other) -> Tensor
+
+     Computes the remainder of `input` divided by `other` element-wise. The result has the same sign as the divisor and
+     its absolute value is less than that of `other`.
+
+     Supports broadcasting to a common shape and implicit type promotion.
+
+     .. code:: python
+
+         remainder(input, other) == input - input.div(other, rounding_mode="floor") * other
+
+     Note:
+         Complex inputs are not supported. At least one input needs to be a tensor, and the two inputs cannot both be bool tensors.
+
+     Args:
+         input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
+             a bool or a tensor whose data type is
+             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+         other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
+             a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
+             When the dividend is a Scalar, the divisor must be a Tensor whose data type is number or bool\_.
+
+     Returns:
+         Tensor, with dtype promoted and shape broadcasted.
+
+     Raises:
+         TypeError: If `input` and `other` are not of types: (tensor, tensor), (tensor, number), (tensor, bool),
+             (number, tensor) or (bool, tensor).
+         ValueError: If `input` and `other` are not broadcastable.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([-4.0, 5.0, 6.0]).astype(np.float32))
+         >>> y = Tensor(np.array([3.0, 2.0, 3.0]).astype(np.float64))
+         >>> output = mint.remainder(x, y)
+         >>> print(output)
+         [2. 1. 0.]
+     """
+     return _remainder_instance(*args, **kwargs)
+
+
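+ # Editor's note: a hedged numpy sketch, not part of the released module,
+ # contrasting the two remainder conventions documented above: `remainder`
+ # follows the divisor's sign (floor division), while `fmod` follows the
+ # dividend's sign (truncated division). The helper name is hypothetical.
+ def _remainder_vs_fmod_sketch():
+     import numpy as np
+     a = np.array([-4.0, 5.0, 6.0], dtype=np.float32)
+     b = np.array([3.0, 2.0, 3.0], dtype=np.float32)
+     floor_style = a - np.floor(a / b) * b    # [2., 1., 0.], sign of divisor
+     trunc_style = a - np.trunc(a / b) * b    # [-1., 1., 0.], sign of dividend
+     return floor_style, trunc_style
+
+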
+ def repeat_interleave(*args, **kwargs):
+     r"""
+     repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
+
+     Repeats elements of a tensor along an axis, like :func:`mindspore.numpy.repeat`.
+
+     .. warning::
+         Only supported on Atlas A2 training series.
+
+     Args:
+         input (Tensor): The tensor to repeat values for. Must be of types: float16,
+             float32, int8, uint8, int16, int32, or int64.
+         repeats (Union[int, tuple, list, Tensor]): The number of times to repeat; must be positive.
+         dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
+             the input Tensor will be flattened and the output will also be flattened.
+
+     Keyword Args:
+         output_size (int, optional): Total output size for the given axis (e.g. sum of repeats).
+             Default: ``None``.
+
+     Returns:
+         One tensor with values repeated along the specified dim. If input has shape
+         :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
+         si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
+         >>> output = mint.repeat_interleave(input, repeats=2, dim=0)
+         >>> print(output)
+         [[0 1 2]
+          [0 1 2]
+          [3 4 5]
+          [3 4 5]]
+     """
+     return _repeat_interleave_instance(*args, **kwargs)
+
+
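+ # Editor's note: a hedged numpy cross-check, not part of the released module:
+ # for an int `repeats`, repeat_interleave matches numpy.repeat on the same axis.
+ # The helper name is hypothetical.
+ def _repeat_interleave_reference_sketch():
+     import numpy as np
+     x = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int32)
+     out = np.repeat(x, repeats=2, axis=0)   # each row duplicated in place
+     return out                              # shape (4, 3), as in the example
+
+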
+ def sub(*args, **kwargs):
+     r"""
+     sub(input, other, *, alpha=1) -> Tensor
+
+     Subtracts the scaled `other` value from the `input` Tensor.
+
+     .. math::
+
+         out_{i} = input_{i} - alpha \times other_{i}
+
+     Note:
+         - When the two inputs have different shapes,
+           they must be able to broadcast to a common shape.
+         - The two inputs and alpha comply with the implicit type conversion rules to make the data types
+           consistent.
+
+     Args:
+         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
+             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
+             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+     Keyword Args:
+         alpha (number.Number, optional): A scaling factor applied to `other`. Default: ``1``.
+
+     Returns:
+         Tensor with a shape that is the same as the broadcast shape of `input` and `other`,
+         and the data type is the one with higher precision or higher digits among the two inputs and alpha.
+
+     Raises:
+         TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+         TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
+         TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import numpy as np
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
+         >>> y = Tensor(1, mindspore.int32)
+         >>> alpha = 0.5
+         >>> output = mint.sub(x, y, alpha=alpha)
+         >>> print(output)
+         [3.5 4.5 5.5]
+         >>> # the data type of x is float32, the data type of y is int32,
+         >>> # alpha is a float, and the output is the data format of higher precision float32.
+         >>> print(output.dtype)
+         Float32
+     """
+     return _sub_instance(*args, **kwargs)
+
+
+ def __sub__(*args, **kwargs):
+     r"""
+     __sub__(input, other, *, alpha=1) -> Tensor
+
+     Alias for :func:`mindspore.mint.sub`.
+
+     .. method:: mint.__sub__(input, other, *, alpha=1) -> Tensor
+         :noindex:
+
+     Alias for the overload function of :func:`mindspore.mint.sub`.
+     """
+     return _sub_instance(*args, **kwargs)
+
+
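+ # Editor's note: a hedged numpy sketch, not part of the released module, of the
+ # formula above, out = input - alpha * other; with alpha=1 this reduces to a
+ # plain subtraction. The helper name is hypothetical.
+ def _sub_alpha_sketch():
+     import numpy as np
+     x = np.array([4.0, 5.0, 6.0], dtype=np.float32)
+     y = 1.0
+     alpha = 0.5
+     return x - alpha * y   # [3.5, 4.5, 5.5], as in the example above
+
+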
+ def where(*args, **kwargs):
+     r"""
+     where(condition, input, other) -> Tensor
+
+     Selects elements from `input` or `other` based on `condition` and returns a tensor.
+
+     .. math::
+         output_i = \begin{cases} input_i,\quad &if\ condition_i \\ other_i,\quad &otherwise \end{cases}
+
+     Args:
+         condition (Tensor[bool]): If true, yield `input`, otherwise yield `other`.
+         input (Union[Tensor, Scalar]): When `condition` is true, values to select from.
+         other (Union[Tensor, Scalar]): When `condition` is false, values to select from.
+
+     Returns:
+         Tensor, with elements selected from `input` and `other`.
+
+     Raises:
+         TypeError: If `condition` is not a tensor.
+         TypeError: If both `input` and `other` are scalars.
+         ValueError: If `condition`, `input` and `other` cannot broadcast to each other.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import numpy as np
+         >>> from mindspore import tensor, ops
+         >>> from mindspore import dtype as mstype
+         >>> a = tensor(np.arange(4).reshape((2, 2)), mstype.float32)
+         >>> b = tensor(np.ones((2, 2)), mstype.float32)
+         >>> condition = a < 3
+         >>> output = ops.where(condition, a, b)
+         >>> print(output)
+         [[0. 1.]
+          [2. 1.]]
+
+     .. function:: where(condition) -> Tensor
+         :noindex:
+
+     Identical to :func:`mindspore.ops.nonzero` with input `condition` and `as_tuple` being True.
+
+     Supported Platforms:
+         ``Ascend``
+     """
+     return _where_instance(*args, **kwargs)
+
+
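+ # Editor's note: a hedged numpy cross-check, not part of the released module:
+ # the three-argument form behaves like numpy.where under broadcasting. The
+ # helper name is hypothetical.
+ def _where_reference_sketch():
+     import numpy as np
+     a = np.arange(4, dtype=np.float32).reshape(2, 2)
+     b = np.ones((2, 2), dtype=np.float32)
+     return np.where(a < 3, a, b)   # [[0., 1.], [2., 1.]]
+
+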
+ def xlogy(*args, **kwargs):
+     r"""
+     xlogy(input, other) -> Tensor
+
+     Computes the first input multiplied by the logarithm of the second input element-wise.
+     Returns zero when `input` is zero.
+
+     .. math::
+
+         out_i = input_{i}\log{other_{i}}
+
+     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+     The inputs must be two tensors or one tensor and one scalar.
+     When the inputs are two tensors, their shapes can be broadcast.
+
+     Args:
+         input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
+             a bool or a tensor whose data type is
+             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+         other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
+             a bool or a tensor whose data type is number or bool when the first input is a tensor.
+             When the first input is a Scalar, the second input must be a Tensor whose data type is number or bool.
+
+     Returns:
+         Tensor, the shape is the same as the one after broadcasting,
+         and the data type is the one with higher precision or higher digits among the two inputs.
+
+     Raises:
+         TypeError: If `input` and `other` are not a numbers.Number, a bool or a Tensor.
+         ValueError: If `input` could not be broadcast to a tensor with the shape of `other`.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, ops
+         >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
+         >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
+         >>> output = ops.xlogy(input, other)
+         >>> print(output)
+         [-3.465736   0.         2.7725887]
+     """
+     return _xlogy_instance(*args, **kwargs)
+
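+
+ # Editor's note: a hedged numpy sketch, not part of the released module, of the
+ # zero-handling documented above: a plain input * log(other) yields nan for
+ # 0 * log(0), while the xlogy convention returns zero there. Hypothetical helper.
+ def _xlogy_zero_handling_sketch():
+     import numpy as np
+     x = np.array([0.0, 1.0, 2.0], dtype=np.float32)
+     y = np.array([0.0, 2.0, 2.0], dtype=np.float32)
+     naive = x * np.log(y)                   # nan at index 0 (0 * -inf); numpy
+                                             # also emits a divide-by-zero warning
+     safe = np.where(x == 0.0, 0.0, naive)   # xlogy-style: zero when x is zero
+     return naive, safe
+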
+ __all__ = [
+     "add",
+     "__add__",
+     "addcdiv",
+     "all_gather_matmul",
+     "bitwise_not",
+     "clamp",
+     "clip",
+     "div",
+     "divide",
+     "empty",
+     "floor_divide",
+     "fmod",
+     "gelu",
+     "gmm",
+     "gmm_backward",
+     "gmm_backward_fusion",
+     "greater_equal",
+     "ge",
+     "kthvalue",
+     "lerp",
+     "matmul_reduce_scatter",
+     "max",
+     "min",
+     "nansum",
+     "pixel_shuffle",
+     "remainder",
+     "repeat_interleave",
+     "sub",
+     "__sub__",
+     "where",
+     "xlogy",
+ ]