mindspore 2.4.10__cp310-cp310-win_amd64.whl → 2.6.0rc1__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (602)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +13 -6
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -38
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +6 -7
  15. mindspore/_extends/parse/compile_config.py +83 -0
  16. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  17. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  18. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  19. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  20. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  21. mindspore/_extends/parse/parser.py +46 -197
  22. mindspore/_extends/parse/resources.py +1 -5
  23. mindspore/_extends/parse/standard_method.py +217 -98
  24. mindspore/_extends/pijit/__init__.py +2 -2
  25. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  26. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  27. mindspore/_extends/utils.py +1 -1
  28. mindspore/amp.py +11 -5
  29. mindspore/atlprov.dll +0 -0
  30. mindspore/avcodec-59.dll +0 -0
  31. mindspore/avdevice-59.dll +0 -0
  32. mindspore/avfilter-8.dll +0 -0
  33. mindspore/avformat-59.dll +0 -0
  34. mindspore/avutil-57.dll +0 -0
  35. mindspore/boost/__init__.py +2 -2
  36. mindspore/boost/base.py +3 -7
  37. mindspore/boost/boost_cell_wrapper.py +138 -43
  38. mindspore/c1.dll +0 -0
  39. mindspore/c1xx.dll +0 -0
  40. mindspore/c2.dll +0 -0
  41. mindspore/common/__init__.py +6 -3
  42. mindspore/common/_grad_function.py +56 -0
  43. mindspore/common/_pijit_context.py +14 -5
  44. mindspore/common/_register_for_tensor.py +1 -2
  45. mindspore/common/_stub_tensor.py +30 -14
  46. mindspore/common/_tensor_cpp_method.py +17 -0
  47. mindspore/common/_tensor_docs.py +4760 -0
  48. mindspore/common/api.py +435 -371
  49. mindspore/common/auto_dynamic_shape.py +41 -44
  50. mindspore/common/dtype.py +39 -36
  51. mindspore/common/dump.py +9 -6
  52. mindspore/common/file_system.py +9 -1
  53. mindspore/common/generator.py +2 -0
  54. mindspore/common/hook_handle.py +6 -2
  55. mindspore/common/initializer.py +13 -10
  56. mindspore/common/jit_begin_end.py +94 -0
  57. mindspore/common/jit_config.py +6 -1
  58. mindspore/common/jit_context.py +76 -0
  59. mindspore/common/jit_trace.py +378 -0
  60. mindspore/common/lazy_inline.py +9 -3
  61. mindspore/common/mindir_util.py +10 -2
  62. mindspore/common/mutable.py +5 -4
  63. mindspore/common/parameter.py +135 -52
  64. mindspore/common/seed.py +2 -2
  65. mindspore/common/sparse_tensor.py +23 -17
  66. mindspore/common/tensor.py +951 -1992
  67. mindspore/communication/__init__.py +7 -5
  68. mindspore/communication/_comm_helper.py +52 -2
  69. mindspore/communication/comm_func.py +240 -181
  70. mindspore/communication/management.py +95 -26
  71. mindspore/context.py +314 -566
  72. mindspore/dataset/__init__.py +65 -37
  73. mindspore/dataset/audio/__init__.py +2 -8
  74. mindspore/dataset/audio/transforms.py +3 -17
  75. mindspore/dataset/callback/ds_callback.py +2 -1
  76. mindspore/dataset/core/config.py +87 -6
  77. mindspore/dataset/engine/cache_admin.py +3 -3
  78. mindspore/dataset/engine/cache_client.py +6 -5
  79. mindspore/dataset/engine/datasets.py +292 -267
  80. mindspore/dataset/engine/datasets_audio.py +22 -8
  81. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  82. mindspore/dataset/engine/datasets_text.py +78 -48
  83. mindspore/dataset/engine/datasets_user_defined.py +182 -116
  84. mindspore/dataset/engine/datasets_vision.py +120 -44
  85. mindspore/dataset/engine/iterators.py +283 -63
  86. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  87. mindspore/dataset/engine/obs/util.py +8 -0
  88. mindspore/dataset/engine/queue.py +40 -0
  89. mindspore/dataset/engine/samplers.py +289 -43
  90. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  91. mindspore/dataset/engine/validators.py +53 -11
  92. mindspore/dataset/text/__init__.py +7 -6
  93. mindspore/dataset/text/transforms.py +6 -5
  94. mindspore/dataset/text/utils.py +3 -3
  95. mindspore/dataset/transforms/__init__.py +0 -9
  96. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  97. mindspore/dataset/transforms/transforms.py +31 -14
  98. mindspore/dataset/utils/browse_dataset.py +1 -1
  99. mindspore/dataset/vision/__init__.py +2 -9
  100. mindspore/dataset/vision/transforms.py +202 -158
  101. mindspore/dataset/vision/utils.py +7 -5
  102. mindspore/dataset/vision/validators.py +1 -2
  103. mindspore/device_context/__init__.py +21 -0
  104. mindspore/device_context/ascend/__init__.py +25 -0
  105. mindspore/device_context/ascend/device.py +72 -0
  106. mindspore/device_context/ascend/op_debug.py +153 -0
  107. mindspore/device_context/ascend/op_precision.py +193 -0
  108. mindspore/device_context/ascend/op_tuning.py +123 -0
  109. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  110. mindspore/device_context/cpu/device.py +62 -0
  111. mindspore/device_context/cpu/op_tuning.py +43 -0
  112. mindspore/device_context/gpu/__init__.py +21 -0
  113. mindspore/device_context/gpu/device.py +70 -0
  114. mindspore/device_context/gpu/op_precision.py +67 -0
  115. mindspore/device_context/gpu/op_tuning.py +175 -0
  116. mindspore/device_manager.py +170 -0
  117. mindspore/dnnl.dll +0 -0
  118. mindspore/dpcmi.dll +0 -0
  119. mindspore/experimental/es/embedding_service.py +35 -27
  120. mindspore/experimental/llm_boost/__init__.py +1 -0
  121. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  122. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  123. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  124. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  125. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  126. mindspore/experimental/llm_boost/register.py +1 -0
  127. mindspore/experimental/map_parameter.py +4 -4
  128. mindspore/experimental/optim/adadelta.py +6 -6
  129. mindspore/experimental/optim/adagrad.py +4 -4
  130. mindspore/experimental/optim/adam.py +7 -0
  131. mindspore/experimental/optim/adamax.py +4 -4
  132. mindspore/experimental/optim/adamw.py +4 -0
  133. mindspore/experimental/optim/asgd.py +1 -1
  134. mindspore/experimental/optim/lr_scheduler.py +73 -46
  135. mindspore/experimental/optim/radam.py +34 -31
  136. mindspore/experimental/optim/rprop.py +1 -1
  137. mindspore/experimental/optim/sgd.py +1 -1
  138. mindspore/hal/contiguous_tensors_handle.py +6 -10
  139. mindspore/hal/device.py +55 -53
  140. mindspore/hal/event.py +52 -52
  141. mindspore/hal/memory.py +157 -117
  142. mindspore/hal/stream.py +150 -109
  143. mindspore/include/api/context.h +0 -1
  144. mindspore/include/dataset/constants.h +7 -4
  145. mindspore/include/dataset/execute.h +2 -2
  146. mindspore/jpeg62.dll +0 -0
  147. mindspore/log.py +50 -0
  148. mindspore/mindrecord/__init__.py +21 -8
  149. mindspore/mindrecord/config.py +17 -316
  150. mindspore/mindrecord/filereader.py +1 -9
  151. mindspore/mindrecord/filewriter.py +5 -15
  152. mindspore/mindrecord/mindpage.py +1 -9
  153. mindspore/mindspore_backend_common.dll +0 -0
  154. mindspore/mindspore_backend_manager.dll +0 -0
  155. mindspore/mindspore_common.dll +0 -0
  156. mindspore/mindspore_core.dll +0 -0
  157. mindspore/mindspore_dump.dll +0 -0
  158. mindspore/mindspore_frontend.dll +0 -0
  159. mindspore/mindspore_glog.dll +0 -0
  160. mindspore/mindspore_memory_pool.dll +0 -0
  161. mindspore/mindspore_ms_backend.dll +0 -0
  162. mindspore/mindspore_ops.dll +0 -0
  163. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  164. mindspore/mindspore_ops_kernel_common.dll +0 -0
  165. mindspore/mindspore_profiler.dll +0 -0
  166. mindspore/mindspore_pyboost.dll +0 -0
  167. mindspore/mindspore_pynative.dll +0 -0
  168. mindspore/mindspore_res_manager.dll +0 -0
  169. mindspore/mindspore_runtime_pipeline.dll +0 -0
  170. mindspore/mint/__init__.py +796 -759
  171. mindspore/mint/distributed/__init__.py +70 -4
  172. mindspore/mint/distributed/distributed.py +2679 -44
  173. mindspore/mint/linalg/__init__.py +8 -0
  174. mindspore/mint/nn/__init__.py +743 -22
  175. mindspore/mint/nn/functional.py +716 -23
  176. mindspore/mint/nn/layer/__init__.py +21 -4
  177. mindspore/mint/nn/layer/_functions.py +334 -0
  178. mindspore/mint/nn/layer/activation.py +276 -1
  179. mindspore/mint/nn/layer/basic.py +123 -0
  180. mindspore/mint/nn/layer/conv.py +921 -0
  181. mindspore/mint/nn/layer/normalization.py +223 -28
  182. mindspore/mint/nn/layer/padding.py +797 -0
  183. mindspore/mint/nn/layer/pooling.py +235 -0
  184. mindspore/mint/optim/__init__.py +3 -1
  185. mindspore/mint/optim/adam.py +223 -0
  186. mindspore/mint/optim/adamw.py +26 -19
  187. mindspore/mint/optim/sgd.py +171 -0
  188. mindspore/mint/special/__init__.py +2 -1
  189. mindspore/msobj140.dll +0 -0
  190. mindspore/mspdb140.dll +0 -0
  191. mindspore/mspdbcore.dll +0 -0
  192. mindspore/mspdbst.dll +0 -0
  193. mindspore/mspft140.dll +0 -0
  194. mindspore/msvcdis140.dll +0 -0
  195. mindspore/msvcp140_1.dll +0 -0
  196. mindspore/msvcp140_2.dll +0 -0
  197. mindspore/msvcp140_atomic_wait.dll +0 -0
  198. mindspore/msvcp140_codecvt_ids.dll +0 -0
  199. mindspore/multiprocessing/__init__.py +5 -0
  200. mindspore/nn/__init__.py +4 -1
  201. mindspore/nn/cell.py +1370 -189
  202. mindspore/nn/dynamic_lr.py +2 -1
  203. mindspore/nn/layer/activation.py +29 -27
  204. mindspore/nn/layer/basic.py +51 -35
  205. mindspore/nn/layer/channel_shuffle.py +3 -3
  206. mindspore/nn/layer/container.py +1 -1
  207. mindspore/nn/layer/conv.py +22 -17
  208. mindspore/nn/layer/embedding.py +12 -11
  209. mindspore/nn/layer/normalization.py +56 -49
  210. mindspore/nn/layer/padding.py +4 -3
  211. mindspore/nn/layer/pooling.py +120 -42
  212. mindspore/nn/layer/rnn_cells.py +1 -1
  213. mindspore/nn/layer/rnns.py +2 -1
  214. mindspore/nn/layer/timedistributed.py +5 -5
  215. mindspore/nn/layer/transformer.py +59 -36
  216. mindspore/nn/learning_rate_schedule.py +8 -4
  217. mindspore/nn/loss/loss.py +58 -55
  218. mindspore/nn/optim/ada_grad.py +7 -5
  219. mindspore/nn/optim/adadelta.py +11 -9
  220. mindspore/nn/optim/adafactor.py +1 -1
  221. mindspore/nn/optim/adam.py +17 -13
  222. mindspore/nn/optim/adamax.py +8 -7
  223. mindspore/nn/optim/adasum.py +5 -5
  224. mindspore/nn/optim/asgd.py +1 -1
  225. mindspore/nn/optim/ftrl.py +11 -9
  226. mindspore/nn/optim/lamb.py +1 -1
  227. mindspore/nn/optim/lars.py +1 -4
  228. mindspore/nn/optim/lazyadam.py +12 -10
  229. mindspore/nn/optim/momentum.py +7 -6
  230. mindspore/nn/optim/optimizer.py +3 -3
  231. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  232. mindspore/nn/optim/rmsprop.py +13 -12
  233. mindspore/nn/optim/rprop.py +11 -9
  234. mindspore/nn/optim/sgd.py +9 -6
  235. mindspore/nn/optim/tft_wrapper.py +5 -2
  236. mindspore/nn/optim/thor.py +2 -1
  237. mindspore/nn/probability/bijector/bijector.py +17 -11
  238. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  239. mindspore/nn/probability/bijector/invert.py +2 -2
  240. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  241. mindspore/nn/probability/bijector/softplus.py +3 -2
  242. mindspore/nn/probability/distribution/beta.py +3 -3
  243. mindspore/nn/probability/distribution/categorical.py +1 -1
  244. mindspore/nn/probability/distribution/cauchy.py +4 -2
  245. mindspore/nn/probability/distribution/exponential.py +6 -7
  246. mindspore/nn/probability/distribution/gamma.py +2 -2
  247. mindspore/nn/probability/distribution/gumbel.py +2 -2
  248. mindspore/nn/probability/distribution/half_normal.py +5 -3
  249. mindspore/nn/probability/distribution/logistic.py +5 -3
  250. mindspore/nn/probability/distribution/poisson.py +1 -1
  251. mindspore/nn/probability/distribution/uniform.py +5 -3
  252. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  253. mindspore/nn/reinforcement/tensor_array.py +1 -1
  254. mindspore/nn/utils/init.py +13 -11
  255. mindspore/nn/wrap/__init__.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +181 -122
  257. mindspore/nn/wrap/grad_reducer.py +45 -36
  258. mindspore/nn/wrap/loss_scale.py +6 -7
  259. mindspore/numpy/array_creations.py +63 -65
  260. mindspore/numpy/array_ops.py +149 -144
  261. mindspore/numpy/logic_ops.py +41 -42
  262. mindspore/numpy/math_ops.py +365 -363
  263. mindspore/numpy/utils.py +17 -18
  264. mindspore/numpy/utils_const.py +5 -6
  265. mindspore/opencv_core452.dll +0 -0
  266. mindspore/opencv_imgcodecs452.dll +0 -0
  267. mindspore/opencv_imgproc452.dll +0 -0
  268. mindspore/ops/__init__.py +5 -3
  269. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  270. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  271. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  272. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  273. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  274. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  275. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  276. mindspore/ops/_register_for_op.py +0 -11
  277. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  278. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  279. mindspore/ops/_vmap/vmap_array_ops.py +27 -25
  280. mindspore/ops/_vmap/vmap_base.py +0 -2
  281. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  282. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  283. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  284. mindspore/ops/auto_generate/__init__.py +4 -3
  285. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +236 -46
  286. mindspore/ops/auto_generate/gen_extend_func.py +764 -124
  287. mindspore/ops/auto_generate/gen_ops_def.py +4018 -2264
  288. mindspore/ops/auto_generate/gen_ops_prim.py +15463 -5037
  289. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  290. mindspore/ops/composite/__init__.py +2 -1
  291. mindspore/ops/composite/base.py +20 -25
  292. mindspore/ops/composite/math_ops.py +6 -16
  293. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  294. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  295. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  296. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  299. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  301. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  302. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  303. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  304. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  305. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  306. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  307. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  308. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  309. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  310. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  311. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  312. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  313. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  314. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  315. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  316. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  317. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  318. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  319. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  320. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  321. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  324. mindspore/ops/function/__init__.py +40 -2
  325. mindspore/ops/function/_add_attr_func.py +58 -0
  326. mindspore/ops/function/array_func.py +2089 -2403
  327. mindspore/ops/function/clip_func.py +80 -23
  328. mindspore/ops/function/debug_func.py +57 -57
  329. mindspore/ops/function/grad/__init__.py +1 -0
  330. mindspore/ops/function/grad/grad_func.py +104 -71
  331. mindspore/ops/function/image_func.py +2 -2
  332. mindspore/ops/function/linalg_func.py +47 -78
  333. mindspore/ops/function/math_func.py +4501 -3802
  334. mindspore/ops/function/nn_func.py +1726 -620
  335. mindspore/ops/function/other_func.py +159 -1
  336. mindspore/ops/function/parameter_func.py +18 -84
  337. mindspore/ops/function/random_func.py +440 -387
  338. mindspore/ops/function/reshard_func.py +4 -70
  339. mindspore/ops/function/sparse_func.py +3 -3
  340. mindspore/ops/function/sparse_unary_func.py +6 -6
  341. mindspore/ops/function/spectral_func.py +25 -58
  342. mindspore/ops/function/vmap_func.py +24 -17
  343. mindspore/ops/functional.py +22 -7
  344. mindspore/ops/functional_overload.py +1440 -0
  345. mindspore/ops/op_info_register.py +32 -244
  346. mindspore/ops/operations/__init__.py +13 -7
  347. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  348. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  349. mindspore/ops/operations/_grad_ops.py +2 -43
  350. mindspore/ops/operations/_infer_ops.py +2 -1
  351. mindspore/ops/operations/_inner_ops.py +43 -84
  352. mindspore/ops/operations/_ms_kernel.py +4 -10
  353. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  354. mindspore/ops/operations/_scalar_ops.py +3 -2
  355. mindspore/ops/operations/_sequence_ops.py +1 -1
  356. mindspore/ops/operations/_tensor_array.py +1 -1
  357. mindspore/ops/operations/array_ops.py +81 -324
  358. mindspore/ops/operations/comm_ops.py +154 -108
  359. mindspore/ops/operations/custom_ops.py +232 -78
  360. mindspore/ops/operations/debug_ops.py +153 -59
  361. mindspore/ops/operations/inner_ops.py +7 -5
  362. mindspore/ops/operations/linalg_ops.py +1 -57
  363. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  364. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  365. mindspore/ops/operations/math_ops.py +32 -234
  366. mindspore/ops/operations/nn_ops.py +210 -498
  367. mindspore/ops/operations/other_ops.py +62 -9
  368. mindspore/ops/operations/random_ops.py +13 -7
  369. mindspore/ops/operations/reshard_ops.py +1 -1
  370. mindspore/ops/operations/sparse_ops.py +2 -2
  371. mindspore/ops/primitive.py +66 -53
  372. mindspore/ops/tensor_method.py +1888 -0
  373. mindspore/ops_generate/__init__.py +0 -5
  374. mindspore/ops_generate/aclnn/__init__.py +0 -0
  375. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  376. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  377. mindspore/ops_generate/api/__init__.py +0 -0
  378. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  379. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  380. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  381. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  382. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  383. mindspore/ops_generate/api/gen_api.py +103 -0
  384. mindspore/ops_generate/api/op_api_proto.py +235 -0
  385. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  386. mindspore/ops_generate/common/__init__.py +0 -0
  387. mindspore/ops_generate/common/base_generator.py +11 -0
  388. mindspore/ops_generate/common/gen_constants.py +91 -0
  389. mindspore/ops_generate/common/gen_utils.py +348 -0
  390. mindspore/ops_generate/common/op_proto.py +473 -0
  391. mindspore/ops_generate/common/template.py +523 -0
  392. mindspore/ops_generate/gen_ops.py +22 -1069
  393. mindspore/ops_generate/op_def/__init__.py +0 -0
  394. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  395. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  396. mindspore/ops_generate/op_def/ops_def_cc_generator.py +299 -0
  397. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  398. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  399. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  400. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  401. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  402. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  403. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  404. mindspore/ops_generate/pyboost/__init__.py +0 -0
  405. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  406. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  407. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  408. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  409. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  410. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  411. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  412. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  413. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  414. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  415. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  416. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  417. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  418. mindspore/ops_generate/resources/__init__.py +0 -0
  419. mindspore/ops_generate/resources/resource_list.py +30 -0
  420. mindspore/ops_generate/resources/resource_loader.py +36 -0
  421. mindspore/ops_generate/resources/resource_manager.py +64 -0
  422. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  423. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  424. mindspore/parallel/__init__.py +7 -3
  425. mindspore/parallel/_auto_parallel_context.py +152 -34
  426. mindspore/parallel/_cell_wrapper.py +130 -15
  427. mindspore/parallel/_parallel_serialization.py +107 -5
  428. mindspore/parallel/_ps_context.py +1 -1
  429. mindspore/parallel/_recovery_context.py +7 -2
  430. mindspore/parallel/_tensor.py +142 -18
  431. mindspore/parallel/_utils.py +199 -23
  432. mindspore/parallel/algo_parameter_config.py +4 -4
  433. mindspore/parallel/auto_parallel.py +732 -0
  434. mindspore/parallel/checkpoint_convert.py +159 -0
  435. mindspore/parallel/checkpoint_transform.py +698 -35
  436. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  437. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  438. mindspore/parallel/cluster/run.py +21 -4
  439. mindspore/parallel/function/__init__.py +24 -0
  440. mindspore/parallel/function/reshard_func.py +259 -0
  441. mindspore/parallel/nn/__init__.py +25 -0
  442. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  443. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  444. mindspore/parallel/parameter_broadcast.py +25 -14
  445. mindspore/parallel/shard.py +137 -58
  446. mindspore/parallel/transform_safetensors.py +363 -305
  447. mindspore/pgodb140.dll +0 -0
  448. mindspore/pgort140.dll +0 -0
  449. mindspore/profiler/__init__.py +22 -5
  450. mindspore/profiler/analysis/__init__.py +0 -0
  451. mindspore/profiler/analysis/parser/__init__.py +0 -0
  452. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  453. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  454. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  455. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  456. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  457. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  458. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  459. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  460. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +106 -0
  461. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  462. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  463. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  464. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  465. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  466. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  467. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  468. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  469. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  470. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  471. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  472. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  473. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  474. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  475. mindspore/profiler/analysis/task_manager.py +131 -0
  476. mindspore/profiler/analysis/time_converter.py +84 -0
  477. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  478. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  479. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  480. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  481. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  482. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  483. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  484. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  485. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  486. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  487. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  488. mindspore/profiler/analysis/work_flow.py +73 -0
  489. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  490. mindspore/profiler/common/command_executor.py +90 -0
  491. mindspore/profiler/common/constant.py +186 -3
  492. mindspore/profiler/common/file_manager.py +208 -0
  493. mindspore/profiler/common/log.py +130 -0
  494. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  495. mindspore/profiler/common/path_manager.py +395 -0
  496. mindspore/profiler/common/process_bar.py +168 -0
  497. mindspore/profiler/common/process_pool.py +9 -3
  498. mindspore/profiler/common/profiler_context.py +500 -0
  499. mindspore/profiler/common/profiler_info.py +304 -0
  500. mindspore/profiler/common/profiler_meta_data.py +74 -0
  501. mindspore/profiler/common/profiler_output_path.py +284 -0
  502. mindspore/profiler/common/profiler_parameters.py +251 -0
  503. mindspore/profiler/common/profiler_path_manager.py +179 -0
  504. mindspore/profiler/common/record_function.py +76 -0
  505. mindspore/profiler/common/tlv_decoder.py +76 -0
  506. mindspore/profiler/common/util.py +75 -2
  507. mindspore/profiler/dynamic_profiler.py +341 -75
  508. mindspore/profiler/envprofiler.py +163 -0
  509. mindspore/profiler/experimental_config.py +197 -0
  510. mindspore/profiler/mstx.py +242 -0
  511. mindspore/profiler/platform/__init__.py +21 -0
  512. mindspore/profiler/platform/base_profiler.py +40 -0
  513. mindspore/profiler/platform/cpu_profiler.py +124 -0
  514. mindspore/profiler/platform/gpu_profiler.py +74 -0
  515. mindspore/profiler/platform/npu_profiler.py +335 -0
  516. mindspore/profiler/profiler.py +1073 -90
  517. mindspore/profiler/profiler_action_controller.py +187 -0
  518. mindspore/profiler/profiler_interface.py +118 -0
  519. mindspore/profiler/schedule.py +243 -0
  520. mindspore/rewrite/api/node.py +15 -13
  521. mindspore/rewrite/api/symbol_tree.py +2 -3
  522. mindspore/run_check/_check_version.py +27 -20
  523. mindspore/run_check/run_check.py +1 -1
  524. mindspore/runtime/__init__.py +37 -0
  525. mindspore/runtime/device.py +27 -0
  526. mindspore/runtime/event.py +209 -0
  527. mindspore/runtime/executor.py +177 -0
  528. mindspore/runtime/memory.py +409 -0
  529. mindspore/runtime/stream.py +460 -0
  530. mindspore/runtime/thread_bind_core.py +401 -0
  531. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  532. mindspore/swresample-4.dll +0 -0
  533. mindspore/swscale-6.dll +0 -0
  534. mindspore/tbbmalloc.dll +0 -0
  535. mindspore/tinyxml2.dll +0 -0
  536. mindspore/train/__init__.py +8 -8
  537. mindspore/train/_utils.py +88 -25
  538. mindspore/train/amp.py +9 -5
  539. mindspore/train/callback/__init__.py +2 -2
  540. mindspore/train/callback/_callback.py +2 -16
  541. mindspore/train/callback/_checkpoint.py +53 -55
  542. mindspore/train/callback/_cluster_monitor.py +14 -18
  543. mindspore/train/callback/_early_stop.py +1 -1
  544. mindspore/train/callback/_flops_collector.py +103 -68
  545. mindspore/train/callback/_history.py +8 -5
  546. mindspore/train/callback/_lambda_callback.py +2 -2
  547. mindspore/train/callback/_landscape.py +0 -3
  548. mindspore/train/callback/_loss_monitor.py +2 -1
  549. mindspore/train/callback/_on_request_exit.py +6 -5
  550. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  551. mindspore/train/callback/_summary_collector.py +52 -19
  552. mindspore/train/callback/_time_monitor.py +2 -1
  553. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -107
  554. mindspore/train/data_sink.py +25 -2
  555. mindspore/train/dataset_helper.py +15 -16
  556. mindspore/train/loss_scale_manager.py +8 -7
  557. mindspore/train/metrics/accuracy.py +3 -3
  558. mindspore/train/metrics/confusion_matrix.py +9 -9
  559. mindspore/train/metrics/error.py +3 -3
  560. mindspore/train/metrics/hausdorff_distance.py +4 -4
  561. mindspore/train/metrics/mean_surface_distance.py +3 -3
  562. mindspore/train/metrics/metric.py +0 -12
  563. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  564. mindspore/train/metrics/precision.py +11 -10
  565. mindspore/train/metrics/recall.py +9 -9
  566. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  567. mindspore/train/mind_ir_pb2.py +174 -46
  568. mindspore/train/model.py +184 -113
  569. mindspore/train/serialization.py +622 -978
  570. mindspore/train/summary/_summary_adapter.py +2 -2
  571. mindspore/train/summary/summary_record.py +2 -3
  572. mindspore/train/train_thor/model_thor.py +1 -1
  573. mindspore/turbojpeg.dll +0 -0
  574. mindspore/utils/__init__.py +6 -3
  575. mindspore/utils/dryrun.py +140 -0
  576. mindspore/utils/hooks.py +81 -0
  577. mindspore/utils/runtime_execution_order_check.py +550 -0
  578. mindspore/utils/utils.py +138 -4
  579. mindspore/vcmeta.dll +0 -0
  580. mindspore/vcruntime140.dll +0 -0
  581. mindspore/vcruntime140_1.dll +0 -0
  582. mindspore/version.py +1 -1
  583. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +3 -3
  584. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +587 -418
  585. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +1 -1
  586. mindspore/_install_custom.py +0 -43
  587. mindspore/common/_register_for_adapter.py +0 -74
  588. mindspore/common/_tensor_overload.py +0 -139
  589. mindspore/mindspore_np_dtype.dll +0 -0
  590. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  591. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  592. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  593. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  594. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  595. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  596. mindspore/ops_generate/gen_utils.py +0 -209
  597. mindspore/ops_generate/op_proto.py +0 -145
  598. mindspore/ops_generate/template.py +0 -261
  599. mindspore/profiler/envprofiling.py +0 -254
  600. mindspore/profiler/profiling.py +0 -1926
  601. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  602. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
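The hunk below reproduces the largest single addition in this release; by its line count it corresponds to entry 344 above, the new mindspore/ops/functional_overload.py (+1440 lines), whose public functions are thin Python wrappers around pybind11-bound overload resolvers imported from mindspore._c_expression. A minimal sketch of that pattern, with hypothetical names (_op_instance stands in for bindings such as _add_instance; this is not MindSpore's actual binding code):

    def _op_instance(*args, **kwargs):
        # Stand-in for a pybind11-bound callable exported by the C++ layer;
        # the real one resolves the overload (Tensor/Number/bool argument
        # combinations) and dispatches to the kernel.
        raise NotImplementedError("placeholder for the C++ overload resolver")

    def op(*args, **kwargs):
        r"""Docstring describing the accepted overloads, as in the hunk below."""
        # The Python layer carries only documentation; argument validation and
        # type promotion happen on the C++ side.
        return _op_instance(*args, **kwargs)

Keeping the wrappers this thin presumably lets a single C++ resolver back both the functional API and the matching Tensor method.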
@@ -0,0 +1,1440 @@
+ # Copyright 2024 Huawei Technologies Co., Ltd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ============================================================================
+ """Holding mint APIs"""
+ from mindspore._c_expression import _add_instance
+ from mindspore._c_expression import _addcdiv_instance
+ from mindspore._c_expression import _all_gather_matmul_instance
+ from mindspore._c_expression import _bitwise_not_instance
+ from mindspore._c_expression import _clamp_instance
+ from mindspore._c_expression import _div_instance
+ from mindspore._c_expression import _empty_instance
+ from mindspore._c_expression import _floor_divide_instance
+ from mindspore._c_expression import _fmod_instance
+ from mindspore._c_expression import _gelu_instance
+ from mindspore._c_expression import _greater_equal_instance
+ from mindspore._c_expression import _kthvalue_instance
+ from mindspore._c_expression import _lerp_instance
+ from mindspore._c_expression import _matmul_reduce_scatter_instance
+ from mindspore._c_expression import _max_instance
+ from mindspore._c_expression import _min_instance
+ from mindspore._c_expression import _nansum_instance
+ from mindspore._c_expression import _pixel_shuffle_instance
+ from mindspore._c_expression import _remainder_instance
+ from mindspore._c_expression import _repeat_interleave_instance
+ from mindspore._c_expression import _sub_instance
+ from mindspore._c_expression import _where_instance
+ from mindspore._c_expression import _xlogy_instance
+
+ def add(*args, **kwargs):
+     r"""
+     add(input, other, *, alpha=1) -> Tensor
+
+     Adds the scaled `other` value to `self`.
+
+     .. math::
+
+         out_{i} = self_{i} + alpha \times other_{i}
+
+     Note:
+         - When `self` and `other` have different shapes,
+           they must be able to broadcast to a common shape.
+         - `self`, `other` and `alpha` comply with the implicit type conversion rules to make the data types
+           consistent.
+
+     Args:
+         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
+             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
+             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+     Keyword Args:
+         alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
+
+     Returns:
+         Tensor with a shape that is the same as the broadcasted shape of `self` and `other`,
+         and the data type is the one with higher precision or higher digits among `self`, `other` and `alpha`.
+
+     Raises:
+         TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+         TypeError: If `alpha` is of type float but `self` and `other` are not of type float.
+         TypeError: If `alpha` is of type bool but `self` and `other` are not of type bool.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import numpy as np
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(1, mindspore.int32)
+         >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+         >>> alpha = 0.5
+         >>> output = mint.add(x, y, alpha=alpha) # x.add(y, alpha=alpha)
+         >>> print(output)
+         [3. 3.5 4.]
+         >>> # the data type of x is int32, the data type of y is float32,
+         >>> # alpha is a float, and the output is the data format of higher precision float32.
+         >>> print(output.dtype)
+         Float32
+     """
+     return _add_instance(*args, **kwargs)
+
+
+ def __add__(*args, **kwargs):
+     r"""
+     __add__(input, other, *, alpha=1) -> Tensor
+
+     Alias for :func:`mindspore.mint.add`.
+
+     .. method:: mint.__add__(input, other, *, alpha=1) -> Tensor
+         :noindex:
+
+     Alias for overload function of :func:`mindspore.mint.add`.
+     """
+     return _add_instance(*args, **kwargs)
+
+
+ def addcdiv(*args, **kwargs):
+     r"""
+     addcdiv_ext(input, tensor1, tensor2, *, value=1) -> Tensor
+
+     Performs the element-wise division of tensor `tensor1` by tensor `tensor2`,
+     multiplies the result by the scalar `value` and adds it to `input`.
+
+     .. math::
+         y[i] = input[i] + value * (tensor1[i] / tensor2[i])
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): The tensor to be added.
+         tensor1 (Tensor): The numerator tensor.
+         tensor2 (Tensor): The denominator tensor.
+
+     Keyword Args:
+         value (Number, optional): The multiplier for tensor1/tensor2. Default: ``1`` .
+
+     Returns:
+         Tensor, has the same shape and dtype as tensor1/tensor2.
+
+     Raises:
+         TypeError: If `tensor1`, `tensor2`, or `input` is not a Tensor.
+         ValueError: If `tensor1` could not be broadcast to a tensor with shape of `tensor2`.
+         ValueError: If `value` could not be broadcast to tensors with shapes of `tensor1/tensor2`.
+         ValueError: If `input` could not be broadcast to tensors with shapes of `value*(tensor1/tensor2)`.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, ops
+         >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
+         >>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
+         >>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
+         >>> y = ops.addcdiv_ext(input_data, x1, x2, value=1)
+         >>> print(y)
+         [1.25 1.6666667 2.5 5. ]
+     """
+     return _addcdiv_instance(*args, **kwargs)
+
+
+ def all_gather_matmul(*args, **kwargs):
+     r"""
+     all_gather_matmul(input, x2, group, world_size, *, bias=None, gather_index=0, gather_output=True, comm_turn=0, trans_input=False, trans_x2=False) -> Tensor
+
+     In the TP segmentation scenario, allgather and matmul are fused, and communication and computational pipelines
+     are parallelized within the fusion operator.
+
+     .. math::
+         output = allgather(input)@x2
+
+         gather\_out = allgather(input)
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): The left matrix of matmul, the dtype supports float16 and bfloat16, the shape supports 2
+             dimensions, and the data format supports ND.
+         x2 (Tensor): The right matrix of matmul, the dtype needs to be consistent with ``input`` , the shape
+             supports 2 dimensions, and the data format supports ND.
+         group (str): Communication group name, can be created by ``create_group`` method, or use the default group
+             ``mindspore.communication.GlobalComm.WORLD_COMM_GROUP``.
+         world_size (int): The total number of ranks in the communication group, should be consistent with the number
+             of devices actually running, supporting ``2`` , ``4`` , and ``8`` .
+
+     Keyword Args:
+         bias (Tensor, optional): Currently only ``None`` is supported. Default: ``None`` .
+         gather_index (int, optional): Indicates the allgather operation object, ``0`` means gather ``input`` ,
+             ``1`` means gather ``x2`` . Currently only ``0`` is supported. Default: ``0`` .
+         gather_output (bool, optional): Indicates whether gather output is required. Default: ``True`` .
+         comm_turn (int, optional): Indicates the granularity of communication between ranks. Currently only ``0``
+             is supported. Default: ``0`` .
+         trans_input (bool, optional): Indicates whether ``input`` is transposed. Currently only ``False`` is
+             supported. Default: ``False`` .
+         trans_x2 (bool, optional): Indicates whether ``x2`` is transposed. Default: ``False`` .
+
+     Returns:
+         - output (Tensor) - The result of allgather and matmul fusion calculations.
+         - gather_out (Tensor) - The result of allgather. If gather_output is ``False`` , ``gather_out`` returns a
+           tensor with shape 0.
+
+     Note:
+         - When using this interface, please ensure that the driver firmware package and CANN package are both the
+           matching 8.0.RC2 version or a higher version, otherwise an error will be reported, such as BUS ERROR.
+         - The shape of ``input`` is (m, k), the shape of ``x2`` is (k, n), k is required to be equal, and the value
+           range of k is [256, 65535). The shape of ``output`` is (m * world_size, n), and the shape of
+           ``gather_out`` is (m * world_size, k).
+         - The common fusion operators in a model only support the same communication group.
+
+     Raises:
+         TypeError: If any argument is of the wrong type.
+         RuntimeError: The dtype of ``input`` or ``x2`` is neither float16 nor bfloat16.
+         RuntimeError: The dtypes of ``input`` and ``x2`` are different.
+         RuntimeError: The shape of ``input`` or ``x2`` is not two-dimensional.
+         RuntimeError: The k axis of ``input`` shape and ``x2`` shape are not equal.
+         RuntimeError: k is less than ``256`` or greater than or equal to ``65535`` .
+         RuntimeError: ``bias`` is not None.
+         RuntimeError: ``group`` does not exist.
+         RuntimeError: ``world_size`` is inconsistent with the actual number of running cards.
+         RuntimeError: ``world_size`` is not equal to ``2`` , ``4`` , or ``8`` .
+         RuntimeError: ``gather_index`` is not ``0`` .
+         RuntimeError: ``trans_input`` is ``True`` .
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         .. note::
+             Before running the following examples, you need to configure the communication environment variables.
+
+             For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
+             configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
+             for more details.
+
+             This example should be run with 2 devices.
+
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> from mindspore import ops
+         >>> ms.communication.init()
+         >>> rank = ms.communication.get_rank()
+         >>> np.random.seed(rank)
+         >>> input = ms.Tensor(np.random.randn(128, 256).astype(np.float32), dtype=ms.float16)
+         >>> x2 = ms.Tensor(np.random.randn(256, 512).astype(np.float32), dtype=ms.float16)
+         >>> group = ms.communication.GlobalComm.WORLD_COMM_GROUP
+         >>> world_size = ms.communication.get_group_size()
+         >>> output, gather_out = ops.all_gather_matmul(
+         ...     input,
+         ...     x2,
+         ...     group,
+         ...     world_size,
+         ...     bias=None,
+         ...     gather_index=0,
+         ...     gather_output=True,
+         ...     comm_turn=0,
+         ...     trans_input=False,
+         ...     trans_x2=False,
+         ... )
+         >>> print(output.shape)
+         (256, 512)
+         >>> print(gather_out.shape)
+         (256, 256)
+     """
+     return _all_gather_matmul_instance(*args, **kwargs)
+
+
+ def bitwise_not(*args, **kwargs):
+     r"""
+     bitwise_not(input) -> Tensor
+
+     Returns bitwise `not` of the input tensor.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): The input tensor must be of integral or Boolean types.
+
+     Returns:
+         Tensor, has the same shape and type as `input`.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         RuntimeError: If dtype of `input` is not int or bool.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([True, False, True, False]))
+         >>> y = mint.bitwise_not(x)
+         >>> print(y)
+         [False True False True]
+     """
+     return _bitwise_not_instance(*args, **kwargs)
+
+
+ def clamp(*args, **kwargs):
+     r"""
+     clamp(input, min=None, max=None) -> Tensor
+
+     Clamps tensor values between the specified minimum value and maximum value.
+
+     Limits the value of :math:`input` to a range, whose lower limit is `min` and upper limit is `max` .
+
+     .. math::
+
+         out_i= \left\{
+         \begin{array}{align}
+             max & \text{ if } input_i\ge max \\
+             input_i & \text{ if } min \lt input_i \lt max \\
+             min & \text{ if } input_i \le min \\
+         \end{array}\right.
+
+     Note:
+         - `min` and `max` cannot be None at the same time;
+         - When `min` is None and `max` is not None, the elements in Tensor larger than `max` will become `max`;
+         - When `min` is not None and `max` is None, the elements in Tensor smaller than `min` will become `min`;
+         - If `min` is greater than `max`, the value of all elements in Tensor will be set to `max`;
+         - The data type of `input`, `min` and `max` should support implicit type conversion and cannot be bool type.
+
+     Args:
+         input (Tensor): Input data, of type Tensor. Tensors of arbitrary dimensions are supported.
+         min (Union(Tensor, float, int), optional): The minimum value. Default: ``None`` .
+         max (Union(Tensor, float, int), optional): The maximum value. Default: ``None`` .
+
+     Returns:
+         Tensor, a clipped Tensor.
+         The data type and shape are the same as input.
+
+     Raises:
+         ValueError: If both `min` and `max` are None.
+         TypeError: If the type of `input` is not Tensor.
+         TypeError: If the type of `min` is not in None, Tensor, float or int.
+         TypeError: If the type of `max` is not in None, Tensor, float or int.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> # case 1: the data type of input is Tensor
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> import numpy as np
+         >>> min_value = Tensor(5, mindspore.float32)
+         >>> max_value = Tensor(20, mindspore.float32)
+         >>> input = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32)
+         >>> output = mint.clamp(input, min_value, max_value)
+         >>> print(output)
+         [[ 5. 20. 5. 7.]
+          [ 5. 11. 6. 20.]]
+         >>> # case 2: the data type of input is number
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> import numpy as np
+         >>> min_value = 5
+         >>> max_value = 20
+         >>> input = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32)
+         >>> output = mint.clamp(input, min_value, max_value)
+         >>> print(output)
+         [[ 5. 20. 5. 7.]
+          [ 5. 11. 6. 20.]]
+     """
+     return _clamp_instance(*args, **kwargs)
+
+
+ def clip(*args, **kwargs):
+     r"""
+     clip(input, min=None, max=None) -> Tensor
+
+     Alias for :func:`mindspore.mint.clamp`.
+     """
+     return _clamp_instance(*args, **kwargs)
+
+
+ def div(*args, **kwargs):
+     r"""
+     div(input, other, *, rounding_mode=None) -> Tensor
+
+     Divides each element of the `input` by the corresponding element of the `other` .
+
+     .. math::
+
+         out_{i} = input_{i} / other_{i}
+
+     .. note::
+         - When the two inputs have different shapes, they must be able to broadcast to a common shape.
+         - The two inputs can not be bool type at the same time,
+           [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
+         - The two inputs comply with the implicit type conversion rules to make the data types
+           consistent.
+
+     Args:
+         input (Union[Tensor, Number, bool]): The dividend.
+         other (Union[Tensor, Number, bool]): The divisor.
+
+     Keyword Args:
+         rounding_mode (str, optional): Type of rounding applied to the result. Default: ``None`` .
+             Three types are defined as,
+
+             - None: Default behavior, which is the same as true division in Python or `true_divide` in NumPy.
+
+             - "floor": Rounds the division of the inputs down, which is the same as floor division in Python
+               or `floor_divide` in NumPy.
+
+             - "trunc": Rounds the division of the inputs towards zero, which is the same as C-style integer division.
+
+     Returns:
+         Tensor, the shape is the same as the one after broadcasting,
+         and the data type is the one with higher precision or higher digits among the two inputs.
+
+     Raises:
+         TypeError: If `input` and `other` are not one of the following: Tensor, Number, bool.
+         ValueError: If `rounding_mode` value is not None, "floor" or "trunc".
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
+         >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
+         >>> output = mint.div(x, y)
+         >>> print(output)
+         [0.25 0.4 0.5]
+     """
+     return _div_instance(*args, **kwargs)
+
+
+ def divide(*args, **kwargs):
+     r"""
+     divide(input, other, *, rounding_mode=None) -> Tensor
+
+     Alias for :func:`mindspore.mint.div`.
+     """
+     return _div_instance(*args, **kwargs)
+
+
+ def empty(*args, **kwargs):
+     r"""
+     empty(*size, dtype=None, device=None) -> Tensor
+
+     Creates a tensor with uninitialized data, whose shape, dtype and device are described by the argument `size`,
+     `dtype` and `device` respectively.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Can be variable numbers of
+             positive integers, or a tuple or list containing positive integers.
+
+     Keyword Args:
+         dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
+             `mindspore.float32` will be used. Default: ``None`` .
+         device (string, optional): The specified device of the output tensor. Support ``CPU`` and ``Ascend``. If
+             `device = None`, `mindspore.context.device_target` will be used. Default ``None``.
+
+     Returns:
+         Tensor, whose dtype and size are defined by input.
+
+     Raises:
+         TypeError: If `size` is neither an int nor a tuple or list of int.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> from mindspore import ops
+         >>> output = ops.empty((2, 3), dtype=mindspore.float32)
+         >>> print(output)
+         [[0. 0. 0.]
+          [0. 0. 0.]]
+     """
+     return _empty_instance(*args, **kwargs)
+
+
+ def floor_divide(*args, **kwargs):
+     r"""
+     Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.
+
+     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+     Inputs must be two tensors or one tensor and one scalar.
+     When the inputs are two tensors,
+     dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
+     When the inputs are one tensor and one scalar,
+     the scalar could only be a constant.
+
+     .. math::
+         out_{i} = \text{floor}( \frac{input_i}{other_i})
+
+     where the :math:`floor` indicates the Floor operator. For more details,
+     please refer to the :class:`mindspore.mint.floor` operator.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Union[Tensor, Number, bool]): The first input is a number or
+             a bool or a tensor whose data type is number or bool.
+         other (Union[Tensor, Number, bool]): The second input is a number or
+             a bool or a tensor whose data type is number or bool.
+
+     Returns:
+         Tensor, the shape is the same as the one after broadcasting,
+         and the data type is the one with higher precision or higher digits among the two inputs.
+
+     Raises:
+         TypeError: If `input` and `other` are not the following: Tensor, number.Number or bool.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> import numpy as np
+         >>> input = Tensor(np.array([2, 4, -1]), mindspore.int32)
+         >>> other = Tensor(np.array([3, 3, 3]), mindspore.int32)
+         >>> output = mint.floor_divide(input, other)
+         >>> print(output)
+         [ 0 1 -1]
+         >>> input = Tensor(2.0, mindspore.float32)
+         >>> other = Tensor(2.0, mindspore.float32)
+         >>> output = mint.floor_divide(input, other)
+         >>> print(output)
+         1.0
+     """
+     return _floor_divide_instance(*args, **kwargs)
+
+
+ def fmod(*args, **kwargs):
+     r"""
+     fmod(input, other) -> Tensor
+
+     Computes the floating-point remainder of the division operation input/other.
+
+     .. math::
+
+         out = input - n * other
+
+     Where :math:`n` is :math:`input/other` with its fractional part truncated.
+     The returned value has the same sign as `input` and is less than `other` in magnitude.
+
+     .. warning::
+         This is an experimental API that is subject to change or deletion.
+
+     Args:
+         input (Tensor): the dividend.
+         other (Union[Tensor, Number]): the divisor.
+
+     Returns:
+         Tensor, the shape is the same as the one after broadcasting,
+         and the data type is the one with higher precision or higher digits among the two inputs.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, mint
+         >>> input = Tensor(np.array([-4., -3.5, 0, 3.5, 4]), mindspore.float32)
+         >>> output = mint.fmod(input, 2.5)
+         >>> print(output)
+         [-1.5 -1. 0. 1. 1.5]
+     """
+     return _fmod_instance(*args, **kwargs)
+
+
+ def gelu(*args, **kwargs):
+ r"""
+ gelu(input, *, approximate='none') -> Tensor
+
+ Gaussian Error Linear Units activation function.
+
+ GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
+ Also refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
+ <https://arxiv.org/abs/1810.04805>`_.
+
+ When the `approximate` argument is `none`, GELU is defined as follows:
+
+ .. math::
+ GELU(x_i) = x_i*P(X < x_i),
+
+ where :math:`P` is the cumulative distribution function of the standard Gaussian distribution,
+ and :math:`x_i` is the input element.
+
+ When the `approximate` argument is `tanh`, GELU is estimated with:
+
+ .. math::
+ GELU(x_i) = 0.5 * x_i * (1 + \tanh(\sqrt{2 / \pi} * (x_i + 0.044715 * x_i^3)))
+
+ GELU Activation Function Graph:
+
+ .. image:: ../images/GELU.png
+ :align: center
+
+ .. note::
+ On the Ascend platform, when `input` is -inf, its gradient is 0,
+ and when `input` is inf, its gradient is `dout`.
+
+ Args:
+ input (Tensor): The input of the activation function GeLU, the data type is bfloat16, float16, float32 or float64.
+
+ Keyword Args:
+ approximate (str, optional): The gelu approximation algorithm to use. Acceptable values are ``'none'`` and ``'tanh'`` .
+ Default: ``'none'`` .
+
+ Returns:
+ Tensor, with the same type and shape as `input`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If dtype of `input` is not bfloat16, float16, float32 or float64.
+ ValueError: If `approximate` value is neither `none` nor `tanh`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+ >>> result = mint.nn.functional.gelu(input)
+ >>> print(result)
+ [[-1.58655241e-01 3.99987316e+00 -0.00000000e+00]
+ [ 1.95449972e+00 -1.41860323e-06 9.0000000e+00]]
+ >>> result = mint.nn.functional.gelu(input, approximate="tanh")
+ >>> print(result)
+ [[-1.58808023e-01 3.99992990e+00 -3.10779147e-21]
+ [ 1.95459759e+00 -2.29180174e-07 9.0000000e+00]]
+ """
+ return _gelu_instance(*args, **kwargs)
+
+
+ def greater_equal(*args, **kwargs):
+ r"""
+ greater_equal(input, other) -> Tensor
+
+ Computes the boolean value of :math:`input >= other` element-wise.
+
+ .. math::
+
+ out_{i} =\begin{cases}
+ & \text{True, if } input_{i}>=other_{i} \\
+ & \text{False, if } input_{i}<other_{i}
+ \end{cases}
+
+ Note:
+ - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
+ consistent.
+ - The inputs must be two tensors or one tensor and one scalar.
+ - When the inputs are two tensors, their dtypes cannot both be bool,
+ and their shapes can be broadcast.
+ - When the inputs are one tensor and one scalar, the scalar could only be a constant.
+ - Broadcasting is supported.
+ - When an input Tensor is broadcast, its lower dimensions are extended to the corresponding higher
+ dimensions of the other input by copying the value of the dimension.
+
+ Args:
+ input (Union[Tensor, Number]): The first input is a number
+ or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
+ other (Union[Tensor, Number]): The second input. When the first input is a Tensor, the second input should be a Number,
+ or a Tensor of the number or bool_ data type. When the first input is a Scalar,
+ the second input must be a Tensor of number or bool_ data type.
+
+ Returns:
+ Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
+
+ Raises:
+ TypeError: If neither `input` nor `other` is a Tensor.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
+ >>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
+ >>> output = mint.greater_equal(input, other)
+ >>> print(output)
+ [True True False]
+ >>> y = 2.1
+ >>> output = mint.greater_equal(input, y)
+ >>> print(output)
+ [False False True]
+ """
+ return _greater_equal_instance(*args, **kwargs)
+
+
+ def ge(*args, **kwargs):
+ r"""
+ ge(input, other) -> Tensor
+
+ Alias for :func:`mindspore.mint.greater_equal`.
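+
+ Examples:
+ >>> # A minimal sketch mirroring the :func:`mindspore.mint.greater_equal`
+ >>> # example above, since `ge` is an alias for it:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
+ >>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
+ >>> output = mint.ge(input, other)
+ >>> print(output)
+ [True True False]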
+ """
+ return _greater_equal_instance(*args, **kwargs)
+
+
+ def kthvalue(*args, **kwargs):
+ r"""
+ Calculates the k-th smallest value along the dimension specified by `dim` of the input
+ tensor, and returns a tuple of (`values`, `indices`), where `values` contains the k-th smallest elements
+ and `indices` provides the index of each corresponding element.
+
+ Args:
+ input (Tensor): The input tensor, can be any dimension. Set the shape of input tensor as
+ :math:`(input_1, input_2, ..., input_N)`.
+ k (int): Specifies the k-th smallest element to retrieve.
+ dim (int, optional): The dimension along which to find the k-th smallest value. Default: ``-1`` .
+ keepdim (bool, optional): Whether to reduce dimension: if ``True`` , the output will keep the same dimension as the
+ input; the output will reduce dimension if ``False`` . Default: ``False`` .
+
+ Returns:
+ A tuple consisting of `values` and `indices`.
+
+ - **values** (Tensor) - The k-th smallest value of input tensor, with the same dtype as `input`.
+
+ - If `keepdim` is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
+ - If `keepdim` is ``False`` , the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
+
+ - **indices** (Tensor) - The `indices` for the k-th smallest value of the input tensor, it has the same shape as `values` with dtype of int64.
+
+ Raises:
+ TypeError: If `k` or `dim` is not an int.
+ TypeError: If `keepdim` is not a bool.
+ TypeError: If dtype of `input` is not supported.
+ ValueError: If `input` is an empty Tensor.
+ RuntimeError: If `k` is not in the proper range.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input_x = Tensor(np.array([[1.01, 2.02, 3.03], [1.04, 2.05, 3.06]]), mindspore.float32)
+ >>> out = ops.auto_generate.kthvalue(input_x, 2, 1, False)
+ >>> print(out)
+ (Tensor(shape=[2], dtype=Float32, value= [ 2.01999998e+00, 2.04999995e+00]), Tensor(shape=[2], dtype=Int64, value= [1, 1]))
+ >>> out1 = ops.auto_generate.kthvalue(input_x, 2, 1, True)
+ >>> print(out1)
+ (Tensor(shape=[2, 1], dtype=Float32, value=
+ [[ 2.01999998e+00],
+ [ 2.04999995e+00]]), Tensor(shape=[2, 1], dtype=Int64, value=
+ [[1],
+ [1]]))
+ """
+ return _kthvalue_instance(*args, **kwargs)
+
+
+ def lerp(*args, **kwargs):
+ r"""
+ lerp(input, end, weight) -> Tensor
+
+ Performs a linear interpolation of two tensors `input` and `end` based on a float or tensor `weight`.
+
+ If `weight` is a tensor, the shapes of the three inputs must be broadcastable;
+ if `weight` is a float, the shapes of `input` and `end` must be broadcastable.
+ On the Ascend platform, if `weight` is a float, the types of `input` and `end` need to be float32.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ .. math::
+ output_{i} = input_{i} + weight_{i} * (end_{i} - input_{i})
+
+ Args:
+ input (Tensor): The tensor with the starting points. Data type must be float16 or float32.
+ end (Tensor): The tensor with the ending points. Data type must be the same as `input`.
+ weight (Union[float, Tensor]): The weight for the interpolation formula. Must be a float scalar
+ or a tensor with float16 or float32 data type.
+
+ Returns:
+ Tensor, has the same type and shape as input `input`.
+
+ Raises:
+ TypeError: If `input` or `end` is not a tensor.
+ TypeError: If `weight` is neither scalar(float) nor tensor.
+ TypeError: If dtype of `input` or `end` is neither float16 nor float32.
+ TypeError: If dtype of `weight` is neither float16 nor float32 when it is a tensor.
+ TypeError: If `input` and `end` have different data types.
+ TypeError: If `input`, `end` and `weight` have different data types when `weight` is a tensor.
+ ValueError: If `end` could not be broadcast to a tensor with shape of `input`.
+ ValueError: If `weight` could not be broadcast to tensors with shapes of `input` and `end` when it is a tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
+ >>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
+ >>> output = mint.lerp(start, end, 0.5)
+ >>> print(output)
+ [5.5 6. 6.5 7. ]
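+ >>> # A tensor `weight` is also accepted; a minimal sketch, with values
+ >>> # following the interpolation formula above:
+ >>> weight = Tensor(np.array([0., 0.5, 1., 0.5]), mindspore.float32)
+ >>> output = mint.lerp(start, end, weight)
+ >>> print(output)
+ [ 1.  6. 10.  7.]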
+ """
+ return _lerp_instance(*args, **kwargs)
+
+
+ def matmul_reduce_scatter(*args, **kwargs):
+ r"""
+ matmul_reduce_scatter(input, x2, group, world_size, *, reduce_op='sum', bias=None, comm_turn=0, trans_input=False, trans_x2=False) -> Tensor
+
+ In the TP segmentation scenario, matmul and reducescatter are fused, and communication and computational
+ pipelines are parallelized within the fusion operator.
+
+ .. math::
+ output = reducescatter(input@x2)
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The left matrix of matmul, the dtype supports float16 and bfloat16, the shape supports 2
+ dimensions, and the data format supports ND.
+ x2 (Tensor): The right matrix of matmul, the dtype needs to be consistent with ``input`` , the shape
+ supports 2 dimensions, and the data format supports ND.
+ group (str): Communication group name, can be created by ``create_group`` method, or use the default group
+ ``mindspore.communication.GlobalComm.WORLD_COMM_GROUP``.
+ world_size (int): The total number of ranks in the communication group, should be consistent with the number
+ of devices actually running, supporting ``2`` , ``4`` , and ``8`` .
+
+ Keyword Args:
+ reduce_op (str, optional): The reduce operation type. Currently only ``'sum'`` is supported. Default:
+ ``'sum'`` .
+ bias (Tensor, optional): Currently only ``None`` is supported. Default: ``None`` .
+ comm_turn (int, optional): Indicates the granularity of communication between ranks. Currently only ``0``
+ is supported. Default: ``0`` .
+ trans_input (bool, optional): Indicates whether ``input`` is transposed. Currently only ``False`` is
+ supported. Default: ``False`` .
+ trans_x2 (bool, optional): Indicates whether ``x2`` is transposed. Default: ``False`` .
+
+ Returns:
+ - output (Tensor) - The result of the matmul and reducescatter fusion calculation.
+
+ Note:
+ - When using this interface, please ensure that the driver firmware package and CANN package are both the
+ matching 8.0.RC2 version or a higher version, otherwise an error will be reported, such as BUS ERROR.
+ - The shape of ``input`` is (m, k), the shape of ``x2`` is (k, n), k is required to be equal, and the value
+ range of k is [256, 65535), and m is required to be an integer multiple of ``world_size`` . The shape of
+ ``output`` is (m / world_size, n).
+ - The common fusion operators in a model only support the same communication group.
+
+ Raises:
+ TypeError: Any arg is of wrong type.
+ RuntimeError: The dtype of ``input`` or ``x2`` is neither float16 nor bfloat16.
+ RuntimeError: The dtypes of ``input`` and ``x2`` are different.
+ RuntimeError: The shape of ``input`` or ``x2`` is not two-dimensional.
+ RuntimeError: The k axis of ``input`` shape and ``x2`` shape are not equal.
+ RuntimeError: k is less than ``256`` or greater than or equal to ``65535`` .
+ RuntimeError: ``bias`` is not None.
+ RuntimeError: ``group`` does not exist.
+ RuntimeError: ``world_size`` is inconsistent with the actual number of running cards.
+ RuntimeError: ``world_size`` is not equal to ``2`` , ``4`` , or ``8`` .
+ RuntimeError: ``reduce_op`` is not ``'sum'`` .
+ RuntimeError: ``trans_input`` is ``True`` .
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ .. note::
+ Before running the following examples, you need to configure the communication environment variables.
+
+ For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
+ configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
+ for more details.
+
+ This example should be run with 2 devices.
+
+ >>> import mindspore as ms
+ >>> from mindspore import ops
+ >>> import numpy as np
+ >>> ms.communication.init()
+ >>> rank = ms.communication.get_rank()
+ >>> np.random.seed(rank)
+ >>> input = ms.Tensor(np.random.randn(1024, 256).astype(np.float32), dtype=ms.float16)
+ >>> x2 = ms.Tensor(np.random.randn(256, 512).astype(np.float32), dtype=ms.float16)
+ >>> group = ms.communication.GlobalComm.WORLD_COMM_GROUP
+ >>> world_size = ms.communication.get_group_size()
+ >>> reduce_op = ops.ReduceOp.SUM
+ >>> output = ops.matmul_reduce_scatter(
+ ... input,
+ ... x2,
+ ... group,
+ ... world_size,
+ ... reduce_op=reduce_op,
+ ... bias=None,
+ ... comm_turn=0,
+ ... trans_input=False,
+ ... trans_x2=False,
+ ... )
+ >>> print(output.shape)
+ (512, 512)
+ """
+ return _matmul_reduce_scatter_instance(*args, **kwargs)
+
+
+ def max(*args, **kwargs):
+ r"""
+ max(input) -> Tensor
+
+ Returns the maximum value of the input tensor.
+
+ Args:
+ input (Tensor): The input tensor.
+
+ Returns:
+ Scalar Tensor with the same dtype as `input`, the maximum value of the input.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+ >>> output = mint.max(x)
+ >>> print(output)
+ 0.7
+
+ .. function:: max(input, dim, keepdim=False) -> tuple(Tensor)
+ :noindex:
+
+ Calculates the maximum value along the given dim for the input tensor, and returns the maximum values and
+ indices.
+
+ Args:
+ input (Tensor): The input tensor, can be any dimension. Set the shape of input tensor as
+ :math:`(input_1, input_2, ..., input_N)` . Complex tensors are not supported.
+ dim (int): The dimension to reduce.
+ keepdim (bool, optional): Whether to reduce dimension: if ``True`` , the output will keep the same dimension as the
+ `input` ; the output will reduce dimension if ``False`` . Default: ``False``.
+
+ Returns:
+ tuple (Tensor), tuple of 2 tensors, containing the maximum value of the input tensor along the given
+ dimension `dim` and the corresponding index.
+
+ - **values** (Tensor) - The maximum value of input tensor, with the same shape as `index`, and same dtype as `input`.
+ - **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdim`
+ is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
+ Otherwise, the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
+
+ Raises:
+ TypeError: If `input` is not Tensor.
+ TypeError: If `keepdim` is not a bool.
+ TypeError: If `dim` is not an int.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+ >>> output, index = mint.max(x, 0, keepdim=True)
+ >>> print(output, index)
+ [0.7] [3]
+
+ .. function:: max(input, other) -> Tensor
+ :noindex:
+
+ For details, please refer to :func:`mindspore.mint.maximum`.
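+
+ Examples:
+ >>> # A minimal sketch of the elementwise form; semantics follow
+ >>> # :func:`mindspore.mint.maximum`:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> a = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
+ >>> b = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
+ >>> output = mint.max(a, b)
+ >>> print(output)
+ [4. 5. 6.]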
+ """
+ return _max_instance(*args, **kwargs)
+
+
+ def min(*args, **kwargs):
+ r"""
+ min(input) -> Tensor
+
+ Returns the minimum value of the input tensor.
+
+ Args:
+ input (Tensor): The input tensor.
+
+ Returns:
+ Scalar Tensor with the same dtype as `input`, the minimum value of the input.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+ >>> output = mint.min(x)
+ >>> print(output)
+ 0.0
+
+ .. function:: min(input, dim, keepdim=False) -> tuple(Tensor)
+ :noindex:
+
+ Calculates the minimum value along the given dim for the input tensor, and returns the minimum values and
+ indices.
+
+ Args:
+ input (Tensor): The input tensor, can be any dimension. Set the shape of input tensor as
+ :math:`(input_1, input_2, ..., input_N)` . Complex tensors are not supported.
+ dim (int): The dimension to reduce.
+ keepdim (bool, optional): Whether to reduce dimension: if ``True`` , the output will keep the same dimension as the
+ input; the output will reduce dimension if ``False`` . Default: ``False``.
+
+ Returns:
+ tuple (Tensor), tuple of 2 tensors, containing the minimum value of the input tensor along the given
+ dimension `dim` and the corresponding index.
+
+ - **values** (Tensor) - The minimum value of input tensor, with the same shape as `index`, and same dtype as `input`.
+ - **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int64. If `keepdim`
+ is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
+ Otherwise, the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
+
+ Raises:
+ TypeError: If `input` is not Tensor.
+ TypeError: If `keepdim` is not a bool.
+ TypeError: If `dim` is not an int.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+ >>> output, index = mint.min(x, 0, keepdim=True)
+ >>> print(output, index)
+ [0.0] [0]
+
+ .. function:: min(input, other) -> Tensor
+ :noindex:
+
+ For details, please refer to :func:`mindspore.mint.minimum`.
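+
+ Examples:
+ >>> # A minimal sketch of the elementwise form; semantics follow
+ >>> # :func:`mindspore.mint.minimum`:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> a = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
+ >>> b = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
+ >>> output = mint.min(a, b)
+ >>> print(output)
+ [1. 2. 3.]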
+ """
+ return _min_instance(*args, **kwargs)
+
+
+ def nansum(*args, **kwargs):
+ r"""
+ nansum(input, dim=None, keepdim=False, *, dtype=None) -> Tensor
+
+ Computes the sum of `input` over a given dimension, treating NaNs as zero.
+
+ .. warning::
+ It is only supported on Atlas A2 Training Series Products.
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The input Tensor.
+ dim (Union[int, tuple(int)], optional): The dimensions to sum.
+ `dim` must be in the range [-rank(input), rank(input)). Default: ``None``, which indicates the sum of all
+ elements in a tensor.
+ keepdim (bool, optional): Whether the output Tensor keeps dimensions or not. Default: ``False``, indicating that no dimension is kept.
+
+ Keyword Args:
+ dtype (:class:`mindspore.dtype`, optional): The dtype of output Tensor. Default: ``None``.
+
+ Returns:
+ Tensor, the sum of input `input` in the given dimension dim, treating NaNs as zero.
+
+ - If dim is None and keepdim is False,
+ the output is a 0-D Tensor representing the sum of all elements in the input Tensor.
+ - If dim is an int, e.g. 2, and keepdim is False,
+ the shape of output is :math:`(input_1, input_3, ..., input_R)`.
+ - If dim is a tuple(int) or list(int), e.g. (2, 3), and keepdim is False,
+ the shape of output is :math:`(input_1, input_4, ..., input_R)`.
+
+ Raises:
+ TypeError: If `input` is not Tensor.
+ TypeError: If `keepdim` is not a bool.
+ TypeError: If the dtype of `input` or `dtype` is complex type.
+ ValueError: If `dim` is not in [-rank(input), rank(input)).
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> x = Tensor(np.array([[float("nan"), 2, 3], [1, 2, float("nan")]]), mindspore.float32)
+ >>> output1 = mint.nansum(x, dim=0, keepdim=False, dtype=mindspore.float32)
+ >>> output2 = mint.nansum(x, dim=0, keepdim=True, dtype=mindspore.float32)
+ >>> print(output1)
+ [1. 4. 3.]
+ >>> print(output2)
+ [[1. 4. 3.]]
+ """
+ return _nansum_instance(*args, **kwargs)
+
+
+ def pixel_shuffle(*args, **kwargs):
+ r"""
+ pixel_shuffle(input, upscale_factor) -> Tensor
+
+ Rearranges elements in a tensor according to an upscaling factor.
+
+ Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
+ to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
+
+ This is useful for implementing efficient sub-pixel convolution
+ with a stride of :math:`1/r`.
+
+ For a detailed introduction to the pixel_shuffle algorithm, refer to
+ `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ .
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): Tensor of shape :math:`(*, C \times r^2, H, W)` . The dimension of `input` must be larger than 2,
+ and the length of the third-to-last dimension must be divisible by the square of `upscale_factor`.
+ upscale_factor (int): factor to shuffle the input Tensor, must be a positive integer.
+ `upscale_factor` is the above-mentioned :math:`r`.
+
+ Returns:
+ - **output** (Tensor) - Tensor of shape :math:`(*, C, H \times r, W \times r)` .
+
+ Raises:
+ ValueError: If `upscale_factor` is not a positive integer.
+ ValueError: If the length of the third-to-last dimension is not divisible by the square of `upscale_factor`.
+ ValueError: If the dimension of `input` is less than 3.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import mint
+ >>> input = mint.randn(1, 9, 4, 4)
+ >>> output = mint.nn.functional.pixel_shuffle(input, 3)
+ >>> print(output.shape)
+ (1, 1, 12, 12)
+ """
+ return _pixel_shuffle_instance(*args, **kwargs)
+
+
+ def remainder(*args, **kwargs):
+ r"""
+ remainder(input, other) -> Tensor
+
+ Computes the remainder of `input` divided by `other` element-wise. The result has the same sign as the divisor and
+ its absolute value is less than that of `other`.
+
+ Supports broadcasting to a common shape and implicit type promotion.
+
+ .. code:: python
+
+ remainder(input, other) == input - input.div(other, rounding_mode="floor") * other
+
+ Note:
+ Complex inputs are not supported. At least one input must be a tensor, and the two inputs cannot both be bool tensors.
+
+ Args:
+ input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
+ a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
+ a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
+ When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool\_.
+
+ Returns:
+ Tensor, with dtype promoted and shape broadcasted.
+
+ Raises:
+ TypeError: If `input` and `other` are not of types: (tensor, tensor), (tensor, number), (tensor, bool),
+ (number, tensor) or (bool, tensor).
+ ValueError: If `input` and `other` are not broadcastable.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> x = Tensor(np.array([-4.0, 5.0, 6.0]).astype(np.float32))
+ >>> y = Tensor(np.array([3.0, 2.0, 3.0]).astype(np.float64))
+ >>> output = mint.remainder(x, y)
+ >>> print(output)
+ [2. 1. 0.]
+ """
+ return _remainder_instance(*args, **kwargs)
+
+
+ def repeat_interleave(*args, **kwargs):
+ r"""
+ repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
+
+ Repeats elements of a tensor along an axis, like :func:`mindspore.numpy.repeat`.
+
+ .. warning::
+ Only supported on Atlas A2 training series products.
+
+ Args:
+ input (Tensor): The tensor to repeat values for. Must be of types: float16,
+ float32, int8, uint8, int16, int32, or int64.
+ repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
+ dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
+ the input Tensor will be flattened and the output will also be flattened.
+
+ Keyword Args:
+ output_size (int, optional): Total output size for the given axis (i.e. the sum of `repeats`),
+ Default: ``None``.
+
+ Returns:
+ One tensor with values repeated along the specified dim. If input has shape
+ :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
+ si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
+ >>> output = mint.repeat_interleave(input, repeats=2, dim=0)
+ >>> print(output)
+ [[0 1 2]
+ [0 1 2]
+ [3 4 5]
+ [3 4 5]]
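+ >>> # With dim=None the input is flattened first, per the `dim` description
+ >>> # above; a minimal sketch of that path:
+ >>> output = mint.repeat_interleave(input, repeats=2)
+ >>> print(output)
+ [0 0 1 1 2 2 3 3 4 4 5 5]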
+ """
+ return _repeat_interleave_instance(*args, **kwargs)
+
+
+ def sub(*args, **kwargs):
+ r"""
+ sub(input, other, *, alpha=1) -> Tensor
+
+ Subtracts the scaled `other` from `input`.
+
+ .. math::
+
+ out_{i} = input_{i} - alpha \times other_{i}
+
+ Note:
+ - When the two inputs have different shapes,
+ they must be able to broadcast to a common shape.
+ - The two inputs and alpha comply with the implicit type conversion rules to make the data types
+ consistent.
+
+ Args:
+ input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+ Keyword Args:
+ alpha (number.Number, optional): A scaling factor applied to `other`. Default: ``1``.
+
+ Returns:
+ Tensor with a shape that is the same as the broadcasted shape of `input` and `other`,
+ and the data type is the one with higher precision or higher digits among the two inputs and alpha.
+
+ Raises:
+ TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+ TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
+ TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor, mint
+ >>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
+ >>> y = Tensor(1, mindspore.int32)
+ >>> alpha = 0.5
+ >>> output = mint.sub(x, y, alpha=alpha)
+ >>> print(output)
+ [3.5 4.5 5.5]
+ >>> # the data type of x is float32, the data type of y is int32,
+ >>> # alpha is a float, and the output is the data format of higher precision float32.
+ >>> print(output.dtype)
+ Float32
+ """
+ return _sub_instance(*args, **kwargs)
+
+
+ def __sub__(*args, **kwargs):
+ r"""
+ __sub__(input, other, *, alpha=1) -> Tensor
+
+ Alias for :func:`mindspore.mint.sub`.
+
+ .. method:: mint.__sub__(input, other, *, alpha=1) -> Tensor
+ :noindex:
+
+ Alias for overload function of :func:`mindspore.mint.sub`.
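+
+ Examples:
+ >>> # A minimal sketch mirroring the :func:`mindspore.mint.sub` example above,
+ >>> # since `__sub__` is an alias for it:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor, mint
+ >>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
+ >>> y = Tensor(1, mindspore.int32)
+ >>> output = mint.__sub__(x, y, alpha=0.5)
+ >>> print(output)
+ [3.5 4.5 5.5]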
+ """
+ return _sub_instance(*args, **kwargs)
+
+
+ def where(*args, **kwargs):
+ r"""
+ where(condition, input, other) -> Tensor
+
+ Selects elements from `input` or `other` based on `condition` and returns a tensor.
+
+ .. math::
+ output_i = \begin{cases} input_i,\quad &if\ condition_i \\ other_i,\quad &otherwise \end{cases}
+
+ Args:
+ condition (Tensor[bool]): If true, yield `input`, otherwise yield `other`.
+ input (Union[Tensor, Scalar]): When `condition` is true, values to select from.
+ other (Union[Tensor, Scalar]): When `condition` is false, values to select from.
+
+ Returns:
+ Tensor, elements are selected from `input` and `other`.
+
+ Raises:
+ TypeError: If `condition` is not a tensor.
+ TypeError: If both `input` and `other` are scalars.
+ ValueError: If `condition`, `input` and `other` can not broadcast to each other.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import tensor, ops
+ >>> from mindspore import dtype as mstype
+ >>> a = tensor(np.arange(4).reshape((2, 2)), mstype.float32)
+ >>> b = tensor(np.ones((2, 2)), mstype.float32)
+ >>> condition = a < 3
+ >>> output = ops.where(condition, a, b)
+ >>> print(output)
+ [[0. 1.]
+ [2. 1.]]
+
+ .. function:: where(condition) -> Tensor
+ :noindex:
+
+ Identical to :func:`mindspore.ops.nonzero` with input `condition` and `as_tuple` set to ``True``.
+
+ Supported Platforms:
+ ``Ascend``
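+
+ Examples:
+ >>> # A minimal sketch of the single-tensor form; per the note above it
+ >>> # behaves like :func:`mindspore.ops.nonzero` with `as_tuple` set to ``True``:
+ >>> import numpy as np
+ >>> from mindspore import tensor, ops
+ >>> from mindspore import dtype as mstype
+ >>> x = tensor(np.array([0.0, 1.0, 0.0, 2.0]), mstype.float32)
+ >>> output = ops.where(x != 0)
+ >>> print(output[0])
+ [1 3]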
+ """
+ return _where_instance(*args, **kwargs)
+
+
+ def xlogy(*args, **kwargs):
+ r"""
+ xlogy(input, other) -> Tensor
+
+ Computes the first input multiplied by the logarithm of the second input element-wise.
+ Returns zero when `input` is zero.
+
+ .. math::
+
+ out_i = input_{i}\log{other_{i}}
+
+ Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+ The inputs must be two tensors or one tensor and one scalar.
+ When the inputs are two tensors, their shapes can be broadcast.
+
+ Args:
+ input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
+ a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
+ a bool or a tensor whose data type is number or bool when the first input is a tensor.
+ When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
+
+ Returns:
+ Tensor, the shape is the same as the one after broadcasting,
+ and the data type is the one with higher precision or higher digits among the two inputs.
+
+ Raises:
+ TypeError: If `input` and `other` are not a numbers.Number or a bool or a Tensor.
+ ValueError: If `input` could not be broadcast to a tensor with shape of `other`.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
+ >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
+ >>> output = ops.xlogy(input, other)
+ >>> print(output)
+ [-3.465736 0. 2.7725887]
+ """
+ return _xlogy_instance(*args, **kwargs)
+
+ __all__ = [
+ "add",
+ "__add__",
+ "addcdiv",
+ "all_gather_matmul",
+ "bitwise_not",
+ "clamp",
+ "clip",
+ "div",
+ "divide",
+ "empty",
+ "floor_divide",
+ "fmod",
+ "gelu",
+ "greater_equal",
+ "ge",
+ "kthvalue",
+ "lerp",
+ "matmul_reduce_scatter",
+ "max",
+ "min",
+ "nansum",
+ "pixel_shuffle",
+ "remainder",
+ "repeat_interleave",
+ "sub",
+ "__sub__",
+ "where",
+ "xlogy",
+ ]