mindspore-2.4.10-cp310-cp310-win_amd64.whl → mindspore-2.6.0rc1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (602)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +13 -6
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -38
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +6 -7
  15. mindspore/_extends/parse/compile_config.py +83 -0
  16. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  17. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  18. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  19. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  20. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  21. mindspore/_extends/parse/parser.py +46 -197
  22. mindspore/_extends/parse/resources.py +1 -5
  23. mindspore/_extends/parse/standard_method.py +217 -98
  24. mindspore/_extends/pijit/__init__.py +2 -2
  25. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  26. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  27. mindspore/_extends/utils.py +1 -1
  28. mindspore/amp.py +11 -5
  29. mindspore/atlprov.dll +0 -0
  30. mindspore/avcodec-59.dll +0 -0
  31. mindspore/avdevice-59.dll +0 -0
  32. mindspore/avfilter-8.dll +0 -0
  33. mindspore/avformat-59.dll +0 -0
  34. mindspore/avutil-57.dll +0 -0
  35. mindspore/boost/__init__.py +2 -2
  36. mindspore/boost/base.py +3 -7
  37. mindspore/boost/boost_cell_wrapper.py +138 -43
  38. mindspore/c1.dll +0 -0
  39. mindspore/c1xx.dll +0 -0
  40. mindspore/c2.dll +0 -0
  41. mindspore/common/__init__.py +6 -3
  42. mindspore/common/_grad_function.py +56 -0
  43. mindspore/common/_pijit_context.py +14 -5
  44. mindspore/common/_register_for_tensor.py +1 -2
  45. mindspore/common/_stub_tensor.py +30 -14
  46. mindspore/common/_tensor_cpp_method.py +17 -0
  47. mindspore/common/_tensor_docs.py +4760 -0
  48. mindspore/common/api.py +435 -371
  49. mindspore/common/auto_dynamic_shape.py +41 -44
  50. mindspore/common/dtype.py +39 -36
  51. mindspore/common/dump.py +9 -6
  52. mindspore/common/file_system.py +9 -1
  53. mindspore/common/generator.py +2 -0
  54. mindspore/common/hook_handle.py +6 -2
  55. mindspore/common/initializer.py +13 -10
  56. mindspore/common/jit_begin_end.py +94 -0
  57. mindspore/common/jit_config.py +6 -1
  58. mindspore/common/jit_context.py +76 -0
  59. mindspore/common/jit_trace.py +378 -0
  60. mindspore/common/lazy_inline.py +9 -3
  61. mindspore/common/mindir_util.py +10 -2
  62. mindspore/common/mutable.py +5 -4
  63. mindspore/common/parameter.py +135 -52
  64. mindspore/common/seed.py +2 -2
  65. mindspore/common/sparse_tensor.py +23 -17
  66. mindspore/common/tensor.py +951 -1992
  67. mindspore/communication/__init__.py +7 -5
  68. mindspore/communication/_comm_helper.py +52 -2
  69. mindspore/communication/comm_func.py +240 -181
  70. mindspore/communication/management.py +95 -26
  71. mindspore/context.py +314 -566
  72. mindspore/dataset/__init__.py +65 -37
  73. mindspore/dataset/audio/__init__.py +2 -8
  74. mindspore/dataset/audio/transforms.py +3 -17
  75. mindspore/dataset/callback/ds_callback.py +2 -1
  76. mindspore/dataset/core/config.py +87 -6
  77. mindspore/dataset/engine/cache_admin.py +3 -3
  78. mindspore/dataset/engine/cache_client.py +6 -5
  79. mindspore/dataset/engine/datasets.py +292 -267
  80. mindspore/dataset/engine/datasets_audio.py +22 -8
  81. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  82. mindspore/dataset/engine/datasets_text.py +78 -48
  83. mindspore/dataset/engine/datasets_user_defined.py +182 -116
  84. mindspore/dataset/engine/datasets_vision.py +120 -44
  85. mindspore/dataset/engine/iterators.py +283 -63
  86. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  87. mindspore/dataset/engine/obs/util.py +8 -0
  88. mindspore/dataset/engine/queue.py +40 -0
  89. mindspore/dataset/engine/samplers.py +289 -43
  90. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  91. mindspore/dataset/engine/validators.py +53 -11
  92. mindspore/dataset/text/__init__.py +7 -6
  93. mindspore/dataset/text/transforms.py +6 -5
  94. mindspore/dataset/text/utils.py +3 -3
  95. mindspore/dataset/transforms/__init__.py +0 -9
  96. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  97. mindspore/dataset/transforms/transforms.py +31 -14
  98. mindspore/dataset/utils/browse_dataset.py +1 -1
  99. mindspore/dataset/vision/__init__.py +2 -9
  100. mindspore/dataset/vision/transforms.py +202 -158
  101. mindspore/dataset/vision/utils.py +7 -5
  102. mindspore/dataset/vision/validators.py +1 -2
  103. mindspore/device_context/__init__.py +21 -0
  104. mindspore/device_context/ascend/__init__.py +25 -0
  105. mindspore/device_context/ascend/device.py +72 -0
  106. mindspore/device_context/ascend/op_debug.py +153 -0
  107. mindspore/device_context/ascend/op_precision.py +193 -0
  108. mindspore/device_context/ascend/op_tuning.py +123 -0
  109. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  110. mindspore/device_context/cpu/device.py +62 -0
  111. mindspore/device_context/cpu/op_tuning.py +43 -0
  112. mindspore/device_context/gpu/__init__.py +21 -0
  113. mindspore/device_context/gpu/device.py +70 -0
  114. mindspore/device_context/gpu/op_precision.py +67 -0
  115. mindspore/device_context/gpu/op_tuning.py +175 -0
  116. mindspore/device_manager.py +170 -0
  117. mindspore/dnnl.dll +0 -0
  118. mindspore/dpcmi.dll +0 -0
  119. mindspore/experimental/es/embedding_service.py +35 -27
  120. mindspore/experimental/llm_boost/__init__.py +1 -0
  121. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  122. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  123. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  124. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  125. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  126. mindspore/experimental/llm_boost/register.py +1 -0
  127. mindspore/experimental/map_parameter.py +4 -4
  128. mindspore/experimental/optim/adadelta.py +6 -6
  129. mindspore/experimental/optim/adagrad.py +4 -4
  130. mindspore/experimental/optim/adam.py +7 -0
  131. mindspore/experimental/optim/adamax.py +4 -4
  132. mindspore/experimental/optim/adamw.py +4 -0
  133. mindspore/experimental/optim/asgd.py +1 -1
  134. mindspore/experimental/optim/lr_scheduler.py +73 -46
  135. mindspore/experimental/optim/radam.py +34 -31
  136. mindspore/experimental/optim/rprop.py +1 -1
  137. mindspore/experimental/optim/sgd.py +1 -1
  138. mindspore/hal/contiguous_tensors_handle.py +6 -10
  139. mindspore/hal/device.py +55 -53
  140. mindspore/hal/event.py +52 -52
  141. mindspore/hal/memory.py +157 -117
  142. mindspore/hal/stream.py +150 -109
  143. mindspore/include/api/context.h +0 -1
  144. mindspore/include/dataset/constants.h +7 -4
  145. mindspore/include/dataset/execute.h +2 -2
  146. mindspore/jpeg62.dll +0 -0
  147. mindspore/log.py +50 -0
  148. mindspore/mindrecord/__init__.py +21 -8
  149. mindspore/mindrecord/config.py +17 -316
  150. mindspore/mindrecord/filereader.py +1 -9
  151. mindspore/mindrecord/filewriter.py +5 -15
  152. mindspore/mindrecord/mindpage.py +1 -9
  153. mindspore/mindspore_backend_common.dll +0 -0
  154. mindspore/mindspore_backend_manager.dll +0 -0
  155. mindspore/mindspore_common.dll +0 -0
  156. mindspore/mindspore_core.dll +0 -0
  157. mindspore/mindspore_dump.dll +0 -0
  158. mindspore/mindspore_frontend.dll +0 -0
  159. mindspore/mindspore_glog.dll +0 -0
  160. mindspore/mindspore_memory_pool.dll +0 -0
  161. mindspore/mindspore_ms_backend.dll +0 -0
  162. mindspore/mindspore_ops.dll +0 -0
  163. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  164. mindspore/mindspore_ops_kernel_common.dll +0 -0
  165. mindspore/mindspore_profiler.dll +0 -0
  166. mindspore/mindspore_pyboost.dll +0 -0
  167. mindspore/mindspore_pynative.dll +0 -0
  168. mindspore/mindspore_res_manager.dll +0 -0
  169. mindspore/mindspore_runtime_pipeline.dll +0 -0
  170. mindspore/mint/__init__.py +796 -759
  171. mindspore/mint/distributed/__init__.py +70 -4
  172. mindspore/mint/distributed/distributed.py +2679 -44
  173. mindspore/mint/linalg/__init__.py +8 -0
  174. mindspore/mint/nn/__init__.py +743 -22
  175. mindspore/mint/nn/functional.py +716 -23
  176. mindspore/mint/nn/layer/__init__.py +21 -4
  177. mindspore/mint/nn/layer/_functions.py +334 -0
  178. mindspore/mint/nn/layer/activation.py +276 -1
  179. mindspore/mint/nn/layer/basic.py +123 -0
  180. mindspore/mint/nn/layer/conv.py +921 -0
  181. mindspore/mint/nn/layer/normalization.py +223 -28
  182. mindspore/mint/nn/layer/padding.py +797 -0
  183. mindspore/mint/nn/layer/pooling.py +235 -0
  184. mindspore/mint/optim/__init__.py +3 -1
  185. mindspore/mint/optim/adam.py +223 -0
  186. mindspore/mint/optim/adamw.py +26 -19
  187. mindspore/mint/optim/sgd.py +171 -0
  188. mindspore/mint/special/__init__.py +2 -1
  189. mindspore/msobj140.dll +0 -0
  190. mindspore/mspdb140.dll +0 -0
  191. mindspore/mspdbcore.dll +0 -0
  192. mindspore/mspdbst.dll +0 -0
  193. mindspore/mspft140.dll +0 -0
  194. mindspore/msvcdis140.dll +0 -0
  195. mindspore/msvcp140_1.dll +0 -0
  196. mindspore/msvcp140_2.dll +0 -0
  197. mindspore/msvcp140_atomic_wait.dll +0 -0
  198. mindspore/msvcp140_codecvt_ids.dll +0 -0
  199. mindspore/multiprocessing/__init__.py +5 -0
  200. mindspore/nn/__init__.py +4 -1
  201. mindspore/nn/cell.py +1370 -189
  202. mindspore/nn/dynamic_lr.py +2 -1
  203. mindspore/nn/layer/activation.py +29 -27
  204. mindspore/nn/layer/basic.py +51 -35
  205. mindspore/nn/layer/channel_shuffle.py +3 -3
  206. mindspore/nn/layer/container.py +1 -1
  207. mindspore/nn/layer/conv.py +22 -17
  208. mindspore/nn/layer/embedding.py +12 -11
  209. mindspore/nn/layer/normalization.py +56 -49
  210. mindspore/nn/layer/padding.py +4 -3
  211. mindspore/nn/layer/pooling.py +120 -42
  212. mindspore/nn/layer/rnn_cells.py +1 -1
  213. mindspore/nn/layer/rnns.py +2 -1
  214. mindspore/nn/layer/timedistributed.py +5 -5
  215. mindspore/nn/layer/transformer.py +59 -36
  216. mindspore/nn/learning_rate_schedule.py +8 -4
  217. mindspore/nn/loss/loss.py +58 -55
  218. mindspore/nn/optim/ada_grad.py +7 -5
  219. mindspore/nn/optim/adadelta.py +11 -9
  220. mindspore/nn/optim/adafactor.py +1 -1
  221. mindspore/nn/optim/adam.py +17 -13
  222. mindspore/nn/optim/adamax.py +8 -7
  223. mindspore/nn/optim/adasum.py +5 -5
  224. mindspore/nn/optim/asgd.py +1 -1
  225. mindspore/nn/optim/ftrl.py +11 -9
  226. mindspore/nn/optim/lamb.py +1 -1
  227. mindspore/nn/optim/lars.py +1 -4
  228. mindspore/nn/optim/lazyadam.py +12 -10
  229. mindspore/nn/optim/momentum.py +7 -6
  230. mindspore/nn/optim/optimizer.py +3 -3
  231. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  232. mindspore/nn/optim/rmsprop.py +13 -12
  233. mindspore/nn/optim/rprop.py +11 -9
  234. mindspore/nn/optim/sgd.py +9 -6
  235. mindspore/nn/optim/tft_wrapper.py +5 -2
  236. mindspore/nn/optim/thor.py +2 -1
  237. mindspore/nn/probability/bijector/bijector.py +17 -11
  238. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  239. mindspore/nn/probability/bijector/invert.py +2 -2
  240. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  241. mindspore/nn/probability/bijector/softplus.py +3 -2
  242. mindspore/nn/probability/distribution/beta.py +3 -3
  243. mindspore/nn/probability/distribution/categorical.py +1 -1
  244. mindspore/nn/probability/distribution/cauchy.py +4 -2
  245. mindspore/nn/probability/distribution/exponential.py +6 -7
  246. mindspore/nn/probability/distribution/gamma.py +2 -2
  247. mindspore/nn/probability/distribution/gumbel.py +2 -2
  248. mindspore/nn/probability/distribution/half_normal.py +5 -3
  249. mindspore/nn/probability/distribution/logistic.py +5 -3
  250. mindspore/nn/probability/distribution/poisson.py +1 -1
  251. mindspore/nn/probability/distribution/uniform.py +5 -3
  252. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  253. mindspore/nn/reinforcement/tensor_array.py +1 -1
  254. mindspore/nn/utils/init.py +13 -11
  255. mindspore/nn/wrap/__init__.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +181 -122
  257. mindspore/nn/wrap/grad_reducer.py +45 -36
  258. mindspore/nn/wrap/loss_scale.py +6 -7
  259. mindspore/numpy/array_creations.py +63 -65
  260. mindspore/numpy/array_ops.py +149 -144
  261. mindspore/numpy/logic_ops.py +41 -42
  262. mindspore/numpy/math_ops.py +365 -363
  263. mindspore/numpy/utils.py +17 -18
  264. mindspore/numpy/utils_const.py +5 -6
  265. mindspore/opencv_core452.dll +0 -0
  266. mindspore/opencv_imgcodecs452.dll +0 -0
  267. mindspore/opencv_imgproc452.dll +0 -0
  268. mindspore/ops/__init__.py +5 -3
  269. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  270. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  271. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  272. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  273. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  274. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  275. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  276. mindspore/ops/_register_for_op.py +0 -11
  277. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  278. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  279. mindspore/ops/_vmap/vmap_array_ops.py +27 -25
  280. mindspore/ops/_vmap/vmap_base.py +0 -2
  281. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  282. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  283. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  284. mindspore/ops/auto_generate/__init__.py +4 -3
  285. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +236 -46
  286. mindspore/ops/auto_generate/gen_extend_func.py +764 -124
  287. mindspore/ops/auto_generate/gen_ops_def.py +4018 -2264
  288. mindspore/ops/auto_generate/gen_ops_prim.py +15463 -5037
  289. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  290. mindspore/ops/composite/__init__.py +2 -1
  291. mindspore/ops/composite/base.py +20 -25
  292. mindspore/ops/composite/math_ops.py +6 -16
  293. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  294. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  295. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  296. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  299. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  301. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  302. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  303. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  304. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  305. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  306. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  307. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  308. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  309. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  310. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  311. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  312. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  313. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  314. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  315. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  316. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  317. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  318. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  319. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  320. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  321. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  324. mindspore/ops/function/__init__.py +40 -2
  325. mindspore/ops/function/_add_attr_func.py +58 -0
  326. mindspore/ops/function/array_func.py +2089 -2403
  327. mindspore/ops/function/clip_func.py +80 -23
  328. mindspore/ops/function/debug_func.py +57 -57
  329. mindspore/ops/function/grad/__init__.py +1 -0
  330. mindspore/ops/function/grad/grad_func.py +104 -71
  331. mindspore/ops/function/image_func.py +2 -2
  332. mindspore/ops/function/linalg_func.py +47 -78
  333. mindspore/ops/function/math_func.py +4501 -3802
  334. mindspore/ops/function/nn_func.py +1726 -620
  335. mindspore/ops/function/other_func.py +159 -1
  336. mindspore/ops/function/parameter_func.py +18 -84
  337. mindspore/ops/function/random_func.py +440 -387
  338. mindspore/ops/function/reshard_func.py +4 -70
  339. mindspore/ops/function/sparse_func.py +3 -3
  340. mindspore/ops/function/sparse_unary_func.py +6 -6
  341. mindspore/ops/function/spectral_func.py +25 -58
  342. mindspore/ops/function/vmap_func.py +24 -17
  343. mindspore/ops/functional.py +22 -7
  344. mindspore/ops/functional_overload.py +1440 -0
  345. mindspore/ops/op_info_register.py +32 -244
  346. mindspore/ops/operations/__init__.py +13 -7
  347. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  348. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  349. mindspore/ops/operations/_grad_ops.py +2 -43
  350. mindspore/ops/operations/_infer_ops.py +2 -1
  351. mindspore/ops/operations/_inner_ops.py +43 -84
  352. mindspore/ops/operations/_ms_kernel.py +4 -10
  353. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  354. mindspore/ops/operations/_scalar_ops.py +3 -2
  355. mindspore/ops/operations/_sequence_ops.py +1 -1
  356. mindspore/ops/operations/_tensor_array.py +1 -1
  357. mindspore/ops/operations/array_ops.py +81 -324
  358. mindspore/ops/operations/comm_ops.py +154 -108
  359. mindspore/ops/operations/custom_ops.py +232 -78
  360. mindspore/ops/operations/debug_ops.py +153 -59
  361. mindspore/ops/operations/inner_ops.py +7 -5
  362. mindspore/ops/operations/linalg_ops.py +1 -57
  363. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  364. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  365. mindspore/ops/operations/math_ops.py +32 -234
  366. mindspore/ops/operations/nn_ops.py +210 -498
  367. mindspore/ops/operations/other_ops.py +62 -9
  368. mindspore/ops/operations/random_ops.py +13 -7
  369. mindspore/ops/operations/reshard_ops.py +1 -1
  370. mindspore/ops/operations/sparse_ops.py +2 -2
  371. mindspore/ops/primitive.py +66 -53
  372. mindspore/ops/tensor_method.py +1888 -0
  373. mindspore/ops_generate/__init__.py +0 -5
  374. mindspore/ops_generate/aclnn/__init__.py +0 -0
  375. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  376. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  377. mindspore/ops_generate/api/__init__.py +0 -0
  378. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  379. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  380. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  381. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  382. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  383. mindspore/ops_generate/api/gen_api.py +103 -0
  384. mindspore/ops_generate/api/op_api_proto.py +235 -0
  385. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  386. mindspore/ops_generate/common/__init__.py +0 -0
  387. mindspore/ops_generate/common/base_generator.py +11 -0
  388. mindspore/ops_generate/common/gen_constants.py +91 -0
  389. mindspore/ops_generate/common/gen_utils.py +348 -0
  390. mindspore/ops_generate/common/op_proto.py +473 -0
  391. mindspore/ops_generate/common/template.py +523 -0
  392. mindspore/ops_generate/gen_ops.py +22 -1069
  393. mindspore/ops_generate/op_def/__init__.py +0 -0
  394. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  395. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  396. mindspore/ops_generate/op_def/ops_def_cc_generator.py +299 -0
  397. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  398. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  399. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  400. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  401. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  402. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  403. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  404. mindspore/ops_generate/pyboost/__init__.py +0 -0
  405. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  406. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  407. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  408. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  409. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  410. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  411. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  412. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  413. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  414. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  415. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  416. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  417. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  418. mindspore/ops_generate/resources/__init__.py +0 -0
  419. mindspore/ops_generate/resources/resource_list.py +30 -0
  420. mindspore/ops_generate/resources/resource_loader.py +36 -0
  421. mindspore/ops_generate/resources/resource_manager.py +64 -0
  422. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  423. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  424. mindspore/parallel/__init__.py +7 -3
  425. mindspore/parallel/_auto_parallel_context.py +152 -34
  426. mindspore/parallel/_cell_wrapper.py +130 -15
  427. mindspore/parallel/_parallel_serialization.py +107 -5
  428. mindspore/parallel/_ps_context.py +1 -1
  429. mindspore/parallel/_recovery_context.py +7 -2
  430. mindspore/parallel/_tensor.py +142 -18
  431. mindspore/parallel/_utils.py +199 -23
  432. mindspore/parallel/algo_parameter_config.py +4 -4
  433. mindspore/parallel/auto_parallel.py +732 -0
  434. mindspore/parallel/checkpoint_convert.py +159 -0
  435. mindspore/parallel/checkpoint_transform.py +698 -35
  436. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  437. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  438. mindspore/parallel/cluster/run.py +21 -4
  439. mindspore/parallel/function/__init__.py +24 -0
  440. mindspore/parallel/function/reshard_func.py +259 -0
  441. mindspore/parallel/nn/__init__.py +25 -0
  442. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  443. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  444. mindspore/parallel/parameter_broadcast.py +25 -14
  445. mindspore/parallel/shard.py +137 -58
  446. mindspore/parallel/transform_safetensors.py +363 -305
  447. mindspore/pgodb140.dll +0 -0
  448. mindspore/pgort140.dll +0 -0
  449. mindspore/profiler/__init__.py +22 -5
  450. mindspore/profiler/analysis/__init__.py +0 -0
  451. mindspore/profiler/analysis/parser/__init__.py +0 -0
  452. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  453. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  454. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  455. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  456. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  457. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  458. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  459. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  460. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +106 -0
  461. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  462. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  463. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  464. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  465. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  466. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  467. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  468. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  469. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  470. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  471. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  472. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  473. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  474. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  475. mindspore/profiler/analysis/task_manager.py +131 -0
  476. mindspore/profiler/analysis/time_converter.py +84 -0
  477. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  478. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  479. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  480. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  481. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  482. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  483. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  484. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  485. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  486. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  487. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  488. mindspore/profiler/analysis/work_flow.py +73 -0
  489. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  490. mindspore/profiler/common/command_executor.py +90 -0
  491. mindspore/profiler/common/constant.py +186 -3
  492. mindspore/profiler/common/file_manager.py +208 -0
  493. mindspore/profiler/common/log.py +130 -0
  494. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  495. mindspore/profiler/common/path_manager.py +395 -0
  496. mindspore/profiler/common/process_bar.py +168 -0
  497. mindspore/profiler/common/process_pool.py +9 -3
  498. mindspore/profiler/common/profiler_context.py +500 -0
  499. mindspore/profiler/common/profiler_info.py +304 -0
  500. mindspore/profiler/common/profiler_meta_data.py +74 -0
  501. mindspore/profiler/common/profiler_output_path.py +284 -0
  502. mindspore/profiler/common/profiler_parameters.py +251 -0
  503. mindspore/profiler/common/profiler_path_manager.py +179 -0
  504. mindspore/profiler/common/record_function.py +76 -0
  505. mindspore/profiler/common/tlv_decoder.py +76 -0
  506. mindspore/profiler/common/util.py +75 -2
  507. mindspore/profiler/dynamic_profiler.py +341 -75
  508. mindspore/profiler/envprofiler.py +163 -0
  509. mindspore/profiler/experimental_config.py +197 -0
  510. mindspore/profiler/mstx.py +242 -0
  511. mindspore/profiler/platform/__init__.py +21 -0
  512. mindspore/profiler/platform/base_profiler.py +40 -0
  513. mindspore/profiler/platform/cpu_profiler.py +124 -0
  514. mindspore/profiler/platform/gpu_profiler.py +74 -0
  515. mindspore/profiler/platform/npu_profiler.py +335 -0
  516. mindspore/profiler/profiler.py +1073 -90
  517. mindspore/profiler/profiler_action_controller.py +187 -0
  518. mindspore/profiler/profiler_interface.py +118 -0
  519. mindspore/profiler/schedule.py +243 -0
  520. mindspore/rewrite/api/node.py +15 -13
  521. mindspore/rewrite/api/symbol_tree.py +2 -3
  522. mindspore/run_check/_check_version.py +27 -20
  523. mindspore/run_check/run_check.py +1 -1
  524. mindspore/runtime/__init__.py +37 -0
  525. mindspore/runtime/device.py +27 -0
  526. mindspore/runtime/event.py +209 -0
  527. mindspore/runtime/executor.py +177 -0
  528. mindspore/runtime/memory.py +409 -0
  529. mindspore/runtime/stream.py +460 -0
  530. mindspore/runtime/thread_bind_core.py +401 -0
  531. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  532. mindspore/swresample-4.dll +0 -0
  533. mindspore/swscale-6.dll +0 -0
  534. mindspore/tbbmalloc.dll +0 -0
  535. mindspore/tinyxml2.dll +0 -0
  536. mindspore/train/__init__.py +8 -8
  537. mindspore/train/_utils.py +88 -25
  538. mindspore/train/amp.py +9 -5
  539. mindspore/train/callback/__init__.py +2 -2
  540. mindspore/train/callback/_callback.py +2 -16
  541. mindspore/train/callback/_checkpoint.py +53 -55
  542. mindspore/train/callback/_cluster_monitor.py +14 -18
  543. mindspore/train/callback/_early_stop.py +1 -1
  544. mindspore/train/callback/_flops_collector.py +103 -68
  545. mindspore/train/callback/_history.py +8 -5
  546. mindspore/train/callback/_lambda_callback.py +2 -2
  547. mindspore/train/callback/_landscape.py +0 -3
  548. mindspore/train/callback/_loss_monitor.py +2 -1
  549. mindspore/train/callback/_on_request_exit.py +6 -5
  550. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  551. mindspore/train/callback/_summary_collector.py +52 -19
  552. mindspore/train/callback/_time_monitor.py +2 -1
  553. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -107
  554. mindspore/train/data_sink.py +25 -2
  555. mindspore/train/dataset_helper.py +15 -16
  556. mindspore/train/loss_scale_manager.py +8 -7
  557. mindspore/train/metrics/accuracy.py +3 -3
  558. mindspore/train/metrics/confusion_matrix.py +9 -9
  559. mindspore/train/metrics/error.py +3 -3
  560. mindspore/train/metrics/hausdorff_distance.py +4 -4
  561. mindspore/train/metrics/mean_surface_distance.py +3 -3
  562. mindspore/train/metrics/metric.py +0 -12
  563. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  564. mindspore/train/metrics/precision.py +11 -10
  565. mindspore/train/metrics/recall.py +9 -9
  566. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  567. mindspore/train/mind_ir_pb2.py +174 -46
  568. mindspore/train/model.py +184 -113
  569. mindspore/train/serialization.py +622 -978
  570. mindspore/train/summary/_summary_adapter.py +2 -2
  571. mindspore/train/summary/summary_record.py +2 -3
  572. mindspore/train/train_thor/model_thor.py +1 -1
  573. mindspore/turbojpeg.dll +0 -0
  574. mindspore/utils/__init__.py +6 -3
  575. mindspore/utils/dryrun.py +140 -0
  576. mindspore/utils/hooks.py +81 -0
  577. mindspore/utils/runtime_execution_order_check.py +550 -0
  578. mindspore/utils/utils.py +138 -4
  579. mindspore/vcmeta.dll +0 -0
  580. mindspore/vcruntime140.dll +0 -0
  581. mindspore/vcruntime140_1.dll +0 -0
  582. mindspore/version.py +1 -1
  583. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +3 -3
  584. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +587 -418
  585. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +1 -1
  586. mindspore/_install_custom.py +0 -43
  587. mindspore/common/_register_for_adapter.py +0 -74
  588. mindspore/common/_tensor_overload.py +0 -139
  589. mindspore/mindspore_np_dtype.dll +0 -0
  590. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  591. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  592. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  593. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  594. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  595. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  596. mindspore/ops_generate/gen_utils.py +0 -209
  597. mindspore/ops_generate/op_proto.py +0 -145
  598. mindspore/ops_generate/template.py +0 -261
  599. mindspore/profiler/envprofiling.py +0 -254
  600. mindspore/profiler/profiling.py +0 -1926
  601. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  602. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/common/_tensor_docs.py
@@ -0,0 +1,4760 @@
1
+ # Copyright 2024 Huawei Technologies Co., Ltd
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ============================================================================
15
+ """Add docstrings to Tensor functions"""
16
+ from mindspore.common.tensor import Tensor
17
+ from mindspore._c_expression import _add_docstr as add_docstr
18
+
19
+
20
+ def attach_docstr(method, docstr):
21
+ try:
22
+ add_docstr(getattr(Tensor, method), docstr)
23
+ except Exception as e:
24
+ raise AttributeError(
25
+ f"Failed to attach docstring to Tensor.{method}.\n"
26
+ f"Please check if there is a duplicate Tensor.{method} in tensor.py."
27
+ )
28
+
29
+ attach_docstr("absolute", r"""absolute() -> Tensor
30
+
31
+ Alias for :func:`Tensor.abs`.
32
+ """)
33
+ attach_docstr("abs", r"""abs() -> Tensor
34
+
35
+ For details, please refer to :func:`mindspore.ops.abs`.
36
+ """)
37
+ attach_docstr("acosh", r"""acosh() -> Tensor
38
+
39
+ For details, please refer to :func:`mindspore.ops.acosh`.
40
+ """)
41
+ attach_docstr("acos", r"""acos() -> Tensor
42
+
43
+ For details, please refer to :func:`mindspore.ops.acos`.
44
+ """)
45
+ attach_docstr("addbmm", r"""addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
46
+
47
+ For details, please refer to :func:`mindspore.ops.addbmm`.""")
48
+ attach_docstr("addcdiv", r"""addcdiv(tensor1, tensor2, *, value=1) -> Tensor
49
+
50
+ For details, please refer to :func:`mindspore.ops.addcdiv`.
51
+
52
+ Supported Platforms:
53
+ ``Ascend``""")
54
+ attach_docstr("addmm", r"""addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
55
+
56
+ For details, please refer to :func:`mindspore.ops.addmm`.""")
57
+ attach_docstr("addmv", r"""addmv(mat, vec, *, beta=1, alpha=1) -> Tensor
58
+
59
+ For details, please refer to :func:`mindspore.ops.addmv`.
60
+
61
+ Supported Platforms:
62
+ ``Ascend``""")
63
+ attach_docstr("add", r"""add(other) -> Tensor
64
+
65
+ Adds other value to `self` element-wise.
66
+
67
+ .. math::
68
+
69
+ out_{i} = self_{i} + other_{i}
70
+
71
+ Note:
72
+ - When `self` and `other` have different shapes,
73
+ they must be able to broadcast to a common shape.
74
+ - `self` and `other` can not be bool type at the same time,
75
+ [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
76
+ - `self` and `other` comply with the implicit type conversion rules to make the data types
77
+ consistent.
78
+ - The dimension of `self` should be greater than or equal to 1.
79
+
80
+ Args:
81
+ other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
82
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
83
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
84
+
85
+ Returns:
86
+ Tensor with a shape that is the same as the broadcasted shape of `self` and `other`,
87
+ and the data type is the one with higher precision or higher digits between `self` and `other`.
88
+
89
+ Raises:
90
+ TypeError: If `other` is not one of the following: Tensor, number.Number, bool.
91
+
92
+ Supported Platforms:
93
+ ``Ascend`` ``GPU`` ``CPU``
94
+
95
+ Examples:
96
+ >>> import numpy as np
97
+ >>> import mindspore
98
+ >>> from mindspore import Tensor
99
+ >>> # case 1: x and y are both Tensor.
100
+ >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
101
+ >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
102
+ >>> output = Tensor.add(x, y) # x.add(y)
103
+ >>> print(output)
104
+ [5. 7. 9.]
105
+ >>> # case 2: x is a scalar and y is a Tensor
106
+ >>> x = Tensor(1, mindspore.int32)
107
+ >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
108
+ >>> output = Tensor.add(x, y) # x.add(y)
109
+ >>> print(output)
110
+ [5. 6. 7.]
111
+ >>> # the data type of x is int32, the data type of y is float32,
112
+ >>> # and the output is the data format of higher precision float32.
113
+ >>> print(output.dtype)
114
+ Float32
115
+
116
+ .. method:: Tensor.add(other, *, alpha=1) -> Tensor
117
+ :noindex:
118
+
119
+ Adds scaled other value to `self`.
120
+
121
+ .. math::
122
+
123
+ out_{i} = self_{i} + alpha \times other_{i}
124
+
125
+ Note:
126
+ - When `self` and `other` have different shapes,
127
+ they must be able to broadcast to a common shape.
128
+ - `self`, `other` and alpha comply with the implicit type conversion rules to make the data types
129
+ consistent.
130
+
131
+ Args:
132
+ other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
133
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
134
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
135
+
136
+ Keyword Args:
137
+ alpha (number.Number): A scaling factor applied to `other`, default 1.
138
+
139
+ Returns:
140
+ Tensor with a shape that is the same as the broadcasted shape of the `self` and `other`,
141
+ and the data type is the one with higher precision or higher digits among `self`, `other` and `alpha`.
142
+
143
+ Raises:
144
+ TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
145
+ TypeError: If `alpha` is of type float but `self` and `other` are not of type float.
146
+ TypeError: If `alpha` is of type bool but `self` and `other` are not of type bool.
147
+
148
+ Supported Platforms:
149
+ ``Ascend`` ``GPU`` ``CPU``
150
+
151
+ Examples:
152
+ >>> import numpy as np
153
+ >>> import mindspore
154
+ >>> from mindspore import Tensor
155
+ >>> x = Tensor(1, mindspore.int32)
156
+ >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
157
+ >>> alpha = 0.5
158
+ >>> output = Tensor.add(x, y, alpha=alpha) # x.add(y, alpha=alpha)
159
+ >>> print(output)
160
+ [3. 3.5 4.]
161
+ >>> # the data type of x is int32, the data type of y is float32,
162
+ >>> # alpha is a float, and the output is the data format of higher precision float32.
163
+ >>> print(output.dtype)
164
+ Float32
165
+ """)
166
+ attach_docstr("add_", r"""add_(other) -> Tensor
167
+
168
+ In-place version of :func:`mindspore.Tensor.add`.""")
169
+ attach_docstr("allclose", r"""allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
170
+
171
+ Returns a new Tensor with boolean elements representing if each element of `self`
172
+ is "close" to the corresponding element of `other`. Closeness is defined as:
173
+
174
+ .. math::
175
+ |self-other| <= atol + rtol x |other|
176
+
177
+ .. warning::
178
+ This is an experimental API that is subject to change or deletion.
179
+
180
+ Args:
181
+ other (Tensor): Tensor to compare. Dtype must be same as `self`.
182
+ rtol (Union[float, int, bool], optional): Relative tolerance. Default: ``1e-05`` .
183
+ atol (Union[float, int, bool], optional): Absolute tolerance. Default: ``1e-08`` .
184
+ equal_nan (bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``False``.
185
+
186
+ Returns:
187
+ A bool Scalar.
188
+
189
+ Raises:
190
+ TypeError: `self` or `other` is not Tensor.
191
+ TypeError: Data types of `self` and `other` are not in the list of supported types.
192
+ TypeError: `atol` or `rtol` is not float, int or bool.
193
+ TypeError: `equal_nan` is not bool.
194
+ TypeError: `self` and `other` have different dtypes.
195
+ ValueError: `self` and `other` cannot broadcast.
196
+
197
+ Supported Platforms:
198
+ ``Ascend`` ``GPU`` ``CPU``
199
+
200
+ Examples:
201
+ >>> import mindspore
202
+ >>> import numpy as np
203
+ >>> from mindspore import Tensor
204
+ >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
205
+ >>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
206
+ >>> output = input.allclose(other)
207
+ >>> print(output)
208
+ False
209
+ """)
210
+ attach_docstr("all", r"""all(axis=None, keep_dims=False) -> Tensor
211
+
212
+ Tests if all element in tensor evaluates to `True` along the given axes.
213
+
214
+ Args:
215
+ axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. If ``None`` ,
216
+ all dimensions are reduced. Default ``None`` .
217
+ keep_dims (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
218
+
219
+ Returns:
220
+ Tensor
221
+
222
+ Supported Platforms:
223
+ ``Ascend`` ``GPU`` ``CPU``
224
+
225
+ Examples:
226
+ >>> import mindspore
227
+ >>> x = mindspore.tensor([[True, False], [True, True]])
228
+ >>>
229
+ >>> # case 1: By default, mindspore.Tensor.all tests along all the axes.
230
+ >>> x.all()
231
+ Tensor(shape=[], dtype=Bool, value= False)
232
+ >>>
233
+ >>> # case 2: Reduces a dimension along axis 1, with keep_dims False.
234
+ >>> x.all(axis=1)
235
+ Tensor(shape=[2], dtype=Bool, value= [False, True])
236
+ >>>
237
+ >>> # case 3: Reduces a dimension along axis (0, 1), with keep_dims False.
238
+ >>> x.all(axis=(0,1))
239
+ Tensor(shape=[], dtype=Bool, value= False)
240
+ >>>
241
+ >>> # case 4: Reduces a dimension along axis [0, 1], with keep_dims True.
242
+ >>> x.all(axis=[0,1], keep_dims=True)
243
+ Tensor(shape=[1, 1], dtype=Bool, value=
244
+ [[False]])
245
+
246
+ .. method:: Tensor.all(dim=None, keepdim=False) -> Tensor
247
+ :noindex:
248
+
249
+ Tests if all element in tensor evaluates to `True` along the given axes.
250
+
251
+ Args:
252
+ dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. If ``None`` ,
253
+ all dimensions are reduced. Default ``None`` .
254
+ keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
255
+
256
+ Returns:
257
+ Tensor
258
+
259
+ Supported Platforms:
260
+ ``Ascend`` ``GPU`` ``CPU``
261
+
262
+ Examples:
263
+ >>> import mindspore
264
+ >>> x = mindspore.tensor([[True, False], [True, True]])
265
+ >>>
266
+ >>> # case 1: By default, mindspore.Tensor.all tests along all the axes.
267
+ >>> x.all()
268
+ Tensor(shape=[], dtype=Bool, value= False)
269
+ >>>
270
+ >>> # case 2: Reduces a dimension along dim 1, with keepdim False.
271
+ >>> x.all(dim=1)
272
+ Tensor(shape=[2], dtype=Bool, value= [False, True])
273
+ >>>
274
+ >>> # case 3: Reduces a dimension along dim (0, 1), with keepdim False.
275
+ >>> x.all(dim=(0,1))
276
+ Tensor(shape=[], dtype=Bool, value= False)
277
+ >>>
278
+ >>> # case 4: Reduces a dimension along dim [0, 1], with keepdim True.
279
+ >>> x.all(dim=[0,1], keepdim=True)
280
+ Tensor(shape=[1, 1], dtype=Bool, value=
281
+ [[False]])
282
+ """)
283
+ attach_docstr("any", r"""any(axis=None, keep_dims=False) -> Tensor
284
+
285
+ Tests if any element in tensor evaluates to `True` along the given axes.
286
+
287
+ Args:
288
+ axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. If ``None`` ,
289
+ all dimensions are reduced. Default ``None`` .
290
+ keep_dims (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
291
+
292
+ Returns:
293
+ Tensor
294
+
295
+ Supported Platforms:
296
+ ``Ascend`` ``GPU`` ``CPU``
297
+
298
+ Examples:
299
+ >>> import mindspore
300
+ >>> x = mindspore.tensor([[True, False], [True, True]])
301
+ >>>
302
+ >>> # case 1: By default, mindspore.Tensor.any tests along all the axes.
303
+ >>> x.any()
304
+ Tensor(shape=[], dtype=Bool, value= True)
305
+ >>>
306
+ >>> # case 2: Reduces a dimension along axis 1, with keep_dims False.
307
+ >>> x.any(axis=1)
308
+ Tensor(shape=[2], dtype=Bool, value= [ True, True])
309
+ >>>
310
+ >>> # case 3: Reduces a dimension along axis (0, 1), with keep_dims False.
311
+ >>> x.any(axis=(0,1))
312
+ Tensor(shape=[], dtype=Bool, value= True)
313
+ >>>
314
+ >>> # case 4: Reduces a dimension along axis [0, 1], with keep_dims True.
315
+ >>> x.any(axis=[0,1], keep_dims=True)
316
+ Tensor(shape=[1, 1], dtype=Bool, value=
317
+ [[ True]])
318
+
319
+ .. method:: Tensor.any(dim=None, keepdim=False) -> Tensor
320
+ :noindex:
321
+
322
+ Tests if any element in tensor evaluates to `True` along the given axes.
323
+
324
+ Args:
325
+ dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. If ``None`` ,
326
+ all dimensions are reduced. Default ``None`` .
327
+ keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
328
+
329
+ Returns:
330
+ Tensor
331
+
332
+ Supported Platforms:
333
+ ``Ascend`` ``GPU`` ``CPU``
334
+
335
+ Examples:
336
+ >>> import mindspore
337
+ >>> x = mindspore.tensor([[True, False], [True, True]])
338
+ >>>
339
+ >>> # case 1: By default, mindspore.Tensor.any tests along all the axes.
340
+ >>> x.any()
341
+ Tensor(shape=[], dtype=Bool, value= True)
342
+ >>>
343
+ >>> # case 2: Reduces a dimension along dim 1, with keepdim False.
344
+ >>> x.any(dim=1)
345
+ Tensor(shape=[2], dtype=Bool, value= [ True, True])
346
+ >>>
347
+ >>> # case 3: Reduces a dimension along dim (0, 1), with keepdim False.
348
+ >>> x.any(dim=(0,1))
349
+ Tensor(shape=[], dtype=Bool, value= True)
350
+ >>>
351
+ >>> # case 4: Reduces a dimension along dim [0, 1], with keepdim True.
352
+ >>> x.any(dim=[0,1], keepdim=True)
353
+ Tensor(shape=[1, 1], dtype=Bool, value=
354
+ [[ True]])
355
+ """)
356
+ attach_docstr("arccosh", r"""arccosh() -> Tensor
357
+
358
+ Alias for :func:`mindspore.Tensor.acosh`.
359
+ """)
360
+ attach_docstr("arccos", r"""arccos() -> Tensor
361
+
362
+ Alias for :func:`mindspore.Tensor.acos`.
363
+ """)
364
+ attach_docstr("arcsinh", r"""arcsinh() -> Tensor
365
+
366
+ Alias for :func:`mindspore.Tensor.asinh`.
367
+ """)
368
+ attach_docstr("arcsin", r"""arcsin() -> Tensor
369
+
370
+ Alias for :func:`mindspore.Tensor.asin`.
371
+ """)
372
+ attach_docstr("arctan2", r"""arctan2(other) -> Tensor
373
+
374
+ Alias for :func:`Tensor.atan2`.
375
+ """)
376
+ attach_docstr("arctanh", r"""arctanh() -> Tensor
377
+
378
+ Alias for :func:`mindspore.Tensor.atanh`.
379
+ """)
380
+ attach_docstr("arctan", r"""arctan() -> Tensor
381
+
382
+ Alias for :func:`mindspore.Tensor.atan`.
383
+ """)
384
+ attach_docstr("argmax", r"""argmax(axis=None, keepdims=False) -> Tensor
385
+
386
+ Return the indices of the maximum values along the given axis of the tensor.
387
+
388
+ Args:
389
+ axis (Union[int, None], optional): Specify the axis for computation. If ``None`` , compute all elements in the
390
+ tensor. Default ``None`` .
391
+ keepdims (bool, optional): Whether the output tensor has dim retained. Default ``False`` .
392
+
393
+ Returns:
394
+ Tensor
395
+
396
+ Supported Platforms:
397
+ ``Ascend`` ``GPU`` ``CPU``
398
+
399
+ Examples:
400
+ >>> import mindspore
401
+ >>> x = mindspore.tensor([[9, 3, 4, 5],
402
+ ... [5, 2, 7, 4],
403
+ ... [8, 1, 3, 6]])
404
+ >>> # case 1: By default, compute the maximum of all elements.
405
+ >>> x.argmax()
406
+ Tensor(shape=[], dtype=Int64, value= 0)
407
+ >>>
408
+ >>> # case 2: Compute the maximum along axis 1.
409
+ >>> x.argmax(axis=1)
410
+ Tensor(shape=[3], dtype=Int64, value= [0, 2, 0])
411
+ >>>
412
+ >>> # case 3: If keepdims=True, the output shape will be same of that of the input.
413
+ >>> x.argmax(axis=1, keepdims=True)
414
+ Tensor(shape=[3, 1], dtype=Int64, value=
415
+ [[0],
416
+ [2],
417
+ [0]])
418
+
419
+ .. method:: Tensor.argmax(dim=None, keepdim=False) -> Tensor
420
+ :noindex:
421
+
422
+ Return the maximum values along the given dimension of the tensor.
423
+
424
+ Args:
425
+ dim (Union[int, None], optional): Specify the dim for computation. If ``None`` , compute all elements in the
426
+ tensor.
427
+ keepdim (bool, optional): Whether the output tensor has dim retained.
428
+
429
+ Returns:
430
+ Tensor
431
+
432
+ Supported Platforms:
433
+ ``Ascend``
434
+
435
+ Examples:
436
+ >>> import mindspore
437
+ >>> x = mindspore.tensor([[9, 3, 4, 5],
438
+ ... [5, 2, 7, 4],
439
+ ... [8, 1, 3, 6]])
440
+ >>> # case 1: By default, compute the maximum of all elements.
441
+ >>> x.argmax()
442
+ Tensor(shape=[], dtype=Int64, value= 0)
443
+ >>>
444
+ >>> # case 2: Compute the maximum along dim 1.
445
+ >>> x.argmax(dim=1)
446
+ Tensor(shape=[3], dtype=Int64, value= [0, 2, 0])
447
+ >>>
448
+ >>> # case 3: If keepdim=True, the output shape will be same of that of the input.
449
+ >>> x.argmax(dim=1, keepdim=True)
450
+ Tensor(shape=[3, 1], dtype=Int64, value=
451
+ [[0],
452
+ [2],
453
+ [0]])
454
+ """)
455
+ attach_docstr("argmin", r"""argmin(axis=None, keepdims=False) -> Tensor
456
+
457
+ Returns the indices of the minimum values along the given axis of the tensor.
458
+
459
+ Args:
460
+ axis (Union[int, None], optional): Specify the axis for computation. If ``None`` , compute all elements in the
461
+ tensor. Default ``None`` .
462
+ keepdims (bool, optional): Whether the output tensor has dim retained. Default ``False`` .
463
+
464
+ Returns:
465
+ Tensor
466
+
467
+ Supported Platforms:
468
+ ``Ascend`` ``GPU`` ``CPU``
469
+
470
+ Examples:
471
+ >>> import mindspore
472
+ >>> x = mindspore.tensor([[2, 5, 1, 6],
473
+ ... [3, -7, -2, 4],
474
+ ... [8, -4, 1, -3]])
475
+ >>> # case 1: By default, compute the minimum of all elements.
476
+ >>> x.argmin()
477
+ Tensor(shape=[], dtype=Int32, value= 5)
478
+ >>>
479
+ >>> # case 2: Compute the minimum along axis 1.
480
+ >>> x.argmin(axis=1)
481
+ Tensor(shape=[3], dtype=Int32, value= [2, 1, 1])
482
+ >>>
483
+ >>> # case 3: If keepdims=True, the output shape will be same of that of the input.
484
+ >>> x.argmin(axis=1, keepdims=True)
485
+ Tensor(shape=[3, 1], dtype=Int32, value=
486
+ [[2],
487
+ [1],
488
+ [1]])
489
+
490
+ .. method:: Tensor.argmin(dim=None, keepdim=False) -> Tensor
491
+ :noindex:
492
+
493
+ Returns the indices of the minimum values along the given axis of the tensor.
494
+
495
+ Args:
496
+ dim (Union[int, None], optional): Specify the axis for computation. If ``None`` , compute all elements in the
497
+ tensor.
498
+ keepdim (bool, optional): Whether the output tensor has dim retained.
499
+
500
+ Returns:
501
+ Tensor
502
+
503
+ Supported Platforms:
504
+ ``Ascend``
505
+
506
+ Examples:
507
+ >>> import mindspore
508
+ >>> x = mindspore.tensor([[2, 5, 1, 6],
509
+ ... [3, -7, -2, 4],
510
+ ... [8, -4, 1, -3]])
511
+ >>> # case 1: By default, compute the minimum of all elements.
512
+ >>> x.argmin()
513
+ Tensor(shape=[], dtype=Int32, value= 5)
514
+ >>>
515
+ >>> # case 2: Compute the minimum along dim 1.
516
+ >>> x.argmin(dim=1)
517
+ Tensor(shape=[3], dtype=Int32, value= [2, 1, 1])
518
+ >>>
519
+ >>> # case 3: If keepdim=True, the output shape will be same of that of the input.
520
+ >>> x.argmin(dim=1, keepdim=True)
521
+ Tensor(shape=[3, 1], dtype=Int32, value=
522
+ [[2],
523
+ [1],
524
+ [1]])
525
+ """)
526
+ attach_docstr("argsort", r"""argsort(axis=-1, descending=False) -> Tensor
527
+
528
+ Sorts `self` along the given dimension in specified order and return the sorted indices.
529
+
530
+ Args:
531
+ axis (int, optional): The axis to sort along. Default: ``-1`` , means the last dimension.
532
+ The Ascend backend only supports sorting the last dimension.
533
+ descending (bool, optional): The sort order. If `descending` is True then the elements
534
+ are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
535
+
536
+ Returns:
537
+ Tensor, the indices of sorted `self`. Data type is int32.
538
+
539
+ Supported Platforms:
540
+ ``Ascend`` ``GPU`` ``CPU``
541
+
542
+ Examples:
543
+ >>> import mindspore
544
+ >>> import numpy as np
545
+ >>> from mindspore import Tensor
546
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
547
+ >>> sort = Tensor.argsort(x) # x.argsort()
548
+ >>> print(sort)
549
+ [[2 1 0]
550
+ [2 0 1]
551
+ [0 1 2]]
552
+
553
+ .. method:: Tensor.argsort(dim=-1, descending=False, stable=False) -> Tensor
554
+ :noindex:
555
+
556
+ Sorts `self` along the given dimension in specified order and return the sorted indices.
557
+
558
+ .. warning::
559
+ This is an experimental optimizer API that is subject to deletion or change.
560
+
561
+ Args:
562
+ dim (int, optional): The dim to sort along. Default: ``-1`` , means the last dimension.
563
+ The Ascend backend only supports sorting the last dimension.
564
+ descending (bool, optional): The sort order. If `descending` is ``True`` then the elements
565
+ are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
566
+ stable (bool, optional): Whether to use stable sorting algorithm. Default: ``False``.
567
+
568
+ Returns:
569
+ Tensor, the indices of sorted `self`. Data type is int64.
570
+
571
+ Raises:
572
+ ValueError: If `dim` is out of range.
573
+ TypeError: If dtype of `dim` is not int32.
574
+ TypeError: If dtype of `descending` is not bool.
575
+ TypeError: If dtype of `stable` is not bool.
576
+
577
+ Supported Platforms:
578
+ ``Ascend``
579
+
580
+ Examples:
581
+ >>> import mindspore
582
+ >>> import numpy as np
583
+ >>> from mindspore import Tensor
584
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
585
+ >>> sort = Tensor.argsort(x) # x.argsort()
586
+ >>> print(sort)
587
+ [[2 1 0]
588
+ [2 0 1]
589
+ [0 1 2]]
590
+ """)
591
+ attach_docstr("asinh", r"""asinh() -> Tensor
592
+
593
+ For details, please refer to :func:`mindspore.ops.asinh`.
594
+ """)
595
+ attach_docstr("asin", r"""asin() -> Tensor
596
+
597
+ For details, please refer to :func:`mindspore.ops.asin`.
598
+ """)
599
+ attach_docstr("atan2", r"""atan2(other) -> Tensor
600
+
601
+ For details, please refer to :func:`mindspore.ops.atan2`.""")
602
+ attach_docstr("atanh", r"""atanh() ->Tensor
603
+
604
+ For details, please refer to :func:`mindspore.ops.atanh`.""")
605
+ attach_docstr("atan", r"""atan() -> Tensor
606
+
607
+ For details, please refer to :func:`mindspore.ops.atan`.""")
608
+ attach_docstr("baddbmm", r"""baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
609
+
610
+ For details, please refer to :func:`mindspore.ops.baddbmm`.""")
611
+ attach_docstr("bincount", r"""bincount(weights=None, minlength=0) -> Tensor
612
+
613
+ For details, please refer to :func:`mindspore.ops.bincount`.""")
614
+ attach_docstr("bitwise_and", r"""bitwise_and(other) ->Tensor
615
+
616
+ Returns bitwise `and` of two tensors element-wise.
617
+
618
+ Note:
619
+ `self` and `other` comply with the type conversion rules to make the data types consistent.
620
+
621
+ Args:
622
+ other (Tensor, Number.number): The shape is the same as the `self` or can be broadcast to the shape of `self`.
623
+
624
+ Returns:
625
+ Tensor, has the same type as the `self` and has the same shape as after broadcasting.
626
+
627
+ Supported Platforms:
628
+ ``Ascend`` ``GPU`` ``CPU``
629
+
630
+ Examples:
631
+ >>> import mindspore
632
+ >>> import numpy as np
633
+ >>> from mindspore import Tensor
634
+ >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
635
+ >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
636
+ >>> output = input.bitwise_and(other)
637
+ >>> print(output)
638
+ [ 0 0 1 -1 1 0 1]
639
+ """)
640
+ attach_docstr("bitwise_not", r"""bitwise_not() -> Tensor
641
+
642
+ Returns bitwise `not` of `self`.
643
+
644
+ .. warning::
645
+ This is an experimental API that is subject to change or deletion.
646
+
647
+ Returns:
648
+ Tensor, has the same shape and type as `self`.
649
+
650
+ Raises:
651
+ TypeError: If `self` is not a Tensor.
652
+ RuntimeError: If dtype of `self` is not int or bool.
653
+
654
+ Supported Platforms:
655
+ ``Ascend``
656
+
657
+ Examples:
658
+ >>> import mindspore
659
+ >>> import numpy as np
660
+ >>> from mindspore import Tensor
661
+ >>> input = Tensor(np.array([True, False, True, False]))
662
+ >>> output = input.bitwise_not()
663
+ >>> print(output)
664
+ [False True False True]
665
+ """)
666
+ attach_docstr("bitwise_or", r"""bitwise_or(other) ->Tensor
667
+
668
+ Returns bitwise `or` of two tensors element-wise.
669
+
670
+ Note:
671
+ `self` and `other` comply with the type conversion rules to make the data types consistent.
672
+
673
+ Args:
674
+ other (Tensor, number.Number): The shape is the same as the `self` or can be broadcast to the shape of `self`.
675
+
676
+ Returns:
677
+ Tensor, has the same type as the `self` and has the same shape as after broadcasting.
678
+
679
+ Supported Platforms:
680
+ ``Ascend`` ``GPU`` ``CPU``
681
+
682
+ Examples:
683
+ >>> import mindspore
684
+ >>> import numpy as np
685
+ >>> from mindspore import Tensor
686
+ >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
687
+ >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
688
+ >>> output = input.bitwise_or(other)
689
+ >>> print(output)
690
+ [ 0 1 1 -1 -1 3 3]
691
+ """)
692
+ attach_docstr("bitwise_xor", r"""bitwise_xor(other) ->Tensor
693
+
694
+ Returns bitwise `xor` of two tensors element-wise.
695
+
696
+ Note:
697
+ `self` and `other` comply with the type conversion rules to make the data types consistent.
698
+
699
+ Args:
700
+ other (Tensor, number.Number): The shape is the same as the `self` or can be broadcast to the shape of `self`.
701
+
702
+ Returns:
703
+ Tensor, has the same type as the `self` and has the same shape as after broadcasting.
704
+
705
+ Supported Platforms:
706
+ ``Ascend`` ``GPU`` ``CPU``
707
+
708
+ Examples:
709
+ >>> import mindspore
710
+ >>> import numpy as np
711
+ >>> from mindspore import Tensor
712
+ >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
713
+ >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
714
+ >>> output = input.bitwise_xor(other)
715
+ >>> print(output)
716
+ [ 0 1 0 0 -2 3 2]
717
+ """)
718
+ attach_docstr("ceil", r"""ceil() -> Tensor
719
+
720
+ For details, please refer to :func:`mindspore.ops.ceil`.""")
721
+ attach_docstr("chunk", r"""chunk(chunks, dim=0) -> Tuple of Tensors
722
+
723
+ Cut the self Tensor into `chunks` sub-tensors along the specified dimension.
724
+
725
+ Note:
726
+ The number of sub-tensors returned by this function may be less than the number
727
+ of sub-tensors specified by `chunks`.
728
+
729
+ .. warning::
730
+ This is an experimental API that is subject to change or deletion.
731
+
732
+ Args:
733
+ chunks (int): Number of sub-tensors to cut.
734
+ dim (int, optional): Specify the dimensions that you want to split. Default: ``0`` .
735
+
736
+ Returns:
737
+ A tuple of sub-tensors.
738
+
739
+ Raises:
740
+ TypeError: If `chunks` is not an int.
741
+ TypeError: If argument `dim` is not int.
742
+ ValueError: If argument `dim` is out of range of :math:`[-self.ndim, self.ndim)` .
743
+ ValueError: If argument `chunks` is not a positive number.
744
+
745
+ Supported Platforms:
746
+ ``Ascend``
747
+
748
+ Examples:
749
+ >>> import numpy as np
750
+ >>> import mindspore
751
+ >>> from mindspore import Tensor
752
+ >>> input_x = Tensor(np.arange(9).astype("float32"))
753
+ >>> output = input_x.chunk(3, dim=0)
754
+ >>> print(output)
755
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
756
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
757
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
758
+
759
+ .. method:: Tensor.chunk(chunks, axis=0) -> Tuple of Tensors
760
+ :noindex:
761
+
762
+ Cut the self Tensor into `chunks` sub-tensors along the specified axis.
763
+
764
+ Note:
765
+ This function may return fewer than the specified number of chunks.
766
+
767
+ Args:
768
+ chunks (int): Number of sub-tensors to cut.
769
+ axis (int, optional): Specify the dimensions that you want to split. Default: ``0`` .
770
+
771
+ Returns:
772
+ A tuple of sub-tensors.
773
+
774
+ Raises:
775
+ TypeError: If `chunks` is not an int.
776
+ TypeError: If argument `axis` is not int.
777
+ ValueError: If argument `axis` is out of range of :math:`[-self.ndim, self.ndim)` .
778
+ ValueError: If argument `chunks` is not a positive number.
779
+
780
+ Supported Platforms:
781
+ ``Ascend`` ``GPU`` ``CPU``
782
+
783
+ Examples:
784
+ >>> import numpy as np
785
+ >>> from mindspore import Tensor
786
+ >>> input_x = Tensor(np.arange(9).astype("float32"))
787
+ >>> output = input_x.chunk(3, axis=0)
788
+ >>> print(output)
789
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
790
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
791
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))""")
792
+ attach_docstr("clamp", r"""clamp(min=None, max=None) -> Tensor
793
+
794
+ For details, please refer to :func:`mindspore.ops.clamp`.""")
795
+ attach_docstr("clip", r"""clip(min=None, max=None) -> Tensor
796
+
797
+ Alias for :func:`mindspore.Tensor.clamp`.
798
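+
+ A minimal usage sketch (assuming only the alias above; the values in the comment follow the clamp semantics):
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.0, 1.0, 2.0], mindspore.float32)
+ >>> y = x.clip(min=0.5, max=1.5)  # expected values: [0.5, 1.0, 1.5]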
+ """)
799
+ attach_docstr("clone", r"""clone() -> Tensor
800
+
801
+ Returns a copy of self.
802
+
803
+ .. warning::
804
+ This is an experimental API that is subject to change or deletion.
805
+
806
+ Note:
807
+ This function is differentiable, and gradients will flow back directly from the calculation
808
+ result of the function to the `self`.
809
+
810
+ Returns:
811
+ Tensor, with the same data, shape and type as `self`.
812
+
813
+ Supported Platforms:
814
+ ``Ascend``
815
+
816
+ Examples:
817
+ >>> import numpy as np
818
+ >>> from mindspore import Tensor
819
+ >>> input = Tensor(np.ones((3,3)).astype("float32"))
820
+ >>> output = input.clone()
821
+ >>> print(output)
822
+ [[1. 1. 1.]
823
+ [1. 1. 1.]
824
+ [1. 1. 1.]]
825
+ """)
826
+ attach_docstr("copy_", r"""copy_(src, non_blocking=False) -> Tensor
827
+
828
+ Copies the elements from `src` into `self` tensor and returns `self`.
829
+
830
+ .. warning::
831
+ This is an experimental API that is subject to change or deletion.
832
+ The `src` tensor must be broadcastable with the `self` tensor. It may be of a different data type.
833
+
834
+ Args:
835
+ src (Tensor): the source tensor to copy from.
836
+ non_blocking (bool, optional): currently has no effect. Default: ``False``.
837
+
838
+ Returns:
839
+ Return self Tensor.
840
+
841
+ Supported Platforms:
842
+ ``Ascend``
843
+
844
+ Examples:
845
+ >>> import numpy as np
846
+ >>> from mindspore import Tensor
847
+ >>> a = Tensor(np.ones((3, 3)).astype("float32"))
848
+ >>> b = Tensor(np.zeros((3, 3)).astype("float32"))
849
+ >>> a.copy_(b)
850
+ >>> print(a)
851
+ [[0. 0. 0.]
852
+ [0. 0. 0.]
853
+ [0. 0. 0.]]
854
+ """)
855
+ attach_docstr("cosh", r"""cosh() -> Tensor
856
+
857
+ For details, please refer to :func:`mindspore.ops.cosh`.""")
858
+ attach_docstr("cos", r"""cos() -> Tensor
859
+
860
+ For details, please refer to :func:`mindspore.ops.cos`.""")
861
+ attach_docstr("count_nonzero", r"""count_nonzero(dim=None) -> Tensor
862
+
863
+ Counts the number of non-zero values in the input tensor along the given `dim`. If no `dim` is specified, all non-zero values in the tensor are counted.
864
+
865
+ .. warning::
866
+ This is an experimental API that is subject to change or deletion.
867
+
868
+ Args:
869
+ dim (Union[None, int, tuple(int), list(int)], optional): The dimension to reduce. Default: ``None`` , which means the non-zero elements are counted over the entire tensor.
870
+
871
+ Returns:
872
+ Tensor, the number of non-zero elements across the dimension specified by `dim`.
873
+
874
+ Raises:
875
+ TypeError: If `dim` is not int, tuple(int), list(int) or None.
876
+ ValueError: If any value in `dim` is not in range :math:`[-self.ndim, self.ndim)`.
877
+
878
+ Supported Platforms:
879
+ ``Ascend``
880
+
881
+ Examples:
882
+ >>> from mindspore import Tensor
883
+ >>> import numpy as np
884
+ >>> import mindspore
885
+ >>> # case 1: each value specified.
886
+ >>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
887
+ >>> nonzero_num = x.count_nonzero(dim=[0, 1])
888
+ >>> print(nonzero_num)
889
+ [[3]]
890
+ >>> # case 2: all value is default.
891
+ >>> nonzero_num = x.count_nonzero()
892
+ >>> print(nonzero_num)
893
+ 3
894
+ >>> # case 3: dim value was specified 0.
895
+ >>> nonzero_num = x.count_nonzero(dim=[0,])
896
+ >>> print(nonzero_num)
897
+ [1 2 0]
898
+ >>> # case 4: dim value was specified 1.
899
+ >>> nonzero_num = x.count_nonzero(dim=[1,])
900
+ >>> print(nonzero_num)
901
+ [1 2]
902
+
903
+ .. method:: Tensor.count_nonzero(axis=(), keep_dims=False, dtype=None) -> Tensor
904
+ :noindex:
905
+
906
+ Counts the number of non-zero elements across the axis of the input tensor.
907
+
908
+ Args:
909
+ axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
910
+ Default: ``()`` , reduce all dimensions.
911
+ keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
912
+ If true, keep these reduced dimensions and the length is 1.
913
+ If false, don't keep these dimensions. Default: ``False`` .
914
+ dtype (Union[Number, mindspore.bool\_], optional): The data type of the output tensor.
915
+ Default: ``None`` .
916
+
917
+ Returns:
918
+ Tensor, the number of non-zero elements across the axis specified by `axis`.
919
+ The data type is specified by `dtype`.
920
+
921
+ Raises:
922
+ TypeError: If `axis` is not int, tuple or list.
923
+ ValueError: If any value in `axis` is not in range :math:`[-self.ndim, self.ndim)`.
924
+
925
+ Supported Platforms:
926
+ ``Ascend`` ``GPU`` ``CPU``
927
+
928
+ Examples:
929
+ >>> from mindspore import Tensor
930
+ >>> import numpy as np
931
+ >>> import mindspore
932
+ >>> # case 1: each value specified.
933
+ >>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
934
+ >>> nonzero_num = x.count_nonzero(axis=[0, 1], keep_dims=True, dtype=mindspore.int32)
935
+ >>> print(nonzero_num)
936
+ [[3]]
937
+ >>> # case 2: all value is default.
938
+ >>> nonzero_num = x.count_nonzero()
939
+ >>> print(nonzero_num)
940
+ 3
941
+ >>> # case 3: axis value was specified 0.
942
+ >>> nonzero_num = x.count_nonzero(axis=[0,])
943
+ >>> print(nonzero_num)
944
+ [1 2 0]
945
+ >>> # case 4: axis value was specified 1.
946
+ >>> nonzero_num = x.count_nonzero(axis=[1,])
947
+ >>> print(nonzero_num)
948
+ [1 2]
949
+ >>> # case 5: keep_dims value was specified.
950
+ >>> nonzero_num = x.count_nonzero(keep_dims=True)
951
+ >>> print(nonzero_num)
952
+ [[3]]
953
+ >>> # case 6: keep_dims and axis value was specified.
954
+ >>> nonzero_num = x.count_nonzero(axis=[0,], keep_dims=True)
955
+ >>> print(nonzero_num)
956
+ [[1 2 0]]""")
957
+ attach_docstr("cumsum", r"""cumsum(dim, *, dtype=None) -> Tensor
958
+
959
+ Computes the cumulative sum of self Tensor along `dim`.
960
+
961
+ .. math::
962
+
963
+ y_i = x_1 + x_2 + x_3 + ... + x_i
964
+
965
+ Args:
966
+ dim (int): Dim along which the cumulative sum is computed.
967
+
968
+ Keyword Args:
969
+ dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If specified,
970
+ the self Tensor will be cast to `dtype` before the computation. This is useful for preventing overflows.
971
+ If not specified, the dtype stays the same as that of the original Tensor. Default: ``None`` .
972
+
973
+ Returns:
974
+ Tensor, the shape of the output Tensor is consistent with the self Tensor's.
975
+
976
+ Raises:
977
+ ValueError: If the `dim` is out of range.
978
+
979
+ Supported Platforms:
980
+ ``Ascend``
981
+
982
+ Examples:
983
+ >>> import numpy as np
984
+ >>> from mindspore import Tensor
985
+ >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
986
+ >>> # case 1: along the dim 0
987
+ >>> y = x.cumsum(dim=0)
988
+ >>> print(y)
989
+ [[ 3. 4. 6. 10.]
990
+ [ 4. 10. 13. 19.]
991
+ [ 8. 13. 21. 26.]
992
+ [ 9. 16. 28. 35.]]
993
+ >>> # case 2: along the dim 1
994
+ >>> y = x.cumsum(dim=1)
995
+ >>> print(y)
996
+ [[ 3. 7. 13. 23.]
997
+ [ 1. 7. 14. 23.]
998
+ [ 4. 7. 15. 22.]
999
+ [ 1. 4. 11. 20.]]
1000
+
1001
+ .. method:: Tensor.cumsum(axis=None, dtype=None) -> Tensor
1002
+ :noindex:
1003
+
1004
+ Computes the cumulative sum of self Tensor along `axis`.
1005
+
1006
+ .. math::
1007
+
1008
+ y_i = x_1 + x_2 + x_3 + ... + x_i
1009
+
1010
+ Note:
1011
+ On Ascend, the dtype of `self` only supports int8, uint8, int32, float16 or float32 in the case of static shape.
1012
+ For the case of dynamic shape, the dtype of `self` only supports int32, float16 or float32.
1013
+
1014
+ Args:
1015
+ axis (int): Axis along which the cumulative sum is computed.
1016
+ dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If specified,
1017
+ the self Tensor will be cast to `dtype` before the computation. This is useful for preventing overflows.
1018
+ If not specified, the dtype stays the same as that of the original Tensor. Default: ``None`` .
1019
+
1020
+ Returns:
1021
+ Tensor, the shape of the output Tensor is consistent with the self Tensor's.
1022
+
1023
+ Raises:
1024
+ ValueError: If the axis is out of range.
1025
+
1026
+ Supported Platforms:
1027
+ ``Ascend`` ``GPU`` ``CPU``
1028
+
1029
+ Examples:
1030
+ >>> import mindspore
1031
+ >>> import numpy as np
1032
+ >>> from mindspore import Tensor
1033
+ >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
1034
+ >>> # case 1: along the axis 0
1035
+ >>> y = x.cumsum(axis=0)
1036
+ >>> print(y)
1037
+ [[ 3. 4. 6. 10.]
1038
+ [ 4. 10. 13. 19.]
1039
+ [ 8. 13. 21. 26.]
1040
+ [ 9. 16. 28. 35.]]
1041
+ >>> # case 2: along the axis 1
1042
+ >>> y = x.cumsum(axis=1)
1043
+ >>> print(y)
1044
+ [[ 3. 7. 13. 23.]
1045
+ [ 1. 7. 14. 23.]
1046
+ [ 4. 7. 15. 22.]
1047
+ [ 1. 4. 11. 20.]]
1048
+ """)
1049
+ attach_docstr("diag", r"""diag() -> Tensor
1050
+
1051
+ For details, please refer to :func:`mindspore.ops.diag`.
1052
+
1053
+ .. method:: Tensor.diag(diagonal=0) -> Tensor
1054
+ :noindex:
1055
+
1056
+ If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal.
1057
+
1058
+ If input is a matrix (2-D tensor), then returns a 1-D tensor with the diagonal elements of input.
1059
+
1060
+ The argument diagonal controls which diagonal to consider:
1061
+
1062
+ - If `diagonal` = 0, it is the main diagonal.
1063
+
1064
+ - If `diagonal` > 0, it is above the main diagonal.
1065
+
1066
+ - If `diagonal` < 0, it is below the main diagonal.
1067
+
1068
+ .. warning::
1069
+ - This is an experimental API that is subject to change or deletion.
1070
+ - The graph mode and CPU/GPU backends do not support non-zero values for the diagonal parameter.
1071
+
1072
+ Args:
1073
+ diagonal (int, optional): the diagonal to consider. Default: ``0``.
1074
+
1075
+ Returns:
1076
+ Tensor, has the same dtype as the `input`; its shape depends on `diagonal`:
1077
+
1078
+ - If `input` shape is :math:`(x_0)`, the output is a 2-D Tensor of shape :math:`(x_0 + \left | diagonal \right | , x_0 + \left | diagonal \right | )`.
1079
+
1080
+ - If `input` shape is :math:`(x_0, x_1)`, the output is a 1-D Tensor containing the diagonal selected by `diagonal`, whose length equals that of the main diagonal shortened by :math:`\left | diagonal \right |` elements.
1081
+
1082
+ Raises:
1083
+ ValueError: If shape of `input` is neither 1-D nor 2-D.
1084
+
1085
+ Supported Platforms:
1086
+ ``Ascend``
1087
+
1088
+ Examples:
1089
+ >>> from mindspore import Tensor
1090
+ >>> input = Tensor([1, 2, 3, 4]).astype('int32')
1091
+ >>> output = input.diag()
1092
+ >>> print(output)
1093
+ [[1 0 0 0]
1094
+ [0 2 0 0]
1095
+ [0 0 3 0]
1096
+ [0 0 0 4]]""")
1097
+ attach_docstr("divide", r"""divide(other, *, rounding_mode=None) -> Tensor
1098
+
1099
+ Alias for :func:`mindspore.Tensor.div`.
1100
+ """)
1101
+ attach_docstr("div", r"""div(other, *, rounding_mode=None) -> Tensor
1102
+
1103
+ For details, please refer to :func:`mindspore.ops.div`.""")
1104
+ attach_docstr("div_", r"""div_(other, *, rounding_mode=None) -> Tensor
1105
+
1106
+ In-place version of :func:`mindspore.Tensor.div`.""")
1107
+ attach_docstr("dot", r"""dot(other) -> Tensor
1108
+
1109
+ Computes the dot product of two 1D tensor.
1110
+
1111
+ Args:
1112
+ other (Tensor): The input in the dot product, must be 1D.
1113
+
1114
+ Returns:
1115
+ Tensor, the shape is [] and the data type is the same as `self`.
1116
+
1117
+ Raises:
1118
+ TypeError: If `other` is not tensor.
1119
+ RuntimeError: If dtypes of `self` and `other` are not same.
1120
+ RuntimeError: If shapes of `self` and `other` are not same.
1121
+ RuntimeError: If shapes of `self` and `other` are not 1D.
1122
+
1123
+ Supported Platforms:
1124
+ ``Ascend``
1125
+
1126
+ Examples:
1127
+ >>> import mindspore
1128
+ >>> from mindspore import Tensor
1129
+ >>> input = Tensor([2.0, 3.0], mindspore.float32)
1130
+ >>> other = Tensor([2.0, 1.0], mindspore.float32)
1131
+ >>> output = Tensor.dot(input, other) # input.dot(other)
1132
+ >>> print(output)
1133
+ [7. ]
1134
+ >>> print(output.dtype)
1135
+ Float32
1136
+ """)
1137
+ attach_docstr("eq", r"""eq(other) -> Tensor
1138
+
1139
+ For details, please refer to :func:`mindspore.ops.eq`.""")
1140
+ attach_docstr("erfc", r"""erfc() -> Tensor
1141
+
1142
+ For details, please refer to :func:`mindspore.ops.erfc`.""")
1143
+ attach_docstr("erf", r"""erf() -> Tensor
1144
+
1145
+ For details, please refer to :func:`mindspore.ops.erf`.""")
1146
+ attach_docstr("expand_as", r"""expand_as(other) -> Tensor
1147
+
1148
+ Expand the shape of the input tensor to be the same as that of another input tensor. The number of dimensions of the
1149
+ input shape must be smaller than or equal to that of the other tensor, and the broadcast rules must be met.
1150
+
1151
+ Args:
1152
+ other (Tensor): The target Tensor. Its shape is the target shape to which the input tensor is expanded.
1153
+
1154
+ Returns:
1155
+ Tensor, with the given shape of `other` and the same data type as `self`.
1156
+
1157
+ Raises:
1158
+ TypeError: If `other` is not a tensor.
1159
+ ValueError: If the shapes of `other` and `self` are incompatible.
1160
+
1161
+ Supported Platforms:
1162
+ ``Ascend``
1163
+
1164
+ Examples:
1165
+ >>> import numpy as np
1166
+ >>> from mindspore import Tensor
1167
+ >>> x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.float32))
1168
+ >>> other = Tensor(np.array([[1, 1, 1], [1, 1, 1]]).astype(np.float32))
1169
+ >>> output = x.expand_as(other)
1170
+ >>> print(output)
1171
+ [[1. 2. 3.]
1172
+ [1. 2. 3.]]
1173
+
1174
+ .. method:: Tensor.expand_as(x) -> Tensor
1175
+ :noindex:
1176
+
1177
+ Expand the dimension of input tensor to the dimension of target tensor.
1178
+
1179
+ Args:
1180
+ x (Tensor): The target tensor. The shape of the target tensor must obey
1181
+ the broadcasting rule.
1182
+
1183
+ Returns:
1184
+ Tensor, has the same dimension as target tensor.
1185
+
1186
+ Supported Platforms:
1187
+ ``Ascend`` ``GPU`` ``CPU``
1188
+
1189
+ Examples:
1190
+ >>> import numpy as np
1191
+ >>> from mindspore import Tensor
1192
+ >>> from mindspore import dtype as mstype
1193
+ >>> input = Tensor([1, 2, 3], dtype=mstype.float32)
1194
+ >>> x = Tensor(np.ones((2, 3)), dtype=mstype.float32)
1195
+ >>> output = input.expand_as(x=x)
1196
+ >>> print(output)
1197
+ [[1. 2. 3.]
1198
+ [1. 2. 3.]]
1199
+ """)
1200
+ attach_docstr("expm1", r"""expm1() -> Tensor
1201
+
1202
+ For details, please refer to :func:`mindspore.ops.expm1`.
1203
+ """)
1204
+ attach_docstr("exp", r"""exp() -> Tensor
1205
+
1206
+ For details, please refer to :func:`mindspore.ops.exp`.
1207
+ """)
1208
+ attach_docstr("exp_", r"""exp_() -> Tensor
1209
+
1210
+ In-place version of :func:`mindspore.Tensor.exp`.
1211
+
1212
+ .. warning::
1213
+ This is an experimental API that is subject to change or deletion.
1214
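+
+ A minimal usage sketch of the in-place behavior (assuming only the signature above; the values in the comment are approximate):
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.0, 1.0], mindspore.float32)
+ >>> out = x.exp_()  # x itself is updated in place to approximately [1.0, 2.7183]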
+ """)
1215
+ attach_docstr("fill_diagonal_", r"""fill_diagonal_(fill_value, warp=False) -> Tensor
1216
+
1217
+ Fills the main diagonal of a Tensor in-place with a specified value and returns the result.
1218
+ The `self` has at least 2 dimensions, and all dimensions of `self` must be equal in length
1219
+ when the dimension of `self` is greater than 2.
1220
+
1221
+ .. warning::
1222
+ This is an experimental API that is subject to change or deletion.
1223
+
1224
+ Args:
1225
+ fill_value (number): The value to fill the diagonal of `self`.
1226
+ wrap (bool, optional): Controls whether the diagonal elements continue onto the
1227
+ remaining rows in the case of a tall matrix (a matrix with more rows than columns). Default: ``False`` .
1228
+
1229
+ Returns:
1230
+ Tensor, has the same shape and data type as `self`.
1231
+
1232
+ Raises:
1233
+ ValueError: If the dimension of `self` is not greater than 1.
1234
+ ValueError: If the size of each dimension is not equal, when the dimension is greater than 2.
1235
+
1236
+ Supported Platforms:
1237
+ ``Ascend``
1238
+
1239
+ Examples:
1240
+ >>> import numpy as np
1241
+ >>> from mindspore import Tensor
1242
+ >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
1243
+ >>> fill_value = 9.9
1244
+ >>> x.fill_diagonal_(fill_value)
1245
+ >>> print(x)
1246
+ [[9.9 2. 3. ]
1247
+ [4. 9.9 6. ]
1248
+ [7. 8. 9.9]]
1249
+ """)
1250
+ attach_docstr("fill_", r"""fill_(value) -> Tensor
1251
+
1252
+ Fills `self` tensor with the specified `value` .
1253
+
1254
+ .. warning::
1255
+ This is an experimental API that is subject to change or deletion.
1256
+
1257
+ Args:
1258
+ value (Union[Tensor, number.Number, bool]): Value to fill the `self` .
1259
+
1260
+ Returns:
1261
+ Tensor.
1262
+
1263
+ Raises:
1264
+ RuntimeError: If the data type of `self` or `value` is not supported.
1264
+ RuntimeError: If `value` is a Tensor but not a 0-D Tensor or a 1-D Tensor with shape=[1].
1266
+
1267
+ Supported Platforms:
1268
+ ``Ascend``
1269
+
1270
+ Examples:
1271
+ >>> import mindspore
1272
+ >>> from mindspore import ops
1273
+ >>> x = ops.zeros((3, 3))
1274
+ >>> print(x)
1275
+ [[0. 0. 0.]
1276
+ [0. 0. 0.]
1277
+ [0. 0. 0.]]
1278
+ >>> output = x.fill_(1.0)
1279
+ >>> print(output)
1280
+ [[1. 1. 1.]
1281
+ [1. 1. 1.]
1282
+ [1. 1. 1.]]
1283
+ >>> print(x)
1284
+ [[1. 1. 1.]
1285
+ [1. 1. 1.]
1286
+ [1. 1. 1.]]""")
1287
+ attach_docstr("flatten", r"""flatten(start_dim=0, end_dim=-1) -> Tensor
1288
+
1289
+ Flatten a tensor along dimensions from `start_dim` to `end_dim`.
1290
+
1291
+ Args:
1292
+ start_dim (int, optional): The first dimension to flatten. Default: ``0`` .
1293
+ end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .
1294
+
1295
+ Returns:
1296
+ Tensor. If no dimensions are flattened, returns the original `self`, otherwise return the flattened Tensor.
1297
+ If `self` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
1298
+
1299
+ Raises:
1300
+ TypeError: If `start_dim` or `end_dim` is not int.
1301
+ ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
1302
+ ValueError: If `start_dim` or `end_dim` is not in range of [-self.dim, self.dim-1].
1303
+
1304
+ Supported Platforms:
1305
+ ``Ascend`` ``GPU`` ``CPU``
1306
+
1307
+ Examples:
1308
+ >>> import mindspore
1309
+ >>> import numpy as np
1310
+ >>> from mindspore import Tensor
1311
+ >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
1312
+ >>> output = input_x.flatten(0, -1)
1313
+ >>> print(output.shape)
1314
+ (24,)
1315
+
1316
+ .. method:: Tensor.flatten(order='C', *, start_dim=0, end_dim=-1) -> Tensor
1317
+ :noindex:
1318
+
1319
+ Flatten a tensor along dimensions from `start_dim` to `end_dim`.
1320
+
1321
+ Args:
1322
+ order (str, optional): Only ``'C'`` and ``'F'`` are supported.
1323
+ ``'C'`` means to flatten in row-major (C-style) order.
1324
+ ``'F'`` means to flatten in column-major (Fortran-style) order. Default: ``'C'`` .
1325
+
1326
+ Keyword Args:
1327
+ start_dim (int, optional): The first dimension to flatten. Default: ``0`` .
1328
+ end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .
1329
+
1330
+ Returns:
1331
+ Tensor. If no dimensions are flattened, returns the original `self`, otherwise return the flattened Tensor.
1332
+ If `self` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
1333
+
1334
+ Raises:
1335
+ TypeError: If `order` is not string type.
1336
+ ValueError: If `order` is string type, but not ``'C'`` or ``'F'``.
1337
+ TypeError: If `start_dim` or `end_dim` is not int.
1338
+ ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
1339
+ ValueError: If `start_dim` or `end_dim` is not in range of [-self.dim, self.dim-1].
1340
+
1341
+ Supported Platforms:
1342
+ ``Ascend`` ``GPU`` ``CPU``
1343
+
1344
+ Examples:
1345
+ >>> import mindspore
1346
+ >>> import numpy as np
1347
+ >>> from mindspore import Tensor
1348
+ >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
1349
+ >>> output = input_x.flatten(order='C')
1350
+ >>> print(output.shape)
1351
+ (24,)
1352
+ """)
1353
+ attach_docstr("floor_divide", r"""floor_divide(other) -> Tensor
1354
+
1355
+ Divides the self tensor by the other input tensor element-wise and rounds down to the closest integer.
1356
+
1357
+ `self` and `other` comply with the implicit type conversion rules to make the data types consistent.
1358
+ Inputs must be two tensors or one tensor and one scalar.
1359
+ When the `self` and `other` are two tensors,
1360
+ dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
1361
+ When the `self` and `other` are one tensor and one scalar,
1362
+ the scalar could only be a constant.
1363
+
1364
+ .. math::
1365
+ out_{i} = \text{floor}( \frac{self_i}{other_i})
1366
+
1367
+ where the :math:`floor` indicates the Floor operator. For more details,
1368
+ please refer to the :func:`mindspore.mint.floor` operator.
1369
+
1370
+ .. warning::
1371
+ This is an experimental API that is subject to change or deletion.
1372
+
1373
+ Args:
1374
+ other (Union[Tensor, Number, bool]): The other input is a number or
1375
+ a bool or a tensor whose data type is number or bool.
1376
+
1377
+ Returns:
1378
+ Tensor, the shape is the same as the one after broadcasting,
1379
+ and the data type is the one with higher precision or higher digits between `self` and `other`.
1380
+
1381
+ Raises:
1382
+ TypeError: If `self` and `other` are not the following: Tensor, number.Number or bool.
1383
+
1384
+ Supported Platforms:
1385
+ ``Ascend`` ``GPU`` ``CPU``
1386
+
1387
+ Examples:
1388
+ >>> import mindspore
1389
+ >>> from mindspore import Tensor
1390
+ >>> import numpy as np
1391
+ >>> input = Tensor(np.array([2, 4, -1]), mindspore.int32)
1392
+ >>> other = Tensor(np.array([3, 3, 3]), mindspore.int32)
1393
+ >>> output = input.floor_divide(other)
1394
+ >>> print(output)
1395
+ [ 0 1 -1]
1396
+ >>> input = Tensor(2.0, mindspore.float32)
1397
+ >>> other = Tensor(2.0, mindspore.float32)
1398
+ >>> output = input.floor_divide(other)
1399
+ >>> print(output)
1400
+ 1.0
1401
+ """)
1402
+ attach_docstr("floor_divide_", r"""floor_divide_(other) -> Tensor
1403
+
1404
+ Divides the self tensor by the other tensor element-wise and rounds down to the closest integer.
1405
+
1406
+ .. math::
1407
+ out_{i} = \text{floor}( \frac{self_i}{other_i})
1408
+
1409
+ where the :math:`floor` indicates the Floor operator. For more details,
1410
+ please refer to the :func:`mindspore.mint.floor` operator.
1411
+
1412
+ .. warning::
1413
+ This is an experimental API that is subject to change or deletion.
1414
+
1415
+ Note:
1416
+ When `self` and `other` have different shapes, `other` should be able to broadcast to `self`.
1417
+
1418
+ Args:
1419
+ other (Union[Tensor, Number, bool]): The other input is a number or
1420
+ a bool or a tensor whose data type is number or bool.
1421
+
1422
+ Returns:
1423
+ Tensor, the shape is the same as `self` , and the data type is the same as `self` .
1424
+
1425
+ Raises:
1426
+ TypeError: If `other` is not one of the following: Tensor, number.Number or bool.
1427
+ RuntimeError: If `other` cannot be broadcast to `self`.
1428
+
1429
+ Supported Platforms:
1430
+ ``Ascend``
1431
+
1432
+ Examples:
1433
+ >>> import mindspore
1434
+ >>> from mindspore import Tensor
1435
+ >>> import numpy as np
1436
+ >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
1437
+ >>> other = Tensor(np.array([3, 3, 3]), mindspore.int32)
1438
+ >>> output = x.floor_divide_(other)
1439
+ >>> print(output)
1440
+ [ 0 1 -1]
1441
+ >>> print(x)
1442
+ [ 0 1 -1]
1443
+ """)
1444
+ attach_docstr("floor", r"""floor() -> Tensor
1445
+
1446
+ For details, please refer to :func:`mindspore.ops.floor`.""")
1447
+ attach_docstr("fmod", r"""fmod(other) -> Tensor
1448
+
1449
+ For details, please refer to :func:`mindspore.ops.fmod`.""")
1450
+ attach_docstr("frac", r"""frac() -> Tensor
1451
+
1452
+ For details, please refer to :func:`mindspore.ops.frac`.
1453
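+
+ A minimal usage sketch (assuming only the signature above; frac keeps the fractional part with the sign of the input):
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.5, -0.2, 2.0], mindspore.float32)
+ >>> y = x.frac()  # expected values: [0.5, -0.2, 0.0]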
+ """)
1454
+ attach_docstr("gather", r"""gather(dim, index) -> Tensor
1455
+
1456
+ Gather data from a tensor by indices.
1457
+
1458
+ .. math::
1459
+ output[(i_0, i_1, ..., i_{dim}, i_{dim+1}, ..., i_n)] =
1460
+ input[(i_0, i_1, ..., index[(i_0, i_1, ..., i_{dim}, i_{dim+1}, ..., i_n)], i_{dim+1}, ..., i_n)]
1461
+
1462
+ .. warning::
1463
+ On Ascend, the behavior is unpredictable in the following cases:
1464
+
1465
+ - the value of `index` is not in the range `[-self.shape[dim], self.shape[dim])` in forward;
1466
+ - the value of `index` is not in the range `[0, self.shape[dim])` in backward.
1467
+
1468
+ Args:
1469
+ dim (int): the axis to index along, must be in range `[-self.rank, self.rank)`.
1470
+ index (Tensor): The index tensor, with int32 or int64 data type. A valid `index` should be:
1471
+
1472
+ - :math:`index.rank == self.rank`;
1473
+ - for :math:`axis != dim`, :math:`index.shape[axis] <= self.shape[axis]`;
1474
+ - the value of :math:`index` is in range :math:`[-self.shape[dim], self.shape[dim])`.
1475
+
1476
+ Returns:
1477
+ Tensor, has the same type as `self` and the same shape as `index`.
1478
+
1479
+ Raises:
1480
+ ValueError: If the shape of `index` is illegal.
1481
+ ValueError: If `dim` is not in :math:`[-self.rank, self.rank)`.
1482
+ ValueError: If the value of `index` is out of the valid range.
1483
+ TypeError: If the type of `index` is illegal.
1484
+
1485
+ Supported Platforms:
1486
+ ``Ascend`` ``GPU`` ``CPU``
1487
+
1488
+ Examples:
1489
+ >>> import mindspore
1490
+ >>> import numpy as np
1491
+ >>> from mindspore import Tensor
1492
+ >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
1493
+ >>> index = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
1494
+ >>> output = input.gather(1, index)
1495
+ >>> print(output)
1496
+ [[-0.1 -0.1]
1497
+ [0.5 0.5]]
1498
+
1499
+ .. method:: Tensor.gather(input_indices, axis, batch_dims=0) -> Tensor
1500
+ :noindex:
1501
+
1502
+ Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
1503
+
1504
+ The following figure shows the calculation process of Gather commonly:
1505
+
1506
+ .. image:: ../../images/Gather.png
1507
+
1508
+ where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
1509
+
1510
+ .. note::
1511
+ - The value of input_indices must be in the range of `[0, input_params.shape[axis])`.
1512
+ On CPU and GPU, an error is raised if an out-of-bound index is found. On Ascend, the results may be
1513
+ undefined.
1514
+ - The data type of self cannot be
1515
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ on Ascend
1516
+ platform currently.
1517
+
1518
+ Args:
1519
+ input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
1520
+ Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
1521
+ axis (Union(int, Tensor[int])): Specifies the dimension index to gather indices.
1522
+ It must be greater than or equal to `batch_dims`.
1523
+ When `axis` is a Tensor, the size must be 1.
1524
+ batch_dims (int, optional): Specifies the number of batch dimensions. It must be less than or equal to the rank
1525
+ of `input_indices`. Default: ``0`` .
1526
+
1527
+ Returns:
1528
+ Tensor, the shape of tensor is
1529
+ :math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.
1530
+
1531
+ Raises:
1532
+ TypeError: If `axis` is not an int or Tensor.
1533
+ ValueError: If `axis` is a Tensor and its size is not 1.
1534
+ TypeError: If `self` is not a tensor.
1535
+ TypeError: If `input_indices` is not a tensor of type int.
1536
+ RuntimeError: If `input_indices` is out of range :math:`[0, input_params.shape[axis])` on CPU or GPU.
1537
+
1538
+ Supported Platforms:
1539
+ ``Ascend`` ``GPU`` ``CPU``
1540
+
1541
+ Examples:
1542
+ >>> import mindspore
1543
+ >>> import numpy as np
1544
+ >>> from mindspore import Tensor
1545
+ >>> # case1: input_indices is a Tensor with shape (5, ).
1546
+ >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
1547
+ >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
1548
+ >>> axis = 0
1549
+ >>> output = input_params.gather(input_indices=input_indices, axis=axis)
1550
+ >>> print(output)
1551
+ [1. 3. 5. 3. 7.]
1552
+ >>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
1553
+ >>> # the output shape is equal to the input_indices shape.
1554
+ >>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
1555
+ >>> axis = 0
1556
+ >>> output = input_params.gather(input_indices=input_indices, axis=axis)
1557
+ >>> print(output)
1558
+ [[1. 3.]
1559
+ [3. 7.]]
1560
+ >>> # case3: input_indices is a Tensor with shape (2, ) and
1561
+ >>> # input_params is a Tensor with shape (3, 4) and axis is 0.
1562
+ >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
1563
+ >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
1564
+ >>> axis = 0
1565
+ >>> output = input_params.gather(input_indices=input_indices, axis=axis)
1566
+ >>> print(output)
1567
+ [[ 1. 2. 3. 4.]
1568
+ [ 9. 10. 11. 12.]]
1569
+ >>> # case4: input_indices is a Tensor with shape (2, ) and
1570
+ >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
1571
+ >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
1572
+ >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
1573
+ >>> axis = 1
1574
+ >>> batch_dims = 1
1575
+ >>> output = input_params.gather(input_indices, axis, batch_dims)
1576
+ >>> print(output)
1577
+ [ 1. 7. 10.]
1578
+ """)
1579
+ attach_docstr("gcd", r"""gcd(other) -> Tensor
1580
+
1581
+ For details, please refer to :func:`mindspore.ops.gcd`.""")
1582
+ attach_docstr("ge", r"""ge(other) -> Tensor
1583
+
1584
+ Alias for :func:`mindspore.Tensor.greater_equal`.
1585
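+
+ A minimal usage sketch (assuming only the alias above):
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1, 2, 3], mindspore.int32)
+ >>> y = Tensor([2, 2, 2], mindspore.int32)
+ >>> out = x.ge(y)  # expected values: [False, True, True]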
+ """)
1586
+ attach_docstr("greater", r"""greater(other) -> Tensor
1587
+
1588
+ For details, please refer to :func:`mindspore.ops.greater`.""")
1589
+ attach_docstr("greater_equal", r"""greater_equal(other) -> Tensor
1590
+
1591
+ For details, please refer to :func:`mindspore.ops.greater_equal`.
1592
+ """)
1593
+ attach_docstr("gt", r"""gt(other) -> Tensor
1594
+
1595
+ For details, please refer to :func:`mindspore.Tensor.greater`.""")
1596
+ attach_docstr("hardshrink", r"""hardshrink(lambd=0.5) -> Tensor
1597
+
1598
+ For details, please refer to :func:`mindspore.ops.hardshrink`.""")
1599
+ attach_docstr("histc", r"""histc(bins=100, min=0, max=0) -> Tensor
1600
+
1601
+ For details, please refer to :func:`mindspore.ops.histc`.
1602
+
1603
+ Supported Platforms:
1604
+ ``Ascend`` ``GPU`` ``CPU``""")
1605
+ attach_docstr("index_add", r"""index_add(indices, y, axis, use_lock=True, check_index_bound=True) -> Tensor
1606
+
1607
+ Adds tensor `y` to the specified axis and indices of tensor `self`. The axis should be in [-self.ndim, self.ndim - 1], and the indices should be in [0, self.shape[axis] - 1] at the axis dimension.
1608
+
1609
+ Args:
1610
+ indices (Tensor): Add the value of `self` and `y` along the dimension of the `axis` according to the specified index value, with data type int32. The `indices` must be 1D with the same size as the size of `y` in the `axis` dimension. The values of `indices` should be in [0, b), where the b is the size of `self` in the `axis` dimension.
1611
+ y (Tensor): The input tensor with the value to add.
1612
+ axis (int): The dimension along which to index.
1613
+ use_lock (bool, optional): Whether to enable a lock to protect the updating process of variable tensors. If ``True`` , when updating the value of `self`, this process will be protected by a lock by using atomic operation. If ``False`` , the result may be unpredictable. Default: ``True`` .
1614
+ check_index_bound (bool, optional): If ``True`` , check indices boundary. If ``False`` , don't check indices boundary. Default: ``True`` .
1615
+
1616
+ Returns:
1617
+ Tensor, has the same shape and dtype as `self`.
1618
+
1619
+ Raises:
1620
+ TypeError: If `indices` or `y` is not a Tensor.
1621
+ ValueError: If axis is out of the range of `self` shape.
1622
+ ValueError: If `self` rank is not the same as `y` rank.
1623
+ ValueError: If shape of `indices` is not 1D or size of `indices` is not equal to dimension of y[axis].
1624
+ ValueError: If `y`'s shape is not the same as `self` except the `axis` th dimension.
1625
+
1626
+ Supported Platforms:
1627
+ ``Ascend`` ``GPU`` ``CPU``
1628
+
1629
+ Examples:
1630
+ >>> import mindspore
1631
+ >>> import numpy as np
1632
+ >>> from mindspore import Tensor
1633
+ >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
1634
+ >>> indices = Tensor(np.array([0, 2]), mindspore.int32)
1635
+ >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
1636
+ >>> output = x.index_add(indices, y, axis = 1)
1637
+ >>> print(output)
1638
+ [[ 1.5 2. 4. ]
1639
+ [ 5. 5. 7.5]
1640
+ [ 9. 8. 11.5]]
1641
+
1642
+ .. method:: Tensor.index_add(dim, index, source, *, alpha=1) -> Tensor
1643
+ :noindex:
1644
+
1645
+ For details, please refer to :func:`mindspore.ops.index_add`.
1646
+ The corresponding relationships between the parameters of `Tensor.index_add` and :func:`mindspore.ops.index_add`
1647
+ are as follows: `dim` -> `axis`, `index` -> `indices`, `source * alpha` -> `y`.
1648
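+
+ A minimal usage sketch of the parameter correspondence above, reusing the numbers from the first overload (with
+ `alpha=1`, the result matches the output shown there):
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
+ >>> index = Tensor(np.array([0, 2]), mindspore.int32)
+ >>> source = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
+ >>> output = x.index_add(1, index, source, alpha=1)  # equivalent to x.index_add(index, source, axis=1)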
+ """)
1649
+ attach_docstr("index_select", r"""index_select(axis, index) -> Tensor
1650
+
1651
+ Generates a new Tensor that accesses the values of `self` along the specified `axis` dimension
1652
+ using the indices specified in `index`. The new Tensor has the same number of dimensions as `self`,
1653
+ with the size of the `axis` dimension being equal to the length of `index`, and the size of all other
1654
+ dimensions will be unchanged from the original `self` Tensor.
1655
+
1656
+ .. note::
1657
+ The value of index must be in the range of `[0, self.shape[axis])`, the result is undefined out of range.
1658
+
1659
+ Args:
1660
+ axis (int): The dimension to be indexed.
1661
+ index (Tensor): A 1-D Tensor with the indices to access in `self` along the specified axis.
1662
+
1663
+ Returns:
1664
+ Tensor, has the same dtype as `self` Tensor.
1665
+
1666
+ Raises:
1667
+ TypeError: If `index` is not a Tensor.
1668
+ TypeError: If `axis` is not int number.
1669
+ ValueError: If the value of `axis` is out the range of `[-self.ndim, self.ndim - 1]`.
1670
+ ValueError: If the dimension of `index` is not equal to 1.
1671
+
1672
+ Supported Platforms:
1673
+ ``Ascend`` ``GPU`` ``CPU``
1674
+
1675
+ Examples:
1676
+ >>> import mindspore
1677
+ >>> from mindspore import Tensor
1678
+ >>> import numpy as np
1679
+ >>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
1680
+ >>> print(input)
1681
+ [[[ 0. 1. 2. 3.]
1682
+ [ 4. 5. 6. 7.]]
1683
+ [[ 8. 9. 10. 11.]
1684
+ [12. 13. 14. 15.]]]
1685
+ >>> index = Tensor([0,], mindspore.int32)
1686
+ >>> y = input.index_select(1, index)
1687
+ >>> print(y)
1688
+ [[[ 0. 1. 2. 3.]]
1689
+ [[ 8. 9. 10. 11.]]]
1690
+
1691
+ .. method:: Tensor.index_select(dim, index) -> Tensor
1692
+ :noindex:
1693
+
1694
+ Generates a new Tensor that accesses the values of `self` along the specified `dim` dimension
1695
+ using the indices specified in `index`. The new Tensor has the same number of dimensions as `self`,
1696
+ with the size of the `dim` dimension being equal to the length of `index`, and the size of all other
1697
+ dimensions will be unchanged from the original `self` Tensor.
1698
+
1699
+ .. note::
1700
+ The value of index must be in the range of `[0, self.shape[dim])`, the result is undefined out of range.
1701
+
1702
+ Args:
1703
+ dim (int): The dimension to be indexed.
1704
+ index (Tensor): A 1-D Tensor with the indices to access in `self` along the specified dim.
1705
+
1706
+ Returns:
1707
+ Tensor, has the same dtype as `self` Tensor.
1708
+
1709
+ Raises:
1710
+ TypeError: If `index` is not a Tensor.
1711
+ TypeError: If `dim` is not int number.
1712
+ ValueError: If the value of `dim` is out the range of `[-self.ndim, self.ndim - 1]`.
1713
+ ValueError: If the dimension of `index` is not equal to 1.
1714
+
1715
+ Supported Platforms:
1716
+ ``Ascend`` ``GPU`` ``CPU``
1717
+
1718
+ Examples:
1719
+ >>> import mindspore
1720
+ >>> from mindspore import Tensor
1721
+ >>> import numpy as np
1722
+ >>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
1723
+ >>> print(input)
1724
+ [[[ 0. 1. 2. 3.]
1725
+ [ 4. 5. 6. 7.]]
1726
+ [[ 8. 9. 10. 11.]
1727
+ [12. 13. 14. 15.]]]
1728
+ >>> index = Tensor([0,], mindspore.int32)
1729
+ >>> y = input.index_select(1, index)
1730
+ >>> print(y)
1731
+ [[[ 0. 1. 2. 3.]]
1732
+ [[ 8. 9. 10. 11.]]]""")
1733
+ attach_docstr("inverse", r"""inverse() -> Tensor
1734
+
1735
+ For details, please refer to :func:`mindspore.ops.inverse`.
1736
+
1737
+ Supported Platforms:
1738
+ ``Ascend`` ``GPU`` ``CPU``
1739
+ """)
1740
+ attach_docstr("isclose", r"""isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
1741
+
1742
+ Returns a tensor of Boolean values indicating whether each element of `input`
1743
+ is "close" to the corresponding element of `other`. Closeness is defined as:
1744
+
1745
+ .. math::
1746
+ |input-other| <= atol + rtol * |other|
1747
+
1748
+ Args:
1749
+ other (Tensor): Second tensor to compare.
1750
+ rtol (float, optional): Relative tolerance. Default: ``1e-05`` .
1751
+ atol (float, optional): Absolute tolerance. Default: ``1e-08`` .
1752
+ equal_nan (bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``False`` .
1753
+
1754
+ Returns:
1755
+ Tensor, with the same shape as `input` and `other` after broadcasting, its dtype is bool.
1756
+
1757
+ Supported Platforms:
1758
+ ``Ascend`` ``GPU`` ``CPU``
1759
+
1760
+ Examples:
1761
+ >>> import mindspore
1762
+ >>> import numpy as np
1763
+ >>> from mindspore import Tensor
1764
+ >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
1765
+ >>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
1766
+ >>> output = Tensor.isclose(input, other)
1767
+ >>> print(output)
1768
+ [ True False False False True]
1769
+
1770
+ .. method:: Tensor.isclose(x2, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
1771
+ :noindex:
1772
+
1773
+ Returns a new Tensor with boolean elements representing if each element of `input`
1774
+ is "close" to the corresponding element of `x2`. Closeness is defined as:
1775
+
1776
+ .. math::
1777
+ |input-x2| <= atol + rtol * |x2|
1778
+
1779
+ Args:
1780
+ x2 (Tensor): Second tensor to compare. Dtype must be same as `input`.
1781
+ rtol (Union[float, int, bool], optional): Relative tolerance. Default: ``1e-05`` .
1782
+ atol (Union[float, int, bool], optional): Absolute tolerance. Default: ``1e-08`` .
1783
+ equal_nan (bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``False``.
1784
+
1785
+ Returns:
1786
+ A bool Tensor, with the shape as broadcasted result of the input `input` and `x2`.
1787
+
1788
+ Raises:
1789
+ TypeError: If `x2` is not a Tensor.
1790
+ TypeError: If the dtype of `input` or `x2` is not supported. Supported dtypes: float16, float32, float64, int8, int16, int32,
1791
+ int64 and uint8. On Ascend, bool and bfloat16 are also supported.
1792
+ TypeError: If `atol` or `rtol` is not a float, int or bool.
1793
+ TypeError: If `equal_nan` is not a bool.
1794
+ TypeError: If `input` and `x2` have different dtypes.
1795
+ ValueError: If `input` and `x2` cannot be broadcast against each other.
1796
+
1797
+ Supported Platforms:
1798
+ ``Ascend`` ``GPU`` ``CPU``
1799
+
1800
+ Examples:
1801
+ >>> import mindspore
1802
+ >>> import numpy as np
1803
+ >>> from mindspore import Tensor
1804
+ >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
1805
+ >>> x2 = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
1806
+ >>> output = Tensor.isclose(input, x2)
1807
+ >>> print(output)
1808
+ [ True False False False True]
1809
+ """)
1810
+ attach_docstr("isfinite", r"""isfinite() -> Tensor
1811
+
1812
+ For details, please refer to :func:`mindspore.ops.isfinite`.
1813
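+
+ A minimal usage sketch (assuming only the signature above):
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([1.0, np.inf, np.nan]), mindspore.float32)
+ >>> out = x.isfinite()  # expected values: [True, False, False]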
+ """)
1814
+ attach_docstr("isinf", r"""isinf() -> Tensor
1815
+
1816
+ For details, please refer to :func:`mindspore.ops.isinf`.
1817
+
1818
+ Supported Platforms:
1819
+ ``Ascend`` ``CPU`` ``GPU``""")
1820
+ attach_docstr("isneginf", r"""isneginf() -> Tensor
1821
+
1822
+ For details, please refer to :func:`mindspore.ops.isneginf`.
1823
+ """)
1824
+ attach_docstr("kthvalue", r"""kthvalue(k, dim=-1, keepdim=False) -> Tensor
1825
+
1826
+ Calculates the k-th smallest value along the dimension specified by `dim` of the `self`
1827
+ tensor, and returns a tuple of (`values`, `indices`) where `values` contains the k-th smallest element
1828
+ and `indices` provides the index of each corresponding element.
1829
+
1830
+ Args:
1831
+ k (int): Specifies the k-th smallest element to retrieve.
1832
+ dim (int, optional): The dimension along which to find the k-th smallest value. Default: ``-1`` .
1833
+ keepdim (bool, optional): Whether to keep the reduced dimension. If ``True`` , the output keeps the same number of
1834
+ dimensions as `self`; if ``False`` , the dimension is reduced. Default: ``False`` .
1835
+
1836
+ Returns:
1837
+ A tuple consisting of `values` and `indices`.
1838
+
1839
+ - **values** (Tensor) - The k-th smallest value of self tensor, with the same dtype as `self`.
1840
+ - **indices** (Tensor) - The indices for the k-th smallest value of the self tensor, it has the same shape as `values` with dtype of int64.
1841
+
1842
+ Raises:
1843
+ TypeError: If `k` or `dim` is not an int.
1844
+ TypeError: If `keepdim` is not a bool.
1845
+ TypeError: If dtype of `self` is not supported.
1846
+ ValueError: If `self` is an empty Tensor.
1847
+ RuntimeError: If `k` is not in the proper range.
1848
+
1849
+ Supported Platforms:
1850
+ ``Ascend``
1851
+
1852
+ Examples:
1853
+ >>> import mindspore
1854
+ >>> import numpy as np
1855
+ >>> from mindspore import Tensor
1856
+ >>> input_x = Tensor(np.array([[1.01, 2.02, 3.03], [1.04, 2.05, 3.06]]), mindspore.float32)
1857
+ >>> out = input_x.kthvalue(2, 1, False)
1858
+ >>> print(out)
1859
+ (Tensor(shape=[2], dtype=Float32, value= [ 2.01999998e+00, 2.04999995e+00]), Tensor(shape=[2], dtype=Int64, value= [1, 1]))
1860
+ >>> out1 = input_x.kthvalue(2, 1, True)
1861
+ >>> print(out1)
1862
+ (Tensor(shape=[2, 1], dtype=Float32, value=
1863
+ [[ 2.01999998e+00],
1864
+ [ 2.04999995e+00]]), Tensor(shape=[2, 1], dtype=Int64, value=
1865
+ [[1],
1866
+ [1]]))
1867
+ """)
1868
+ attach_docstr("lerp", r"""lerp(end, weight) -> Tensor
1869
+
1870
+ For more details, please refer to :func:`mindspore.ops.lerp`.""")
1871
+ attach_docstr("less", r"""less(other) -> Tensor
1872
+
1873
+ For details, please refer to :func:`mindspore.ops.less`.""")
1874
+ attach_docstr("less_equal", r"""less_equal(other) -> Tensor
1875
+
1876
+ For details, please refer to :func:`mindspore.ops.less_equal`.""")
1877
+ attach_docstr("le", r"""le(other) -> Tensor
1878
+
1879
+ For details, please refer to :func:`mindspore.ops.le`.""")
1880
+ attach_docstr("log10", r"""log10() -> Tensor
1881
+
1882
+ For details, please refer to :func:`mindspore.ops.log10`.
1883
+ """)
1884
+ attach_docstr("log1p", r"""log1p() -> Tensor
1885
+
1886
+ For details, please refer to :func:`mindspore.ops.log1p`.
1887
+ """)
1888
+ attach_docstr("log2", r"""log2() -> Tensor
1889
+
1890
+ For details, please refer to :func:`mindspore.ops.log2`.
1891
+ """)
1892
+ attach_docstr("logaddexp2", r"""logaddexp2(other) -> Tensor
1893
+
1894
+ For details, please refer to :func:`mindspore.ops.logaddexp2`.
1895
+ """)
1896
+ attach_docstr("logaddexp", r"""logaddexp(other) -> Tensor
1897
+
1898
+ For details, please refer to :func:`mindspore.ops.logaddexp`.
1899
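+
+ A minimal usage sketch (assuming only the signature above; logaddexp computes log(exp(self) + exp(other)), and the values in the comment are approximate):
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.0, 1.0], mindspore.float32)
+ >>> y = Tensor([0.0, 1.0], mindspore.float32)
+ >>> out = x.logaddexp(y)  # approximately [0.6931, 1.6931]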
+ """)
1900
+ attach_docstr("logical_and", r"""logical_and(other) -> Tensor
1901
+
1902
+ For details, please refer to :func:`mindspore.ops.logical_and`.
1903
+ """)
1904
+ attach_docstr("logical_not", r"""logical_not() -> Tensor
1905
+
1906
+ For details, please refer to :func:`mindspore.ops.logical_not`.
1907
+ """)
1908
+ attach_docstr("logical_or", r"""logical_or(other) -> Tensor
1909
+
1910
+ For details, please refer to :func:`mindspore.ops.logical_or`.
1911
+ """)
1912
+ attach_docstr("logical_xor", r"""logical_xor(other) -> Tensor
1913
+
1914
+ Computes the "logical XOR" of two tensors element-wise.
1915
+
1916
+ .. math::
1917
+ out_{i} = self_{i} \oplus other_{i}
1918
+
1919
+ .. note::
1920
+ - `self` and `other` comply with the type conversion rules to make the data types consistent.
1921
+ - When the `other` is bool, it could only be a constant.
1922
+
1923
+ Args:
1924
+ other (Union[Tensor, bool]): A bool or a tensor whose data type can be implicitly converted to bool.
1925
+
1926
+ Returns:
1927
+ Tensor, the shape is the same as the `self` and `other` after broadcasting, and the data type is bool.
1928
+
1929
+ Supported Platforms:
1930
+ ``Ascend`` ``CPU``
1931
+
1932
+ Examples:
1933
+ >>> import mindspore
1934
+ >>> import numpy as np
1935
+ >>> from mindspore import Tensor
1936
+ >>> input = Tensor(np.array([True, False, True]), mindspore.bool_)
1937
+ >>> other = Tensor(np.array([True, True, False]), mindspore.bool_)
1938
+ >>> output = input.logical_xor(other)
1939
+ >>> print(output)
1940
+ [ False True True]
1941
+ >>> x = Tensor(1, mindspore.bool_)
1942
+ >>> other = Tensor(0, mindspore.bool_)
1943
+ >>> output = x.logical_xor(other)
1944
+ >>> print(output)
1945
+ True
1946
+ """)
1947
+ attach_docstr("logsumexp", r"""logsumexp(dim, keepdim=False) -> Tensor
1948
+
1949
+ For details, please refer to :func:`mindspore.ops.logsumexp`.
1950
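+
+ A minimal usage sketch (assuming only the signature above; the value in the comment is approximate):
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([[0.0, 0.0, 0.0]], mindspore.float32)
+ >>> out = x.logsumexp(dim=1)  # log(3 * exp(0)) = log(3), approximately [1.0986]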
+ """)
1951
+ attach_docstr("log", r"""log() -> Tensor
1952
+
1953
+ For details, please refer to :func:`mindspore.ops.log`.
1954
+ """)
1955
+ attach_docstr("log_", r"""log_() -> Tensor
1956
+
1957
+ In-place version of :func:`mindspore.Tensor.log`.
1958
+
1959
+ .. warning::
1960
+ This is an experimental API that is subject to change or deletion.
1961
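+
+ A minimal usage sketch of the in-place behavior (assuming only the signature above; the values in the comment are approximate):
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 2.7182817], mindspore.float32)
+ >>> out = x.log_()  # x itself is updated in place to approximately [0.0, 1.0]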
+ """)
1962
+ attach_docstr("lt", r"""lt(other) -> Tensor
1963
+
1964
+ For more details, please refer to :func:`mindspore.Tensor.less`.
1965
+ """)
1966
+ attach_docstr("masked_fill", r"""masked_fill(mask, value) -> Tensor
1967
+
1968
+ For details, please refer to :func:`mindspore.ops.masked_fill`.""")
1969
+ attach_docstr("masked_fill_", r"""masked_fill_(mask, value) -> Tensor
1970
+
1971
+ In-place version of :func:`mindspore.Tensor.masked_fill`.
1972
+
1973
+ .. warning::
1974
+ This is an experimental API that is subject to change or deletion.
1975
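+
+ A minimal usage sketch of the in-place behavior (assuming only the signature above):
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 2.0, 3.0], mindspore.float32)
+ >>> mask = Tensor([True, False, True], mindspore.bool_)
+ >>> out = x.masked_fill_(mask, 0.5)  # x itself becomes [0.5, 2.0, 0.5]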
+ """)
1976
+ attach_docstr("masked_select", r"""masked_select(mask) -> Tensor
1977
+
1978
+ For details, please refer to :func:`mindspore.ops.masked_select`.""")
1979
+ attach_docstr("matmul", r"""matmul(tensor2) -> Union[Tensor, numbers.Number]
1980
+
1981
+ Returns the matrix product of two tensors.
1982
+
1983
+ Note:
1984
+ - Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported.
1985
+
1986
+ - The dtype of `self` and `tensor2` must be same.
1987
+
1988
+ - On Ascend platform, the dims of `self` and `tensor2` must be between 1 and 6.
1989
+ - On GPU platform, the supported dtypes of `self` and `tensor2` are ms.float16 and ms.float32.
1990
+
1991
+ Args:
1992
+ tensor2 (Tensor): Input tensor, scalar not allowed.
1993
+ The last dimension of `self` must be the same size as the second last dimension of `tensor2`.
1994
+ And the shapes of `self` and `tensor2` must be broadcastable.
1995
+
1996
+ Returns:
1997
+ Tensor or scalar, the matrix product of the inputs. This is a scalar only
1998
+ when both `self` and `tensor2` are 1-d vectors.
1999
+
2000
+ Raises:
2001
+ TypeError: If the dtype of `self` and the dtype of `tensor2` are not the same.
2002
+ ValueError: If the last dimension of `self` is not the same size as the
2003
+ second-to-last dimension of `tensor2`, or if a scalar value is passed in.
2004
+ ValueError: If the shape of `self` and `tensor2` could not broadcast together.
2005
+ RuntimeError: On the Ascend platform, if the dims of `self` or `tensor2` are less than 1 or greater than 6.
2006
+
2007
+ Supported Platforms:
2008
+ ``Ascend`` ``GPU`` ``CPU``
2009
+
2010
+ Examples:
2011
+ >>> import mindspore
2012
+ >>> import numpy as np
2013
+ >>> from mindspore import Tensor
2014
+ >>> # case 1 : Reasonable application of broadcast mechanism
2015
+ >>> input = Tensor(np.arange(2 * 3 * 4).reshape(2, 3, 4), mindspore.float32)
2016
+ >>> other = Tensor(np.arange(4 * 5).reshape(4, 5), mindspore.float32)
2017
+ >>> output = input.matmul(other)
2018
+ >>> print(output)
2019
+ [[[ 70. 76. 82. 88. 94.]
2020
+ [ 190. 212. 234. 256. 278.]
2021
+ [ 310. 348. 386. 424. 462.]]
2022
+ [[ 430. 484. 538. 592. 646.]
2023
+ [ 550. 620. 690. 760. 830.]
2024
+ [ 670. 756. 842. 928. 1014.]]]
2025
+ >>> print(output.shape)
2026
+ (2, 3, 5)
2027
+ >>> # case 2 : the rank of `tensor2` is 1
2028
+ >>> input = Tensor(np.ones([1, 2]), mindspore.float32)
2029
+ >>> other = Tensor(np.ones([2,]), mindspore.float32)
2030
+ >>> output = input.matmul(other)
2031
+ >>> print(output)
2032
+ [2.]
2033
+ >>> print(output.shape)
2034
+ (1,)
2035
+ """)
2036
+ attach_docstr("maximum", r"""maximum(other) -> Tensor
2037
+
2038
+ For details, please refer to :func:`mindspore.ops.maximum`.""")
2039
+ attach_docstr("max", r"""max() -> Tensor
2040
+
2041
+ Returns the maximum value of the self tensor.
2042
+
2043
+ Returns:
2044
+ Scalar Tensor with the same dtype as `self` , containing the maximum value of `self`.
2045
+
2046
+ Supported Platforms:
2047
+ ``Ascend`` ``GPU`` ``CPU``
2048
+
2049
+ Examples:
2050
+ >>> import mindspore
2051
+ >>> import numpy as np
2052
+ >>> from mindspore import Tensor
2053
+ >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
2054
+ >>> output = x.max()
2055
+ >>> print(output)
2056
+ [0.7]
2057
+
2058
+ .. method:: Tensor.max(dim, keepdim=False) -> tuple(Tensor)
2059
+ :noindex:
2060
+
2061
+ Calculates the maximum value along with the given dim for the input tensor, and returns the maximum values and
2062
+ indices.
2063
+
2064
+ Args:
2065
+ dim (int): The dimension to reduce.
2066
+ keepdim (bool, optional): Whether to keep the reduced dimension. If ``True`` , the output keeps the same number of
2067
+ dimensions as `self` ; if ``False`` , the dimension is reduced. Default: ``False``.
2068
+
2069
+ Returns:
2070
+ tuple (Tensor), tuple of 2 tensors, containing the maximum value of the self tensor along the given
2071
+ dimension `dim` and the corresponding index.
2072
+
2073
+ - **values** (Tensor) - The maximum value of self tensor, with the same shape as `index`, and same dtype as `self`.
2074
+ - **index** (Tensor) - The index for the maximum value of the self tensor, with dtype int64. If `keepdim` is
2075
+ ``True`` , the shape of output tensors is :math:`(self_1, self_2, ..., self_{dim-1}, 1, self_{dim+1}, ..., self_N)`.
2076
+ Otherwise, the shape is :math:`(self_1, self_2, ..., self_{dim-1}, self_{dim+1}, ..., self_N)` .
2077
+
2078
+ Raises:
2079
+ TypeError: If `keepdim` is not a bool.
2080
+ TypeError: If `dim` is not an int.
2081
+ TypeError: If self tensor data type is Complex.
2082
+
2083
+ Supported Platforms:
2084
+ ``Ascend`` ``GPU`` ``CPU``
2085
+
2086
+ Examples:
2087
+ >>> import mindspore
2088
+ >>> import numpy as np
2089
+ >>> from mindspore import Tensor
2090
+ >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
2091
+ >>> output, index = x.max(0, keepdim=True)
2092
+ >>> print(output, index)
2093
+ [0.7] [3]
2094
+
2095
+ .. method:: Tensor.max(axis=None, keepdims=False, *, initial=None, where=True, return_indices=False) -> tuple(Tensor)
2096
+ :noindex:
2097
+
2098
+ Return the maximum of a tensor or maximum along an axis.
2099
+
2100
+ Note:
2101
+ When `axis` is ``None``, `keepdims` and subsequent parameters have no effect,
2102
+ and the returned index is fixed to 0.
2103
+
2104
+ Args:
2105
+ axis (Union[None, int, list, tuple of ints], optional): Axis or axes along which to operate. By default,
2106
+ flattened input is used. If this is a tuple of ints, the maximum is selected over multiple axes,
2107
+ instead of a single axis or all the axes as before. Default: ``None`` .
2108
+ keepdims (bool, optional):
2109
+ If this is set to ``True`` , the axes which are reduced are left in the result as dimensions with size one.
2110
+ With this option, the result will broadcast correctly against the input array. Default: ``False`` .
2111
+
2112
+ Keyword Args:
2113
+ initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
2114
+ on empty slice. Default: ``None`` .
2115
+ where (Tensor[bool], optional): A boolean tensor which is broadcasted to match the dimensions of array,
2116
+ and selects elements to include in the reduction. If non-default value is passed, initial must also
2117
+ be provided. Default: ``True`` .
2118
+ return_indices (bool, optional): Whether to return the index of the maximum value. Default: ``False`` .
2119
+ If `axis` is a list or tuple of ints, it must be ``False`` .
2120
+
2121
+ Returns:
2122
+ Tensor or scalar, maximum of self tensor. If `axis` is ``None`` , the result is a scalar value.
2123
+ If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.
2124
+
2125
+ Raises:
2126
+ TypeError: If arguments have types not specified above.
2127
+
2128
+ See also:
2129
+ - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
2130
+ - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
2131
+ - :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.
2132
+
2133
+ Supported Platforms:
2134
+ ``Ascend`` ``GPU`` ``CPU``
2135
+
2136
+ Examples:
2137
+ >>> import numpy as np
2138
+ >>> from mindspore import Tensor
2139
+ >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
2140
+ >>> output = a.max()
2141
+ >>> print(output)
2142
+ 3.0
2143
+ >>> value, indices = a.max(axis=0, return_indices=True)
2144
+ >>> print(value)
2145
+ [2. 3.]
2146
+ >>> print(indices)
2147
+ [1 1]
2148
+ """)
2149
+ attach_docstr("mean", r"""mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
2150
+
2151
+ By default, reduces all dimensions of a tensor by averaging all elements in them.
2152
+ When `dim` is specified, reduces only the given dimension(s) of `self` . `keepdim`
2153
+ determines whether the output keeps the same number of dimensions as `self` .
2154
+
2155
+ Note:
2156
+ The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
2157
+
2158
+ Args:
2159
+ dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. Default: ``None`` ,
2160
+ reduce all dimensions. Only constant value is allowed. Assume the rank of `self` is r,
2161
+ and the value range is [-r,r).
2162
+ keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
2163
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
2164
+
2165
+ Keyword Args:
2166
+ dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
2167
+
2168
+ Returns:
2169
+ Tensor, has the same data type as self tensor.
2170
+
2171
+ - If `dim` is ``None`` , and `keepdim` is ``False`` ,
2172
+ the output is a 0-D tensor representing the mean of all elements in the self tensor.
2173
+ - If `dim` is int, set as 1, and `keepdim` is ``False`` ,
2174
+ the shape of output is :math:`(x_0, x_2, ..., x_R)`.
2175
+ - If `dim` is tuple(int), set as (1, 2), and `keepdim` is ``False`` ,
2176
+ the shape of output is :math:`(x_0, x_3, ..., x_R)`.
2177
+ - If `dim` is 1-D Tensor, set as [1, 2], and `keepdim` is ``False`` ,
2178
+ the shape of output is :math:`(x_0, x_3, ..., x_R)`.
2179
+
2180
+ Raises:
2181
+ TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
2182
+ TypeError: If `keepdim` is not a bool.
2183
+ ValueError: If `dim` is out of range.
2184
+
2185
+ Supported Platforms:
2186
+ ``Ascend`` ``GPU`` ``CPU``
2187
+
2188
+ Examples:
2189
+ >>> import mindspore
2190
+ >>> import numpy as np
2191
+ >>> from mindspore import Tensor
2192
+ >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
2193
+ >>> output = Tensor.mean(x, 1, keepdim=True)
2194
+ >>> result = output.shape
2195
+ >>> print(result)
2196
+ (3, 1, 5, 6)
2197
+ >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
2198
+ >>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
2199
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
2200
+ ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
2201
+ ... mindspore.float32)
2202
+ >>> output = Tensor.mean(x)
2203
+ >>> print(output)
2204
+ 5.0
2205
+ >>> print(output.shape)
2206
+ ()
2207
+ >>> # case 2: Reduces a dimension along the dim 0
2208
+ >>> output = Tensor.mean(x, 0, True)
2209
+ >>> print(output)
2210
+ [[[4. 4. 4. 4. 4. 4.]
2211
+ [5. 5. 5. 5. 5. 5.]
2212
+ [6. 6. 6. 6. 6. 6.]]]
2213
+ >>> # case 3: Reduces a dimension along the dim 1
2214
+ >>> output = Tensor.mean(x, 1, True)
2215
+ >>> print(output)
2216
+ [[[2. 2. 2. 2. 2. 2.]]
2217
+ [[5. 5. 5. 5. 5. 5.]]
2218
+ [[8. 8. 8. 8. 8. 8.]]]
2219
+ >>> # case 4: Reduces a dimension along the dim 2
2220
+ >>> output = Tensor.mean(x, 2, True)
2221
+ >>> print(output)
2222
+ [[[ 2.]
2223
+ [ 2.]
2224
+ [ 2.]]
2225
+ [[ 4.]
2226
+ [ 5.]
2227
+ [ 6.]]
2228
+ [[ 6.]
2229
+ [ 8.]
2230
+ [10.]]]
2231
+
2232
+ .. method:: Tensor.mean(axis=None, keep_dims=False) -> Tensor
2233
+ :noindex:
2234
+
2235
+ For details, please refer to :func:`mindspore.ops.mean` .
2236
+ """)
2237
+ attach_docstr("median", r"""median(axis=-1, keepdims=False) -> Tuple of Tensors
2238
+
2239
+ Computes the median and indices of the input tensor.
2240
+
2241
+ .. warning::
2242
+ - `indices` does not necessarily contain the first occurrence of each median value found in the `input`,
2243
+ unless it is unique. The specific implementation of this API is device-specific.
2244
+ The results may be different on CPU and GPU.
2245
+
2246
+ Args:
2247
+ axis (int, optional): Specify the axis for calculation. Default: ``-1`` .
2248
+ keepdims (bool, optional): Whether the output tensor need to retain `axis` dimension or not.
2249
+ Default: ``False`` .
2250
+
2251
+ Returns:
2252
+ - y (Tensor) - Returns the median value along the specified dimension,
2253
+ with the same dtype as the `input`.
2254
+
2255
+ - indices (Tensor) - The index of the median. And the dtype is int64.
2256
+
2257
+ Raises:
2258
+ TypeError: If `axis` is not an int.
2259
+ TypeError: If `keepdims` is not a bool.
2260
+ ValueError: If `axis` is not in range of [-x.dim, x.dim-1].
2261
+
2262
+ Supported Platforms:
2263
+ ``GPU`` ``CPU``
2264
+
2265
+ Examples:
2266
+ >>> import numpy as np
2267
+ >>> from mindspore import Tensor
2268
+ >>> x = Tensor(np.array([[0.57, 0.11, 0.21],[0.38, 0.50, 0.57], [0.36, 0.16, 0.44]]).astype(np.float32))
2269
+ >>> y = x.median(axis=0, keepdims=False)
2270
+ >>> print(y)
2271
+ (Tensor(shape=[3], dtype=Float32, value= [ 3.79999995e-01, 1.59999996e-01, 4.39999998e-01]),
2272
+ Tensor(shape=[3], dtype=Int64, value= [1, 2, 2]))
2273
+
2274
+
2275
+ .. method:: Tensor.median() -> Tensor
2276
+ :noindex:
2277
+
2278
+ Return the median of the input.
2279
+
2280
+ Returns:
2281
+ - y (Tensor) - Output median.
2282
+
2283
+ Supported Platforms:
2284
+ ``Ascend``
2285
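+
+ Examples (a minimal usage sketch; the printed value is illustrative):
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 3.0, 2.0])
+ >>> print(x.median())
+ 2.0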
+
2286
+ .. method:: Tensor.median(dim=-1, keepdim=False) -> Tuple of Tensors
2287
+ :noindex:
2288
+
2289
+ Output the median on the specified dimension ``dim`` and its corresponding index.
2290
+ If ``dim`` is None, calculate the median of all elements in the Tensor.
2291
+
2292
+ Args:
2293
+ dim (int, optional): Specify the axis for calculation. Default: ``None`` .
2294
+ keepdim (bool, optional): Whether the output tensor need to retain ``dim`` dimension or not.
2295
+ Default: ``False`` .
2296
+
2297
+ Returns:
2298
+ - y (Tensor) - Output median, with the same data type as ``input`` .
2299
+
2300
+ - If ``dim`` is ``None`` , ``y`` only has one element.
2301
+ - If ``keepdim`` is ``True`` , ``y`` has the same shape as ``input`` , except that the size
2302
+ of ``y`` in dimension ``dim`` is 1.
2303
+ - Otherwise, ``y`` has one dimension fewer than ``input`` (the ``dim`` dimension is removed).
2304
+
2305
+ - indices (Tensor) - The index of the median. Shape is consistent with ``y`` , with a data type of int64.
2306
+
2307
+ Raises:
2308
+ TypeError: If ``dim`` is not an int.
2309
+ TypeError: If ``keepdim`` is not a bool.
2310
+ ValueError: If ``dim`` is not in range of [-x.dim, x.dim-1].
2311
+
2312
+ Supported Platforms:
2313
+ ``Ascend``
2314
+
2315
+ Examples:
2316
+ >>> import numpy as np
2317
+ >>> from mindspore import Tensor
2318
+ >>> x = Tensor(np.array([[0.57, 0.11, 0.21],[0.38, 0.50, 0.57], [0.36, 0.16, 0.44]]).astype(np.float32))
2319
+ >>> y = x.median(dim=0, keepdim=False)
2320
+ >>> print(y)
2321
+ (Tensor(shape=[3], dtype=Float32, value= [ 3.79999995e-01, 1.59999996e-01, 4.39999998e-01]),
2322
+ Tensor(shape=[3], dtype=Int64, value= [1, 2, 2]))""")
2323
+ attach_docstr("minimum", r"""minimum(other) -> Tensor
2324
+
2325
+ For details, please refer to :func:`mindspore.ops.minimum`.
2326
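+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 5.0, 3.0], mindspore.float32)
+ >>> y = Tensor([4.0, 2.0, 6.0], mindspore.float32)
+ >>> print(x.minimum(y))
+ [1. 2. 3.]
+ """)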
+ attach_docstr("min", r"""min() -> Tensor
2327
+
2328
+ Returns the minimum value of the self tensor.
2329
+
2330
+ Returns:
2331
+ Scalar Tensor with the same dtype as `self` , the minimum value of `self` .
2332
+
2333
+ Supported Platforms:
2334
+ ``Ascend`` ``GPU`` ``CPU``
2335
+
2336
+ Examples:
2337
+ >>> import mindspore
2338
+ >>> import numpy as np
2339
+ >>> from mindspore import Tensor
2340
+ >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
2341
+ >>> output = Tensor.min(x)
2342
+ >>> print(output)
2343
+ 0.0
2344
+
2345
+ .. method:: Tensor.min(dim, keepdim=False) -> tuple(Tensor)
2346
+ :noindex:
2347
+
2348
+ Calculates the minimum value along with the given dim for the self tensor, and returns the minimum values and
2349
+ indices.
2350
+
2351
+ Args:
2352
+ dim (int): The dimension to reduce.
2353
+ keepdim (bool, optional): Whether to keep the dimension. If ``True`` , the output keeps the same dimension as
2354
+ `self` ; if ``False`` , the dimension `dim` is reduced. Default: ``False``.
2355
+
2356
+ Returns:
2357
+ tuple (Tensor), tuple of 2 tensors, containing the minimum value of the self tensor along the given
2358
+ dimension `dim` and the corresponding index.
2359
+
2360
+ - **values** (Tensor) - The minimum value of self tensor along the given dimension `dim`, with the same shape
2361
+ as `index`, and same dtype as `self`.
2362
+ - **index** (Tensor) - The index for the minimum value of the self tensor, with dtype int64. If `keepdim`
2363
+ is ``True`` , the shape of output tensors is :math:`(self_1, self_2, ..., self_{dim-1}, 1, self_{dim+1}, ..., self_N)`.
2364
+ Otherwise, the shape is :math:`(self_1, self_2, ..., self_{dim-1}, self_{dim+1}, ..., self_N)` .
2365
+
2366
+ Raises:
2367
+ TypeError: If `keepdim` is not a bool.
2368
+ TypeError: If `dim` is not an int.
2369
+ TypeError: If self tensor data type is Complex.
2370
+
2371
+ Supported Platforms:
2372
+ ``Ascend`` ``GPU`` ``CPU``
2373
+
2374
+ Examples:
2375
+ >>> import mindspore
2376
+ >>> import numpy as np
2377
+ >>> from mindspore import Tensor
2378
+ >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
2379
+ >>> output, index = x.min(0, keepdim=True)
2380
+ >>> print(output, index)
2381
+ [0.0] [0]
2382
+
2383
+ .. method:: Tensor.min(axis=None, keepdims=False, *, initial=None, where=True, return_indices=False) -> Tensor|number.Number
2384
+ :noindex:
2385
+
2386
+ Return the minimum of a tensor or minimum along an axis.
2387
+
2388
+ Note:
2389
+ When `axis` is ``None``, `keepdims` and subsequent parameters have no effect,
2390
+ and the returned index is fixed to 0.
2391
+
2392
+ Args:
2393
+ axis (Union[None, int, list, tuple of ints], optional): An axis or axes along which to operate. By default,
2394
+ flattened input is used. If `axis` is a tuple of ints, the minimum is selected over multiple axes,
2395
+ instead of a single axis or all the axes as before. Default: ``None`` .
2396
+ keepdims (bool, optional): If ``True`` , the axes which are reduced are left in the result as dimensions with
2397
+ size one. With this option, the result will broadcast correctly against the input array. Default: ``False`` .
2398
+
2399
+ Keyword Args:
2400
+ initial (scalar, optional): The minimum value of an output element. Must be present to allow computation on
2401
+ empty slice. Default: ``None`` .
2402
+ where (Tensor[bool], optional): A boolean tensor which is broadcasted to match the dimensions of array,
2403
+ and selects elements to include in the reduction. If non-default value is passed, initial must also
2404
+ be provided. Default: ``True`` .
2405
+ return_indices (bool, optional): Whether to return the index of the minimum value. Default: ``False`` .
2406
+ If `axis` is a list or tuple of ints, it must be ``False`` .
2407
+
2408
+ Returns:
2409
+ Tensor or scalar, minimum of self tensor. If `axis` is ``None`` , the result is a scalar
2410
+ value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.
2411
+
2412
+ Raises:
2413
+ TypeError: If arguments have types not specified above.
2414
+
2415
+ See also:
2416
+ - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
2417
+ - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
2418
+ - :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.
2419
+
2420
+ Supported Platforms:
2421
+ ``Ascend`` ``GPU`` ``CPU``
2422
+
2423
+ Examples:
2424
+ >>> import numpy as np
2425
+ >>> from mindspore import Tensor
2426
+ >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
2427
+ >>> output = Tensor.min(a)
2428
+ >>> print(output)
2429
+ 0.0
2430
+ >>> output = Tensor.min(a, axis=0)
2431
+ >>> print(output)
2432
+ [0. 1.]
2433
+ >>> output = Tensor.min(a, axis=0, initial=9, where=Tensor([False]))
2434
+ >>> print(output)
2435
+ [9. 9.]
2436
+ >>> output = Tensor.min(a, axis=0, initial=9, where=Tensor([False, True]))
2437
+ >>> print(output)
2438
+ [9. 1.]
2439
+ >>> value, indices = Tensor.min(a, axis=0, return_indices=True)
2440
+ >>> print(value)
2441
+ [0. 1.]
2442
+ >>> print(indices)
2443
+ [0 0]
2444
+ """)
2445
+ attach_docstr("mm", r"""mm(mat2) -> Tensor
2446
+
2447
+ Returns the matrix product of two arrays.
2448
+ If `self` is a :math:`(n \times m)` Tensor, `mat2` is a
2449
+ :math:`(m \times p)` Tensor, `out` will be a :math:`(n \times p)` Tensor.
2450
+
2451
+ Note:
2452
+ This function cannot support broadcasting.
2453
+ Refer to :func:`mindspore.ops.matmul` instead if you need a broadcastable function.
2454
+
2455
+ .. warning::
2456
+ This is an experimental API that is subject to change or deletion.
2457
+
2458
+ Args:
2459
+ mat2 (Tensor): The second matrix of matrix multiplication.
2460
+ The last dimension of `self` must be the same size as the first dimension of `mat2`.
2461
+
2462
+ Returns:
2463
+ Tensor, the matrix product of the inputs.
2464
+
2465
+ Raises:
2466
+ TypeError: If `self` or `mat2` is not a Tensor.
2467
+ RuntimeError: If the last dimension of `self` is not the same size as the
2468
+ second-to-last dimension of `mat2`.
2469
+ RuntimeError: If dtype of `self` or `mat2` is not float16, float32 or bfloat16.
2470
+
2471
+ Supported Platforms:
2472
+ ``Ascend``
2473
+
2474
+ Examples:
2475
+ >>> import mindspore as ms
2476
+ >>> import numpy as np
2477
+ >>> x1 = ms.Tensor(np.random.rand(2, 3), ms.float32)
2478
+ >>> x2 = ms.Tensor(np.random.rand(3, 4), ms.float32)
2479
+ >>> out = x1.mm(x2)
2480
+ >>> print(out.shape)
2481
+ (2, 4)
2482
+ """)
2483
+ attach_docstr("mul", r"""mul(other) -> Tensor
2484
+
2485
+ For details, please refer to :func:`mindspore.ops.mul`.
2486
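+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 2.0, 3.0], mindspore.float32)
+ >>> y = Tensor([4.0, 5.0, 6.0], mindspore.float32)
+ >>> print(x.mul(y))
+ [ 4. 10. 18.]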
+ """)
2487
+ attach_docstr("mul_", r"""mul_(other) -> Tensor
2488
+
2489
+ Multiplies two tensors element-wise.
2490
+
2491
+ .. math::
2492
+
2493
+ out_{i} = self_{i} * other_{i}
2494
+
2495
+ .. warning::
2496
+ This is an experimental API that is subject to change or deletion.
2497
+
2498
+ Note:
2499
+ - When `self` and `other` have different shapes,
2500
+ `other` must be broadcastable to the shape of `self`.
2501
+ - `self` and `other` cannot both be bool type at the same time,
2502
+ [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
2503
+
2504
+ Args:
2505
+ other (Union[Tensor, number.Number, bool]): `other` is a number.Number or
2506
+ a bool or a tensor whose data type is number.Number or bool.
2507
+
2508
+ Returns:
2509
+ Tensor, the shape is the same as `self` , and the data type is the same as `self` .
2510
+
2511
+ Raises:
2512
+ TypeError: If `other` is not one of the following: Tensor, number.Number, bool.
2513
+ RuntimeError: If `other` cannot be broadcast to `self`.
2514
+
2515
+ Supported Platforms:
2516
+ ``Ascend``
2517
+
2518
+ Examples:
2519
+ >>> import mindspore
2520
+ >>> import numpy as np
2521
+ >>> from mindspore import Tensor
2522
+ >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
2523
+ >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
2524
+ >>> output = x.mul_(y)
2525
+ >>> print(output)
2526
+ [ 4. 10. 18.]
2527
+ >>> print(x)
2528
+ [ 4. 10. 18.]
2529
+ """)
2530
+ attach_docstr("nansum", r"""nansum(dim=None, keepdim=False, *, dtype=None) -> Tensor
2531
+
2532
+ Computes sum of input Tensor over a given dimension, treating NaNs as zero.
2533
+
2534
+ .. warning::
2535
+ - It is only supported on Atlas A2 Training Series Products.
2536
+ - This is an experimental API that is subject to change or deletion.
2537
+
2538
+ Args:
2539
+ dim (Union[int, tuple(int)], optional): The dimensions to sum.
2540
+ `dim` must be in the range [-rank(self), rank(self)). Default: ``None``, which indicates the sum of all elements in a tensor.
2541
+ keepdim (bool, optional): Whether the output Tensor keeps dimensions or not. Default: ``False``, indicating that no dimension is kept.
2542
+
2543
+ Keyword Args:
2544
+ dtype (:class:`mindspore.dtype`, optional): The dtype of output Tensor. Default: ``None``.
2545
+
2546
+ Returns:
2547
+ Tensor, the sum of input Tensor in the given dimension dim, treating NaNs as zero.
2548
+
2549
+ - If dim is None, keepdim is False,
2550
+ the output is a 0-D Tensor representing the sum of all elements in the self Tensor.
2551
+ - If dim is int, set as 2, and keepdim is False,
2552
+ the shape of output is :math:`(self_1, self_3, ..., self_R)`.
2553
+ - If dim is tuple(int) or list(int), set as (2, 3), and keepdim is False,
2554
+ the shape of output is :math:`(self_1, self_4, ..., self_R)`.
2555
+
2556
+ Raises:
2557
+ TypeError: If `keepdim` is not a bool.
2558
+ TypeError: If the dtype of `self` or `dtype` is complex type.
2559
+ ValueError: If `dim` is not in [-rank(self), rank(self)).
2560
+
2561
+ Supported Platforms:
2562
+ ``Ascend``
2563
+
2564
+ Examples:
2565
+ >>> import mindspore
2566
+ >>> import numpy as np
2567
+ >>> from mindspore import Tensor
2568
+ >>> x = Tensor(np.array([[float("nan"), 2, 3], [1, 2, float("nan")]]), mindspore.float32)
2569
+ >>> output1 = x.nansum(dim=0, keepdim=False, dtype=mindspore.float32)
2570
+ >>> output2 = x.nansum(dim=0, keepdim=True, dtype=mindspore.float32)
2571
+ >>> print(output1)
2572
+ [1. 4. 3.]
2573
+ >>> print(output2)
2574
+ [[1. 4. 3.]]
2575
+
2576
+ .. method:: Tensor.nansum(axis=None, keepdims=False, *, dtype=None) -> Tensor
2577
+ :noindex:
2578
+
2579
+ Computes sum of `input` over a given dimension, treating NaNs as zero.
2580
+
2581
+ Args:
2582
+ axis (Union[int, tuple(int)], optional): The dimensions to reduce. Supposed the rank of `self` is r,
2583
+ axis must be in the range [-r,r). Default: ``None``, all dimensions are reduced.
2584
+ keepdims (bool, optional): Whether the output Tensor keeps dimensions or not. Default: ``False``.
2585
+
2586
+ Keyword Args:
2587
+ dtype (:class:`mindspore.dtype`, optional): The dtype of output Tensor. Default: ``None``.
2588
+
2589
+ Returns:
2590
+ Tensor, the sum of input Tensor in the given dimension dim, treating NaNs as zero.
2591
+
2592
+ - If axis is None, keepdims is False,
2593
+ the output is a 0-D Tensor representing the sum of all elements in the input Tensor.
2594
+ - If axis is int, set as 2, and keepdims is False,
2595
+ the shape of output is :math:`(self_1, self_3, ..., self_R)`.
2596
+ - If axis is tuple(int) or list(int), set as (2, 3), and keepdims is False,
2597
+ the shape of output is :math:`(self_1, self_4, ..., self_R)`.
2598
+
2599
+ Raises:
2600
+ TypeError: If `keepdims` is not a bool.
2601
+ TypeError: If the dtype of `self` or `dtype` is complex type.
2602
+ ValueError: If `axis` is not in [-rank(self), rank(self)).
2603
+
2604
+ Supported Platforms:
2605
+ ``Ascend`` ``GPU`` ``CPU``
2606
+
2607
+ Examples:
2608
+ >>> import mindspore
2609
+ >>> import numpy as np
2610
+ >>> from mindspore import Tensor
2611
+ >>> x = Tensor(np.array([[float("nan"), 2, 3], [1, 2, float("nan")]]), mindspore.float32)
2612
+ >>> output1 = x.nansum(axis=0, keepdims=False, dtype=mindspore.float32)
2613
+ >>> output2 = x.nansum(axis=0, keepdims=True, dtype=mindspore.float32)
2614
+ >>> print(output1)
2615
+ [1. 4. 3.]
2616
+ >>> print(output2)
2617
+ [[1. 4. 3.]]""")
2618
+ attach_docstr("nan_to_num", r"""nan_to_num(nan=None, posinf=None, neginf=None) -> Tensor
2619
+
2620
+ For details, please refer to :func:`mindspore.ops.nan_to_num`.
2621
+
2622
+ Supported Platforms:
2623
+ ``Ascend`` ``CPU``
2624
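+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([float("nan"), 1.0, 2.0], mindspore.float32)
+ >>> print(x.nan_to_num(nan=0.0))
+ [0. 1. 2.]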
+ """)
2625
+ attach_docstr("narrow", r"""narrow(dim, start, length) -> Tensor
2626
+
2627
+ Obtains a tensor of a specified length at a specified start position along a specified axis.
2628
+
2629
+ Args:
2630
+ dim (int): the axis along which to narrow.
2631
+ start (Union[int, Tensor]): the index at which to start narrowing along `dim`.
2632
+ length (int): the number of elements to take along `dim`, starting from `start`.
2633
+
2634
+ Returns:
2635
+ Tensor, the narrowed tensor.
2636
+
2637
+ Raises:
2638
+ ValueError: The value of `dim` is out of range [-self.ndim, self.ndim).
2639
+ ValueError: The value of `start` is out of range [-self.shape[dim], self.shape[dim]].
2640
+ ValueError: The value of `length` is out of range [0, self.shape[dim] - start].
2641
+
2642
+ Supported Platforms:
2643
+ ``Ascend`` ``GPU`` ``CPU``
2644
+
2645
+ Examples:
2646
+ >>> import mindspore
2647
+ >>> from mindspore import Tensor
2648
+ >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
2649
+ >>> output = x.narrow(0, 0, 2)
2650
+ >>> print(output)
2651
+ [[ 1 2 3]
2652
+ [ 4 5 6]]
2653
+ >>> output = x.narrow(1, 1, 2)
2654
+ >>> print(output)
2655
+ [[ 2 3]
2656
+ [ 5 6]
2657
+ [ 8 9]]
2658
+ """)
2659
+ attach_docstr("negative", r"""negative() -> Tensor
2660
+
2661
+ Alias for :func:`mindspore.Tensor.neg`.
2662
+ """)
2663
+ attach_docstr("neg", r"""neg() -> Tensor
2664
+
2665
+ For details, please refer to :func:`mindspore.ops.neg`.
2666
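+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, -2.0], mindspore.float32)
+ >>> print(x.neg())
+ [-1.  2.]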
+ """)
2667
+ attach_docstr("new_ones", r"""new_ones(size, dtype=None) -> Tensor
2668
+
2669
+ Return a tensor of `size` filled with ones.
2670
+
2671
+ Args:
2672
+ size (Union[int, tuple(int), list(int)]): An int, list or tuple of integers defining the output shape.
2673
+ dtype (:class:`mindspore.dtype`, optional): The desired dtype of the output tensor. If None, the returned
2674
+ tensor has the same dtype as `self`. Default: ``None``.
2675
+
2676
+ Returns:
2677
+ Tensor, with the shape and dtype defined above, filled with ones.
2678
+
2679
+ Raises:
2680
+ TypeError: If `size` is neither an int nor a tuple/list of int.
2681
+ TypeError: If `dtype` is not a MindSpore dtype.
2682
+ ValueError: If `size` contains negative values.
2683
+
2684
+ Supported Platforms:
2685
+ ``Ascend`` ``GPU`` ``CPU``
2686
+
2687
+ Examples:
2688
+ >>> import mindspore
2689
+ >>> from mindspore import Tensor
2690
+ >>> x = Tensor((), mindspore.int32)
2691
+ >>> x.new_ones((2, 3))
2692
+ Tensor(shape=[2, 3], dtype=Int32, value=
2693
+ [[1, 1, 1],
2694
+ [1, 1, 1]])
2695
+ """)
2696
+ attach_docstr("new_zeros", r"""new_zeros(size, dtype=None) -> Tensor
2697
+
2698
+ Return a tensor of `size` filled with zeros.
2699
+
2700
+ Args:
2701
+ size (Union[int, tuple(int), list(int)]): An int, list or tuple of integers defining the output shape.
2702
+ dtype (:class:`mindspore.dtype`, optional): The desired dtype of the output tensor. If None, the returned
2703
+ tensor has the same dtype as `self`. Default: ``None``.
2704
+
2705
+ Returns:
2706
+ Tensor, with the shape and dtype defined above, filled with zeros.
2707
+
2708
+ Raises:
2709
+ TypeError: If `size` is neither an int nor a tuple/list of int.
2710
+ TypeError: If `dtype` is not a MindSpore dtype.
2711
+ ValueError: If `size` contains negative values.
2712
+
2713
+ Supported Platforms:
2714
+ ``Ascend`` ``GPU`` ``CPU``
2715
+
2716
+ Examples:
2717
+ >>> import mindspore
2718
+ >>> from mindspore import Tensor
2719
+ >>> x = Tensor((), mindspore.int32)
2720
+ >>> x.new_zeros((2, 3))
2721
+ Tensor(shape=[2, 3], dtype=Int32, value=
2722
+ [[0, 0, 0],
2723
+ [0, 0, 0]])
2724
+ """)
2725
+ attach_docstr("ne", r"""ne(other) -> Tensor
2726
+
2727
+ Alias for :func:`mindspore.Tensor.not_equal`.
2728
+ """)
2729
+ attach_docstr("not_equal", r"""not_equal(other) -> Tensor
2730
+
2731
+ For details, please refer to :func:`mindspore.ops.ne`.
2732
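+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 2.0, 3.0], mindspore.float32)
+ >>> print(x.not_equal(2.0))
+ [ True False  True]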
+ """)
2733
+ attach_docstr("outer", r"""outer(vec2) -> Tensor
2734
+
2735
+ For details, please refer to :func:`mindspore.ops.outer`.
2736
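+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 2.0, 3.0], mindspore.float32)
+ >>> vec2 = Tensor([1.0, 2.0], mindspore.float32)
+ >>> print(x.outer(vec2))
+ [[1. 2.]
+ [2. 4.]
+ [3. 6.]]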
+ """)
2737
+ attach_docstr("pow", r"""pow(exponent) -> Tensor
2738
+
2739
+ For details, please refer to :func:`mindspore.ops.pow`.
2740
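+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 2.0, 3.0], mindspore.float32)
+ >>> print(x.pow(2))
+ [1. 4. 9.]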
+ """)
2741
+ attach_docstr("prod", r"""prod(dim=None, keepdim=False, dtype=None) -> Tensor
2742
+
2743
+ By default, reduces all dimensions of a tensor by multiplying all elements in them. It can also
2744
+ reduce a dimension of `self` along the specified `dim`. `keepdim` determines whether the output
2745
+ keeps the same number of dimensions as `self`.
2746
+
2747
+ Args:
2748
+ dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
2749
+ Only constant value is allowed. Assume the rank of `self` is r, and the value range is [-r,r).
2750
+ keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
2751
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
2752
+ dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
2753
+
2754
+ Returns:
2755
+ Tensor.
2756
+
2757
+ - If `dim` is ``None`` , and `keepdim` is ``False`` ,
2758
+ the output is a 0-D tensor representing the product of all elements in the self tensor.
2759
+ - If `dim` is int, set as 1, and `keepdim` is ``False`` ,
2760
+ the shape of output is :math:`(self_0, self_2, ..., self_R)`.
2761
+ - If `dim` is tuple(int) or list(int), set as (1, 2), and `keepdim` is ``False`` ,
2762
+ the shape of output is :math:`(self_0, self_3, ..., self_R)`.
2763
+ - If `dim` is 1-D Tensor, set as [1, 2], and `keepdim` is ``False`` ,
2764
+ the shape of output is :math:`(self_0, self_3, ..., self_R)`.
2765
+
2766
+ Raises:
2767
+ TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
2768
+ TypeError: If `keepdim` is not a bool.
2769
+ ValueError: If `dim` is out of range.
2770
+
2771
+ Supported Platforms:
2772
+ ``Ascend`` ``GPU`` ``CPU``
2773
+
2774
+ Examples:
2775
+ >>> import mindspore
2776
+ >>> import numpy as np
2777
+ >>> from mindspore import Tensor
2778
+ >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
2779
+ >>> output = Tensor.prod(x, 1, True)
2780
+ >>> result = output.shape
2781
+ >>> print(result)
2782
+ (3, 1, 5, 6)
2783
+ >>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
2784
+ >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
2785
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
2786
+ ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
2787
+ >>> output = Tensor.prod(x)
2788
+ >>> print(output)
2789
+ 2.2833798e+33
2790
+ >>> print(output.shape)
2791
+ ()
2792
+ >>> # case 2: Reduces a dimension along axis 0.
2793
+ >>> output = Tensor.prod(x, 0, True)
2794
+ >>> print(output)
2795
+ [[[ 28. 28. 28. 28. 28. 28.]
2796
+ [ 80. 80. 80. 80. 80. 80.]
2797
+ [162. 162. 162. 162. 162. 162.]]]
2798
+ >>> # case 3: Reduces a dimension along axis 1.
2799
+ >>> output = Tensor.prod(x, 1, True)
2800
+ >>> print(output)
2801
+ [[[ 6. 6. 6. 6. 6. 6.]]
2802
+ [[120. 120. 120. 120. 120. 120.]]
2803
+ [[504. 504. 504. 504. 504. 504.]]]
2804
+ >>> # case 4: Reduces a dimension along axis 2.
2805
+ >>> output = Tensor.prod(x, 2, True)
2806
+ >>> print(output)
2807
+ [[[1.00000e+00]
2808
+ [6.40000e+01]
2809
+ [7.29000e+02]]
2810
+ [[4.09600e+03]
2811
+ [1.56250e+04]
2812
+ [4.66560e+04]]
2813
+ [[1.17649e+05]
2814
+ [2.62144e+05]
2815
+ [5.31441e+05]]]
2816
+
2817
+
2818
+ .. method:: Tensor.prod(axis=None, keep_dims=False, dtype=None)-> Tensor
2819
+ :noindex:
2820
+
2821
+ For more details, please refer to :func:`mindspore.ops.prod`.
2822
+ """)
2823
+ attach_docstr("put_", r"""put_(index, source, accumulate=False) -> Tensor
2824
+
2825
+ Copies the elements from `source` into the positions specified by `index`.
2826
+ `index` and `source` need to have the same number of elements, but not necessarily the same shape.
2827
+
2828
+ .. warning::
2829
+ This is an experimental API that is subject to change or deletion.
2830
+
2831
+ Args:
2832
+ index (LongTensor): the indices into `self` at which the values are written.
2833
+ source (Tensor): the tensor containing values to copy from.
2834
+ accumulate (bool, optional): Whether to accumulate into `self` . Default: ``False``.
2835
+
2836
+ Returns:
2837
+ Tensor, with the same dtype and shape as `self`.
2838
+
2839
+ Raises:
2840
+ TypeError: If dtype of `index` is not long type.
2841
+ TypeError: If `self` and `source` have different dtypes.
2842
+
2843
+ Supported Platforms:
2844
+ ``Ascend``
2845
+
2846
+ Examples:
2847
+ >>> import mindspore as ms
2848
+ >>> from mindspore import Tensor
2849
+ >>> src = Tensor([[4, 3, 5],[6, 7, 8]], ms.float32)
2850
+ >>> index = Tensor([1, 3], ms.int64)
2851
+ >>> source = Tensor([9, 10], ms.float32)
2852
+ >>> src.put_(index, source)
2853
+ >>> print(src)
2854
+ [[4. 9. 5.]
2855
+ [10. 7. 8.]]
2856
+ """)
2857
+ attach_docstr("reciprocal", r"""reciprocal() -> Tensor
2858
+
2859
+ For details, please refer to :func:`mindspore.ops.reciprocal`.
2860
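+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.25, 0.5, 1.0], mindspore.float32)
+ >>> print(x.reciprocal())
+ [4. 2. 1.]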
+ """)
2861
+ attach_docstr("remainder", r"""remainder(other) -> Tensor
2862
+
2863
+ Computes the remainder of `self` divided by `other` element-wise. The result has the same sign as the divisor and
2864
+ its absolute value is less than that of `other`.
2865
+
2866
+ Supports broadcasting to a common shape and implicit type promotion.
2867
+
2868
+ .. code:: python
2869
+
2870
+ remainder(input, other) == input - input.div(other, rounding_mode="floor") * other
2871
+
2872
+ .. note::
2873
+ Complex inputs are not supported. At least one input must be a tensor, and the two inputs cannot both be bool tensors.
2874
+
2875
+ The dividend `self` is a tensor whose data type is
2876
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
2877
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
2878
+
2879
+ Args:
2880
+ other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
2881
+ a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
2882
+
2883
+ Returns:
2884
+ Tensor, with dtype promoted and shape broadcasted.
2885
+
2886
+ Raises:
2887
+ TypeError: If `self` and `other` are not of types: (Tensor, Tensor), (Tensor, Number),
2888
+ (Tensor, bool), (Number, Tensor) or (bool, Tensor).
2889
+ ValueError: If `self` and `other` are not broadcastable.
2890
+
2891
+ Supported Platforms:
2892
+ ``Ascend``
2893
+
2894
+ Examples:
2895
+ >>> import numpy as np
2896
+ >>> from mindspore import Tensor
2897
+ >>> x = Tensor(np.array([-4.0, 5.0, 6.0]).astype(np.float32))
2898
+ >>> y = Tensor(np.array([3.0, 2.0, 3.0]).astype(np.float64))
2899
+ >>> output = x.remainder(y)
2900
+ >>> print(output)
2901
+ [2. 1. 0.]
2902
+
2903
+ .. method:: Tensor.remainder(divisor) -> Tensor
2904
+ :noindex:
2905
+
2906
+ Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
2907
+
2908
+ Inputs of `self` and `divisor` comply with the implicit type conversion rules to make the data types consistent.
2909
+ The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
2910
+ both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
2911
+ and one scalar, the scalar could only be a constant.
2912
+
2913
+ .. code:: python
2914
+
2915
+ remainder(input, other) == input - input.div(other, rounding_mode="floor") * other
2916
+
2917
+ .. warning::
2918
+ - When the elements of input exceed 2048, there might be accuracy problems.
2919
+ - The calculation results of this operator on Ascend and CPU might be inconsistent.
2920
+ - If the shape is expressed as :math:`(D_1, D_2, ..., D_n)` , then :math:`D_1*D_2*...*D_n <= 1000000` and :math:`n <= 8` .
2921
+
2922
+ .. note::
2923
+ The first input `self` is a tensor whose data type is number.
2924
+
2925
+ Args:
2926
+ divisor (Union[Tensor, numbers.Number, bool]): When the first input is a tensor, the second input
2927
+ could be a number, a bool or a tensor whose data type is number.
2928
+
2929
+ Returns:
2930
+ Tensor, the shape is the same as the one after broadcasting,
2931
+ and the data type is the one with higher precision.
2932
+
2933
+ Raises:
2934
+ TypeError: If neither `self` nor `divisor` is one of the following: Tensor, Number, bool.
2935
+ ValueError: If the shape of `self` and `divisor` cannot be broadcasted to each other.
2936
+
2937
+ Supported Platforms:
2938
+ ``Ascend`` ``GPU`` ``CPU``
2939
+
2940
+ Examples:
2941
+ >>> import numpy as np
2942
+ >>> from mindspore import Tensor
2943
+ >>> x = Tensor(np.array([-4.0, 5.0, 6.0]).astype(np.float16))
2944
+ >>> y = Tensor(np.array([3.0, 2.0, 3.0]).astype(np.float16))
2945
+ >>> output = x.remainder(divisor=y)
2946
+ >>> print(output)
2947
+ [2. 1. 0.]
2948
+ """)
2949
+ attach_docstr("repeat", r"""repeat(*repeats)
2950
+
2951
+ Copy the elements in each dimension of a Tensor based on the specified number of repetition times.
2952
+
2953
+ This function copies the tensor's data.
2954
+
2955
+ The shape of the output tensor can be described as follows, where :math:`n` is the number of
2956
+ elements in `repeats`.
2957
+
2958
+ .. math::
2959
+
2960
+ shape_{i} = \begin{cases}
2961
+ repeats_{i} * input.shape_{i} & \text{if } 0 \le i < input.{rank} \\
2962
+ repeats_{i} & \text{if } input.{rank} \le i < n \\
2963
+ \end{cases}
2964
+
2965
+ .. warning::
2966
+ This is an experimental API that is subject to change or deletion.
2967
+
2968
+ .. note::
2969
+ To specify the number of repetition times for each element of a single dimension, please
2970
+ refer to :func:`mindspore.Tensor.repeat_interleave`.
2971
+
2972
+ Args:
2973
+ *repeats (int): Number of repetitions of `self` in each dimension. The value must be a
2974
+ non-negative number. ``1`` indicates that the dimension remains unchanged. The number
2975
+ of elements in `repeats` must be greater than or equal to the number of dimensions
2976
+ in `self` . When the number of dimensions of `self` is less than the number of elements
2977
+ of `repeats` , `self` is broadcasted to the number of dimensions with the same number of
2978
+ elements of `repeats` (as shown in the example).
2979
+
2980
+ Returns:
2981
+ Tensor, the new Tensor after the element is copied from the specified number of repetitions.
2982
+
2983
+ Raises:
2984
+ RuntimeError: If the number of elements of `repeats` is less than the number of dimensions
2985
+ of `self` . Or `repeats` has negative element.
2986
+ RuntimeError: If the number of elements of `repeats` or the number of dimensions of `self` is larger than 8.
2987
+ TypeError: If type of `repeats` is unsupported.
2988
+
2989
+ Supported Platforms:
2990
+ ``Ascend``
2991
+
2992
+ Examples:
2993
+ >>> from mindspore import Tensor
2994
+ >>> a = Tensor([[0, 1, 2], [3, 4, 5]])
2995
+ >>> print(a.repeat(3, 2))
2996
+ [[0 1 2 0 1 2]
2997
+ [3 4 5 3 4 5]
2998
+ [0 1 2 0 1 2]
2999
+ [3 4 5 3 4 5]
3000
+ [0 1 2 0 1 2]
3001
+ [3 4 5 3 4 5]]
3002
+ >>> print(a.repeat(2, 1, 3)) # a is treated as a shape [1, 2, 3]
3003
+ [[[0 1 2 0 1 2 0 1 2]
3004
+ [3 4 5 3 4 5 3 4 5]]
3005
+ [[0 1 2 0 1 2 0 1 2]
3006
+ [3 4 5 3 4 5 3 4 5]]]
3007
+
3008
+ .. method:: Tensor.repeat(repeats) -> Tensor
3009
+ :noindex:
3010
+
3011
+ Copy the elements in each dimension of a Tensor based on the specified number of repetition times.
3012
+
3013
+ This function copies the tensor's data.
3014
+
3015
+ Except that the variable-length int parameters are replaced by a single parameter of type list or tuple,
3016
+ this overload behaves the same as the overload with the `*repeats` parameter.
3017
+
3018
+ The shape of the output tensor can be described as follows, where :math:`n` is the number of
3019
+ elements in `repeats`.
3020
+
3021
+ .. math::
3022
+
3023
+ shape_{i} = \begin{cases}
3024
+ repeats_{i} * input.shape_{i} & \text{if } 0 \le i < input.{rank} \\
3025
+ repeats_{i} & \text{if } input.{rank} \le i < n \\
3026
+ \end{cases}
3027
+
3028
+ .. warning::
3029
+ This is an experimental API that is subject to change or deletion.
3030
+
3031
+ .. note::
3032
+ To specify the number of repetition times for each element of a single dimension, please
3033
+ refer to :func:`mindspore.Tensor.repeat_interleave`.
3034
+
3035
+ Args:
3036
+ repeats (Union[tuple[int], list[int]]): Number of repetitions of `self` in each dimension. The value
3037
+ must be a non-negative number. ``1`` indicates that the dimension remains unchanged. The number
3038
+ of elements in `repeats` must be greater than or equal to the number of dimensions in `self` .
3039
+ When the number of dimensions of `self` is less than the number of elements of `repeats` , `self`
3040
+ is broadcasted to the number of dimensions with the same number of elements of `repeats` (as shown
3041
+ in the example).
3042
+
3043
+ Returns:
3044
+ Tensor, the new Tensor after the element is copied from the specified number of repetitions.
3045
+
3046
+ Raises:
3047
+ RuntimeError: If the number of elements of `repeats` is less than the number of dimensions
3048
+ of `self` . Or `repeats` has negative element.
3049
+ RuntimeError: If the number of elements of `repeats` or the number of dimensions of `self` is larger than 8.
3050
+ TypeError: If type of `repeats` is unsupported.
3051
+
3052
+ See also:
3053
+ - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
3054
+ - :func:`mindspore.Tensor.resize`: Changes shape and size of tensor in-place.
3055
+ - :func:`mindspore.Tensor.repeat_interleave`: Repeats each element on the specified axis of a Tensor based
3056
+ on the specified number of times.
3057
+ - :func:`mindspore.Tensor.tile`: Repeats a Tensor on each dimension for a specified number of times. And
3058
+ there is no requirement on the number of parameters `repeats` .
3059
+
3060
+ Supported Platforms:
3061
+ ``Ascend``
3062
+
3063
+ Examples:
3064
+ >>> from mindspore import Tensor
3065
+ >>> a = Tensor([[0, 1, 2], [3, 4, 5]])
3066
+ >>> print(a.repeat([3, 2]))
3067
+ [[0 1 2 0 1 2]
3068
+ [3 4 5 3 4 5]
3069
+ [0 1 2 0 1 2]
3070
+ [3 4 5 3 4 5]
3071
+ [0 1 2 0 1 2]
3072
+ [3 4 5 3 4 5]]
3073
+ >>> print(a.repeat(repeats=(2, 1, 3))) # a is treated as a shape [1, 2, 3]
3074
+ [[[0 1 2 0 1 2 0 1 2]
3075
+ [3 4 5 3 4 5 3 4 5]]
3076
+ [[0 1 2 0 1 2 0 1 2]
3077
+ [3 4 5 3 4 5 3 4 5]]]
3078
+ """)
3079
+ attach_docstr("repeat_interleave", r"""repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor
3080
+
3081
+ Repeat elements of a tensor along a dim, like :func:`mindspore.numpy.repeat`.
3082
+
3083
+ .. warning::
3084
+ Only supported on Atlas A2 training series products.
3085
+
3086
+ .. note::
3087
+ The self tensor, whose values are to be repeated, must be of type: float16, float32,
3088
+ int8, uint8, int16, int32, or int64.
3089
+
3090
+ Args:
3091
+ repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
3092
+ dim (int, optional): The dim along which to repeat. Default: ``None``. If dim is None,
3093
+ the self Tensor will be flattened and the output will also be flattened.
3094
+
3095
+ Keyword Args:
3096
+ output_size (int, optional): Total output size for the given axis (e.g. sum of repeats),
3097
+ Default: ``None``.
3098
+
3099
+ Returns:
3100
+ One tensor with values repeated along the specified dim. If self has shape
3101
+ :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
3102
+ si * repeats, ..., sn)`. The output type will be the same as the type of `self`.
3103
+
3104
+ Supported Platforms:
3105
+ ``Ascend``
3106
+
3107
+ Examples:
3108
+ >>> import mindspore
3109
+ >>> import numpy as np
3110
+ >>> from mindspore import Tensor
3111
+ >>> input1 = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
3112
+ >>> output1 = input1.repeat_interleave(repeats=2, dim=0, output_size=None)
3113
+ >>> input2 = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
3114
+ >>> output2 = input2.repeat_interleave(Tensor(np.array([1, 2])), dim=0, output_size=None)
3115
+ >>> print(output1)
3116
+ [[0 1 2]
3117
+ [0 1 2]
3118
+ [3 4 5]
3119
+ [3 4 5]]
3120
+ >>> print(output2)
3121
+ [[1 2]
3122
+ [3 4]
3123
+ [3 4]]
3124
+
3125
+ .. method:: Tensor.repeat_interleave(repeats, dim=None) -> Tensor
3126
+ :noindex:
3127
+
3128
+ Repeat elements of a tensor along a dim, like :func:`mindspore.numpy.repeat`.
3129
+
3130
+ .. note::
3131
+ The self tensor, whose values are to be repeated, must be of type: float16,
3132
+ float32, int8, uint8, int16, int32, or int64.
3133
+
3134
+ Args:
3135
+ repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
3136
+ dim (int, optional): The dim along which to repeat. Default: ``None``. If dim is None,
3137
+ the self Tensor will be flattened and the output will also be flattened.
3138
+
3139
+ Returns:
3140
+ One tensor with values repeated along the specified dim. If self has shape
3141
+ :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
3142
+ si * repeats, ..., sn)`. The output type will be the same as the type of `self`.
3143
+
3144
+ Supported Platforms:
3145
+ ``Ascend`` ``GPU`` ``CPU``
3146
+
3147
+ Examples:
3148
+ >>> import mindspore
3149
+ >>> import numpy as np
3150
+ >>> from mindspore import Tensor
3151
+ >>> input1 = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
3152
+ >>> output1 = input1.repeat_interleave(repeats=2, dim=0)
3153
+ >>> input2 = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
3154
+ >>> output2 = input2.repeat_interleave(Tensor(np.array([1, 2])), dim=0)
3155
+ >>> print(output1)
3156
+ [[0 1 2]
3157
+ [0 1 2]
3158
+ [3 4 5]
3159
+ [3 4 5]]
3160
+ >>> print(output2)
3161
+ [[1 2]
3162
+ [3 4]
3163
+ [3 4]]
3164
+ """)
3165
+ attach_docstr("reshape", r"""reshape(*shape) -> Tensor
3166
+
3167
+ For details, please refer to :func:`mindspore.ops.reshape`.
3168
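+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([[1.0, 2.0], [3.0, 4.0]], mindspore.float32)
+ >>> print(x.reshape(1, 4))
+ [[1. 2. 3. 4.]]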
+ """)
3169
+ attach_docstr("roll", r"""roll(shifts, dims) -> Tensor
3170
+
3171
+ For details, please refer to :func:`mindspore.ops.roll`.
3172
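+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.0, 1.0, 2.0, 3.0, 4.0], mindspore.float32)
+ >>> print(x.roll(shifts=2, dims=0))
+ [3. 4. 0. 1. 2.]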
+ """)
3173
+ attach_docstr("round", r"""round(decimals=0) -> Tensor
3174
+
3175
+ For details, please refer to :func:`mindspore.ops.round`.
3176
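+
+ Examples (a minimal usage sketch; printed values are illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.8, 1.4, 2.6], mindspore.float32)
+ >>> print(x.round())
+ [1. 1. 3.]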
+ """)
3177
+ attach_docstr("rsqrt", r"""rsqrt() -> Tensor
3178
+
3179
+ For details, please refer to :func:`mindspore.ops.rsqrt`.
3180
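+
+ Examples (a minimal usage sketch; the printed value is illustrative):
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor([4.0], mindspore.float32)
+ >>> print(x.rsqrt())
+ [0.5]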
+ """)
3181
+ attach_docstr("scatter_add", r"""scatter_add(dim, index, src) -> Tensor
3182
+
3183
+ Adds each element in `src` to `self` at the position specified by `index` , along the dimension specified by `dim` .
3184
+ It takes three inputs `self`, `src` and `index` of the same rank r >= 1.
3185
+
3186
+ For a 3-D tensor, the operation updates `self` as follows:
3187
+
3188
+ .. code-block::
3189
+
3190
+ self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
3191
+
3192
+ self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
3193
+
3194
+ self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
3195
+
3196
+ .. note::
3197
+ The rank of this tensor `self` must be at least 1.
3198
+
3199
+ Args:
3200
+ dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`self`).
3201
+ index (Tensor): The index of `self` to do scatter operation whose data type must be int32 or
3202
+ int64. Same rank as `self`. Except for the dimension specified by `dim`,
3203
+ the size of each dimension of `index` must be less than or equal to the size of
3204
+ the corresponding dimension of `self`.
3205
+ src (Tensor): The tensor doing the scatter operation with `self`, has the same type as `self` and
3206
+ the size of each dimension must be greater than or equal to that of `index`.
3207
+
3208
+ Returns:
3209
+ Tensor, has the same shape and type as `self`.
3210
+
3211
+ Raises:
3212
+ TypeError: If `index` is neither int32 nor int64.
3213
+ ValueError: If the rank of any of `self`, `index` and `src` is less than 1.
3214
+ ValueError: If the ranks of `self`, `index` and `src` are not the same.
3215
+ ValueError: If the size of any dimension of `index` , except the dimension specified by `dim` , is
3216
+ greater than the size of the corresponding dimension of `self`.
3217
+ ValueError: If the size of any dimension of `src` is less than that of `index`.
3218
+
3219
+ Supported Platforms:
3220
+ ``Ascend``
3221
+
3222
+ Examples:
3223
+ >>> import numpy as np
3224
+ >>> import mindspore as ms
3225
+ >>> from mindspore import Tensor
3226
+ >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3227
+ >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3228
+ >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
3229
+ >>> out = input.scatter_add(dim=1, index=index, src=src)
3230
+ >>> print(out)
3231
+ [[1. 2. 11. 4. 13.]]
3232
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3233
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3234
+ >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
3235
+ >>> out = input.scatter_add(dim=0, index=index, src=src)
3236
+ >>> print(out)
3237
+ [[1. 2. 3. 0. 0.]
3238
+ [0. 0. 0. 0. 0.]
3239
+ [4. 5. 6. 0. 0.]
3240
+ [0. 0. 0. 0. 0.]
3241
+ [7. 8. 9. 0. 0.]]
3242
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3243
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3244
+ >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
3245
+ >>> out = input.scatter_add(dim=1, index=index, src=src)
3246
+ >>> print(out)
3247
+ [[1. 0. 2. 0. 3.]
3248
+ [4. 0. 5. 0. 6.]
3249
+ [7. 0. 8. 0. 9.]
3250
+ [0. 0. 0. 0. 0.]
3251
+ [0. 0. 0. 0. 0.]]
3252
+
3253
+ .. method:: Tensor.scatter_add(indices, updates) -> Tensor
3254
+ :noindex:
3255
+
3256
+ Creates a new tensor by adding the values from the positions in `self` indicated by
3257
+ `indices`, with values from `updates`. When multiple values are given for the same
3258
+ index, the updated result will be the sum of all values. This operation is almost
3259
+ equivalent to using ScatterNdAdd, except that the updates are applied on output `Tensor`
3260
+ instead of input `Parameter`.
3261
+
3262
+ The last axis of `indices` is the depth of each index vectors. For each index vector,
3263
+ there must be a corresponding value in `updates`. The shape of `updates` should be
3264
+ equal to the shape of `self[indices]`. For more details, see Examples.
3265
+
3266
+ .. math::
3267
+ output\left [indices \right ] = self + updates
3268
+
3269
+ .. note::
3270
+ The dimension of this tensor `self` must be no less than indices.shape[-1].
3271
+
3272
+ If some values of the `indices` are out of bound:
3273
+
3274
+ - On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
3275
+ the corresponding `updates` will not be updated to self tensor.
3276
+ - On CPU, if some values of the `indices` are out of bound, an index error is raised.
3277
+ - On Ascend, out of bound checking is not supported, if some values of the `indices` are out of bound,
3278
+ unknown errors may be caused.
3279
+
3280
+ Args:
3281
+ indices (Tensor): The index of input tensor whose data type is int32 or int64.
3282
+ The rank must be at least 2.
3283
+ updates (Tensor): The tensor to update the input tensor, with the same dtype as `self`,
3284
+ and a shape that should be
3285
+ equal to :math:`indices.shape[:-1] + self.shape[indices.shape[-1]:]`.
3286
+
3287
+ Returns:
3288
+ Tensor, has the same shape and type as `self`.
3289
+
3290
+ Raises:
3291
+ TypeError: If dtype of `indices` is neither int32 nor int64.
3292
+ ValueError: If length of shape of `self` is less than the last dimension of shape of `indices`.
3293
+ RuntimeError: If a value of `indices` is out of the bounds of `self` on the CPU backend.
3294
+
3295
+ Supported Platforms:
3296
+ ``Ascend`` ``GPU`` ``CPU``
3297
+
3298
+ Examples:
3299
+ >>> import mindspore
3300
+ >>> import numpy as np
3301
+ >>> from mindspore import Tensor
3302
+ >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3303
+ >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
3304
+ >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
3305
+ >>> output = input_x.scatter_add(indices, updates)
3306
+ >>> print(output)
3307
+ [[ 3.1 0.3 3.6]
3308
+ [ 0.4 0.5 -3.2]]
3309
+ """)
3310
+ attach_docstr("scatter", r"""scatter(dim, index, src) -> Tensor
3311
+
3312
+ Updates the values in `self` with values from `src` according to the specified `index` .
3313
+ For a 3-D tensor, the output will be:
3314
+
3315
+ .. code-block::
3316
+
3317
+ output[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
3318
+
3319
+ output[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
3320
+
3321
+ output[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
3322
+
3323
+ .. note::
3324
+ The backward is supported only for the case `src.shape == index.shape` when `src` is a tensor.
3325
+ The rank of the input tensor `self` must be at least 1.
3326
+
3327
+ Args:
3328
+ dim (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(self).
3329
+ index (Tensor): The index to do the update operation, whose data must be positive numbers with type int32
3330
+ or int64. Same rank as `self` . And accepted range is [-s, s) where s is the size along axis.
3331
+ src (Tensor, float): The data doing the update operation with `self`. Can be a tensor with the same data type
3332
+ as `self` or a float number to scatter.
3333
+
3334
+ Returns:
3335
+ Tensor, has the same shape and type as `self` .
3336
+
3337
+ Raises:
3338
+ TypeError: If `index` is neither int32 nor int64.
3339
+ ValueError: If rank of any of `self` , `index` and `src` is less than 1.
3340
+ ValueError: If the rank of `src` is not equal to the rank of `self` .
3341
+ TypeError: If `self` and `src` have different dtypes.
3342
+ RuntimeError: If `index` has negative elements.
3343
+
3344
+ Supported Platforms:
3345
+ ``Ascend`` ``GPU`` ``CPU``
3346
+
3347
+ Examples:
3348
+ >>> import numpy as np
3349
+ >>> import mindspore as ms
3350
+ >>> from mindspore import Tensor
3351
+ >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3352
+ >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3353
+ >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
3354
+ >>> out = input.scatter(dim=1, index=index, src=src)
3355
+ >>> print(out)
3356
+ [[1. 2. 8. 4. 8.]]
3357
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3358
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3359
+ >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
3360
+ >>> out = input.scatter(dim=0, index=index, src=src)
3361
+ >>> print(out)
3362
+ [[1. 2. 3. 0. 0.]
3363
+ [0. 0. 0. 0. 0.]
3364
+ [4. 5. 6. 0. 0.]
3365
+ [0. 0. 0. 0. 0.]
3366
+ [7. 8. 9. 0. 0.]]
3367
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3368
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3369
+ >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
3370
+ >>> out = input.scatter(dim=1, index=index, src=src)
3371
+ >>> print(out)
3372
+ [[1. 0. 2. 0. 3.]
3373
+ [4. 0. 5. 0. 6.]
3374
+ [7. 0. 8. 0. 9.]
3375
+ [0. 0. 0. 0. 0.]
3376
+ [0. 0. 0. 0. 0.]]
3377
+
3378
+ .. method:: Tensor.scatter(axis, index, src) -> Tensor
3379
+ :noindex:
3380
+
3381
+ Updates the values in `self` with values from `src` according to the specified `index` .
3382
+ Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details.
3383
+
3384
+ .. note::
3385
+ The backward is supported only for the case `src.shape == index.shape`.
3386
+ The rank of the input tensor `self` must be at least 1.
3387
+
3388
+ Args:
3389
+ axis (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(self).
3390
+ index (Tensor): The index to do the update operation, whose data must be positive numbers with type int32
3391
+ or int64. Same rank as `self` . And accepted range is [-s, s) where s is the size along axis.
3392
+ src (Tensor, float): The data doing the update operation with `self`. Can be a tensor with the same data type
3393
+ as `self` or a float number to scatter.
3394
+
3395
+ Returns:
3396
+ Tensor, has the same shape and type as `self` .
3397
+
3398
+ Raises:
3399
+ TypeError: If `index` is neither int32 nor int64.
3400
+ ValueError: If rank of any of `self` , `index` and `src` is less than 1.
3401
+ ValueError: If the rank of `src` is not equal to the rank of `self` .
3402
+ TypeError: If `self` and `src` have different dtypes.
3403
+ RuntimeError: If `index` has negative elements.
3404
+
3405
+ Supported Platforms:
3406
+ ``Ascend`` ``GPU`` ``CPU``
3407
+
3408
+ Examples:
3409
+ >>> import numpy as np
3410
+ >>> import mindspore as ms
3411
+ >>> from mindspore import Tensor
3412
+ >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3413
+ >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3414
+ >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
3415
+ >>> out = input.scatter(axis=1, index=index, src=src)
3416
+ >>> print(out)
3417
+ [[1. 2. 8. 4. 8.]]
3418
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3419
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3420
+ >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
3421
+ >>> out = input.scatter(axis=0, index=index, src=src)
3422
+ >>> print(out)
3423
+ [[1. 2. 3. 0. 0.]
3424
+ [0. 0. 0. 0. 0.]
3425
+ [4. 5. 6. 0. 0.]
3426
+ [0. 0. 0. 0. 0.]
3427
+ [7. 8. 9. 0. 0.]]
3428
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3429
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3430
+ >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
3431
+ >>> out = input.scatter(axis=1, index=index, src=src)
3432
+ >>> print(out)
3433
+ [[1. 0. 2. 0. 3.]
3434
+ [4. 0. 5. 0. 6.]
3435
+ [7. 0. 8. 0. 9.]
3436
+ [0. 0. 0. 0. 0.]
3437
+ [0. 0. 0. 0. 0.]]
3438
+ """)
+ attach_docstr("scatter_", r"""scatter_(dim, index, src) -> Tensor
+
+ Update `self` with the values in `src` according to the specified `index` .
+
+ Index the dimension of `self` selected by `dim` using `index` , traverse the other
+ dimensions in sequence, update `self` with the values of `src` , and return `self` .
+
+ This operator is the in-place inverse of :func:`mindspore.Tensor.gather` .
+
+ This operation provides three additional overloads to support the parameter `reduce` and a scalar `value` .
+
+ Here's an example using a 3-dimensional tensor.
+
+ .. code-block::
+
+ self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
+
+ self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
+
+ .. warning::
+ - If multiple indices point to the same position in `self` , the final value of
+ this position in `self` is uncertain.
+ - On Ascend, behavior is unpredictable when the value of `index` is not in the
+ range `[-self.shape[dim], self.shape[dim])` in the forward pass.
+ - This is an experimental API that is subject to change or deletion.
+
+ .. note::
+ The backward gradient from `self` to `src` can be calculated only when
+ the shape of `src` is the same as that of `index` .
+
+ Args:
+ dim (int): Which axis to scatter. Accepted range is `[-r, r)` where `r` is the rank of `self` .
+ index (Tensor): The index to access `self` on the target axis specified by `dim` whose dtype must be int32
+ or int64. If it is an empty Tensor, no operation is performed and `self` is returned directly. Otherwise,
+ its rank must be the same as `self` and the value range of each element must be `[-s, s)`
+ where `s` is the size of `self` along axis `dim` .
+ src (Tensor): The data to update `self` with. It should have the same dtype and rank
+ as `self` .
+
+ Returns:
+ Tensor, the modified `self` itself.
+
+ Raises:
+ TypeError: If type of `self` , `index` or `src` is unsupported.
+ RuntimeError: If `dim` is out of the range `[-r, r)` .
+ RuntimeError: If rank of `self` is larger than 8.
+ RuntimeError: If dtype of tensor `self` , `index` or `src` is unsupported.
+ RuntimeError: If dtype of `self` is not equal to the dtype of `src` .
+ RuntimeError: If `self` , `index`, or `src` have different ranks and `index` is not an empty tensor.
+ RuntimeError: If there is a dimension `d` that makes `index.size(d) > src.size(d)` .
+ RuntimeError: If there is a dimension `d` that makes `index.size(d) > self.size(d)` .
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import Tensor, int64, float32
+ >>> this_tensor = Tensor([[1, 2], [3, 4]], dtype=float32)
+ >>> index = Tensor([[1, 0], [1, 0]], dtype=int64)
+ >>> src = Tensor([[4, 3], [2, 1]], dtype=float32)
+ >>> this_tensor.scatter_(1, index, src)
+ >>> print(this_tensor)
+ [[3., 4.],
+ [1., 2.]]
+
+ .. method:: Tensor.scatter_(dim, index, src, *, reduce) -> Tensor
+ :noindex:
+
+ Update `self` with the values in `src` according to the specified `index` .
+
+ Using the operation specified by `reduce` , index the dimension of `self` selected
+ by `dim` using `index` , traverse the other dimensions in sequence, accumulate or
+ multiply the values of `src` into `self` , and return `self` .
+
+ This operator is the in-place inverse of :func:`mindspore.Tensor.gather` .
+
+ Except that the replacement operation is changed to accumulation or multiplication
+ according to the parameter `reduce` , the behavior is the same as the overloaded
+ function that accepts `src` without the parameter `reduce` .
+
+ Here's an example using a 3-dimensional tensor.
+
+ .. code-block::
+
+ self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0, reduce == "add"
+
+ self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2, reduce == "multiply"
+
+ .. warning::
+ - If multiple indices point to the same position in `self` , the final value of
+ this position in `self` is uncertain.
+ - On Ascend, behavior is unpredictable when the value of `index` is not in the
+ range `[-self.shape[dim], self.shape[dim])` in the forward pass.
+ - This is an experimental API that is subject to change or deletion.
+
+ .. note::
+ This overload does not support backward gradient calculation and returns zeros if a gradient is calculated.
+
+ Args:
+ dim (int): Which axis to scatter. Accepted range is `[-r, r)` where `r` is the rank of `self` .
+ index (Tensor): The index to access `self` on the target axis specified by `dim` whose dtype must be int32
+ or int64. If it is an empty Tensor, no operation is performed and `self` is returned directly. Otherwise,
+ its rank must be the same as `self` and the value range of each element must be `[-s, s)`
+ where `s` is the size of `self` along axis `dim` .
+ src (Tensor): The data to accumulate into or multiply with `self` . It should have the
+ same dtype and rank as `self` .
+
+ Keyword Args:
+ reduce (str): Reduce operation, supports ``"add"`` and ``"multiply"`` . When `reduce` is ``"add"`` , `src`
+ is accumulated into `self` based on `index` . When `reduce` is ``"multiply"`` , `src` is multiplied
+ into `self` based on `index` .
+
+ Returns:
+ Tensor, the modified `self` itself.
+
+ Raises:
+ TypeError: If type of `self` , `index` or `src` is unsupported.
+ ValueError: If `reduce` is a str but not ``"add"`` or ``"multiply"`` .
+ RuntimeError: If `dim` is out of the range `[-r, r)` .
+ RuntimeError: If rank of `self` is larger than 8.
+ RuntimeError: If dtype of tensor `self` , `index` or `src` is unsupported.
+ RuntimeError: If dtype of `self` is not equal to the dtype of `src` .
+ RuntimeError: If `self` , `index`, or `src` have different ranks and `index` is not an empty tensor.
+ RuntimeError: If there is a dimension `d` that makes `index.size(d) > src.size(d)` .
+ RuntimeError: If there is a dimension `d` that makes `index.size(d) > self.size(d)` .
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import Tensor, int64, float32
+ >>> this_tensor = Tensor([[1, 2], [3, 4]], dtype=float32)
+ >>> index = Tensor([[1, 0], [1, 0]], dtype=int64)
+ >>> src = Tensor([[4, 3], [2, 1]], dtype=float32)
+ >>> this_tensor.scatter_(1, index, src, reduce='add')
+ >>> print(this_tensor)
+ [[4., 6.],
+ [4., 6.]]
+
+ .. method:: Tensor.scatter_(dim, index, value) -> Tensor
+ :noindex:
+
+ Update `self` with the value `value` according to the specified `index` .
+
+ Index the dimension of `self` selected by `dim` using `index` , traverse the other
+ dimensions in sequence, update `self` with the value `value` , and return `self` .
+
+ This operator is the in-place inverse of :func:`mindspore.Tensor.gather` .
+
+ The `value` can be considered as being broadcast to a Tensor whose shape
+ and dtype are consistent with `self` , after which the behavior is the same as the
+ overloaded function that accepts `src` without the parameter `reduce` .
+
+ Here's an example using a 3-dimensional tensor.
+
+ .. code-block::
+
+ self[index[i][j][k]][j][k] = value # if dim == 0
+
+ self[i][j][index[i][j][k]] = value # if dim == 2
+
+ .. warning::
+ - If multiple indices point to the same position in `self` , the final value of
+ this position in `self` is uncertain.
+ - On Ascend, behavior is unpredictable when the value of `index` is not in the
+ range `[-self.shape[dim], self.shape[dim])` in the forward pass.
+ - This is an experimental API that is subject to change or deletion.
+
+ Args:
+ dim (int): Which axis to scatter. Accepted range is `[-r, r)` where `r` is the rank of `self` .
+ index (Tensor): The index to access `self` on the target axis specified by `dim` whose dtype must be int32
+ or int64. If it is an empty Tensor, no operation is performed and `self` is returned directly. Otherwise,
+ its rank must be the same as `self` and the value range of each element must be `[-s, s)`
+ where `s` is the size of `self` along axis `dim` .
+ value (int, float, bool): The data to update `self` with. It can be considered as being
+ broadcast into a Tensor whose shape and dtype are the same as `self` , and then be regarded as `src`
+ for calculation.
+
+ Returns:
+ Tensor, the modified `self` itself.
+
+ Raises:
+ TypeError: If type of `self` , `index` or `value` is unsupported.
+ RuntimeError: If `dim` is out of the range `[-r, r)` .
+ RuntimeError: If rank of `self` is larger than 8.
+ RuntimeError: If dtype of tensor `self` or `index` is unsupported.
+ RuntimeError: If `index` is not an empty tensor and its rank is different from the rank of `self` .
+ RuntimeError: If there is a dimension `d` that makes `index.size(d) > self.size(d)` .
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import Tensor, int64, float32
+ >>> this_tensor = Tensor([[1, 2], [3, 4]], dtype=float32)
+ >>> index = Tensor([[0], [1]], dtype=int64)
+ >>> this_tensor.scatter_(0, index, 10)
+ >>> print(this_tensor)
+ [[10., 2.],
+ [10., 4.]]
+
+ .. method:: Tensor.scatter_(dim, index, value, *, reduce) -> Tensor
+ :noindex:
+
+ Update `self` with the value `value` according to the specified `index` .
+
+ Using the operation specified by `reduce` , index the dimension of `self` selected
+ by `dim` using `index` , traverse the other dimensions in sequence, accumulate or
+ multiply the value `value` into `self` , and return `self` .
+
+ This operator is the in-place inverse of :func:`mindspore.Tensor.gather` .
+
+ Except that the replacement operation is changed to accumulation or multiplication
+ according to the parameter `reduce` , the behavior is the same as the overloaded
+ function that accepts `value` without the parameter `reduce` .
+
+ Here's an example using a 3-dimensional tensor.
+
+ .. code-block::
+
+ self[i][index[i][j][k]][k] += value # if dim == 1, reduce == "add"
+
+ self[i][j][index[i][j][k]] *= value # if dim == 2, reduce == "multiply"
+
+ .. warning::
+ - If multiple indices point to the same position in `self` , the final value of
+ this position in `self` is uncertain.
+ - On Ascend, behavior is unpredictable when the value of `index` is not in the
+ range `[-self.shape[dim], self.shape[dim])` in the forward pass.
+ - This is an experimental API that is subject to change or deletion.
+
+ .. note::
+ This overload does not support backward gradient calculation and returns zeros if a gradient is calculated.
+
+ Args:
+ dim (int): Which axis to scatter. Accepted range is `[-r, r)` where `r` is the rank of `self` .
+ index (Tensor): The index to access `self` on the target axis specified by `dim` whose dtype must be int32
+ or int64. If it is an empty Tensor, no operation is performed and `self` is returned directly. Otherwise,
+ its rank must be the same as `self` and the value range of each element must be `[-s, s)`
+ where `s` is the size of `self` along axis `dim` .
+ value (int, float, bool): The data to accumulate into or multiply with `self` . It can be
+ considered as being broadcast into a Tensor whose shape and dtype are the same as `self` , and then
+ be regarded as `src` for calculation.
+
+ Keyword Args:
+ reduce (str): Reduce operation, supports ``"add"`` and ``"multiply"`` . When `reduce` is ``"add"`` , `value`
+ is accumulated into `self` based on `index` . When `reduce` is ``"multiply"`` , `value` is multiplied
+ into `self` based on `index` .
+
+ Returns:
+ Tensor, the modified `self` itself.
+
+ Raises:
+ TypeError: If type of `self` , `index` or `value` is unsupported.
+ ValueError: If `reduce` is a str but not ``"add"`` or ``"multiply"`` .
+ RuntimeError: If `dim` is out of the range `[-r, r)` .
+ RuntimeError: If rank of `self` is larger than 8.
+ RuntimeError: If dtype of tensor `self` or `index` is unsupported.
+ RuntimeError: If `index` is not an empty tensor and its rank is different from the rank of `self` .
+ RuntimeError: If there is a dimension `d` that makes `index.size(d) > self.size(d)` .
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import Tensor, int64, float32
+ >>> this_tensor = Tensor([[1, 2], [3, 4]], dtype=float32)
+ >>> index = Tensor([[0], [1]], dtype=int64)
+ >>> this_tensor.scatter_(0, index, 3, reduce="multiply")
+ >>> print(this_tensor)
+ [[3., 2.],
+ [9., 4.]]
+ """)
+ attach_docstr("select", r"""select(dim, index) -> Tensor
+
+ Slices the `self` tensor along the selected dimension at the given index.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ dim (int): The dimension to slice.
+ index (int): The index to select with.
+
+ Returns:
+ Tensor.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
+ >>> y = Tensor.select(input, 0, 0)
+ >>> print(y)
+ [2 3 4 5]
+
+ .. method:: Tensor.select(condition, y) -> Tensor
+ :noindex:
+
+ The `condition` tensor determines, based on the value of each of its elements, whether the
+ corresponding element in the output is selected from `self` (if True) or from `y` (if False).
+
+ It can be defined as:
+
+ .. math::
+ out_i = \begin{cases}
+ self_i, & \text{if } condition_i \\
+ y_i, & \text{otherwise}
+ \end{cases}
+
+ Args:
+ condition (Tensor[bool]): The condition tensor, decides which element is chosen.
+ The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
+ y (Union[Tensor, int, float]): The second Tensor to be selected.
+ If `y` is a Tensor, its shape should be or be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
+ If `y` is int or float, it will be cast to int32 or float32, and broadcast to the same shape as `self`.
+ There must be at least one Tensor between `self` and `y`.
+
+ Returns:
+ Tensor, has the same shape as `condition`.
+
+ Raises:
+ TypeError: If `y` is not a Tensor, int or float.
+ ValueError: If the shapes of the inputs cannot be broadcast.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> # Both input are Tensor
+ >>> cond = Tensor([True, False])
+ >>> x = Tensor([2,3], mindspore.float32)
+ >>> y = Tensor([1,2], mindspore.float32)
+ >>> output = Tensor.select(x, cond, y)
+ >>> print(output)
+ [2. 2.]
+ """)
+ attach_docstr("sigmoid", r"""sigmoid() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.sigmoid`.
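+
+ A minimal usage sketch, assuming the semantics of :func:`mindspore.ops.sigmoid` referenced above (the input is chosen so the printed values are exact):
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.0, 0.0], ms.float32)
+ >>> print(x.sigmoid())
+ [0.5 0.5]
+ """)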
+ attach_docstr("sinc", r"""sinc() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.sinc`.
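+
+ A minimal usage sketch, assuming the normalized sinc convention of :func:`mindspore.ops.sinc`, where sinc(0) is defined as 1:
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.0], ms.float32)
+ >>> print(x.sinc())
+ [1.]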
+ """)
+ attach_docstr("sinh", r"""sinh() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.sinh`.""")
+ attach_docstr("sin", r"""sin() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.sin`.""")
+ attach_docstr("sort", r"""sort(dim=-1, descending=False) -> (Tensor, Tensor)
+
+ Sorts the elements of `self` along the given dimension in the specified order.
+
+ .. warning::
+ Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
+ Using float32 may cause loss of accuracy.
+
+ Args:
+ dim (int, optional): The dimension to sort along. Default: ``-1``, meaning the last dimension.
+ descending (bool, optional): Controls the sort order. If `descending` is True, the elements
+ are sorted in descending order, otherwise in ascending order. Default: ``False`` .
+
+ Returns:
+ - y1, a tensor whose values are the sorted values, with the same shape and data type as `self`.
+ - y2, a tensor that consists of the indices of the elements in the original `self` tensor.
+ Data type is int64.
+
+ Raises:
+ TypeError: If `dim` is not an int.
+ TypeError: If `descending` is not a bool.
+ TypeError: If dtype of `self` is not one of: float16, float32, uint8, int8, int16, int32, int64, bfloat16.
+ ValueError: If `dim` is not in range of [-len(self.shape), len(self.shape)).
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
+ >>> output = x.sort(dim=-1)
+ >>> # The output below is based on the Ascend platform.
+ >>> print(output)
+ (Tensor(shape=[3, 3], dtype=Float16, value=
+ [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
+ [ 3.0000e+00, 5.0000e+00, 9.0000e+00],
+ [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int64, value=
+ [[2, 1, 0],
+ [2, 0, 1],
+ [0, 1, 2]]))
+
+ .. method:: Tensor.sort(axis=-1, descending=False) -> (Tensor, Tensor)
+ :noindex:
+
+ Sorts the elements of the input tensor along the given dimension in the specified order.
+
+ Args:
+ axis (int, optional): The dimension to sort along. Default: ``-1``, meaning the last dimension.
+ The Ascend backend only supports sorting the last dimension.
+ descending (bool, optional): Controls the sort order. If `descending` is True, the elements
+ are sorted in descending order, otherwise in ascending order. Default: ``False`` .
+
+ .. warning::
+ Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
+ Using float32 may cause loss of accuracy.
+
+ Returns:
+ - y1, a tensor whose values are the sorted values, with the same shape and data type as `self`.
+ - y2, a tensor that consists of the indices of the elements in the original `self` tensor.
+ Data type is int32.
+
+ Raises:
+ TypeError: If `axis` is not an int.
+ TypeError: If `descending` is not a bool.
+ TypeError: If dtype of `self` is not one of: float16, float32, uint8, int8, int16, int32, int64.
+ ValueError: If `axis` is not in range of [-len(self.shape), len(self.shape)).
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
+ >>> output = x.sort(axis=-1)
+ >>> # The output below is based on the Ascend platform.
+ >>> print(output)
+ (Tensor(shape=[3, 3], dtype=Float16, value=
+ [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
+ [ 3.0000e+00, 5.0000e+00, 9.0000e+00],
+ [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
+ [[2, 1, 0],
+ [2, 0, 1],
+ [0, 1, 2]]))
+ """)
+ attach_docstr("split", r"""split(split_size, dim=0) -> tuple(Tensor)
+
+ Splits the Tensor into chunks along the given dim.
+
+ Args:
+ split_size (Union[int, tuple(int), list(int)]):
+ If `split_size` is an int type, `tensor` will be split into equally sized chunks, each chunk with
+ size `split_size`. Last chunk will be smaller than `split_size` if `tensor.shape[dim]` is not divisible
+ by `split_size`.
+ If `split_size` is a tuple or list type, then `tensor` will be split into len(split_size)
+ chunks with sizes `split_size` along the given `dim`.
+ dim (int, optional): The dim along which to split. Default: ``0`` .
+
+ Returns:
+ A tuple of sub-tensors.
+
+ Raises:
+ TypeError: If argument `dim` is not int.
+ ValueError: If argument `dim` is out of range of :math:`[-tensor.ndim, tensor.ndim)`.
+ TypeError: If any element in `split_size` is not an integer.
+ TypeError: If argument `split_size` is not int, tuple(int) or list(int).
+ ValueError: If the sum of `split_size` is not equal to tensor.shape[dim].
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input_x = np.arange(9).astype("float32")
+ >>> output = Tensor.split(Tensor(input_x), 3)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+
+
+ .. method:: Tensor.split(split_size_or_sections, axis=0) -> tuple(Tensor)
+ :noindex:
+
+ Splits the Tensor into chunks along the given axis.
+
+ Args:
+ split_size_or_sections (Union[int, tuple(int), list(int)]):
+ If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
+ each chunk with size `split_size_or_sections`. Last chunk will be smaller than `split_size_or_sections`
+ if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
+ If `split_size_or_sections` is a tuple or list type, then `tensor` will be split into len(split_size_or_sections)
+ chunks with sizes `split_size_or_sections` along the given `axis`.
+ axis (int, optional): The axis along which to split. Default: ``0`` .
+
+ Returns:
+ A tuple of sub-tensors.
+
+ Raises:
+ TypeError: If argument `axis` is not int.
+ ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)`.
+ TypeError: If any element in `split_size_or_sections` is not an integer.
+ TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
+ ValueError: If the sum of `split_size_or_sections` is not equal to tensor.shape[axis].
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input_x = np.arange(9).astype("float32")
+ >>> output = Tensor.split(Tensor(input_x), 3)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+ """)
+ attach_docstr("sqrt", r"""sqrt() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.sqrt`.
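+
+ A minimal usage sketch, assuming :func:`mindspore.ops.sqrt` semantics (inputs chosen so the results are exact):
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 4.0, 9.0], ms.float32)
+ >>> print(x.sqrt())
+ [1. 2. 3.]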
+ """)
+ attach_docstr("square", r"""square() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.square`.
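+
+ A minimal usage sketch, assuming :func:`mindspore.ops.square` semantics (element-wise x * x):
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 2.0, 3.0], ms.float32)
+ >>> print(x.square())
+ [1. 4. 9.]
+ """)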
+ attach_docstr("std", r"""std(axis=None, ddof=0, keepdims=False) -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.std`.
+
+ .. method:: Tensor.std(dim=None, *, correction=1, keepdim=False) -> Tensor
+ :noindex:
+
+ Calculates the standard deviation over the dimensions specified by `dim`. `dim` can be a single dimension, list of
+ dimensions, or None to reduce over all dimensions.
+
+ The standard deviation (:math:`\sigma`) is calculated as:
+
+ .. math::
+ \sigma = \sqrt{\frac{1}{\max(0, N - \delta N)}\sum^{N - 1}_{i = 0}(x_i - \bar{x})^2}
+
+ where :math:`x` is the sample set of elements, :math:`\bar{x}` is the sample mean, :math:`N` is the number
+ of samples and :math:`\delta N` is the `correction`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ dim (None, int, tuple(int), optional): The dimension or dimensions to reduce. Defaults to ``None``.
+ If ``None``, all dimensions are reduced.
+
+ Keyword Args:
+ correction (int, optional): The difference between the sample size and sample degrees of freedom. Defaults
+ to ``1`` (Bessel's correction).
+ keepdim (bool, optional): Whether the output tensor has dim retained or not. If ``True`` , keep these
+ reduced dimensions and the length is 1. If ``False``, don't keep these dimensions. Defaults to ``False``.
+
+ Returns:
+ Tensor, the standard deviation.
+ Suppose the shape of `self` is :math:`(x_0, x_1, ..., x_R)`:
+
+ - If `dim` is () and `keepdim` is set to ``False`` , returns a 0-D Tensor, indicating the standard deviation of
+ all elements in `self`.
+ - If `dim` is int, e.g. ``1`` and `keepdim` is set to ``False`` , then the returned Tensor has shape
+ :math:`(x_0, x_2, ..., x_R)`.
+ - If `dim` is tuple(int) or list(int), e.g. ``(1, 2)`` and `keepdim` is set to ``False`` , then the returned
+ Tensor has shape :math:`(x_0, x_3, ..., x_R)`.
+
+ Raises:
+ TypeError: If `self` is not a Tensor.
+ TypeError: If dtype of `self` is not one of: bfloat16, float16, float32.
+ TypeError: If `dim` is not one of the following: None, int, tuple.
+ TypeError: If `correction` is not an int.
+ TypeError: If `keepdim` is not a bool.
+ ValueError: If `dim` is out of range :math:`[-self.ndim, self.ndim)`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input = Tensor(np.array([[1, 2, 3], [-1, 1, 4]]).astype(np.float32))
+ >>> output = input.std(dim=1, correction=1, keepdim=False)
+ >>> print(output)
+ [1. 2.5166113]
+ """)
+ attach_docstr("subtract", r"""subtract(other, *, alpha=1) -> Tensor
+
+ This interface is deprecated from version 2.4 and will be removed in a future version.
+ """)
+ attach_docstr("sub", r"""sub(other, *, alpha=1) -> Tensor
+
+ Subtracts the scaled `other` value from `self` .
+
+ .. math::
+
+ out_{i} = self_{i} - alpha \times other_{i}
+
+ Note:
+ - When the two inputs have different shapes,
+ they must be able to broadcast to a common shape.
+ - The two inputs and alpha comply with the implicit type conversion rules to make the data types
+ consistent.
+
+ Args:
+ other (Union[Tensor, number.Number, bool]): The second input, which is a number.Number or
+ a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+ Keyword Args:
+ alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
+
+ Returns:
+ Tensor with a shape that is the same as the broadcast shape of `self` and `other`,
+ and the data type is the one with higher precision or higher digits among the two inputs and alpha.
+
+ Raises:
+ TypeError: If the type of `other` or `alpha` is not one of the following: Tensor, number.Number, bool.
+ TypeError: If `alpha` is of type float but `self` and `other` are not of type float.
+ TypeError: If `alpha` is of type bool but `self` and `other` are not of type bool.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
+ >>> y = Tensor(1, mindspore.int32)
+ >>> output = Tensor.sub(x, y, alpha=0.5)
+ >>> print(output)
+ [3.5 4.5 5.5]
+ >>> # the data type of x is float32, the data type of y is int32,
+ >>> # alpha is a float, and the output is the data format of higher precision float32.
+ >>> print(output.dtype)
+ Float32
+
+
+ .. method:: Tensor.sub(y) -> Tensor
+ :noindex:
+
+ For details, please refer to :func:`mindspore.ops.sub` .
+ """)
+ attach_docstr("sub_", r"""sub_(other, *, alpha=1) -> Tensor
+
+ For details, please refer to :func:`mindspore.mint.sub`.
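+
+ A minimal in-place usage sketch, assuming `sub_` follows the :func:`mindspore.mint.sub` semantics referenced above and modifies `self` in place:
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = Tensor([4.0, 5.0, 6.0], ms.float32)
+ >>> y = Tensor([1.0, 1.0, 1.0], ms.float32)
+ >>> x.sub_(y, alpha=2)
+ >>> print(x)
+ [2. 3. 4.]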
+ """)
+ attach_docstr("sum", r"""sum(dim=None, keepdim=False, *, dtype=None) -> Tensor
+
+ Calculate the sum of Tensor elements over a given dim.
+
+ Note:
+ The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+
+ Args:
+ dim (Union[None, int, tuple(int), list(int), Tensor], optional): Dimensions along which a sum is performed.
+ If ``None`` , sum all the elements of the self tensor.
+ If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
+ Must be in the range :math:`[-self.ndim, self.ndim)` . Default: ``None`` .
+ keepdim (bool, optional): Whether the output tensor has `dim` retained or not.
+ If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
+
+ Keyword Args:
+ dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
+
+ Returns:
+ A Tensor, sum of elements over a given `dim` in `self`.
+
+ Raises:
+ TypeError: If `dim` is not an int, tuple(int), list(int), Tensor or None.
+ ValueError: If `dim` is not in the range :math:`[-self.ndim, self.ndim)` .
+ TypeError: If `keepdim` is not a bool.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
+ ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
+ >>> out = Tensor.sum(x)
+ >>> print(out)
+ 270.0
+ >>> out = Tensor.sum(x, dim=2)
+ >>> print(out)
+ [[ 6. 12. 18.]
+ [24. 30. 36.]
+ [42. 48. 54.]]
+ >>> out = Tensor.sum(x, dim=2, keepdim=True)
+ >>> print(out)
+ [[[ 6.]
+ [12.]
+ [18.]]
+ [[24.]
+ [30.]
+ [36.]]
+ [[42.]
+ [48.]
+ [54.]]]
+
+
+ .. method:: Tensor.sum(axis=None, dtype=None, keepdims=False, initial=None) -> Tensor
+ :noindex:
+
+ Return the sum of tensor elements over a given axis.
+
+ Note:
+ Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported.
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
+
+ Args:
+ axis (Union[None, int, tuple(int), list(int), Tensor], optional): Axis or axes along which a sum is performed.
+ Default: ``None`` .
+ If ``None`` , sum all the elements of the self tensor.
+ If the `axis` is negative, it counts from the last to the first `axis`.
+ If the `axis` is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
+ or list instead of a single `axis` or all the axes as before.
+ dtype (:class:`mindspore.dtype`, optional): Default: ``None`` . Overrides the dtype of the
+ output Tensor.
+ keepdims (bool, optional): If this is set to ``True`` , the axes which are reduced are left in the result as
+ dimensions with size one. With this option, the result will broadcast correctly against the self
+ array. If the default value is passed, then `keepdims` will not be passed through to the sum method
+ of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
+ implement `keepdims` any exceptions will be raised. Default: ``False`` .
+ initial (scalar, optional): Starting value for the sum. Default: ``None`` .
+
+ Returns:
+ Tensor. A tensor with the same shape as self, with the specified `axis` removed.
+ If the self tensor is a 0-d array, or if the `axis` is ``None`` , a scalar is returned.
+
+ Raises:
+ TypeError: If self is not array_like, or `axis` is not int, tuple of ints, list of ints or Tensor,
+ or `keepdims` is not integer, or `initial` is not scalar.
+ ValueError: If any `axis` is out of range or duplicate axes exist.
+
+ See also:
+ - :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given `axis`.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input_x = Tensor(np.array([-1, 0, 1]).astype(np.float32))
+ >>> print(input_x.sum())
+ 0.0
+ >>> input_x = Tensor(np.arange(10).reshape(2, 5).astype(np.float32))
+ >>> print(input_x.sum(axis=1))
+ [10. 35.]
+ """)
+ attach_docstr("take", r"""take(indices, axis=None, mode='clip') -> Tensor
+
+ Takes elements from a tensor along an axis.
+
+ Args:
+ indices (Tensor): The indices with shape :math:`(N_j...)` of the values to extract.
+ axis (int, optional): The axis over which to select values. By default,
+ the flattened input tensor is used. Default: ``None`` .
+ mode (str, optional): Support ``'raise'``, ``'wrap'``, ``'clip'``.
+
+ - ``raise``: Raises an error;
+
+ - ``wrap``: Wraps around;
+
+ - ``clip``: Clips to the range. ``'clip'`` mode means that all indices that are
+ too large are replaced by the index that addresses the last element
+ along that axis. Note that this disables indexing with negative numbers.
+
+ Default: ``'clip'`` .
+
+ Returns:
+ Tensor, the indexed result.
+
+ Raises:
+ ValueError: If `axis` is out of range, or `mode` has values other than ('raise', 'wrap', 'clip').
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> a = Tensor(np.array([4, 3, 5, 7, 6, 8]))
+ >>> indices = Tensor(np.array([0, 1, 4]))
+ >>> output = a.take(indices)
+ >>> print(output)
+ [4 3 6]
+
+ .. method:: Tensor.take(index) -> Tensor
+ :noindex:
+
+ Select elements of `self` at the given `index` , treating `self` as if it were flattened.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ index (LongTensor): The index tensor into `self` .
+
+ Returns:
+ Tensor, has the same shape as `index` and the same data type as `self` .
+
+ Raises:
+ TypeError: If the dtype of `index` is not int64 (long).
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> input = Tensor([[4, 3, 5],[6, 7, 8]], ms.float32)
+ >>> index = Tensor([0, 2, 5], ms.int64)
+ >>> output = input.take(index)
+ >>> print(output)
+ [4. 5. 8.]""")
+ attach_docstr("tanh", r"""tanh() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.tanh`.
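+
+ A minimal usage sketch, assuming :func:`mindspore.ops.tanh` semantics (tanh(0) is exactly 0):
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = Tensor([0.0], ms.float32)
+ >>> print(x.tanh())
+ [0.]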
+ """)
+ attach_docstr("tan", r"""tan() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.tan`.
+ """)
+ attach_docstr("tile", r"""tile(dims) -> Tensor
+
+ Replicates a tensor, with the number of replications along each dimension given by `dims` .
+
+ Note:
+ On Ascend, the length of `dims` should not exceed 8, and repeating more than 4 dimensions
+ simultaneously is currently not supported.
+
+ Args:
+ dims (tuple[int]): The parameter that specifies the number of replications,
+ the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
+ Only constant value is allowed.
+
+ Returns:
+ Tensor, has the same data type as the `self`. Suppose the length of `dims` is `d`,
+ the dimension of `self` is `self.dim`, and the shape of `self` is :math:`(x_1, x_2, ..., x_S)`.
+
+ - If `self.dim = d`, then the shape of their corresponding positions can be multiplied, and
+ the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
+ - If `self.dim < d`, prepend 1 to the shape of `self` until their lengths are consistent.
+ For example, set the shape of `self` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
+ then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
+ :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
+ - If `self.dim > d`, prepend 1 to `dims` until their lengths are consistent. For example, set the
+ `dims` as :math:`(1, ..., y_1, y_2, ..., y_S)`, then the shape of their corresponding positions
+ can be multiplied, and the shape of Outputs is :math:`(x_1*1, ..., x_R*y_R, x_S*y_S)`.
+
+ Raises:
+ TypeError: If `dims` is not a tuple or not all elements are int.
+ ValueError: If not all elements of `dims` are greater than or equal to 0.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
+ >>> dims = (2, 3)
+ >>> output = input.tile(dims)
+ >>> print(output)
+ [[1. 2. 1. 2. 1. 2.]
+ [3. 4. 3. 4. 3. 4.]
+ [1. 2. 1. 2. 1. 2.]
+ [3. 4. 3. 4. 3. 4.]]
+ >>> dims = (2, 3, 2)
+ >>> output = input.tile(dims)
+ >>> print(output)
+ [[[1. 2. 1. 2.]
+ [3. 4. 3. 4.]
+ [1. 2. 1. 2.]
+ [3. 4. 3. 4.]
+ [1. 2. 1. 2.]
+ [3. 4. 3. 4.]]
+ [[1. 2. 1. 2.]
+ [3. 4. 3. 4.]
+ [1. 2. 1. 2.]
+ [3. 4. 3. 4.]
+ [1. 2. 1. 2.]
+ [3. 4. 3. 4.]]]
+
+
+ .. method:: Tensor.tile(reps) -> Tensor
+ :noindex:
+
+ For more details, please refer to :func:`mindspore.ops.tile`. The parameter `reps` of this interface corresponds to the parameter `dims` of the referenced interface.
+ """)
+ attach_docstr("topk", r"""topk(k, dim=-1, largest=True, sorted=True) -> tuple(Tensor, Tensor)
+
+ Finds the `k` largest or smallest elements along the given dimension and returns their values and corresponding indices.
+
+ .. warning::
+ - Due to different memory layout and traversal methods on different platforms,
+ the display order of calculation results may be inconsistent when `sorted` is False.
+
+ If `self` is a one-dimensional Tensor, finds the `k` largest or smallest entries in the Tensor,
+ and outputs their values and indices as Tensors. `values[k]` is the `k`-th largest item in `self` ,
+ and its index is `indices[k]` .
+
+ For a multi-dimensional matrix,
+ calculates the first or last `k` entries in a given dimension, therefore:
+
+ .. math::
+
+ values.shape = indices.shape
+
+ If the two compared elements are the same, the one with the smaller index value is returned first.
+
+ Args:
+ k (int): The number of top or bottom elements to be computed along the last dimension.
+ dim (int, optional): The dimension to sort along. Default: ``-1`` .
+ largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
+ Default: ``True`` .
+ sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending
+ order or ascending order according to `largest`. If ``False`` , the obtained elements will not be
+ sorted. Default: ``True`` .
+
+ Returns:
+ A tuple consisting of `values` and `indices`.
+
+ - values (Tensor) - The `k` largest or smallest elements in each slice of the given dimension.
+ - indices (Tensor) - The indices of values within the last dimension of self.
+
+ Raises:
+ TypeError: If `sorted` is not a bool.
+ TypeError: If `k` is not an int.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = ms.Tensor([[0.5368, 0.2447, 0.4302, 0.9673],
+ ... [0.4388, 0.6525, 0.4685, 0.1868],
+ ... [0.3563, 0.5152, 0.9675, 0.8230]], dtype=ms.float32)
+ >>> output = Tensor.topk(x, 2, dim=1)
+ >>> print(output)
+ (Tensor(shape=[3, 2], dtype=Float32, value=
+ [[ 9.67299998e-01, 5.36800027e-01],
+ [ 6.52499974e-01, 4.68499988e-01],
+ [ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
+ [[3, 0],
+ [1, 2],
+ [2, 3]]))
+ >>> output2 = Tensor.topk(x, 2, dim=1, largest=False)
+ >>> print(output2)
+ (Tensor(shape=[3, 2], dtype=Float32, value=
+ [[ 2.44700000e-01, 4.30200011e-01],
+ [ 1.86800003e-01, 4.38800007e-01],
+ [ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
+ [[1, 2],
+ [3, 0],
+ [0, 1]]))
+
+ .. method:: Tensor.topk(k, dim=None, largest=True, sorted=True) -> tuple(Tensor, Tensor)
+ :noindex:
+
+ For more details, please refer to :func:`mindspore.ops.topk`.
+ """)
+ attach_docstr("transpose", r"""transpose(dim0, dim1) -> Tensor
+
+ Interchange two axes of a tensor.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ dim0 (int): Specifies the first dimension to be transposed.
+ dim1 (int): Specifies the second dimension to be transposed.
+
+ Returns:
+ Transposed tensor, has the same data type as `self`.
+
+ Raises:
+ TypeError: If `dim0` or `dim1` is not an integer.
+ ValueError: If `dim0` or `dim1` is not in the range of :math:`[-ndim, ndim-1]`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
+ >>> output = Tensor.transpose(input, 0, 2)
+ >>> print(output.shape)
+ (4, 3, 2)
+
+
+ .. method:: Tensor.transpose(*axes) -> Tensor
+ :noindex:
+
+ Permutes the dimensions of `self` according to the given permutation.
+
+ For a 1-D array this has no effect, as a transposed vector is simply the same vector.
+ To convert a 1-D array into a 2D column vector please refer to :func:`mindspore.ops.expand_dims`.
+ For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
+ their order indicates how the axes are permuted (see Examples).
+ If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
+ then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
+
+ Note:
+ On GPU and CPU, if the value of `axes` is negative, its actual value is `axes[i] + rank(self)`.
+
+ Args:
+ axes (tuple[int]): The permutation to be applied. The elements in `axes` are composed of the
+ indexes of each dimension of `self`. The length of `axes` and the rank of `self` must be the
+ same. Only constant value is allowed. Must be in the range [-rank(self), rank(self)).
+
+ Returns:
+ Tensor, the type of output tensor is the same as `self` and the shape of output tensor is decided by the
+ shape of `self` and the value of `axes`.
+
+ Raises:
+ TypeError: If `axes` is not a tuple.
+ ValueError: If the rank of `self` is not equal to the length of `axes`.
+ ValueError: If the same element exists in `axes`.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
+ >>> axes = (0, 2, 1)
+ >>> output = Tensor.transpose(input, axes)
+ >>> print(output)
+ [[[ 1. 4.]
+ [ 2. 5.]
+ [ 3. 6.]]
+ [[ 7. 10.]
+ [ 8. 11.]
+ [ 9. 12.]]]
+ """)
+ attach_docstr("tril", r"""tril(diagonal=0) -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.tril`.
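+
+ A minimal usage sketch, assuming :func:`mindspore.ops.tril` semantics (zero out elements above the `diagonal`):
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> x = Tensor([[1, 2], [3, 4]])
+ >>> print(x.tril())
+ [[1 0]
+ [3 4]]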
+ """)
+ attach_docstr("triu", r"""triu(diagonal=0) -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.triu`.
+ """)
+ attach_docstr("true_divide", r"""true_divide(other) -> Tensor
+
+ Alias for :func:`mindspore.Tensor.div` with :math:`rounding\_mode=None` .
+ For details, please refer to :func:`mindspore.ops.div`.
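+
+ A minimal usage sketch, assuming true (non-truncating) element-wise division:
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = Tensor([6.0, 8.0], ms.float32)
+ >>> y = Tensor([3.0, 2.0], ms.float32)
+ >>> print(x.true_divide(y))
+ [2. 4.]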
+ """)
+ attach_docstr("trunc", r"""trunc() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.trunc`.
+ """)
+ attach_docstr("t", r"""t() -> Tensor
+
+ Transpose `self` .
+
+ For details, please refer to :func:`mindspore.ops.t`.
+
+ Supported Platforms:
+ ``Ascend``
+ """)
+ attach_docstr("unbind", r"""unbind(dim=0) -> tuple(Tensor)
+
+ For details, please refer to :func:`mindspore.ops.unbind`.
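+
+ A minimal usage sketch, assuming :func:`mindspore.ops.unbind` semantics (remove `dim` and return the slices along it):
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> x = Tensor([[1, 2], [3, 4]])
+ >>> out = x.unbind(dim=0)
+ >>> print(out[0])
+ [1 2]
+ >>> print(out[1])
+ [3 4]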
+ """)
+ attach_docstr("unique", r"""unique(sorted=True, return_inverse=False, return_counts=False, dim=None) -> tuple(Tensor)
+
+ Returns the unique elements of `self`.
+
+ When `return_inverse=True` , also returns a tensor containing the index of each value of `self`
+ corresponding to the output unique tensor.
+ When `return_counts=True` , also returns a tensor containing the number of occurrences for each
+ unique value or tensor.
+
+ Args:
+ sorted(bool, optional): Whether to sort the unique elements in ascending order before returning as output.
+ Default: ``True`` .
+ return_inverse(bool, optional): Whether to also return the indices for where elements in `self` ended up in
+ the returned unique list. Default: ``False`` .
+ return_counts(bool, optional): Whether to also return the counts for each unique element. Default: ``False`` .
+ dim(int, optional): the dimension to operate upon. If ``None``, the unique of the flattened `self` is returned.
+ Otherwise, each of the tensors indexed by the given dimension is treated as one of the elements to apply the
+ unique operation upon. Default: ``None`` .
+
+ Returns:
+ A tensor or a tuple of tensors containing some of the tensor objects (`output`, `inverse_indices`, `counts`).
+
+ - **output** (Tensor) - The output tensor including the unique elements of `self`, it has the same dtype as `self`.
+ - **inverse_indices** (Tensor, optional) - Returned when ``return_inverse`` is True. It represents the indices for where
+ elements in `self` map to in the output. When ``dim`` is ``None``, it has the same shape as `self`,
+ otherwise, the shape is self.shape[dim].
+ - **counts** (Tensor, optional) - Returned when ``return_counts`` is True. It represents the number of occurrences for each
+ unique value or tensor. When ``dim`` is ``None``, it has the same shape as `output`, otherwise, the shape is
+ output.shape[dim].
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
+ >>> output = x.unique(return_inverse=True)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int64, value= [0, 1, 2, 1]))
+ >>> y = output[0]
+ >>> print(y)
+ [1 2 5]
+ >>> idx = output[1]
+ >>> print(idx)
+ [0 1 2 1]
+ """)
+ attach_docstr("unsqueeze", r"""unsqueeze(dim) -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.unsqueeze`.
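+
+ A minimal usage sketch, assuming :func:`mindspore.ops.unsqueeze` semantics (insert a dimension of size 1 at position `dim`):
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> x = Tensor([[1, 2], [3, 4]])
+ >>> print(x.unsqueeze(dim=0).shape)
+ (1, 2, 2)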
+ """)
+ attach_docstr("var", r"""var(axis=None, ddof=0, keepdims=False) -> Tensor
+
+ Compute the variance along the specified axis.
+
+ The variance is the average of the squared deviations from the mean, i.e.,
+ :math:`var = mean(abs(x - x.mean())**2)`.
+
+ Return the variance, which is computed for the flattened array by default,
+ otherwise over the specified axis.
+
+ Note:
+ Numpy arguments `dtype`, `out` and `where` are not supported.
+
+ Args:
+ axis (Union[None, int, tuple(int)], optional): Axis or axes along which the variance is computed.
+ The default is to compute the variance of the flattened array. Default: ``None`` .
+ ddof (int, optional): Means Delta Degrees of Freedom. Default: ``0`` .
+ The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
+ keepdims (bool, optional): Whether the output Tensor has dim retained or not. If ``True`` , keep these reduced
+ dimensions and the length is 1. If ``False`` , don't keep these dimensions. Default: ``False`` .
+
+ Returns:
+ Variance tensor.
+
+ Raises:
+ TypeError: If `axis` is not one of the following: None, int, tuple.
+ TypeError: If `ddof` is not an int.
+ TypeError: If `keepdims` is not a bool.
+ ValueError: If `axis` is out of range :math:`[-self.ndim, self.ndim)`.
+
+ See also:
+ - :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+ - :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input_x = Tensor(np.array([1., 2., 3., 4.], np.float32))
+ >>> output = input_x.var()
+ >>> print(output)
+ 1.25
+
+ .. method:: Tensor.var(dim=None, *, correction=1, keepdim=False) -> Tensor
+ :noindex:
+
+ Calculates the variance over the dimensions specified by `dim`. `dim` can be a single dimension, list of
+ dimensions, or None to reduce over all dimensions.
+
+ The variance (:math:`\delta ^2`) is calculated as:
+
+ .. math::
+ \delta ^2 = \frac{1}{\max(0, N - \delta N)}\sum^{N - 1}_{i = 0}(x_i - \bar{x})^2
+
+ where :math:`x` is the sample set of elements, :math:`\bar{x}` is the sample mean, :math:`N` is the number
+ of samples and :math:`\delta N` is the `correction`.
+
+ Args:
+ dim (None, int, tuple(int), optional): The dimension or dimensions to reduce. Defaults to ``None``.
+ If ``None``, all dimensions are reduced.
+
+ Keyword Args:
+ correction (int, optional): The difference between the sample size and sample degrees of freedom. Defaults
+ to ``1`` (Bessel's correction).
+ keepdim (bool, optional): Whether the output tensor has dim retained or not. If ``True`` , keep these
+ reduced dimensions and the length is 1. If ``False``, don't keep these dimensions. Defaults to ``False``.
+
+ Returns:
+ Tensor, the variance.
+ Suppose the shape of `self` is :math:`(x_0, x_1, ..., x_R)`:
+
+ - If `dim` is () and `keepdim` is set to ``False`` , returns a 0-D Tensor, indicating the variance of all
+ elements in `self`.
+ - If `dim` is int, e.g. ``1`` and `keepdim` is set to ``False`` , then the returned Tensor has shape
+ :math:`(x_0, x_2, ..., x_R)`.
+ - If `dim` is tuple(int) or list(int), e.g. ``(1, 2)`` and `keepdim` is set to ``False`` , then the returned
+ Tensor has shape :math:`(x_0, x_3, ..., x_R)`.
+
+ Raises:
+ TypeError: If `dim` is not one of the following: None, int, list, tuple.
+ TypeError: If `correction` is not an int.
+ TypeError: If `keepdim` is not a bool.
+ ValueError: If `dim` is out of range :math:`[-self.ndim, self.ndim)`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> input_x = Tensor([[8, 2, 1], [5, 9, 3], [4, 6, 7]], mindspore.float32)
+ >>> output = input_x.var(dim=0, correction=1, keepdim=True)
+ >>> print(output)
+ [[ 4.333333, 12.333333, 9.333333]]""")
+ attach_docstr("view_as", r"""view_as(other) -> Tensor
+
+ View `self` Tensor as the same shape as `other` .
+
+ Args:
+ other (Tensor): The reference tensor; the returned Tensor has the same shape as `other` .
+
+ Returns:
+ Tensor, has the same shape as `other`.
+
+ Raises:
+ TypeError: If `other` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> a = Tensor([[1, 2, 3], [2, 3, 4]], mstype.float32)
+ >>> b = Tensor([1, 1, 1, 1, 1, 1], mstype.float32)
+ >>> output = a.view_as(b)
+ >>> print(output)
+ [1. 2. 3. 2. 3. 4.]
+ """)
+ attach_docstr("where", r"""where(condition, y) -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.where`.
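+
+ A minimal usage sketch, assuming :func:`mindspore.ops.where` semantics (keep elements of `self` where `condition` is True, otherwise take them from `y`):
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> cond = Tensor([True, False])
+ >>> x = Tensor([1.0, 2.0], ms.float32)
+ >>> y = Tensor([9.0, 9.0], ms.float32)
+ >>> print(x.where(cond, y))
+ [1. 9.]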
+ """)
+ attach_docstr("xlogy", r"""xlogy(other) -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.xlogy`.
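+
+ A minimal usage sketch, assuming :func:`mindspore.ops.xlogy` computes `self * log(other)` element-wise (log(1) is exactly 0):
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = Tensor([1.0, 2.0], ms.float32)
+ >>> other = Tensor([1.0, 1.0], ms.float32)
+ >>> print(x.xlogy(other))
+ [0. 0.]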
+ """)
+ attach_docstr("_to", r"""_to(dtype) -> Tensor
+
+ Returns a tensor with the new specified data type.
+
+ Note:
+ When converting complex numbers to boolean type, the imaginary part of the complex number is not
+ taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
+
+ Args:
+ dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
+
+ Returns:
+ Tensor, the data type of the tensor is `dtype`.
+
+ Raises:
+ TypeError: If `dtype` is not a Number.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
+ >>> input = Tensor(input_np)
+ >>> dtype = mindspore.int32
+ >>> output = input._to(dtype)
+ >>> print(output.dtype)
+ Int32
+ >>> print(output.shape)
+ (2, 3, 4, 5)
+ """)
+ attach_docstr("__abs__", r"""__abs__() -> Tensor
+
+ Alias for :func:`mindspore.Tensor.abs`.
+ """)
+ attach_docstr("__add__", r"""__add__(other) -> Tensor
+
+ Alias for :func:`mindspore.Tensor.add`.
+
+ .. method:: Tensor.__add__(other, *, alpha=1) -> Tensor
+ :noindex:
+
+ Alias for the corresponding overload of :func:`mindspore.Tensor.add`.
+ """)
+ attach_docstr("__isub__", r"""__isub__(other, *, alpha=1) -> Tensor
+
+ Alias for the `mindspore.Tensor.sub(other, *, alpha=1)` overload of :func:`mindspore.Tensor.sub`.
+
+ .. method:: Tensor.__isub__(y) -> Tensor
+ :noindex:
+
+ Alias for the `mindspore.Tensor.sub(y)` overload of :func:`mindspore.Tensor.sub`.
+ """)
+ attach_docstr("__pow__", r"""__pow__(exponent) -> Tensor
+
+ Alias for :func:`mindspore.Tensor.pow`.
+ """)
+ attach_docstr("__sub__", r"""__sub__(other, *, alpha=1) -> Tensor
+
+ Alias for the `mindspore.Tensor.sub(other, *, alpha=1)` overload of :func:`mindspore.Tensor.sub`.
+
+ .. method:: Tensor.__sub__(y) -> Tensor
+ :noindex:
+
+ Alias for the `mindspore.Tensor.sub(y)` overload of :func:`mindspore.Tensor.sub`.
+ """)