mindspore 2.0.0a0__cp38-cp38-win_amd64.whl → 2.0.0rc1__cp38-cp38-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (655)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -2
  3. mindspore/_c_dataengine.cp38-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp38-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp38-win_amd64.pyd +0 -0
  6. mindspore/_check_jit_forbidden_api.py +102 -0
  7. mindspore/_checkparam.py +1066 -1001
  8. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
  9. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
  10. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
  11. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
  12. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  13. mindspore/_extends/parse/__init__.py +5 -3
  14. mindspore/_extends/parse/namespace.py +16 -1
  15. mindspore/_extends/parse/parser.py +107 -22
  16. mindspore/_extends/parse/resources.py +0 -7
  17. mindspore/_extends/parse/standard_method.py +885 -413
  18. mindspore/amp.py +52 -57
  19. mindspore/boost/boost.py +2 -2
  20. mindspore/boost/boost_cell_wrapper.py +38 -20
  21. mindspore/boost/dim_reduce.py +3 -3
  22. mindspore/boost/group_loss_scale_manager.py +1 -1
  23. mindspore/common/__init__.py +4 -6
  24. mindspore/common/_decorator.py +2 -0
  25. mindspore/common/_register_for_adapter.py +55 -0
  26. mindspore/common/_stub_tensor.py +201 -0
  27. mindspore/common/_utils.py +41 -7
  28. mindspore/common/api.py +215 -141
  29. mindspore/common/dtype.py +8 -1
  30. mindspore/common/dump.py +2 -2
  31. mindspore/common/initializer.py +4 -2
  32. mindspore/common/jit_config.py +17 -13
  33. mindspore/common/mutable.py +33 -13
  34. mindspore/common/parameter.py +23 -21
  35. mindspore/common/seed.py +8 -24
  36. mindspore/common/sparse_tensor.py +62 -41
  37. mindspore/common/tensor.py +852 -1154
  38. mindspore/communication/__init__.py +2 -2
  39. mindspore/communication/_comm_helper.py +11 -4
  40. mindspore/communication/management.py +22 -21
  41. mindspore/config/op_info.config +501 -1008
  42. mindspore/context.py +201 -23
  43. mindspore/dataset/__init__.py +6 -6
  44. mindspore/dataset/audio/__init__.py +7 -7
  45. mindspore/dataset/audio/transforms.py +670 -30
  46. mindspore/dataset/audio/utils.py +47 -4
  47. mindspore/dataset/audio/validators.py +223 -1
  48. mindspore/dataset/callback/ds_callback.py +2 -2
  49. mindspore/dataset/core/config.py +210 -14
  50. mindspore/dataset/core/validator_helpers.py +2 -2
  51. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  52. mindspore/dataset/debug/debug_hook.py +65 -0
  53. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  54. mindspore/dataset/engine/__init__.py +7 -3
  55. mindspore/dataset/engine/cache_client.py +1 -1
  56. mindspore/dataset/engine/datasets.py +322 -66
  57. mindspore/dataset/engine/datasets_audio.py +80 -76
  58. mindspore/dataset/engine/datasets_standard_format.py +51 -38
  59. mindspore/dataset/engine/datasets_text.py +232 -118
  60. mindspore/dataset/engine/datasets_user_defined.py +41 -17
  61. mindspore/dataset/engine/datasets_vision.py +746 -225
  62. mindspore/dataset/engine/graphdata.py +75 -10
  63. mindspore/dataset/engine/iterators.py +45 -5
  64. mindspore/dataset/engine/offload.py +48 -28
  65. mindspore/dataset/engine/validators.py +117 -8
  66. mindspore/dataset/text/__init__.py +6 -5
  67. mindspore/dataset/text/transforms.py +86 -3
  68. mindspore/dataset/text/utils.py +6 -4
  69. mindspore/dataset/text/validators.py +25 -0
  70. mindspore/dataset/transforms/__init__.py +3 -2
  71. mindspore/dataset/transforms/c_transforms.py +1 -1
  72. mindspore/dataset/transforms/transforms.py +2 -2
  73. mindspore/dataset/utils/__init__.py +2 -1
  74. mindspore/dataset/utils/line_reader.py +121 -0
  75. mindspore/dataset/vision/__init__.py +2 -3
  76. mindspore/dataset/vision/c_transforms.py +9 -9
  77. mindspore/dataset/vision/py_transforms.py +5 -5
  78. mindspore/dataset/vision/py_transforms_util.py +2 -0
  79. mindspore/dataset/vision/transforms.py +160 -161
  80. mindspore/dataset/vision/utils.py +3 -3
  81. mindspore/experimental/map_parameter.py +38 -26
  82. mindspore/include/OWNERS +0 -1
  83. mindspore/include/api/callback/callback.h +9 -13
  84. mindspore/include/api/callback/ckpt_saver.h +2 -2
  85. mindspore/include/api/callback/loss_monitor.h +2 -2
  86. mindspore/include/api/callback/lr_scheduler.h +5 -5
  87. mindspore/include/api/callback/time_monitor.h +2 -2
  88. mindspore/include/api/callback/train_accuracy.h +4 -6
  89. mindspore/include/api/cfg.h +19 -6
  90. mindspore/include/api/context.h +44 -9
  91. mindspore/include/api/delegate.h +1 -1
  92. mindspore/include/api/metrics/accuracy.h +2 -2
  93. mindspore/include/api/metrics/metrics.h +4 -3
  94. mindspore/include/api/model.h +9 -4
  95. mindspore/include/api/model_parallel_runner.h +2 -2
  96. mindspore/include/api/net.h +12 -11
  97. mindspore/include/api/serialization.h +19 -3
  98. mindspore/include/api/types.h +3 -3
  99. mindspore/include/dataset/constants.h +7 -0
  100. mindspore/include/dataset/text.h +59 -0
  101. mindspore/jpeg62.dll +0 -0
  102. mindspore/log.py +1 -1
  103. mindspore/mindrecord/filereader.py +18 -0
  104. mindspore/mindrecord/filewriter.py +197 -34
  105. mindspore/mindrecord/shardreader.py +9 -0
  106. mindspore/mindrecord/shardwriter.py +1 -1
  107. mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
  108. mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
  109. mindspore/mindrecord/tools/csv_to_mr.py +3 -3
  110. mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
  111. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  112. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  113. mindspore/mindspore_backend.dll +0 -0
  114. mindspore/mindspore_common.dll +0 -0
  115. mindspore/mindspore_core.dll +0 -0
  116. mindspore/mindspore_glog.dll +0 -0
  117. mindspore/mindspore_shared_lib.dll +0 -0
  118. mindspore/nn/__init__.py +0 -4
  119. mindspore/nn/cell.py +204 -132
  120. mindspore/nn/dynamic_lr.py +1 -1
  121. mindspore/nn/grad/cell_grad.py +7 -6
  122. mindspore/nn/layer/__init__.py +5 -4
  123. mindspore/nn/layer/activation.py +40 -89
  124. mindspore/nn/layer/basic.py +255 -624
  125. mindspore/nn/layer/channel_shuffle.py +7 -6
  126. mindspore/nn/layer/combined.py +1 -1
  127. mindspore/nn/layer/container.py +41 -4
  128. mindspore/nn/layer/conv.py +64 -28
  129. mindspore/nn/layer/dense.py +9 -8
  130. mindspore/nn/layer/embedding.py +27 -25
  131. mindspore/nn/layer/image.py +53 -46
  132. mindspore/nn/layer/math.py +97 -105
  133. mindspore/nn/layer/normalization.py +117 -86
  134. mindspore/nn/layer/padding.py +185 -95
  135. mindspore/nn/layer/pooling.py +817 -414
  136. mindspore/nn/layer/rnn_cells.py +10 -15
  137. mindspore/nn/layer/rnns.py +37 -38
  138. mindspore/nn/layer/thor_layer.py +11 -12
  139. mindspore/nn/layer/timedistributed.py +5 -5
  140. mindspore/nn/layer/transformer.py +701 -0
  141. mindspore/nn/learning_rate_schedule.py +8 -8
  142. mindspore/nn/loss/__init__.py +5 -4
  143. mindspore/nn/loss/loss.py +334 -199
  144. mindspore/nn/optim/ada_grad.py +6 -6
  145. mindspore/nn/optim/adadelta.py +2 -3
  146. mindspore/nn/optim/adafactor.py +4 -5
  147. mindspore/nn/optim/adam.py +126 -62
  148. mindspore/nn/optim/adamax.py +3 -4
  149. mindspore/nn/optim/adasum.py +6 -6
  150. mindspore/nn/optim/asgd.py +2 -2
  151. mindspore/nn/optim/ftrl.py +67 -38
  152. mindspore/nn/optim/lamb.py +4 -5
  153. mindspore/nn/optim/lars.py +2 -2
  154. mindspore/nn/optim/lazyadam.py +43 -4
  155. mindspore/nn/optim/momentum.py +6 -5
  156. mindspore/nn/optim/optimizer.py +3 -1
  157. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  158. mindspore/nn/optim/rmsprop.py +1 -1
  159. mindspore/nn/optim/rprop.py +8 -9
  160. mindspore/nn/optim/sgd.py +19 -13
  161. mindspore/nn/optim/thor.py +10 -15
  162. mindspore/nn/probability/__init__.py +0 -2
  163. mindspore/nn/probability/bijector/bijector.py +4 -4
  164. mindspore/nn/probability/bijector/invert.py +1 -1
  165. mindspore/nn/probability/bijector/softplus.py +2 -2
  166. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  167. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  168. mindspore/nn/probability/distribution/_utils/utils.py +9 -15
  169. mindspore/nn/probability/distribution/bernoulli.py +3 -3
  170. mindspore/nn/probability/distribution/beta.py +1 -1
  171. mindspore/nn/probability/distribution/categorical.py +5 -7
  172. mindspore/nn/probability/distribution/cauchy.py +3 -3
  173. mindspore/nn/probability/distribution/distribution.py +2 -2
  174. mindspore/nn/probability/distribution/exponential.py +2 -2
  175. mindspore/nn/probability/distribution/gamma.py +3 -3
  176. mindspore/nn/probability/distribution/geometric.py +1 -1
  177. mindspore/nn/probability/distribution/gumbel.py +3 -3
  178. mindspore/nn/probability/distribution/half_normal.py +15 -11
  179. mindspore/nn/probability/distribution/laplace.py +16 -13
  180. mindspore/nn/probability/distribution/logistic.py +2 -2
  181. mindspore/nn/probability/distribution/normal.py +1 -1
  182. mindspore/nn/probability/distribution/poisson.py +1 -1
  183. mindspore/nn/probability/distribution/student_t.py +20 -15
  184. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  185. mindspore/nn/probability/distribution/uniform.py +2 -2
  186. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  187. mindspore/nn/reinforcement/tensor_array.py +2 -2
  188. mindspore/nn/sparse/sparse.py +2 -2
  189. mindspore/nn/wrap/cell_wrapper.py +27 -10
  190. mindspore/nn/wrap/grad_reducer.py +2 -2
  191. mindspore/nn/wrap/loss_scale.py +40 -24
  192. mindspore/numpy/array_creations.py +33 -22
  193. mindspore/numpy/array_ops.py +35 -30
  194. mindspore/numpy/logic_ops.py +6 -27
  195. mindspore/numpy/math_ops.py +22 -19
  196. mindspore/numpy/utils.py +1 -1
  197. mindspore/numpy/utils_const.py +108 -58
  198. mindspore/opencv_core452.dll +0 -0
  199. mindspore/opencv_imgcodecs452.dll +0 -0
  200. mindspore/opencv_imgproc452.dll +0 -0
  201. mindspore/ops/_constants.py +0 -6
  202. mindspore/ops/_grad/__init__.py +2 -1
  203. mindspore/ops/_grad/grad_array_ops.py +86 -117
  204. mindspore/ops/_grad/grad_base.py +23 -1
  205. mindspore/ops/_grad/grad_clip_ops.py +2 -3
  206. mindspore/ops/_grad/grad_comm_ops.py +34 -24
  207. mindspore/ops/_grad/grad_implementations.py +9 -45
  208. mindspore/ops/_grad/grad_inner_ops.py +47 -4
  209. mindspore/ops/_grad/grad_math_ops.py +142 -117
  210. mindspore/ops/_grad/grad_nn_ops.py +71 -165
  211. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  212. mindspore/ops/_grad/grad_sparse.py +7 -6
  213. mindspore/ops/_grad_experimental/__init__.py +1 -0
  214. mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
  215. mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
  216. mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
  217. mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
  218. mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
  219. mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
  220. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  221. mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
  222. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  223. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
  224. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  225. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  226. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
  227. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  228. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  229. mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
  230. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  231. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
  232. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  233. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  234. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  235. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  236. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  237. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  238. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  239. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  240. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  241. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  242. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  243. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  244. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  245. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  246. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  247. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  248. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  249. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  250. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
  251. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  252. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  253. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  254. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  255. mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
  256. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  257. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
  258. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  259. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  260. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  261. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  262. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  263. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
  264. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  265. mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
  266. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  267. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  268. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
  269. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  270. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  271. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
  272. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  273. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  274. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  275. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  276. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  277. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  278. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  279. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  280. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  281. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
  282. mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
  283. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  284. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  285. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  286. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  287. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  288. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  289. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  290. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  291. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  292. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  293. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  294. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  295. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  296. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  297. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  298. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  299. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  300. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  301. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  302. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  303. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  304. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  305. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  306. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  307. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  308. mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
  309. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
  310. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  311. mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
  312. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  313. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  314. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  315. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  316. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  317. mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
  318. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
  319. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  320. mindspore/ops/_op_impl/tbe/__init__.py +27 -611
  321. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  322. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  323. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  324. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
  325. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  326. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  327. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  328. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  329. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  330. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  331. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  332. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  333. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  334. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
  335. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  336. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  337. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  338. mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
  339. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  340. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  341. mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
  342. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
  343. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  344. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  345. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  346. mindspore/ops/_register_for_op.py +1 -0
  347. mindspore/ops/_utils/__init__.py +1 -2
  348. mindspore/ops/_utils/utils.py +19 -40
  349. mindspore/ops/_vmap/vmap_array_ops.py +116 -38
  350. mindspore/ops/_vmap/vmap_base.py +16 -9
  351. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  352. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  353. mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
  354. mindspore/ops/_vmap/vmap_image_ops.py +12 -5
  355. mindspore/ops/_vmap/vmap_math_ops.py +46 -5
  356. mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
  357. mindspore/ops/_vmap/vmap_random_ops.py +1 -1
  358. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  359. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  360. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  361. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  362. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  363. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  364. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  365. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  366. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
  367. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  368. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  369. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  370. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  371. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  372. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  373. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  374. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  375. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  376. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  377. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
  378. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
  379. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  380. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  381. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  382. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  383. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  384. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  385. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
  386. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  387. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
  388. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
  389. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  390. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  391. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  392. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  393. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  394. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  395. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  396. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  397. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  398. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  399. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  400. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  401. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  402. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  403. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  404. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  405. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  406. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  407. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  408. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
  409. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  410. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  411. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  412. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  413. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  414. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  415. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  416. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
  417. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
  418. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  419. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
  420. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  421. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  422. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  423. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  424. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  425. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  426. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  427. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  428. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  429. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
  430. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
  431. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  432. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  433. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  434. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  435. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
  436. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  437. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  438. mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
  439. mindspore/ops/composite/__init__.py +7 -8
  440. mindspore/ops/composite/base.py +101 -47
  441. mindspore/ops/composite/math_ops.py +188 -158
  442. mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
  443. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
  444. mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
  445. mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
  446. mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
  447. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  448. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  449. mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
  450. mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
  451. mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
  452. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
  453. mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
  454. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  455. mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
  456. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
  457. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
  458. mindspore/ops/function/__init__.py +152 -8
  459. mindspore/ops/function/array_func.py +2555 -674
  460. mindspore/ops/function/clip_func.py +209 -13
  461. mindspore/ops/function/debug_func.py +2 -2
  462. mindspore/ops/function/grad/__init__.py +2 -1
  463. mindspore/ops/function/grad/grad_func.py +147 -62
  464. mindspore/ops/function/image_func.py +54 -38
  465. mindspore/ops/function/linalg_func.py +167 -16
  466. mindspore/ops/function/math_func.py +4849 -1492
  467. mindspore/ops/function/nn_func.py +2573 -988
  468. mindspore/ops/function/other_func.py +115 -0
  469. mindspore/ops/function/parameter_func.py +3 -3
  470. mindspore/ops/function/random_func.py +790 -73
  471. mindspore/ops/function/sparse_func.py +98 -78
  472. mindspore/ops/function/sparse_unary_func.py +54 -53
  473. mindspore/ops/function/spectral_func.py +27 -24
  474. mindspore/ops/function/vmap_func.py +22 -2
  475. mindspore/ops/functional.py +97 -37
  476. mindspore/ops/op_info_register.py +70 -28
  477. mindspore/ops/operations/__init__.py +47 -14
  478. mindspore/ops/operations/_csr_ops.py +7 -7
  479. mindspore/ops/operations/_embedding_cache_ops.py +5 -5
  480. mindspore/ops/operations/_grad_ops.py +276 -187
  481. mindspore/ops/operations/_inner_ops.py +319 -113
  482. mindspore/ops/operations/_ms_kernel.py +10 -8
  483. mindspore/ops/operations/_ocr_ops.py +9 -9
  484. mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
  485. mindspore/ops/operations/_quant_ops.py +137 -102
  486. mindspore/ops/operations/_rl_inner_ops.py +121 -60
  487. mindspore/ops/operations/_scalar_ops.py +466 -0
  488. mindspore/ops/operations/_sequence_ops.py +1004 -2
  489. mindspore/ops/operations/_tensor_array.py +10 -11
  490. mindspore/ops/operations/_thor_ops.py +1 -1
  491. mindspore/ops/operations/array_ops.py +801 -466
  492. mindspore/ops/operations/comm_ops.py +51 -49
  493. mindspore/ops/operations/control_ops.py +2 -2
  494. mindspore/ops/operations/custom_ops.py +123 -44
  495. mindspore/ops/operations/debug_ops.py +24 -24
  496. mindspore/ops/operations/image_ops.py +240 -153
  497. mindspore/ops/operations/inner_ops.py +34 -50
  498. mindspore/ops/operations/linalg_ops.py +31 -9
  499. mindspore/ops/operations/math_ops.py +988 -757
  500. mindspore/ops/operations/nn_ops.py +965 -819
  501. mindspore/ops/operations/other_ops.py +51 -40
  502. mindspore/ops/operations/random_ops.py +204 -122
  503. mindspore/ops/operations/rl_ops.py +8 -9
  504. mindspore/ops/operations/sparse_ops.py +254 -93
  505. mindspore/ops/operations/spectral_ops.py +35 -3
  506. mindspore/ops/primitive.py +111 -9
  507. mindspore/parallel/_auto_parallel_context.py +189 -83
  508. mindspore/parallel/_offload_context.py +185 -0
  509. mindspore/parallel/_parallel_serialization.py +99 -7
  510. mindspore/parallel/_ps_context.py +9 -5
  511. mindspore/parallel/_recovery_context.py +1 -1
  512. mindspore/parallel/_tensor.py +7 -1
  513. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  514. mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
  515. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  516. mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
  517. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  518. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
  519. mindspore/parallel/_utils.py +1 -2
  520. mindspore/parallel/algo_parameter_config.py +1 -1
  521. mindspore/parallel/checkpoint_transform.py +37 -34
  522. mindspore/parallel/shard.py +17 -18
  523. mindspore/profiler/common/validator/validate_path.py +2 -2
  524. mindspore/profiler/envprofiling.py +69 -47
  525. mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
  526. mindspore/profiler/parser/base_timeline_generator.py +49 -56
  527. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
  528. mindspore/profiler/parser/hwts_log_parser.py +1 -1
  529. mindspore/profiler/parser/integrator.py +15 -14
  530. mindspore/profiler/parser/minddata_analyzer.py +2 -2
  531. mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
  532. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  533. mindspore/profiler/parser/optime_parser.py +17 -18
  534. mindspore/profiler/parser/profiler_info.py +2 -1
  535. mindspore/profiler/profiling.py +218 -186
  536. mindspore/rewrite/__init__.py +3 -1
  537. mindspore/rewrite/api/node.py +1 -114
  538. mindspore/rewrite/api/node_type.py +3 -0
  539. mindspore/rewrite/api/pattern_engine.py +31 -1
  540. mindspore/rewrite/api/scoped_value.py +4 -4
  541. mindspore/rewrite/api/symbol_tree.py +3 -78
  542. mindspore/rewrite/api/tree_node_helper.py +1 -1
  543. mindspore/rewrite/ast_creator_register.py +1 -0
  544. mindspore/rewrite/ast_helpers/__init__.py +2 -2
  545. mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
  546. mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
  547. mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
  548. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
  549. mindspore/rewrite/namespace.py +0 -2
  550. mindspore/rewrite/node.py +157 -11
  551. mindspore/rewrite/parsers/assign_parser.py +231 -53
  552. mindspore/rewrite/parsers/class_def_parser.py +187 -109
  553. mindspore/rewrite/parsers/for_parser.py +24 -14
  554. mindspore/rewrite/parsers/function_def_parser.py +21 -4
  555. mindspore/rewrite/parsers/if_parser.py +6 -2
  556. mindspore/rewrite/sparsify/__init__.py +0 -0
  557. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  558. mindspore/rewrite/sparsify/sparsify.py +109 -0
  559. mindspore/rewrite/sparsify/utils.py +173 -0
  560. mindspore/rewrite/symbol_tree.py +256 -133
  561. mindspore/rewrite/symbol_tree_builder.py +38 -1
  562. mindspore/run_check/_check_version.py +69 -63
  563. mindspore/run_check/run_check.py +2 -1
  564. mindspore/tinyxml2.dll +0 -0
  565. mindspore/train/__init__.py +1 -1
  566. mindspore/train/_utils.py +28 -5
  567. mindspore/train/amp.py +273 -102
  568. mindspore/train/callback/_backup_and_restore.py +5 -5
  569. mindspore/train/callback/_callback.py +2 -2
  570. mindspore/train/callback/_checkpoint.py +3 -3
  571. mindspore/train/callback/_early_stop.py +3 -3
  572. mindspore/train/callback/_lambda_callback.py +2 -2
  573. mindspore/train/callback/_landscape.py +29 -31
  574. mindspore/train/callback/_loss_monitor.py +3 -3
  575. mindspore/train/callback/_on_request_exit.py +3 -3
  576. mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
  577. mindspore/train/callback/_summary_collector.py +23 -16
  578. mindspore/train/callback/_time_monitor.py +3 -3
  579. mindspore/train/checkpoint_pb2.py +68 -8
  580. mindspore/train/data_sink.py +15 -3
  581. mindspore/train/dataset_helper.py +10 -15
  582. mindspore/train/loss_scale_manager.py +8 -11
  583. mindspore/train/metrics/__init__.py +1 -1
  584. mindspore/train/metrics/bleu_score.py +1 -1
  585. mindspore/train/metrics/confusion_matrix.py +1 -1
  586. mindspore/train/metrics/cosine_similarity.py +1 -1
  587. mindspore/train/metrics/dice.py +2 -2
  588. mindspore/train/metrics/fbeta.py +1 -1
  589. mindspore/train/metrics/hausdorff_distance.py +4 -3
  590. mindspore/train/metrics/mean_surface_distance.py +2 -2
  591. mindspore/train/metrics/occlusion_sensitivity.py +1 -1
  592. mindspore/train/metrics/perplexity.py +1 -1
  593. mindspore/train/metrics/precision.py +1 -1
  594. mindspore/train/metrics/recall.py +1 -1
  595. mindspore/train/metrics/roc.py +2 -2
  596. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  597. mindspore/train/mind_ir_pb2.py +116 -37
  598. mindspore/train/model.py +45 -28
  599. mindspore/train/serialization.py +295 -188
  600. mindspore/train/summary/_summary_adapter.py +1 -1
  601. mindspore/train/summary/summary_record.py +43 -13
  602. mindspore/train/train_thor/convert_utils.py +2 -2
  603. mindspore/train/train_thor/dataset_helper.py +3 -3
  604. mindspore/turbojpeg.dll +0 -0
  605. mindspore/version.py +1 -1
  606. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
  607. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
  608. mindspore/compression/__init__.py +0 -19
  609. mindspore/compression/common/constant.py +0 -124
  610. mindspore/compression/export/__init__.py +0 -19
  611. mindspore/compression/export/quant_export.py +0 -515
  612. mindspore/compression/quant/__init__.py +0 -28
  613. mindspore/compression/quant/qat.py +0 -634
  614. mindspore/compression/quant/quant_utils.py +0 -462
  615. mindspore/compression/quant/quantizer.py +0 -68
  616. mindspore/nn/layer/quant.py +0 -1868
  617. mindspore/nn/layer/rnn_utils.py +0 -90
  618. mindspore/nn/probability/dpn/__init__.py +0 -22
  619. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  620. mindspore/nn/probability/dpn/vae/cvae.py +0 -140
  621. mindspore/nn/probability/dpn/vae/vae.py +0 -124
  622. mindspore/nn/probability/infer/__init__.py +0 -22
  623. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  624. mindspore/nn/probability/infer/variational/svi.py +0 -84
  625. mindspore/nn/probability/toolbox/__init__.py +0 -22
  626. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  627. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
  628. mindspore/nn/probability/transforms/__init__.py +0 -22
  629. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  630. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  631. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  632. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  633. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  634. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  635. mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
  636. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  637. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
  638. mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
  639. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
  640. mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
  641. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
  642. mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
  643. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
  644. mindspore/ops/composite/array_ops.py +0 -241
  645. mindspore/ops/composite/clip_ops.py +0 -134
  646. mindspore/ops/composite/random_ops.py +0 -426
  647. mindspore/ops/composite/vmap_ops.py +0 -38
  648. mindspore/parallel/nn/__init__.py +0 -42
  649. mindspore/parallel/nn/loss.py +0 -22
  650. mindspore/parallel/nn/moe.py +0 -21
  651. mindspore/parallel/nn/op_parallel_config.py +0 -22
  652. mindspore/parallel/nn/transformer.py +0 -31
  653. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  654. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  655. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
@@ -16,21 +16,24 @@
 
 __all__ = ['Tensor']
 
+import abc
 import math
 import numbers
 import numpy as np
 
-from mindspore.communication.management import get_rank, get_group_size
+from mindspore.communication.management import get_group_size
 from mindspore.common._utils import is_shape_unknown
 from mindspore.common.seed import get_seed
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common import dtype as mstype
-from mindspore.common._utils import split_to_slice_if_need
+
+from mindspore.common._utils import get_slice_num
 from mindspore.common._register_for_tensor import tensor_operator_registry
 from mindspore._c_expression import Tensor as Tensor_
-from mindspore._checkparam import Rel, check_is_number
-from mindspore._checkparam import Validator as validator
+from mindspore import _checkparam as validator
+from mindspore._checkparam import check_is_number, is_stub_tensor
+from mindspore._check_jit_forbidden_api import jit_forbidden_register
 
 np_types = (np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
@@ -40,14 +43,14 @@ np_types = (np.int8, np.int16, np.int32, np.int64,
 def _check_input_data_type(input_data):
     """Check the type of input_data for Tensor"""
     validator.check_value_type('input_data', input_data,
-                               (Tensor_, np.ndarray, np.str_, list, tuple, float, int, bool, complex),
+                               (Tensor_, Tensor, np.ndarray, np.str_, list, tuple, float, int, bool, complex),
                                'Tensor')
     valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                     np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
     if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
             input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S':  # Support dtype np.str_
+        new_line = '\n'
         for index, x in np.ndenumerate(input_data):
-            new_line = '\n'
             if np.array(x).dtype not in valid_dtypes:
                 raise TypeError(f"initializing tensor by numpy array failed, because the "
                                 f"element type '{type(x)}' of array is not supported.\n"
@@ -65,7 +68,13 @@ def _check_input_data_type(input_data):
                             f"For Tensor, the input_data is {input_data} that contain unsupported element.")
 
 
-class Tensor(Tensor_):
+class _TensorMeta(type(Tensor_), abc.ABCMeta):
+    """
+    Meta class for Tensor. Used internally.
+    """
+
+
+class Tensor(Tensor_, metaclass=_TensorMeta):
     """
     Tensor is a data structure that stores an n-dimensional array.
 
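The new `_TensorMeta` merges the pybind11 metaclass of the C++ `Tensor_` with `abc.ABCMeta`. A minimal sketch of why the merge is needed, using stand-in names rather than the real classes:

    import abc

    class _CppBackedMeta(type):                  # stands in for type(Tensor_)
        pass

    class _CppBacked(metaclass=_CppBackedMeta):  # stands in for Tensor_
        pass

    # Deriving from _CppBacked while also using ABC machinery raises a
    # "metaclass conflict" unless both metaclasses are merged, which is
    # what _TensorMeta does:
    class _MergedMeta(_CppBackedMeta, abc.ABCMeta):
        pass

    class PyTensor(_CppBacked, metaclass=_MergedMeta):  # mirrors Tensor
        pass

The ABC side is what enables the `__subclasshook__` added further down for stub tensors.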
@@ -95,6 +104,10 @@ class Tensor(Tensor_):
     Outputs:
         Tensor.
 
+    Note:
+        The default value None of `input_data` works as a placeholder, it does not mean that we can create a NoneType
+        Tensor.
+
     Examples:
         >>> import numpy as np
         >>> import mindspore as ms
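The note means construction still needs either real data or an `init`/`shape`/`dtype` description; a hedged sketch of both paths:

    >>> import numpy as np
    >>> import mindspore as ms
    >>> from mindspore.common.initializer import One
    >>> t1 = ms.Tensor(np.ones((2, 3)), ms.float32)                 # data-backed tensor
    >>> t2 = ms.Tensor(shape=(2, 3), dtype=ms.float32, init=One())  # lazy placeholder
    >>> data = t2.init_data()                                       # materializes the values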
@@ -149,8 +162,12 @@ class Tensor(Tensor_):
 
     def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
         self.init_finished = False
+        if is_stub_tensor(input_data):
+            input_data = input_data.stub_sync()
+
         if internal:
-            Tensor_.__init__(self, input_data)
+            if input_data is not None:
+                Tensor_.__init__(self, input_data)
         else:
             # If input data is numpy number, convert it to np array
             if isinstance(input_data, np_types):
@@ -168,8 +185,8 @@ class Tensor(Tensor_):
             else:
                 _check_input_data_type(input_data)
                 if dtype is not None:
-                    validator.check_type_name(
-                        'dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
+                    validator.check_type_name('dtype', dtype, mstype.number_type +
+                                              (mstype.bool_, mstype.string), "Tensor")
                 else:
                     dtype = self._set_default_dtype(input_data, dtype)
 
@@ -180,8 +197,8 @@ class Tensor(Tensor_):
                 Tensor_.__init__(self, input_data, dtype)
             else:
                 Tensor_.__init__(self, input_data)
+        validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
 
-        validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
         self.const_arg = const_arg
         self.virtual_flag = False
         self.init = init
@@ -196,6 +213,16 @@ class Tensor(Tensor_):
         self.slice_num_of_persistent_data_ = None
         self.slice_shape_of_persistent_data_ = None
 
+    @classmethod
+    def __subclasshook__(cls, sub):
+        """
+        Subclass with stub_sync attr will be instance of Tensor
+        """
+        if cls is Tensor:
+            if any("stub_sync" in s.__dict__ for s in sub.__mro__):
+                return True
+        return NotImplemented
+
     @staticmethod
     def _set_default_dtype(input_data, dtype):
         """Set tensor default dtype"""
@@ -301,22 +328,16 @@ class Tensor(Tensor_):
         return tensor_operator_registry.get('__add__')(self, other)
 
     def __and__(self, other):
-        if Tensor._use_logical_kernel(self, other):
-            return tensor_operator_registry.get('logical_and')(self, other)
         if isinstance(other, (int, bool, float, Tensor)):
             return tensor_operator_registry.get('bitwise_and')(self, other)
         raise TypeError("Unsupported operand type(s) for &: 'Tensor' and '{}'".format(type(other)))
 
     def __xor__(self, other):
-        if Tensor._use_logical_kernel(self, other):
-            return tensor_operator_registry.get('logical_xor')(self, other)
         if isinstance(other, (int, bool, float, Tensor)):
             return tensor_operator_registry.get('bitwise_xor')(self, other)
         raise TypeError("Unsupported operand type(s) for ^: 'Tensor' and '{}'".format(type(other)))
 
     def __or__(self, other):
-        if Tensor._use_logical_kernel(self, other):
-            return tensor_operator_registry.get('logical_or')(self, other)
         if isinstance(other, (int, bool, float, Tensor)):
             return tensor_operator_registry.get('bitwise_or')(self, other)
         raise TypeError("Unsupported operand type(s) for |: 'Tensor' and '{}'".format(type(other)))
@@ -449,6 +470,20 @@ class Tensor(Tensor_):
         """Return the number of tensor dimensions."""
         return len(self._shape)
 
+    @property
+    def H(self):
+        """
+        Returns a view of a matrix (2-D tensor) conjugated and transposed.
+        x.H is equivalent to `mindspore.Tensor.swapaxes(0, 1).conj()` for complex matrices and
+        `mindspore.Tensor.swapaxes(0, 1)` for real matrices.
+        """
+        if self.ndim != 2:
+            raise ValueError(f"For tensor.H only support 2-D Tensor, but got {self.ndim}-D.")
+        output = self.swapaxes(0, 1)
+        if self.dtype in (mstype.complex64, mstype.complex128):
+            return output.conj()
+        return output
+
     @property
     def has_init(self):
         """Whether tensor is initialized."""
@@ -500,168 +535,13 @@ class Tensor(Tensor_):
 
         return Tensor(Tensor_.from_numpy(array))
 
-    @staticmethod
-    def frombuffer(buffer, dtype=mstype.float64, count=-1, offset=0):
-        r"""
-        Creates a 1-dimensional :class:`Tensor` from an object that implements
-        the Python buffer protocol.
-        Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
-        the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
-        elements.
-
-        Args:
-            buffer (object): a Python object that exposes the buffer interface.
-            dtype (mindspore.dtype): the desired data type of returned tensor.
-            count (int, optional): the number of desired elements to be read. If negative,
-                all the elements (until the end of the buffer) will be read. Default: -1.
-            offset (int, optional): the number of bytes to skip at the start of the buffer. Default: 0.
-
-        Returns:
-            a 1-dimensional Tensor from an object that implements the Python buffer protocol.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> from array import array
-            >>> import numpy as np
-            >>> import mindspore
-            >>> from mindspore import Tensor
-            >>> input_array = array("d", [1, 2, 3, 4])
-            >>> input_array
-            array('d', [1.0, 2.0, 3.0, 4.0])
-            >>> output = Tensor.frombuffer(input_array, mindspore.int32)
-            >>> print(output)
-            [1 2 3 4]
-        """
-        res = np.frombuffer(buffer=buffer, dtype=np.float64, count=count, offset=offset)
-        result = Tensor(res, dtype=dtype)
-        return result
-
-    @staticmethod
-    def empty_strided(size, stride, dtype=mstype.float64, seed=None):
-        r"""
-        Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.
-
-        Args:
-            size (tuple of python:ints): the shape of the output tensor.
-            stride (tuple of python:ints): the strides of the output tensor.
-            dtype (mindspore.dtype, optional): the desired data type of returned tensor.
-
-        Returns:
-            a tensor with the specified size and stride and filled with undefined data.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> from mindspore import Tensor
-            >>> size = (3, 3)
-            >>> stride = (1, 3)
-            >>> output = Tensor.empty_strided(size, stride, seed = 0)
-            >>> print(output)
-            [[0.00000000e+00 7.15189366e+10 0.00000000e+00]
-             [0.00000000e+00 0.00000000e+00 6.45894113e+10]
-             [0.00000000e+00 8.91773001e+10 9.63662761e+10]]
-        """
-        np.random.seed(seed)
-        tensor_ = Tensor(np.random.uniform(low=0, high=10e10, size=size))
-        tensor_array = tensor_.asnumpy()
-        stride_tensor = tensor_.as_strided(shape=size, strides=stride)
-        stride_array = stride_tensor.asnumpy()
-        stride_array.resize(len(stride_array) * len(stride_array[0]))
-        for i in range(size[0]):
-            for j in range(size[1]):
-                if not sum(stride_array - tensor_array[i][j]) < 0.01:
-                    tensor_array[i][j] = 0.0
-        return Tensor(tensor_array, dtype=dtype)
-
-    @staticmethod
-    def poisson(shape, mean, seed=0, seed2=0):
-        r"""
-        Returns a tensor of the same size as `input` with each element sampled from a Poisson
-        distribution with rate parameter given by the corresponding element in `input` i.e.,
-        :math:`\text{out}_i \sim \text{Poisson}(\text{input}_i)`,
-        and self as a tensor is the μ parameter .the distribution was constructed with.
-        The parameter defines mean number of occurrences of the event.
-        It must be greater than 0. With float32 data type.
-
-        Args:
-            seed (int, option): set the random seed (0 to 2**32)
-            seed2 (int, option): set the random seed2 (0 to 2**32)
-
-        Inputs:
-            - **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
-
-        Returns:
-            out (Union[Tensor, int]), with the same shape as input_tensor.
-
-        Raises:
-            TypeError: If neither `seed` nor `seed2` is an int.
-            TypeError: If `shape` is not a tuple.
-            TypeError: If `mean` is not a Tensor whose dtype is not float32.
-
-        Supported Platforms:
-            ``Ascend``
-
-        Examples:
-            >>> shape = (4, 1)
-            >>> mean = Tensor(np.array([5.0, 10.0]), mstype.float32)
-            >>> output = Tensor.Poisson(shape, mean, seed=5)
-            >>> result = output.shape
-            >>> print(result)
-            (4, 2)
-        """
-        return tensor_operator_registry.get('poisson')(seed, seed2)(shape, mean)
-
-    @staticmethod
-    def as_tensor(data, dtype=None):
-        r"""
-        convert data to tensor in mindspore.
-
-        Args:
-            data (array_like): Initial data for the tensor. Can be a list, tuple,
-                NumPy ndarray, scalar, and other types.
-            dtype (mindspore.dtype, optional): the desired data type of returned tensor.
-                Default: if None, infers data type from data.
-
-        Returns:
-            Tensor contains the data and the dtype is in mindspore.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore as ms
-            >>> import mindspore.nn as nn
-            >>> from mindspore import Tensor
-            >>> input_data = np.array([1, 2, 3])
-            >>> ms_tensor = Tensor.as_tensor(input_data)
-            >>> ms_tensor
-            Tensor(shape=[3], dtype=Int64, value= [1, 2, 3])
-        """
-        return Tensor(data, dtype=dtype)
-
-    @staticmethod
-    def _use_logical_kernel(me, other) -> bool:
-        """
-        Decide to use logical kernel or bitwise kernel for &|^ operations.
-        If self or other is bool or bool tensor, then return true, use logical kernel,
-        else false to use bitwise kernel.
-        """
-        def _is_bool_or_bool_tensor(data):
-            return isinstance(data, bool) or (isinstance(data, Tensor) and data.dtype == mstype.bool_)
-        if _is_bool_or_bool_tensor(me) and _is_bool_or_bool_tensor(other):
-            return True
-        return False
-
     def ndimension(self):
         r"""
         Alias for :func:`mindspore.Tensor.ndim`.
         """
         return len(self._shape)
 
+    @jit_forbidden_register
     def set_const_arg(self, const_arg=True):
         """
         Specify whether the tensor is a constant when it is used for the argument of a network.
@@ -727,9 +607,25 @@ class Tensor(Tensor_):
         Returns:
             Tensor, Tensor that's been assigned.
         """
+        if is_stub_tensor(value):
+            value = value.stub_sync()
         self.assign_value_cpp(value)
         return self
 
+    def bincount(self, weights=None, minlength=0):
+        r"""
+        For details, please refer to :func:`mindspore.ops.bincount`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('bincount')(self, weights, minlength)
+
+    def chunk(self, chunks, axis=0):
+        r"""
+        For details, please refer to :func:`mindspore.ops.chunk`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('chunk')(self, chunks, axis)
+
     def item(self, index=None):
         """
         Get the item at the specified index of the tensor.
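A hedged usage sketch of the newly added `Tensor.chunk`, which defers to `mindspore.ops.chunk`:

    >>> import numpy as np
    >>> import mindspore as ms
    >>> x = ms.Tensor(np.arange(6), ms.float32)
    >>> for part in x.chunk(3):     # three sub-tensors of two elements each
    ...     print(part.shape)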
@@ -765,7 +661,7 @@ class Tensor(Tensor_):
         Insert scalar into a tensor (scalar is cast to tensor's dtype, if possible).
 
         There must be at least 1 argument, and define the last argument as item.
-        Then, tensor.itemset(\*args) is equivalent to :math:`tensor[args] = item`.
+        Then, tensor.itemset(\*args) is equivalent to :math:`Tensor[args] = item`.
 
         Args:
             args (Union[(numbers.Number), (int/tuple(int), numbers.Number)]): The arguments that
@@ -776,7 +672,7 @@ class Tensor(Tensor_):
             It is either an int or a tuple.
 
         Returns:
-            A new tensor that doesn't affect the original tensor, with value set by :math:`tensor[args] = item`.
+            A new tensor that doesn't affect the original tensor, with value set by :math:`Tensor[args] = item`.
 
         Raises:
             ValueError: If the length of the first argument is not equal to self.ndim.
@@ -823,8 +719,7 @@ class Tensor(Tensor_):
 
     def numpy(self):
         """
-        Refer to `Tensor.asnumpy() \
-        <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore/Tensor/mindspore.Tensor.asnumpy.html>`_.
+        Alias for :func:`mindspore.Tensor.asnumpy`.
         """
         return self.asnumpy()
 
@@ -859,6 +754,22 @@ class Tensor(Tensor_):
         """
         return self.slice_num_of_persistent_data_
 
+    def histc(self, bins=100, min=0., max=0.):
+        """
+        For details, please refer to :func:`mindspore.ops.histc`.
+        """
+        self._init_check()
+        validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
+        validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
+        return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))
+
+    def geqrf(self):
+        """
+        For details, please refer to :func:`mindspore.ops.geqrf`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('geqrf')(self)
+
     def slice_shape_of_persistent_data(self):
         """
         Get slice shape of tensor after cut to slice size.
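A hedged usage sketch of the new `Tensor.histc` wrapper (bin counts over the range [min, max], per :func:`mindspore.ops.histc`):

    >>> import mindspore as ms
    >>> x = ms.Tensor([1., 2., 1.], ms.float32)
    >>> x.histc(bins=4, min=0., max=3.)   # element counts in 4 equal-width bins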
@@ -900,28 +811,26 @@ class Tensor(Tensor_):
         self._init_check()
         Tensor_._flush_from_cache(self)
 
-    def addcdiv(self, x1, x2, value):
+    def addcdiv(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcdiv`.
         """
         self._init_check()
-        return tensor_operator_registry.get('addcdiv')()(self, x1, x2, value)
+        return tensor_operator_registry.get('addcdiv')()(self, tensor1, tensor2, value)
 
-    def addcmul(self, x1, x2, value):
+    def addcmul(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcmul`.
         """
-
         self._init_check()
-        return tensor_operator_registry.get('addcmul')()(self, x1, x2, value)
+        return tensor_operator_registry.get('addcmul')()(self, tensor1, tensor2, value)
 
-    def add(self, y):
+    def add(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.add`.
         """
-
         self._init_check()
-        return tensor_operator_registry.get('add')()(self, y)
+        return tensor_operator_registry.get('add')()(self, other)
 
     def subtract(self, other, *, alpha=1):
         r"""
@@ -936,29 +845,15 @@ class Tensor(Tensor_):
  For details, please refer to :func:`mindspore.ops.div`.
  """
  self._init_check()
- return tensor_operator_registry.get('div')(self, value, None)
+ return tensor_operator_registry.get('div')(self, value, rounding_mode=None)

  def triu(self, diagonal=0):
  r"""
- Returns a triangular matrix based on the diagonal. Default is the main diagonal.
-
- Args:
- diagonal (int): The index of diagonal. Default: 0.
-
- Returns:
- Tensor, a tensor has the same shape and data type as input.
-
- Raises:
- TypeError: If `diagonal` is not an int.
- TypeError: If `x` is not an Tensor.
- ValueError: If length of shape of x is less than 1.
-
- Supported Platforms:
- ``GPU`` ``CPU``
+ For details, please refer to :func:`mindspore.ops.triu`.
  """
  self._init_check()
  validator.check_value_type('diagonal', diagonal, [int], 'triu')
- return tensor_operator_registry.get('triu')(diagonal)(self)
+ return tensor_operator_registry.get('triu')(self, diagonal)

  def addbmm(self, batch1, batch2, *, beta=1, alpha=1):
  r"""
@@ -988,34 +883,10 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('adjoint')(self)

- def all(self, axis=(), keep_dims=False):
- """
- Check all tensor elements along a given axis evaluate to True.
-
- Args:
- axis (Union[None, int, tuple(int)]): Dimensions of reduction.
- When the axis is None or empty tuple, reduce all dimensions. When the axis is int or
- tuple(int), if the dimension of Tensor is dim, the value range is [-dim, dim). Default: ().
- keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
-
- Returns:
- Tensor, if all tensor elements along the given axis evaluate to True, its value is True,
- otherwise its value is False. If the axis is None or empty tuple, reduce all dimensions.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- See also:
- :func:`mindspore.Tensor.any`: Check any tensor element along a given axis evaluate to True.
-
- Examples:
- >>> from mindspore import Tensor
- >>> a = Tensor([True, True, False])
- >>> output = a.all()
- >>> print(output)
- False
+ def all(self, axis=None, keep_dims=False):
+ r"""
+ For details, please refer to :func:`mindspore.ops.all`.
  """
-
  self._init_check()
  if axis is None:
  axis = ()
@@ -1028,45 +899,21 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('angle')(self)

- def any(self, axis=(), keep_dims=False):
- """
- Check any tensor element along a given axis evaluate to True.
-
- Args:
- axis (Union[None, int, tuple(int)]): Dimensions of reduction.
- When the axis is None or empty tuple, reduce all dimensions. When the axis is int or
- tuple(int), if the dimension of Tensor is dim, the value range is [-dim, dim). Default: ().
- keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
-
- Returns:
- Tensor, if any tensor element along the given axis evaluates to True, its value is True,
- otherwise its value is False. If the axis is None or empty tuple, reduce all dimensions.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- See also:
- :func:`mindspore.Tensor.all`: Check all tensor elements along a given axis evaluate to True.
-
- Examples:
- >>> from mindspore import Tensor
- >>> a = Tensor([True, True, False])
- >>> output = a.any()
- >>> print(output)
- True
+ def any(self, axis=None, keep_dims=False):
+ r"""
+ For details, please refer to :func:`mindspore.ops.any`.
  """
-
  self._init_check()
  if axis is None:
  axis = ()
  return tensor_operator_registry.get('any')(keep_dims)(self, axis)

- def atan2(self, y):
+ def atan2(self, other):
  r"""
  For details, please refer to :func:`mindspore.ops.atan2`.
  """
  self._init_check()
- return tensor_operator_registry.get('atan2')(self, y)
+ return tensor_operator_registry.get('atan2')(self, other)

  def baddbmm(self, batch1, batch2, beta=1, alpha=1):
  r"""
@@ -1105,26 +952,84 @@ class Tensor(Tensor_):
  shape = shape[0]
  return tensor_operator_registry.get('reshape')()(self, shape)

- def bitwise_and(self, x):
+ def view_as(self, other):
+ r"""
+ View self Tensor as the same shape as `other`.
+
+ Args:
+ other(Tensor): The returned Tensor has the same shape as `other`.
+
+ Returns:
+ Tensor, has the same shape as `other`.
+
+ Raises:
+ TypeError: If `other` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> a = Tensor([[1, 2, 3], [2, 3, 4]], mstype.float32)
+ >>> b = Tensor([1, 1, 1, 1, 1, 1], mstype.float32)
+ >>> output = a.view_as(b)
+ >>> print(output)
+ [1. 2. 3. 2. 3. 4.]
+ """
+ self._init_check()
+ if not isinstance(other, (Tensor, Tensor_)):
+ raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
+ return self.view(other.shape)
+
+ def t(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.t`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get("t")(self)
+
+ def bitwise_and(self, other):
  """
  For details, please refer to :func:`mindspore.ops.bitwise_and`.
  """
  self._init_check()
- return tensor_operator_registry.get('bitwise_and')(self, x)
+ return tensor_operator_registry.get('bitwise_and')(self, other)

- def bitwise_or(self, x):
+ def bitwise_or(self, other):
  """
  For details, please refer to :func:`mindspore.ops.bitwise_or`.
  """
  self._init_check()
- return tensor_operator_registry.get('bitwise_or')(self, x)
+ return tensor_operator_registry.get('bitwise_or')(self, other)

- def bitwise_xor(self, x):
+ def bitwise_xor(self, other):
  """
  For details, please refer to :func:`mindspore.ops.bitwise_xor`.
  """
  self._init_check()
- return tensor_operator_registry.get('bitwise_xor')(self, x)
+ return tensor_operator_registry.get('bitwise_xor')(self, other)
+
+ def bitwise_left_shift(self, other):
+ """
+ For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('bitwise_left_shift')(self, other)
+
+ def bitwise_right_shift(self, other):
+ """
+ For details, please refer to :func:`mindspore.ops.bitwise_right_shift`.
+ """
+ self._init_check()
+ _cast = tensor_operator_registry.get('cast')
+ other = _cast(other, self.dtype)
+ return tensor_operator_registry.get('bitwise_right_shift')(self, other)
+
+ def scatter(self, axis, index, src):
+ """
+ For details, please refer to :func:`mindspore.ops.scatter`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('scatter')(self, axis, index, src)

  def scatter_mul(self, indices, updates):
  """
@@ -1140,12 +1045,12 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('tensor_scatter_div')(self, indices, updates)

- def ger(self, x):
+ def ger(self, vec2):
  """
  For details, please refer to :func:`mindspore.ops.ger`.
  """
  self._init_check()
- return tensor_operator_registry.get('ger')(self, x)
+ return tensor_operator_registry.get('ger')(self, vec2)

  def gt(self, x):
  """
@@ -1200,6 +1105,27 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('exp')()(self)

+ def real(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.real`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('real')(self)
+
+ def rsqrt(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.rsqrt`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('rsqrt')(self)
+
+ def reciprocal(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.reciprocal`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('reciprocal')(self)
+
  def sqrt(self):
  """
  For details, please refer to :func:`mindspore.ops.sqrt`.
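A quick sketch of the new element-wise helpers (`real` applies to complex inputs and is omitted here); the values follow directly from the definitions 1/sqrt(x) and 1/x, with indicative formatting:

>>> import mindspore as ms
>>> x = ms.Tensor([1., 4., 16.], ms.float32)
>>> print(x.rsqrt())        # 1 / sqrt(x)
[1.   0.5  0.25]
>>> print(x.reciprocal())   # 1 / x
[1.     0.25   0.0625]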
@@ -1257,55 +1183,21 @@ class Tensor(Tensor_):

  def cos(self):
  r"""
- Computes cosine of input element-wise.
-
- .. math::
- out_i = cos(x_i)
-
- .. warning::
- Currently support Float16, Float32 data type. If use Float64, there may
- be a problem of missing precision.
-
- Returns:
- Tensor, has the same shape as `x`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> from mindspore import Tensor
- >>> a = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
- >>> output = a.cos()
- >>> print(output)
- [0.971338 0.6748758 0.95233357 0.9959527]
+ For details, please refer to :func:`mindspore.ops.cos`.
  """
  self._init_check()
  return tensor_operator_registry.get('cos')(self)

- def acosh(self):
+ def cov(self, *, correction=1, fweights=None, aweights=None):
  r"""
- Computes inverse hyperbolic cosine of the inputs element-wise.
-
- .. math::
-
- out_i = \cosh^{-1}(input_i)
-
- .. warning::
- Given an input tensor x, the function computes inverse hyperbolic cosine of every element.
- Input range is [1, inf].
-
- Returns:
- Tensor, has the same shape as `x`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ For details, please refer to :func:`mindspore.ops.cov`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)

- Examples:
- >>> from mindspore import Tensor
- >>> a = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
- >>> output = a.acosh()
- >>> print(output)
- [0. 0.9624237 1.7627472 5.298292]
+ def acosh(self):
+ """
+ For details, please refer to :func:`mindspore.ops.acosh`.
  """
  self._init_check()
  return tensor_operator_registry.get('acosh')(self)
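Not part of the diff: a sketch of the new keyword-only `Tensor.cov`, assuming the :func:`mindspore.ops.cov` convention that rows are variables and columns are observations; the matrix below is the hand-computed sample covariance (`correction=1`) of the two rows:

>>> import mindspore as ms
>>> x = ms.Tensor([[0., 2.], [1., 1.]], ms.float32)   # 2 variables, 2 observations
>>> print(x.cov())
[[2. 0.]
 [0. 0.]]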
@@ -1358,19 +1250,20 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get("negative")(self)

- def norm(self, axis, p=2, keep_dims=False, epsilon=1e-12):
+ # pylint: disable=redefined-builtin
+ def norm(self, ord=None, dim=None, keepdim=False, *, dtype=None):
  """
  For details, please refer to :func:`mindspore.ops.norm`.
  """
  self._init_check()
- return tensor_operator_registry.get('norm')(self, axis, p, keep_dims, epsilon)
+ return tensor_operator_registry.get('norm')(self, ord, dim, keepdim, dtype=dtype)

- def renorm(self, p, dim, maxnorm):
+ def renorm(self, p, axis, maxnorm):
  """
  For details, please refer to :func:`mindspore.ops.renorm`.
  """
  self._init_check()
- return tensor_operator_registry.get("renorm")(self, p, dim, maxnorm)
+ return tensor_operator_registry.get("renorm")(self, p, axis, maxnorm)

  def approximate_equal(self, other, tolerance=1e-5):
  r"""
@@ -1386,13 +1279,6 @@ class Tensor(Tensor_):
  tensor_operator_registry.get('__sub__')(input_x, input_y)
  ), tolerance)

- def matrix_determinant(self):
- r"""
- For details, please refer to :func:`mindspore.ops.matrix_determinant`.
- """
- self._init_check()
- return tensor_operator_registry.get('matrix_determinant')(self)
-
  def log1p(self):
  r"""
  For details, please refer to :func:`mindspore.ops.log1p`.
@@ -1401,7 +1287,6 @@ class Tensor(Tensor_):
  return tensor_operator_registry.get('log1p')(self)

  def logit(self, eps=None):
-
  r"""
  For details, please refer to :func:`mindspore.ops.logit`.
  """
@@ -1415,26 +1300,29 @@ class Tensor(Tensor_):
  r"""
  For details, please refer to :func:`mindspore.ops.logaddexp`.
  """
+ self._init_check()
  return tensor_operator_registry.get('logaddexp')(self, other)

  def logaddexp2(self, other):
  r"""
  For details, please refer to :func:`mindspore.ops.logaddexp2`.
  """
+ self._init_check()
  return tensor_operator_registry.get('logaddexp2')(self, other)

- def logsumexp(self, dim, keepdim=False):
+ def logsumexp(self, axis, keepdims=False):
  r"""
  For details, please refer to :func:`mindspore.ops.logsumexp`.
  """
- return tensor_operator_registry.get('logsumexp')(self, dim, keepdim)
+ self._init_check()
+ return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)

- def log_matrix_determinant(self):
+ def logdet(self):
  r"""
- For details, please refer to :func:`mindspore.ops.log_matrix_determinant`.
+ For details, please refer to :func:`mindspore.ops.logdet`.
  """
  self._init_check()
- return tensor_operator_registry.get('log_matrix_determinant')(self)
+ return tensor_operator_registry.get('logdet')(self)

  def i0(self):
  r"""
@@ -1450,6 +1338,27 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)

+ def isneginf(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.isneginf`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('isneginf')(self)
+
+ def isposinf(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.isposinf`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('isposinf')(self)
+
+ def isreal(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.isreal`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('isreal')(self)
+
  def isfinite(self):
  r"""
  For details, please refer to :func:`mindspore.ops.isfinite`.
@@ -1457,6 +1366,13 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('isfinite')()(self)

+ def is_complex(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.is_complex`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('is_complex')(self)
+
  def inv(self):
  r"""
  For details, please refer to :func:`mindspore.ops.inv`.
@@ -1464,6 +1380,13 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('inv')(self)

+ def inverse(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.inverse`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('inverse')(self)
+
  def invert(self):
  r"""
  For details, please refer to :func:`mindspore.ops.invert`.
@@ -1471,12 +1394,12 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('invert')(self)

- def pow(self, power):
+ def pow(self, exponent):
  r"""
  For details, please refer to :func:`mindspore.ops.pow`.
  """
  self._init_check()
- return tensor_operator_registry.get('pow')()(self, power)
+ return tensor_operator_registry.get('pow')()(self, exponent)

  def log(self):
  """
@@ -1499,7 +1422,7 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('log2')(self)

- def mean(self, axis=(), keep_dims=False):
+ def mean(self, axis=None, keep_dims=False):
  """
  For details, please refer to :func:`mindspore.ops.mean`.
  """
@@ -1508,12 +1431,14 @@ class Tensor(Tensor_):
  axis = ()
  return tensor_operator_registry.get('mean')(keep_dims)(self, axis)

- def amin(self, axis=(), keep_dims=False):
+ def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
  """
  For details, please refer to :func:`mindspore.ops.amin`.
  """
  self._init_check()
- return tensor_operator_registry.get('amin')(self, axis, keep_dims)
+ if axis is None:
+ axis = ()
+ return tensor_operator_registry.get('amin')(self, axis, keepdims, initial=initial, where=where)

  def reverse(self, axis):
  """
@@ -1522,13 +1447,15 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('reverse')(axis)(self)

- def amax(self, axis=(), keep_dims=False):
+ def amax(self, axis=None, keepdims=False, *, initial=None, where=None):
  """
  For details, please refer to :func:`mindspore.ops.amax`.
  """
  self._init_check()
- return tensor_operator_registry.get('amax')(self, axis, keep_dims)
-
+ if axis is None:
+ axis = ()
+ return tensor_operator_registry.get('amax')(self, axis, keepdims, initial=initial, where=where)
+
  def reverse_sequence(self, seq_lengths, seq_dim=0, batch_dim=0):
  """
  For details, please refer to :func:`mindspore.ops.reverse_sequence`.
@@ -1536,7 +1463,7 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get("reverse_sequence")(seq_dim, batch_dim)(self, seq_lengths)

- def prod(self, axis=(), keep_dims=False):
+ def prod(self, axis=None, keep_dims=False):
  """
  For details, please refer to :func:`mindspore.ops.prod`.
  """
@@ -1663,39 +1590,7 @@ class Tensor(Tensor_):

  def rot90(self, k, dims):
  r"""
- Rotate a n-D tensor by 90 degrees in the plane specified by dims axis.
- Rotation direction is from the first towards the second axis if k > 0,
- and from the second towards the first for k < 0.
-
- Args:
- k (int): Number of times to rotate.
- dims (Union[list(int), tuple(int)]): Axis to rotate.
-
- Returns:
- Tensor.
-
- Raises:
- TypeError: If `x` is not a Tensor.
- TypeError: If `k` is not an integer.
- TypeError: If `dims` is not a list or a tuple of integers.
- ValueError: If the length of `dims` is not `2`.
- ValueError: If any dims is out of range of [-self.ndim, self.ndim).
- RuntimeError: If rotation dims are not different.
-
- Supported Platforms:
- ``Ascend`` ``GPU``
-
- Examples:
- >>> import numpy as np
- >>> import mindspore as ms
- >>> from mindspore import Tensor
- >>> x = Tensor(np.array([[0, 1], [2, 3]])).astype(np.float32)
- >>> k = 1
- >>> dims = [0, 1]
- >>> output = x.rot90(k, dims)
- >>> print(output)
- [[1. 3.]
- [0. 2.]]
+ For details, please refer to :func:`mindspore.ops.rot90`.
  """
  self._init_check()
  return tensor_operator_registry.get('rot90')(self, k, dims)
@@ -1707,6 +1602,13 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('deg2rad')(self)

+ def dot(self, other):
+ r"""
+ For details, please refer to :func:`mindspore.ops.dot`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('dot')(self, other)
+
  def rad2deg(self):
  r"""
  For details, please refer to :func:`mindspore.ops.rad2deg`.
@@ -1735,16 +1637,13 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('numel')(self)

- def permute(self, *dims):
+ def permute(self, *axis):
  """
  For details, please refer to :func:`mindspore.ops.permute`.
  """
  self._init_check()
- if not dims:
- raise ValueError(f"For Tensor.permute, the dims must not be none.")
- if len(dims) == 1:
- return tensor_operator_registry.get("permute")(self, *dims)
- return tensor_operator_registry.get("permute")(self, dims)
+ perm = validator.check_transpose_axis(axis, self.ndim)
+ return tensor_operator_registry.get('permute')(self, perm)

  def positive(self):
  """
@@ -1760,20 +1659,26 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('remainder')(self, divisor)

- def flatten(self, order='C'):
+ def flatten(self, order='C', *, start_dim=0, end_dim=-1):
  r"""
  For details, please refer to :func:`mindspore.ops.flatten`.
  """
  self._init_check()
- reshape_op = tensor_operator_registry.get('reshape')()
- trans_op = tensor_operator_registry.get('transpose')()
+ return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)

- order = validator.check_flatten_order(order)
- if order == 'C':
- return reshape_op(self, (-1,))
+ def float_power(self, other):
+ r"""
+ For details, please refer to :func:`mindspore.ops.float_power`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('float_power')(self, other)

- perm = tuple(range(self.ndim - 1, -1, -1))
- return reshape_op(trans_op(self, perm), (-1,))
+ def fmod(self, other):
+ r"""
+ For details, please refer to :func:`mindspore.ops.fmod`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('fmod')(self, other)

  def narrow(self, axis, start, length):
  """
@@ -1782,49 +1687,19 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('narrow')(self, axis, start, length)

- def swapaxes(self, axis1, axis2):
+ def swapaxes(self, axis0, axis1):
  """
- Interchange two axes of a tensor.
-
- Args:
- axis1 (int): First axis.
- axis2 (int): Second axis.
-
- Returns:
- Transposed tensor, has the same data type as the input.
-
- Raises:
- TypeError: If `axis1` or `axis2` is not integer.
- ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
- >>> output = x.swapaxes(0, 2)
- >>> print(output.shape)
- (4,3,2)
+ For details, please refer to :func:`mindspore.ops.swapaxes`.
  """
  self._init_check()
- axis1, axis2 = validator.check_swapaxes_axis((axis1, axis2), self.ndim)
-
- if axis1 == axis2:
- return self
- if axis1 > axis2:
- axis1, axis2 = axis2, axis1
-
- perm = tuple(range(0, self.ndim))
- if axis2 + 1 < self.ndim:
- new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
- perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1] + perm[axis2 + 1:]
- else:
- new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
- perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1]
+ return tensor_operator_registry.get('swapaxes')(self, axis0, axis1)

- return tensor_operator_registry.get('transpose')()(self, new_perm)
+ def swapdims(self, dim0, dim1):
+ """
+ For details, please refer to :func:`mindspore.ops.swapdims`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('swapdims')(self, dim0, dim1)

  def squeeze(self, axis=None):
  """
@@ -1836,13 +1711,27 @@ class Tensor(Tensor_):
  new_shape = validator.prepare_shape_for_squeeze(self.shape, axis)
  return tensor_operator_registry.get('reshape')()(self, new_shape)

+ def slogdet(self):
+ """
+ For details, please refer to :func:`mindspore.ops.slogdet`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('slogdet')(self)
+
+ def tril(self, diagonal=0):
+ """
+ For details, please refer to :func:`mindspore.ops.tril`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('tril')(self, diagonal)
+
  def unsqueeze(self, dim):
  """
  For details, please refer to :func:`mindspore.ops.unsqueeze`.
  """
  self._init_check()
  validator.check_is_int(dim, 'dim')
- validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, Rel.INC_LEFT, 'dim')
+ validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
  return tensor_operator_registry.get('unsqueeze')(self, dim)

  def expand_dims(self, axis):
@@ -1851,7 +1740,7 @@ class Tensor(Tensor_):
  """
  self._init_check()
  validator.check_is_int(axis, 'axis')
- validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1, Rel.INC_LEFT, 'axis')
+ validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'axis')
  return tensor_operator_registry.get('expand_dims')(self, axis)

  def astype(self, dtype, copy=True):
@@ -1860,7 +1749,7 @@ class Tensor(Tensor_):

  Args:
  dtype (Union[:class:`mindspore.dtype`, numpy.dtype, str]): Designated tensor dtype, can be in
- format of :class:`mindspore.dtype.float32` or :class:`numpy.float32` or `float32`.
+ format of `mindspore.dtype.float32` or `numpy.float32` or `float32`.
  copy (bool, optional): By default, astype always returns a newly allocated
  tensor. If this is set to false, the input tensor is returned instead
  of a copy. Default: True.
@@ -1962,12 +1851,12 @@ class Tensor(Tensor_):

  Examples:
  >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
- >>> index, output = x.argmax_with_value()
- >>> print(index, output)
- 3 0.7
- >>> index, output = x.argmax_with_value(keep_dims=True)
- >>> print(index, output)
- [3] [0.7]
+ >>> output, index = x.argmax_with_value()
+ >>> print(output, index)
+ 0.7 3
+ >>> output, index = x.argmax_with_value(keep_dims=True)
+ >>> print(output, index)
+ [0.7] [3]
  """
  if self.shape == ():
  return (Tensor(0), self)
@@ -2009,12 +1898,12 @@ class Tensor(Tensor_):

  Examples:
  >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
- >>> index, output = x.argmin_with_value()
- >>> print(index, output)
- 0 0.0
- >>> index, output = x.argmin_with_value(keep_dims=True)
- >>> print(index, output)
- [0] [0.0]
+ >>> output, index = x.argmin_with_value()
+ >>> print(output, index)
+ 0.0 0
+ >>> output, index = x.argmin_with_value(keep_dims=True)
+ >>> print(output, index)
+ [0.0] [0]
  """
  if self.shape == ():
  return (Tensor(0), self)
@@ -2050,18 +1939,25 @@ class Tensor(Tensor_):
  """
  return tensor_operator_registry.get('cummax')(self, axis)

- def index_fill(self, dim, index, value):
+ def index_fill(self, axis, index, value):
  """
  For details, please refer to :func:`mindspore.ops.index_fill`.
  """
- return tensor_operator_registry.get('index_fill')(self, dim, index, value)
+ return tensor_operator_registry.get('index_fill')(self, axis, index, value)
+
+ def index_select(self, axis, index):
+ """
+ For details, please refer to :func:`mindspore.ops.index_select`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('index_select')(self, axis, index)

  def inplace_update(self, v, indices):
  """
  For details, please refer to :func:`mindspore.ops.inplace_update`.
  """
  self._init_check()
- return tensor_operator_registry.get('inplace_update')(indices)(self, v)
+ return tensor_operator_registry.get('inplace_update')()(self, indices, v)

  def copy(self):
  """
@@ -2099,7 +1995,7 @@ class Tensor(Tensor_):
  x = x.astype(origin_dtype)
  return x

- def max(self, axis=None, keepdims=False, initial=None, where=True):
+ def max(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
  """
  Return the maximum of a tensor or maximum along an axis.

@@ -2112,13 +2008,17 @@ class Tensor(Tensor_):
  If this is set to True, the axes which are reduced are left in the
  result as dimensions with size one. With this option, the result will
  broadcast correctly against the input array. Default: False.
+
+ Keyword Args:
  initial (scalar, optional):
  The minimum value of an output element. Must be present to allow
  computation on empty slice. Default: None.
- where (bool Tensor, optional):
+ where (Tensor[bool], optional):
  A boolean tensor which is broadcasted to match the dimensions of array,
  and selects elements to include in the reduction. If non-default value
  is passed, initial must also be provided. Default: True.
+ return_indices (bool, optional): Whether to return the index of the maximum value. Default: False.
+ If `axis` is a list or tuple of ints, it must be False.

  Returns:
  Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar
@@ -2144,22 +2044,99 @@ class Tensor(Tensor_):
  >>> output = a.max()
  >>> print(output)
  3.0
+ >>> value, indices = a.max(axis=0, return_indices=True)
+ >>> print(value)
+ [2. 3.]
+ >>> print(indices)
+ [1 1]
  """
- reduce_ = tensor_operator_registry.get("reduce")
- reduce_max = tensor_operator_registry.get("reduce_max")
- maximum = tensor_operator_registry.get("maximum")
- return reduce_(self, reduce_max(keepdims), cmp_fn=maximum, axis=axis, keepdims=keepdims,
- initial=initial, where=where)
+ self._init_check()
+ if isinstance(axis, (list, tuple)):
+ reduce_ = tensor_operator_registry.get("reduce")
+ reduce_max = tensor_operator_registry.get("reduce_max")
+ maximum = tensor_operator_registry.get("maximum")
+ return reduce_(self, reduce_max(keepdims), cmp_fn=maximum, axis=axis, keepdims=keepdims,
+ initial=initial, where=where)
+ values, indices = tensor_operator_registry.get("max")(self, axis, keepdims, initial=initial, where=where)
+ if not return_indices:
+ return values
+ return values, indices

- def min(self, axis=None, keepdims=False, initial=None, where=True):
+ def min(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
  """
- For details, please refer to :func:`mindspore.ops.min`.
- """
- reduce_ = tensor_operator_registry.get("reduce")
- reduce_min = tensor_operator_registry.get("reduce_min")
- minimum = tensor_operator_registry.get("minimum")
- return reduce_(self, reduce_min(keepdims), cmp_fn=minimum(), axis=axis, keepdims=keepdims,
- initial=initial, where=where)
+ Return the minimum of a tensor or minimum along an axis.
+
+ Args:
+ axis (Union[None, int, list, tuple of ints], optional): An axis or
+ axes along which to operate. By default, flattened input is used. If
+ `axis` is a tuple of ints, the minimum is selected over multiple axes,
+ instead of a single axis or all the axes as before. Default: None.
+ keepdims (bool, optional):
+ If True, the axes which are reduced are left in the
+ result as dimensions with size one. With this option, the result will
+ broadcast correctly against the input array. Default: False.
+
+ Keyword Args:
+ initial (scalar, optional):
+ The maximum value of an output element. Must be present to allow
+ computation on empty slice. Default: None.
+ where (bool Tensor, optional):
+ A boolean tensor which is broadcasted to match the dimensions of array,
+ and selects elements to include in the reduction. If non-default value
+ is passed, initial must also be provided. Default: True.
+ return_indices (bool, optional): Whether to return the index of the minimum value. Default: False.
+ If `axis` is a list or tuple of ints, it must be False.
+
+ Returns:
+ Tensor or scalar, minimum of input tensor. If `axis` is None, the result is a scalar
+ value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.
+
+ Raises:
+ TypeError: If arguments have types not specified above.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ See also:
+ :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+
+ :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+
+ :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
+ >>> output = a.min()
+ >>> print(output)
+ 0.0
+ >>> output = a.min(axis=0)
+ >>> print(output)
+ [0. 1.]
+ >>> output = a.min(axis=0, initial=9, where=Tensor([False]))
+ >>> print(output)
+ [9. 9.]
+ >>> output = a.min(axis=0, initial=9, where=Tensor([False, True]))
+ >>> print(output)
+ [9. 1.]
+ >>> value, indices = a.min(axis=0, return_indices=True)
+ >>> print(value)
+ [0. 1.]
+ >>> print(indices)
+ [0 0]
+ """
+ self._init_check()
+ if isinstance(axis, (list, tuple)):
+ reduce_ = tensor_operator_registry.get("reduce")
+ reduce_min = tensor_operator_registry.get("reduce_min")
+ minimum = tensor_operator_registry.get("minimum")
+ return reduce_(self, reduce_min(keepdims), cmp_fn=minimum(), axis=axis, keepdims=keepdims,
+ initial=initial, where=where)
+ values, indices = tensor_operator_registry.get("min")(self, axis, keepdims, initial=initial, where=where)
+ if not return_indices:
+ return values
+ return values, indices

  def scatter_add(self, indices, updates):
  """
@@ -2232,7 +2209,7 @@ class Tensor(Tensor_):

  def fill(self, value):
  """
- For details, please refer to :func:`mindspore.ops.fill`.
+ `Tensor.fill` is deprecated, please use `ops.fill` instead.
  """
  if value is None:
  if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
@@ -2247,7 +2224,7 @@ class Tensor(Tensor_):

  def fills(self, value):
  """
- For details, please refer to :func:`mindspore.ops.fills`.
+ `Tensor.fills` is deprecated, please use `ops.fill` instead.
  """
  self._init_check()
  return tensor_operator_registry.get('fills')(self, value)
@@ -2314,68 +2291,18 @@ class Tensor(Tensor_):
  """
  return tensor_operator_registry.get('minimum')()(self, other)

- def clip(self, xmin, xmax, dtype=None):
+ def clamp(self, min=None, max=None):
+ r"""
+ For details, please refer to :func:`mindspore.ops.clamp`.
  """
- Clips (limits) the values in a Tensor.
-
- Given an interval, values outside the interval are clipped to the interval edges.
- For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
- and values larger than 1 become 1.
-
- Note:
- Currently, clip with `xmin=nan` or `xmax=nan` is not supported.
-
- Args:
- xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
- on the lower interval edge. Not more than one of `xmin` and `xmax` may be None.
- xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
- on the upper interval edge. Not more than one of `xmin` and `xmax` may be None.
- If `xmin` or `xmax` are tensors, then `xmin`, `xmax` and the given tensor
- will be broadcasted to match their shapes.
- dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
- output Tensor. Default is None.
-
- Returns:
- Tensor, a tensor with the elements of the input tensor, but where values
- < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.
-
- Raises:
- TypeError: If inputs have types not specified above.
- ValueError: If the shapes of `x1` and `x2` cannot broadcast, or both `xmin` and `xmax` are `None`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ self._init_check()
+ return tensor_operator_registry.get('clamp')(self, min, max)

- Examples:
- >>> from mindspore import Tensor
- >>> x = Tensor([1, 2, 3, -4, 0, 3, 2, 0]).astype("float32")
- >>> y = x.clip(0, 2)
- >>> print(y)
- [1. 2. 2. 0. 0. 2. 2. 0.]
- >>> t = Tensor([1, 1, 1, 1, 1, 1, 1, 1])
- >>> y = x.clip(t, 2)
- >>> print(y)
- [1. 2. 2. 1. 1. 2. 2. 1.]
+ def clip(self, min=None, max=None):
+ r"""
+ Alias for :func:`mindspore.Tensor.clamp`.
  """
- if xmin is None and xmax is None:
- raise ValueError("For 'Tensor.clip', the argument 'xmin' and 'xman' cannot all be None.")
- x = self
- # F.maximum/minimum does not support when both operands are scalar
- if xmin is not None:
- xmin = Tensor(xmin).astype(x.dtype)
- if x.ndim == 0 and xmin.ndim == 0:
- x = tensor_operator_registry.get("maximum")(x.reshape((1,)), xmin).squeeze()
- else:
- x = tensor_operator_registry.get("maximum")(x, xmin)
- if xmax is not None:
- xmax = Tensor(xmax).astype(x.dtype)
- if x.ndim == 0 and xmax.ndim == 0:
- x = tensor_operator_registry.get("minimum")()(x.reshape((1,)), xmax).squeeze()
- else:
- x = tensor_operator_registry.get("minimum")()(x, xmax)
- if dtype is not None and dtype != x.dtype:
- return x.astype(dtype)
- return x
+ return self.clamp(min, max)

  def _init_check(self):
  if self.has_init:
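`clip` drops its NumPy-style `(xmin, xmax, dtype)` implementation and becomes a thin alias of the new `clamp`. The first example from the removed docstring carries over unchanged:

>>> import mindspore as ms
>>> x = ms.Tensor([1, 2, 3, -4, 0, 3, 2, 0], ms.float32)
>>> print(x.clamp(0, 2))
[1. 2. 2. 0. 0. 2. 2. 0.]
>>> print(x.clip(0, 2))   # identical: clip simply forwards to clamp
[1. 2. 2. 0. 0. 2. 2. 0.]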
@@ -2420,7 +2347,7 @@ class Tensor(Tensor_):
  # At embedding cache scenes, we need limit the size of memory for tensor.
  # And save out of range data to persistent storage to support TB-Level size of tensor.
  data_shape = list(shape)
- slice_num_of_persistent_data = split_to_slice_if_need(self.dtype, shape)
+ slice_num_of_persistent_data = get_slice_num(self.dtype, shape)
  if slice_num_of_persistent_data > 1:
  slice_first_dim = math.ceil(shape[0] / slice_num_of_persistent_data)
  data_shape[0] = slice_first_dim
@@ -2443,9 +2370,9 @@ class Tensor(Tensor_):
  self._np_seed = np.random.get_state()[1][0]
  self.need_set_seed = (slice_index is not None)
  self._global_seed = global_seed
- self._device_num = 1
+ self._seed_offset = 1
  if self.need_set_seed:
- self._device_num = get_group_size()
+ self._seed_offset = get_group_size() * 2

  def __enter__(self):
  if self.need_set_seed:
@@ -2456,7 +2383,7 @@ class Tensor(Tensor_):
  else:
  np.random.seed(slice_index + Tensor.delta_seed)
  self.init.seed = slice_index + Tensor.delta_seed
- Tensor.delta_seed += self._device_num
+ Tensor.delta_seed += self._seed_offset

  def __exit__(self, ptype, value, trace):
  if self.need_set_seed:
@@ -2465,10 +2392,6 @@ class Tensor(Tensor_):

  with seed_context(self.init):
  self.init(data)
- if opt_shard_group:
- rank = get_rank(opt_shard_group)
- size = get_group_size(opt_shard_group)
- data = np.split(data, size)[rank]
  self.init = None

  # At embedding cache scenes. When size of tensor is out of range, we store data to persistent storage
@@ -2478,44 +2401,6 @@ class Tensor(Tensor_):
  self.assign_value(Tensor_.from_numpy(data))
  return self

- def to_tensor(self, slice_index=None, shape=None, opt_shard_group=None):
- """
- Return init_data() and get the tensor format data of this Tensor.
-
- Note:
- The usage of `to_tensor` is deprecated. Please use `init_data`.
-
- Args:
- slice_index (int): Slice index of a parameter's slices.
- It is used when initialize a slice of a parameter, it guarantees that devices
- using the same slice can generate the same tensor. Default: None.
- shape (list[int]): Shape of the slice, it is used when initialize a slice of the parameter. Default: None.
- opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
- to get one shard of a parameter's slice. Default: None.
-
- Returns:
- Initialized Tensor.
-
- Raises:
- TypeError: `indices` is neither int32 nor int64.
- ValueError: The length of the shape of the tensor is less than the last dimension of `indices`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore as ms
- >>> from mindspore.common.initializer import initializer, Constant
- >>> x = initializer(Constant(1), [2, 2], ms.float32)
- >>> out = x.to_tensor()
- >>> print(out)
- [[1. 1.]
- [1. 1.]]
- """
- logger.warning("WARN_DEPRECATED: The usage of to_tensor is deprecated."
- " Please use init_data")
- return self.init_data(slice_index, shape, opt_shard_group)
-
  def resize(self, *new_shape):
  """
  Changes shape and size of tensor in-place.
@@ -2575,10 +2460,59 @@ class Tensor(Tensor_):

  def det(self):
  r"""
- Refer to :func:`mindspore.Tensor.matrix_determinant`.
+ For details, please refer to :func:`mindspore.ops.det`.
  """
  self._init_check()
- return tensor_operator_registry.get('matrix_determinant')(self)
+ return tensor_operator_registry.get('det')(self)
+
+ def diff(self, n=1, axis=-1, prepend=None, append=None):
+ r"""
+ For details, please refer to :func:`mindspore.ops.diff`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)
+
+ def frac(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.frac`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('frac')(self)
+
+ def argwhere(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.argwhere`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('argwhere')(self)
+
+ def moveaxis(self, source, destination):
+ r"""
+ For details, please refer to :func:`mindspore.ops.moveaxis`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('moveaxis')(self, source, destination)
+
+ def movedim(self, source, destination):
+ r"""
+ For details, please refer to :func:`mindspore.ops.movedim`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('movedim')(self, source, destination)
+
+ def digamma(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.digamma`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('digamma')(self)
+
+ def lgamma(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.lgamma`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('lgamma')(self)

  def diagonal(self, offset=0, axis1=0, axis2=1):
  """
@@ -2607,11 +2541,11 @@ class Tensor(Tensor_):
  elif offset != 0:
  e = e.astype(mstype.float32)
  if offset > 0:
- e_left = tensor_operator_registry.get('fill')(dtype, (n, offset), 0)
+ e_left = tensor_operator_registry.get('fill')(mstype.float32, (n, offset), 0)
  e_right = e[..., 0:m - offset:1]
  e = tensor_operator_registry.get('concatenate')(1)((e_left, e_right)).astype(dtype)
  elif offset < 0:
- e_upper = tensor_operator_registry.get('fill')(dtype, (-offset, m), 0)
+ e_upper = tensor_operator_registry.get('fill')(mstype.float32, (-offset, m), 0)
  e_lower = e[0:n + offset:1, ...]
  e = tensor_operator_registry.get('concatenate')(0)((e_upper, e_lower)).astype(dtype)
  e = tensor_operator_registry.get('broadcast_to')(shape)(e)
@@ -2668,6 +2602,9 @@ class Tensor(Tensor_):
  >>> print(x.trace())
  3.0
  """
+ if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
+ self._init_check()
+ return tensor_operator_registry.get('trace')(self)
  d = self.diagonal(offset, axis1=axis1, axis2=axis2)
  shape = d.shape
  if dtype is None:
@@ -2873,7 +2810,7 @@ class Tensor(Tensor_):
  i = tensor_operator_registry.get('fill')(mstype.int32, shape, 0)
  j = tensor_operator_registry.get('fill')(mstype.int32, shape, a.size)

- sort_range = tuple(range(validator.get_log2_size(tensor_operator_registry.get('shape_mul')(a.shape) + 1)))
+ sort_range = tuple(range(math.ceil(math.log2(tensor_operator_registry.get('shape_mul')(a.shape) + 1))))
  for _ in sort_range:
  mid = (i - -j) // 2
  mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
@@ -2886,16 +2823,17 @@ class Tensor(Tensor_):
  For details, please refer to :func:`mindspore.ops.gather_nd`.
  """
  self._init_check()
- validator.check_value_type('indices', indices, (Tensor_,), 'Tensor.gather_nd')
+ validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
  return tensor_operator_registry.get('gather_nd')(self, indices)

- def gather(self, input_indices, axis):
+ def gather(self, input_indices, axis, batch_dims=0):
  r"""
  For details, please refer to :func:`mindspore.ops.gather`.
  """
  self._init_check()
  validator.check_is_int(axis, 'axis')
- return tensor_operator_registry.get('gather')(self, input_indices, axis)
+ validator.check_is_int(batch_dims, "batch_dims")
+ return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)

  def var(self, axis=None, ddof=0, keepdims=False):
  """
@@ -3038,6 +2976,55 @@ class Tensor(Tensor_):
3038
2976
  res += initial
3039
2977
  return res.astype(dtype)
3040
2978
 
2979
+ def sum_to_size(self, *size):
2980
+ r"""
2981
+ Sum self Tensor to the `size`. `size` must be expandable to the Tensor size.
2982
+
2983
+ Args:
2984
+ size (Union[tuple(int), int]): The expected shape of output Tensor.
2985
+
2986
+ Returns:
2987
+ Tensor, the sum result of self Tensor according to the `size`.
2988
+
2989
+ Raises:
2990
+ ValueError: If `size` is not expandable to the size of self Tensor.
2991
+
2992
+ Supported Platforms:
2993
+ ``Ascend`` ``GPU`` ``CPU``
2994
+
2995
+ Examples:
2996
+ >>> x = Tensor(np.random.randn(3, 3, 3, 3, 3, 3), mindspore.float32)
2997
+ >>> output = x.sum_to_size((1, 3, 1, 3))
2998
+ >>> print(output.shape)
2999
+ (1, 3, 1, 3)
3000
+ """
3001
+ self._init_check()
3002
+ x = self
3003
+ if len(size) == 1 and isinstance(size[0], tuple):
3004
+ size = size[0]
3005
+ shape_x = x.shape
3006
+ if len(size) > x.ndim:
3007
+ raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
3008
+ if len(size) < x.ndim:
3009
+ pre_axis = tuple([axis for axis in range(x.ndim - len(size))])
3010
+ x = x.sum(pre_axis)
3011
+ axes = []
3012
+ for i, element in enumerate(size):
3013
+ if element != x.shape[i] and element == 1:
3014
+ axes.append(i)
3015
+ elif element != x.shape[i]:
3016
+ raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
3017
+ if axes:
3018
+ return x.sum(tuple(axes), keepdims=True)
3019
+ return x
3020
+
3021
+ def nansum(self, axis=None, keepdims=False, dtype=None):
3022
+ """
3023
+ For details, please refer to :func:`mindspore.ops.nansum`.
3024
+ """
3025
+ self._init_check()
3026
+ return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)
3027
+
3041
3028
  def repeat(self, repeats, axis=None):
3042
3029
  """
3043
3030
  Repeat elements of a tensor.
@@ -3105,7 +3092,7 @@ class Tensor(Tensor_):
3105
3092
  raise ValueError(f"For 'Tensor.repeat', the length of 'repeats' must be the same as the shape of the "
3106
3093
  f"original tensor in the 'axis' dimension, but got the length of 'repeats' "
3107
3094
  f"{len(repeats)}, the shape of the original tensor in the 'axis' dimension {size}.")
3108
- subs = tensor_operator_registry.get('split')(axis, size)(input_x)
3095
+ subs = tensor_operator_registry.get('tensor_split')(input_x, size, axis)
3109
3096
  repeated_subs = []
3110
3097
  for sub, rep in zip(subs, repeats):
3111
3098
  if rep != 0:
@@ -3117,10 +3104,9 @@ class Tensor(Tensor_):
3117
3104
  For details, please refer to :func:`mindspore.ops.repeat_interleave`.
3118
3105
  """
3119
3106
  self._init_check()
3120
- dim = dim if dim is not None else 0
3121
3107
  return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)
3122
3108
 
3123
- def bernoulli(self, p=0.5, seed=-1):
3109
+ def bernoulli(self, p=0.5, seed=None):
3124
3110
  r"""
3125
3111
  For details, please refer to :func:`mindspore.ops.bernoulli`.
3126
3112
  """
@@ -3128,278 +3114,44 @@ class Tensor(Tensor_):
3128
3114
  validator.check_is_int(seed, 'seed')
3129
3115
  return tensor_operator_registry.get('bernoulli')(self, p, seed)
3130
3116
 
3131
- def multinomial(self, num_samples, seed=0, seed2=0):
3117
+ def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
3132
3118
  r"""
3133
- Returns a tensor sampled from the multinomial probability distribution located in the corresponding
3134
- row of tensor input.
3135
-
3136
- Note:
3137
- The rows of input do not need to sum to one (in which case we use the values as weights),
3138
- but must be non-negative, finite and have a non-zero sum. self must be the input tensor
3139
- containing the cumsum of probabilities, must be 1 or 2 dimensions.
3140
-
3141
- Args:
3142
- seed (int): Random seed, must be non-negative. Default: 0.
3143
- seed2 (int): Random seed2, must be non-negative. Default: 0.
3144
-
3145
- Inputs:
3146
- - **num_samples** (int32) - number of samples to draw.
3147
-
3148
- Outputs:
3149
- Tensor with the same rows as `self`, each row has num_samples sampled indices.
3150
-
3151
- Raises:
3152
- TypeError: If neither `seed` nor `seed2` is an int.
3153
- TypeError: If `self` is not a Tensor whose dtype is float32.
3154
- TypeError: If dtype of `num_samples` is not int32.
3119
+ For details, please refer to :func:`mindspore.ops.random_categorical`.
3120
+ """
3121
+ self._init_check()
3122
+ validator.check_is_int(num_sample, 'num_sample')
3123
+ validator.check_is_int(seed, 'seed')
3124
+ return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
3155
3125
 
3156
- Supported Platforms:
3157
- ``GPU``
3126
+ def masked_select(self, mask):
3127
+ """
3128
+ For details, please refer to :func:`mindspore.ops.masked_select`.
3129
+ """
3130
+ self._init_check()
3131
+ return tensor_operator_registry.get('masked_select')(self, mask)
3158
3132
 
3159
- Examples:
3160
- >>> from mindspore import Tensor
3161
- >>> import mindspore
3162
- >>> x = Tensor([0., 9., 4., 0.], mindspore.float32)
3163
- >>> output = x.multinomial(num_samples=2,seed=10)
3164
- >>> print(output)
3165
- [2 1]
3133
+ def gather_elements(self, dim, index):
3134
+ """
3135
+ For details, please refer to :func:`mindspore.ops.gather_elements`.
3166
3136
  """
3167
3137
  self._init_check()
3168
- validator.check_non_negative_int(seed, 'seed')
3169
- validator.check_non_negative_int(seed2, 'seed')
3170
- return tensor_operator_registry.get('multinomial')(seed, seed2)(self, num_samples)
3138
+ validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
3139
+ return tensor_operator_registry.get('gather_elements')(self, dim, index)
3171
3140
 
3172
- def rand_like(self, seed=None):
3173
- r"""
3174
- Returns a tensor with the same size as input that is filled with
3175
- random numbers from a uniform distribution on the interval [0, 1)
3141
+ def nonzero(self):
3142
+ """
3143
+ For details, please refer to :func:`mindspore.ops.nonzero`.
3144
+ """
3145
+ self._init_check()
3146
+ return tensor_operator_registry.get('nonzero')(self)

- Args:
- input_tensor (Union[Tensor, int, float]): the input tensor.
- seed (int, option): set the random seed (0 to 2**32).
-
- Returns:
- out (Union[Tensor, float]), with the same shape as input_tensor.
-
- Raises:
- TypeError: If dtype of the input_tensor is not int or float.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> input_x = Tensor(np.array([[1, 2, 3, 9], [1, 2, 3, 9]]), mindspore.int8)
- >>> output = input_x.rand_like(seed = 0)
- >>> print(output)
- [[0.5488135 0.71518937 0.60276338 0.54488318]
- [0.4236548 0.64589411 0.43758721 0.891773 ]]
- >>> input_p = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
- >>> output = input_p.rand_like(seed = 0)
- >>> print(output)
- [0.5488135 0.71518937 0.60276338]
- """
- input_tensor = self
- input_tensor = np.array(input_tensor)
- shape_ = input_tensor.shape
- input_tensor = input_tensor.reshape(-1)
- x = len(input_tensor)
- np.random.seed(seed)
- return Tensor(np.array([np.random.rand(1) for i in range(x)]).reshape(shape_))
-
- def randint_like(self, high, low=0, seed=None):
- r"""
- Returns a tensor with the same size as the input tensor,
- and the numerical value is a random number on the interval [low, high],
- if only one int type data is entered, the default value is high,
- if two integers are entered, they are low and high respectively.
-
- Args:
- input_tensor (Union[Tensor, int, float]): the size of input will determine size of the output tensor.
- low (int, optional) – Lowest integer to be drawn from the distribution. Default: 0.
- high (int) – One above the highest integer to be drawn from the distribution.
- seed (int, optional): set the random seed (0 to 2**32).
-
- Returns:
- out (Union[Tensor, int]), with the same shape as input_tensor.
-
- Raises:
- TypeError: If dtype of the input_tensor is not int or float.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> input_x = Tensor(np.array([1., 2., 3., 4., 5.]), mindspore.float32)
- >>> output = input_x.randint_like(20, seed = 0)
- >>> print(output)
- [12 15 0 3 3]
- >>> output = input_x.randint_like(20, 100, seed = 0)
- >>> print(output)
- [64 67 84 87 87]
- """
- input_tensor = self
- input_tensor = np.array(input_tensor)
- shape_ = input_tensor.shape
- input_tensor = input_tensor.reshape(-1)
- if low > high:
- high, low = low, high
- x = len(input_tensor)
- np.random.seed(seed)
- return Tensor(np.array([np.random.randint(low, high) for i in range(x)]).reshape(shape_))
-
- def randn_like(self, seed=None):
- r"""
- Returns a tensor with the same size as input that is filled with random
- numbers from a normal distribution with mean 0 and variance 1.
-
- Args:
- input_tensor (Union[Tensor, int, float]): the size of input will determine size of the output tensor.
- seed (int, optional): set the random seed (0 to 2**32).
-
- Returns:
- out (Union[Tensor, int]), with the same shape as input_tensor.
-
- Raises:
- TypeError: If dtype of the input_tensor is not int or float.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> input_x = Tensor(np.array([1., 2., 3., 4., 5.]), mindspore.float32)
- >>> output = input_x.randn_like(seed = 0)
- >>> print(output)
- [1.7640524 0.4001572 0.978738 2.2408931 1.867558 ]
- >>> input_p = Tensor(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]), mindspore.int32)
- >>> output = input_p.randn_like(seed = 0)
- >>> print(output)
- [[ 1.7640524 0.4001572 0.978738 2.2408931 1.867558 ]
- [-0.9772779 0.95008844 -0.1513572 -0.10321885 0.41059852]]
- """
- input_tensor = np.array(self)
- shape_ = input_tensor.shape
- input_tensor = input_tensor.reshape(-1)
- x = len(input_tensor)
- np.random.seed(seed)
- return Tensor([np.random.randn() for i in range(x)]).reshape(shape_)
-
- def as_strided(self, shape=None, strides=None, subok=False, writeable=True):
- r"""
- as_strided(input, size, stride, storage_offset=0) -> Tensor
- Create a view of an existing `mindspore.Tensor` :attr:`x` with specified
- :attr:`shape`, :attr:`stride` and :attr:`subok`.
-
- Args:
- x (Tensor): the input tensor.
- shape (tuple or ints): the shape of the output tensor
- stride (tuple or ints): the stride of the output tensor
- subok (int, optional): the offset in the underlying storage of the output tensor
-
- Returns:
- Tensor viewed by strides and subok.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> X = np.arange(9, dtype=np.int32).reshape(3,3)
- >>> output = Tensor(X).as_strided((2, 2), (1, 1))
- >>> print(output)
- [[0 1]
- [1 2]]
- """
- dtype_ = self.dtype
- x = self.asnumpy()
- n = x.strides[1]
- strides = tuple(np.array(strides) * n)
- return Tensor(np.lib.stride_tricks.as_strided(x, shape, strides, subok, writeable), dtype=dtype_)
-
- def randperm(self, max_length=1, pad=-1):
- r"""
- Generates n random samples from 0 to n-1 without repeating. If `max_length` > n,
- the last `max_length-n` elements will be filled with `pad`.
-
- Args:
- max_length (int): Number of items expected to get and the number must be greater than 0. Default: 1.
- pad (int): The pad value to be filled. Default: -1.
- dtype (mindspore.dtype): The type of output. Default: mindspore.int32.
-
- Inputs:
- - **n** (Tensor[int32]) - The input tensor with shape: (1,) and the number must be in [0, `max_length`].
-
- Outputs:
- - **output** (Tensor) - The output Tensor with shape: (`max_length`,) and type: `dtype`.
-
- Raises:
- TypeError: If neither `max_length` nor `pad` is an int.
- TypeError: If `self` has non-Int elements.
- TypeError: If `self` has negative elements.
-
- Supported Platforms:
- ``Ascend`` ``GPU``
-
- Examples:
- >>> # The result of every execution is different because this operator will generate n random samples.
- >>> from mindspore import Tensor
- >>> import mindspore
- >>> n = Tensor([20], dtype=mindspore.int32)
- >>> output = n.randperm(max_length=30, pad=-1)
- >>> print(output)
- [15 6 11 19 14 16 9 5 13 18 4 10 8 0 17 2 1 12 3 7
- -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]
- """
- self._init_check()
- return tensor_operator_registry.get('randperm')(max_length, pad)(self)
-
- def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
- r"""
- For details, please refer to :func:`mindspore.ops.random_categorical`.
- """
- self._init_check()
- validator.check_is_int(num_sample, 'num_sample')
- validator.check_is_int(seed, 'seed')
- return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
-
- def masked_select(self, mask):
- """
- For details, please refer to :func:`mindspore.ops.masked_select`.
- """
- self._init_check()
- return tensor_operator_registry.get('masked_select')(self, mask)
-
- def gather_elements(self, dim, index):
- """
- For details, please refer to :func:`mindspore.ops.gather_elements`.
- """
- self._init_check()
- validator.check_value_type('index', index, (Tensor_,), 'Tensor.gather_elements')
- return tensor_operator_registry.get('gather_elements')(self, dim, index)
-
- def nonzero(self):
- """
- For details, please refer to :func:`mindspore.ops.nonzero`.
- """
- self._init_check()
- return tensor_operator_registry.get('nonzero')(self)
-
- def svd(self, full_matrices=False, compute_uv=True):
- """
- For details, please refer to :func:`mindspore.ops.svd`.
- """
- svd_op = tensor_operator_registry.get("svd")
- if compute_uv:
- return svd_op(full_matrices, compute_uv)(self)
+ def svd(self, full_matrices=False, compute_uv=True):
+ """
+ For details, please refer to :func:`mindspore.ops.svd`.
+ """
+ svd_op = tensor_operator_registry.get("svd")
+ if compute_uv:
+ return svd_op(full_matrices, compute_uv)(self)

  s, _, _ = svd_op(full_matrices, compute_uv)(self)
  return s
@@ -3418,7 +3170,6 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('heaviside')(self, values)

-
  def hypot(self, other):
  r"""
  For details, please refer to :func:`mindspore.ops.hypot`.
@@ -3431,7 +3182,21 @@ class Tensor(Tensor_):
  For details, please refer to :func:`mindspore.ops.soft_shrink`.
  """
  self._init_check()
- return tensor_operator_registry.get('soft_shrink')(lambd)(self)
+ return tensor_operator_registry.get('soft_shrink')(self, lambd)
+
+ def matrix_determinant(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.matrix_determinant`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('matrix_determinant')(self)
+
+ def log_matrix_determinant(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.log_matrix_determinant`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('log_matrix_determinant')(self)
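Note: the two new determinant helpers simply forward to the functional ops. A small sketch (the 2x2 input is an illustrative assumption; the (sign, log|det|) return pair below reflects the usual log-determinant convention and should be checked against the :func:`mindspore.ops.log_matrix_determinant` docs):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> a = Tensor(np.array([[1., 2.], [3., 4.]]), ms.float32)
>>> det = a.matrix_determinant()               # 1*4 - 2*3 = -2
>>> sign, logdet = a.log_matrix_determinant()  # sign(-2) = -1, log|det| = log(2)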

  def to_coo(self):
  """
@@ -3556,6 +3321,13 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('diag')()(self)

+ def diagflat(self, offset=0):
+ r"""
+ For details, please refer to :func:`mindspore.ops.diagflat`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('diagflat')(self, offset)
+
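Note: a quick sketch of the new `diagflat` wrapper, assuming the usual flatten-then-embed-on-a-diagonal semantics of :func:`mindspore.ops.diagflat` (input values are illustrative):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.array([1., 2., 3.]), ms.float32)
>>> x.diagflat()          # 3x3 matrix with [1, 2, 3] on the main diagonal
>>> x.diagflat(offset=1)  # 4x4 matrix with the values one diagonal above the main one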
  def xdivy(self, y):
  r"""
  For details, please refer to :func:`mindspore.ops.xdivy`.
@@ -3563,11 +3335,40 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get("xdivy")()(self, y)

- def split(self, axis=0, output_num=1):
+ def split(self, split_size_or_sections, axis=0):
  """
  For details, please refer to :func:`mindspore.ops.split`.
  """
- return tensor_operator_registry.get('split')(axis, output_num)(self)
+ return tensor_operator_registry.get('split')(self, split_size_or_sections, axis)
+
+ def tensor_split(self, indices_or_sections, axis=0):
+ """
+ For details, please refer to :func:`mindspore.ops.tensor_split`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('tensor_split')(self, indices_or_sections, axis)
+
+ def vsplit(self, indices_or_sections):
+ """
+ For details, please refer to :func:`mindspore.ops.vsplit`.
+ """
+
+ self._init_check()
+ return tensor_operator_registry.get('vsplit')(self, indices_or_sections)
+
+ def hsplit(self, indices_or_sections):
+ """
+ For details, please refer to :func:`mindspore.ops.hsplit`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('hsplit')(self, indices_or_sections)
+
+ def dsplit(self, indices_or_sections):
+ """
+ For details, please refer to :func:`mindspore.ops.dsplit`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('dsplit')(self, indices_or_sections)
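Note: `Tensor.split` changes from the a0 signature (`axis`, `output_num`) to a (`split_size_or_sections`, `axis`) signature, and the whole `*split` family now delegates to `mindspore.ops`. A hedged migration sketch (values and chunk sizes are illustrative assumptions):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.arange(6), ms.float32)
>>> # 2.0.0a0: parts = x.split(axis=0, output_num=3)
>>> a, b, c = x.split(2)             # 2.0.0rc1: three chunks of size 2 each
>>> parts = x.tensor_split([2, 5])   # split before indices 2 and 5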

  def xlogy(self, y):
  r"""
@@ -3593,9 +3394,16 @@ class Tensor(Tensor_):
  """
  return tensor_operator_registry.get('tile')()(self, multiples)

+ def topk(self, k, dim=None, largest=True, sorted=True):
+ r"""
+ For details, please refer to :func:`mindspore.ops.topk`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)
+
  def top_k(self, k, sorted=True):
  r"""
- For details, please refer to :func:`mindspore.ops.top_k`.
+ `Tensor.top_k` is deprecated; please use `Tensor.topk` instead.
  """
  self._init_check()
  validator.check_is_int(k, 'k')
@@ -3603,7 +3411,7 @@ class Tensor(Tensor_):
  return tensor_operator_registry.get("top_k")(sorted)(self, k)

  def sigmoid(self):
- """
+ r"""
  For details, please refer to :func:`mindspore.ops.sigmoid`.
  """
  return tensor_operator_registry.get("sigmoid")()(self)
@@ -3618,47 +3426,10 @@ class Tensor(Tensor_):

  def addmv(self, mat, vec, beta=1, alpha=1):
  r"""
- Multiplies matrix `mat` and vector `vec`. Input vector is added to the final result.
-
- If `mat` is a :math:`(N, M)` tensor, `vec` is a 1-D tensor of size :math:`M`, then `x` must be broadcastable
- with a 1-D tensor of size :math:`N` and `out` will be 1-D tensor of size :math:`N`.
-
- The optional values `beta` and `alpha` are the matrix-vector product between `mat` and `vec` and the scale
- factor for the added tensor `x` respectively. If `beta` is 0, then `x` will be ignored.
-
- .. math::
- output = β x + α (mat @ vec)
-
- Args:
- mat (Tensor): The first tensor to be multiplied. The shape of the tensor is :math:`(N, M)`.
- vec (Tensor): The second tensor to be multiplied. The shape of the tensor is :math:`(M,)`.
- beta (scalar[int, float, bool], optional): Multiplier for `x` (β). The `beta` must be int or
- float or bool, Default: 1.
- alpha (scalar[int, float, bool], optional): Multiplier for `mat` @ `vec` (α). The `alpha` must
- be int or float or bool, Default: 1.
-
- Returns:
- Tensor, the shape of the output tensor is :math:`(N,)`, has the same dtype as `x`.
-
- Raises:
- TypeError: If `mat`, `vec`, `x` is not a Tensor.
- TypeError: If input tensor and `x`, `mat`, 'vec' are not the same dtype.
- ValueError: If `mat` is not a 2-D Tensor.
- If `x`, `vec` is not a 1-D Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> x = Tensor(np.array([2., 3.]).astype(np.float32))
- >>> mat = Tensor(np.array([[2., 5., 3.], [4., 2., 2.]]).astype(np.float32))
- >>> vec = Tensor(np.array([3., 2., 4.]).astype(np.float32))
- >>> output = x.addmv(mat, vec)
- >>> print(output)
- [30. 27.]
+ For details, please refer to :func:`mindspore.ops.addmv`.
  """
  self._init_check()
- return tensor_operator_registry.get('addmv')(self, mat, vec, beta=1, alpha=1)
+ return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)
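Note: this hunk also fixes a real bug: the a0 body hardcoded `beta=1, alpha=1` instead of forwarding the caller's arguments. A quick check, reusing the values from the removed docstring (the beta=2 result is computed by hand from output = beta*x + alpha*(mat @ vec)):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.array([2., 3.]).astype(np.float32))
>>> mat = Tensor(np.array([[2., 5., 3.], [4., 2., 2.]]).astype(np.float32))
>>> vec = Tensor(np.array([3., 2., 4.]).astype(np.float32))
>>> x.addmv(mat, vec)           # [30. 27.], as in the old example
>>> x.addmv(mat, vec, beta=2)   # 2*[2, 3] + [28, 24] = [32. 30.]; the a0 build ignored beta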

  def asinh(self):
  r"""
@@ -3671,7 +3442,8 @@ class Tensor(Tensor_):
  r"""
  Alias for :func:`mindspore.Tensor.asinh`.
  """
- return self.asinh()
+ self._init_check()
+ return tensor_operator_registry.get('arcsinh')(self)

  def atan(self):
  r"""
@@ -3691,7 +3463,8 @@ class Tensor(Tensor_):
  r"""
  Alias for :func:`mindspore.Tensor.atanh`.
  """
- return self.atanh()
+ self._init_check()
+ return tensor_operator_registry.get('arctanh')(self)

  def bmm(self, mat2):
  r"""
@@ -3705,7 +3478,7 @@ class Tensor(Tensor_):
  Performs tensor dtype conversion.

  Args:
- dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
+ dtype (Number): The valid data type of the output tensor. Only constant value is allowed.

  Returns:
  Tensor, converted to the specified `dtype`.
@@ -3937,19 +3710,19 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('multiply')(self, value)

- def div(self, other, rounding_mode=None):
+ def div(self, value, *, rounding_mode=None):
  r"""
  For details, please refer to :func:`mindspore.ops.div`.
  """
  self._init_check()
- return tensor_operator_registry.get('div')(self, other, rounding_mode)
+ return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)

- def divide(self, other, *, rounding_mode=None):
+ def divide(self, value, *, rounding_mode=None):
  r"""
- For details, please refer to :func:`mindspore.ops.div`.
+ Alias for :func:`mindspore.Tensor.div`.
  """
  self._init_check()
- return tensor_operator_registry.get('div')(self, other, rounding_mode)
+ return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
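Note: `rounding_mode` is now keyword-only in both `div` and `divide`, so a0-style positional calls need updating. A small sketch (values are illustrative; the 'floor' mode is assumed from the :func:`mindspore.ops.div` conventions):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.array([7., -7.]), ms.float32)
>>> x.div(2.0)                          # true division: [3.5, -3.5]
>>> x.div(2.0, rounding_mode='floor')   # must be passed by keyword in 2.0.0rc1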

  def equal(self, other):
  r"""
@@ -4078,6 +3851,12 @@ class Tensor(Tensor_):
  self._init_check()
  return tensor_operator_registry.get('less')(self, other)

+ def lt(self, other):
+ """
+ Alias for :func:`mindspore.Tensor.less`.
+ """
+ return self.less(other)
+
  def logical_and(self, other):
  r"""
  For details, please refer to :func:`mindspore.ops.logical_and`.
@@ -4108,354 +3887,256 @@ class Tensor(Tensor_):

  def lstsq(self, A):
  r"""
- Computes the solutions of the least squares and minimum norm problems of full-rank
- matrix `x` of size :math:`(m \times n)` and matrix `a` of size :math:`(m \times k)`.
-
- If :math:`m \geq n`, `lstsq` solves the least-squares problem:
-
- .. math::
-
- \begin{array}{ll}
- \min_y & \|xy-a\|_2.
- \end{array}
-
- If :math:`m < n`, `lstsq` solves the least-norm problem:
-
- .. math::
-
- \begin{array}{llll}
- \min_y & \|y\|_2 & \text{subject to} & xy = a.
- \end{array}
-
- Args:
- A (Tensor) - The m by k matrix equivalent to `a` in above.
- The input tensor whose data type is float16, float32 or float64.
-
- Returns:
- Tensor, the least squares or minimum norm problems solution, which has shape :math:`(n \times k)`.
- The data type is the same with `input`.
-
- Raises:
- TypeError: If `A` is not a Tensor.
- TypeError: If dtype of input tensor or `A` is not one of: float16, float32, float64.
- TypeError: If the dtypes of input tensor and `A` are not the same.
- ValueError: If the dimension of input tensor is not equal to 2.
- ValueError: If the dimension of `A` is not equal to 2 or 1.
- ValueError: If the length of input_dims[0] is not equal to the length of A_dims[0].
-
- Supported Platforms:
- ``CPU``
-
- Examples:
- >>> x = Tensor(np.array([[2,1,5],[3,5,1],[1,1,1]]),mindspore.float32)
- >>> a = Tensor(np.array([[10,5],[15,8],[7,4]]),mindspore.float32)
- >>> output = x.lstsq(a)
- >>> print(output)
- [[17.000002 11.000002 ]
- [-6.5000005 -4.500001 ]
- [-3.500002 -2.5000017]]
+ For details, please refer to :func:`mindspore.ops.lstsq`.
  """
  self._init_check()
  return tensor_operator_registry.get('lstsq')(self, A)
- def mvlgamma(self, p):
+ @property
+ def mH(self):
  r"""
- Computes the multivariate log-gamma function with dimension p element-wise.
-
- The following tex shows the mathematical calculation process of Mvlgamma:
-
- .. math::
-
- \log (\Gamma_{p}(a))=C+\sum_{i=1}^{p} \log (\Gamma(a-\frac{i-1}{2}))
-
- where :math:`C = \log(\pi) \times \frac{p(p-1)}{4}` and :math:`\Gamma(\cdot)` is the Gamma function.
-
- Args:
- p (int): The number of dimensions. And the value of `p` must be greater than or equal to 1.
-
- Returns:
- Tensor, has the same shape and type as input tensor.
-
- Raises:
- TypeError: If dtype of input tensor is neither float32 nor float64.
- TypeError: If `p` is not an int.
- ValueError: If `p` is not greater than or equal to 1.
- ValueError: If all elements of input tensor are not greater than (p-1)/2.
+ Accessing this property is equivalent to calling self.adjoint().
+ For details, please refer to :func:`mindspore.ops.adjoint`.
+ """
+ return self.adjoint()

- Supported Platforms:
- ``CPU`` ``GPU``
+ @property
+ def mT(self):
+ r"""
+ Returns a Tensor with the last two dimensions swapped.
+ Accessing the attribute x.mT is equivalent to calling the method x.swapaxes(-2, -1).
+ For details, please refer to :func:`mindspore.Tensor.swapaxes`.
+ """
+ return self.swapaxes(-2, -1)
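Note: the new matrix-transpose conveniences mirror their functional counterparts. A tiny shape sketch (the 3-D shape is an illustrative assumption):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.zeros((2, 3, 4)), ms.float32)
>>> x.mT.shape   # (2, 4, 3): last two axes swapped
>>> x.mH.shape   # (2, 4, 3): conjugate transpose; identical to mT for real dtypes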

- Examples:
- >>> x = Tensor(np.array([[3, 4, 5], [4, 2, 6]]), mindspore.float32)
- >>> y = x.mvlgamma(p=3)
- >>> print(y)
- [[2.694925 5.402975 9.140645]
- [5.402975 1.596312 13.64045]]
+ def mvlgamma(self, p):
+ r"""
+ For details, please refer to :func:`mindspore.ops.mvlgamma`.
  """
  self._init_check()
  return tensor_operator_registry.get('mvlgamma')(self, p)
  def matmul(self, tensor2):
  r"""
- Returns the matrix product of two tensors.
-
- Note:
- Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are
- not supported.
- On CPU, the supported dtypes are np.float16 and np.float32.
- On GPU, the supported dtypes are np.float16 and np.float32.
-
- Args:
- tensor2 (Tensor): Second input tensor, scalar not allowed.
- The last dimension of input tensor must be the same size as the second last dimension of `tensor2`.
- And the shape of input tensor and tensor2 could be broadcast.
-
- Returns:
- Tensor or scalar, the matrix product of the inputs. This is a scalar only
- when both input tensor, `tensor2` are 1-d vectors.
-
- Raises:
- ValueError: If the last dimension of input tensor is not the same size as the
- second-to-last dimension of `tensor2`, or if a scalar value is passed in.
- ValueError: If the shape of input tensor and `tensor2` could not broadcast together.
-
- Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
-
- Examples:
- >>> x = Tensor(np.arange(2*3*4).reshape(2, 3, 4), mindspore.float32)
- >>> y = Tensor(np.arange(4*5).reshape(4, 5), mindspore.float32)
- >>> output = x.matmul(y)
- >>> print(output)
- [[[ 70. 76. 82. 88. 94.]
- [ 190. 212. 234. 256. 278.]
- [ 310. 348. 386. 424. 462.]]
- [[ 430. 484. 538. 592. 646.]
- [ 550. 620. 690. 760. 830.]
- [ 670. 756. 842. 928. 1014.]]]
+ For details, please refer to :func:`mindspore.ops.matmul`.
  """
  self._init_check()
  return tensor_operator_registry.get('matmul')(self, tensor2)
4240
- def maximum(self, other):
3926
+ def inner(self, other):
4241
3927
  r"""
4242
- Computes the maximum of input tensors element-wise.
4243
-
4244
- Note:
4245
- - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data
4246
- types consistent.
4247
- - The inputs must be two tensors or one tensor and one scalar.
4248
- - When the inputs are two tensors,
4249
- dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
4250
- - When the inputs are one tensor and one scalar,
4251
- the scalar could only be a constant.
4252
- - Broadcasting is supported.
4253
- - If one of the elements being compared is a NaN, then that element is returned.
4254
-
4255
- .. math::
4256
- output_i = max(input_i, other_i)
4257
-
4258
- Args:
4259
- other (Union[Tensor, Number, bool]): The second input is a number or
4260
- a bool when the first input is a tensor or a tensor whose data type is number or bool.
4261
-
4262
- Returns:
4263
- Tensor, the shape is the same as the one after broadcasting,
4264
- and the data type is the one with higher precision or higher digits among the two inputs.
3928
+ For details, please refer to :func:`mindspore.ops.inner`.
3929
+ """
3930
+ self._init_check()
3931
+ return tensor_operator_registry.get('inner')(self, other)
4265
3932
 
4266
- Raises:
4267
- TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
4268
- ValueError: If `input` and `other` are not the same shape.
3933
+ def multinomial(self, num_samples, replacement=True, seed=None):
3934
+ r"""
3935
+ For details, please refer to :func:`mindspore.ops.multinomial`.
3936
+ """
3937
+ self._init_check()
3938
+ return tensor_operator_registry.get('multinomial')(self, num_samples, replacement, seed)
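Note: `multinomial` moves from the a0 `(seed, seed2)` operator construction to the functional `(num_samples, replacement, seed)` call. A hedged sketch (the weights are illustrative; sampled indices are random by nature):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> probs = Tensor(np.array([0.1, 0.6, 0.3]), ms.float32)
>>> samples = probs.multinomial(num_samples=2, replacement=True, seed=10)
>>> samples = probs.multinomial(2, replacement=False)   # sample without replacement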

- Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
+ def matrix_power(self, n):
+ r"""
+ For details, please refer to :func:`mindspore.ops.matrix_power`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('matrix_power')(self, n)

- Examples:
- >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
- >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
- >>> output = x.maximum(y)
- >>> print(output)
- [4. 5. 6.]
+ def maximum(self, other):
+ r"""
+ For details, please refer to :func:`mindspore.ops.maximum`.
  """
  self._init_check()
  return tensor_operator_registry.get('maximum')(self, other)
- def mul(self, value):
+ def mm(self, mat2):
  r"""
- Multiplies two tensors element-wise.
-
- .. note::
- - Inputs of input tensor and `value` comply with the implicit type conversion rules to make
- the data types consistent.
- - The inputs must be two tensors or one tensor and one scalar.
- - When the inputs are two tensors,
- dtypes of them cannot be bool at the same time, and the shapes of them can be broadcast.
- - When the inputs are one tensor and one scalar, the scalar could only be a constant.
-
- Args:
- value (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
- the second input should be a number.Number or bool value, or a Tensor whose data type is number
- or bool\_. When the first input is Scalar, the second input must be a Tensor whose data type is
- number or bool\_.
-
- Returns:
- Tensor, the shape is the same as the one after broadcasting,
- and the data type is the one with higher precision or higher digits among the two inputs.
-
- Raises:
- TypeError: If input tensor and `value` is not one of the following: Tensor, number.Number, bool.
- ValueError: If input tensor and `value` are not the same shape.
+ For details, please refer to :func:`mindspore.ops.mm`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('mm')(self, mat2)

- Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
+ def msort(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.msort`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('msort')(self)

- Examples:
- >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
- >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
- >>> output = x.mul(y)
- >>> print(output)
- [ 4. 10. 18.]
+ def mul(self, value):
+ r"""
+ For details, please refer to :func:`mindspore.ops.mul`.
  """
  self._init_check()
  return tensor_operator_registry.get('mul')(self, value)
+ def nan_to_num(self, nan=0.0, posinf=None, neginf=None):
+ """
+ For details, please refer to :func:`mindspore.ops.nan_to_num`.
+ """
+ return tensor_operator_registry.get('nan_to_num')(self, nan, posinf, neginf)
+
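Note: a short sketch of the new `nan_to_num` wrapper (input values are illustrative; the default handling of infinities follows :func:`mindspore.ops.nan_to_num`):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.array([np.nan, np.inf, -np.inf, 1.0]), ms.float32)
>>> x.nan_to_num()                                  # NaN replaced with 0.0 per the default
>>> x.nan_to_num(nan=0.0, posinf=1e6, neginf=-1e6)  # explicit replacements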
  def neg(self):
  r"""
- Returns a tensor with negative values of the input tensor element-wise.
-
- .. math::
-
- out_{i} = - x_{i}
-
- Returns:
- Tensor, has the same shape and dtype as input.
-
- Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
-
- Examples:
- >>> x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
- >>> output = x.neg()
- >>> print(output)
- [-1. -2. 1. -2. 0. 3.5]
+ For details, please refer to :func:`mindspore.ops.neg`.
  """
  self._init_check()
  return tensor_operator_registry.get('neg')(self)
  def ne(self, other):
  r"""
- Computes the non-equivalence of two tensors element-wise.
+ For details, please refer to :func:`mindspore.ops.ne`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('ne')(self, other)

- Note:
- - Input tensor and `other` comply with the implicit type conversion rules to make the data
- types consistent.
- - The inputs must be two tensors or one tensor and one scalar.
- - When the inputs are two tensors, the shapes of them could be broadcast.
- - When the inputs are one tensor and one scalar, the scalar could only be a constant.
- - Broadcasting is supported.
+ def not_equal(self, other):
+ r"""
+ For details, please refer to :func:`mindspore.ops.not_equal`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('not_equal')(self, other)
+
+ def new_zeros(self, size, *, dtype=None):
+ r"""
+ Return a tensor of `size` filled with zeros.

  Args:
- other (Union[Tensor, Number, bool]): The second input is a number or
- a bool when the first input is a tensor or a tensor whose data type is number or bool.
+ size (Union[int, tuple, list]): An int, list or tuple of integers defining the output shape.
+
+ Keyword Args:
+ dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
+ tensor has the same dtype as `self`. Default: None.

  Returns:
- Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
+ Tensor, with the shape and dtype as defined above, filled with zeros.

  Raises:
- TypeError: If input tensor and `other` is not one of the following: Tensor, Number, bool.
- TypeError: If neither input tensor and `other` is a Tensor.
+ TypeError: If `size` is not an int, list or tuple of integers.

  Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
- >>> output = x.ne(2.0)
+ >>> output = x.new_zeros((2, 2))
  >>> print(output)
- [ True False True]
+ [[0. 0.]
+ [0. 0.]]
  """
+ validator.check_value_type('size', size, [list, int, tuple], 'Tensor.new_zeros')
+ if isinstance(size, list):
+ size = tuple(size)
  self._init_check()
- return tensor_operator_registry.get('ne')(self, other)
+ _dtype = self.dtype if dtype is None else dtype
+ return tensor_operator_registry.get('zeros')(size, _dtype)
- def sinh(self):
+ def new_ones(self, size, *, dtype=None):
  r"""
- Computes hyperbolic sine of the input element-wise.
+ Return a tensor of `size` filled with ones.

- .. math::
+ Args:
+ size (Union[int, tuple, list]): An int, list or tuple of integers defining the output shape.

- out_i = \sinh(x_i)
+ Keyword Args:
+ dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
+ tensor has the same dtype as `self`. Default: None.

  Returns:
- Tensor, has the same shape as input tensor.
+ Tensor, with the shape and dtype as defined above, filled with ones.
+
+ Raises:
+ TypeError: If `size` is not an int, list or tuple of integers.

  Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
- >>> output = x.sinh()
+ >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
+ >>> output = x.new_ones((2, 2))
  >>> print(output)
- [0.6604918 0.28367308 0.44337422 0.6604918]
+ [[1. 1.]
+ [1. 1.]]
  """
+ validator.check_value_type('size', size, [list, int, tuple], 'Tensor.new_ones')
+ if isinstance(size, list):
+ size = tuple(size)
  self._init_check()
- return tensor_operator_registry.get('sinh')(self)
+ _dtype = self.dtype if dtype is None else dtype
+ return tensor_operator_registry.get('ones')(size, _dtype)

- def sort(self, dim=-1, descending=False):
+ def sign(self):
  r"""
- Sorts the elements of the input tensor along a given dimension in ascending order by value.
-
- Args:
- dim (int, optional): The dimension to sort along. Default: -1.
- descending (bool, optional): Controls the sorting order. If descending is True, then the elements
- are sorted in descending order by value. Default: False.
+ For details, please refer to :func:`mindspore.ops.sign`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('sign')(self)

- Returns:
- y1 (Tensor): A tensor whose values are the sorted values, with the same shape and dtype as input.
- y2 (Tensor): The indices of the elements in the original input tensor. Tensor dtype is int32.
+ def signbit(self):
+ """
+ For details, please refer to :func:`mindspore.ops.signbit`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('signbit')(self)

- Raises:
- TypeError: If dtype of `dim` is not int.
- TypeError: If dtype of `descending` is not bool.
- TypeError: If dtype of input tensor is neither float16 nor float32.
- ValueError: If `dim` is not in range of [-len(x.shape), len(x.shape)).
+ def sgn(self):
+ """
+ For details, please refer to :func:`mindspore.ops.sgn`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('sgn')(self)

- Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
+ def sin(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.sin`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('sin')(self)

- Examples:
- >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
- >>> output = x.sort()
- >>> print(output)
- (Tensor(shape=[3, 3], dtype=Float16, value=
- [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
- [ 3.0000e+00, 5.0000e+00, 9.0000e+00],
- [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
- [[2, 1, 0],
- [2, 0, 1],
- [0, 1, 2]]))
+ def sinc(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.sinc`.
  """
  self._init_check()
- return tensor_operator_registry.get('sort')(axis=dim, descending=descending)(self)
+ return tensor_operator_registry.get('sinc')(self)

- def trunc(self):
+ def sinh(self):
  r"""
- Returns a new tensor with the truncated integer values of the elements of input.
+ For details, please refer to :func:`mindspore.ops.sinh`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('sinh')(self)

- Returns:
- Tensor, the same shape and dtype as the input.
+ def sort(self, axis=-1, descending=False):
+ r"""
+ For details, please refer to :func:`mindspore.ops.sort`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)

- Supported Platforms:
- ``CPU``
+ def argsort(self, axis=-1, descending=False):
+ """
+ For details, please refer to :func:`mindspore.ops.argsort`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('argsort')(self, axis, descending)
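Note: `sort` now takes `axis` instead of the a0 `dim` parameter and delegates to the functional op; `argsort` is new. A short sketch, reusing the matrix from the removed `sort` docstring (the (values, indices) pair follows :func:`mindspore.ops.sort`):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), ms.float16)
>>> values, indices = x.sort(axis=-1)            # was x.sort(dim=-1) in 2.0.0a0
>>> order = x.argsort(axis=-1, descending=True)  # indices only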

- Examples:
- >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]),mindspore.float32)
- >>> output = x.trunc()
- >>> print(output)
- [3. 0. 0. -3.]
+ def trunc(self):
+ r"""
+ For details, please refer to :func:`mindspore.ops.trunc`.
  """
  self._init_check()
  return tensor_operator_registry.get('trunc')(self)
+ def where(self, condition, y):
+ r"""
+ For details, please refer to :func:`mindspore.ops.where`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('where')(condition, self, y)
+
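Note: the argument order matters here: `self` plays the role of `x` in ops.where(condition, x, y), i.e. elements of `self` are kept where `condition` is True. A small sketch (values are illustrative):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.array([1., -2., 3.]), ms.float32)
>>> y = Tensor(np.zeros(3), ms.float32)
>>> x.where(x > 0, y)   # keeps 1. and 3., takes 0. where x <= 0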
  def imag(self):
  r"""
  Returns a new tensor containing imaginary value of the input tensor.
@@ -4465,7 +4146,7 @@ class Tensor(Tensor_):
  Tensor, the shape is the same as the input tensor.

  Supported Platforms:
- ``CPU`` ``GPU``
+ ``GPU`` ``CPU``

  Examples:
  >>> x = Tensor(np.asarray(np.complex(1.3 + 0.4j)), mindspore.complex64)
@@ -4477,6 +4158,23 @@ class Tensor(Tensor_):
  return tensor_operator_registry.get('imag')(self)


+ def nextafter(self, other):
+ r"""
+ For details, please refer to :func:`mindspore.ops.nextafter`.
+ """
+ self._init_check()
+ return tensor_operator_registry.get('nextafter')(self, other)
+
+
+ def qr(self, some=True):
+ r"""
+ For details, please refer to :func:`mindspore.ops.qr`.
+ """
+ self._init_check()
+ validator.check_value_type('some', some, bool, 'Tensor.qr')
+ return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')
+
+
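Note: `some=True` maps to the 'reduced' QR decomposition and `some=False` to 'complete', mirroring the mode argument of :func:`mindspore.ops.qr`. A shape sketch (the 4x3 input is an illustrative assumption):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> a = Tensor(np.random.rand(4, 3), ms.float32)
>>> q, r = a.qr()            # reduced: q is 4x3, r is 3x3
>>> q, r = a.qr(some=False)  # complete: q is 4x4, r is 4x3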
  def _vm_compare(*args):
  """Implement `vm_compare` for tensor."""
  obj_str = args[-1]