mindspore 2.0.0a0__cp38-cp38-win_amd64.whl → 2.0.0rc1__cp38-cp38-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.
Files changed (655)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -2
  3. mindspore/_c_dataengine.cp38-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp38-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp38-win_amd64.pyd +0 -0
  6. mindspore/_check_jit_forbidden_api.py +102 -0
  7. mindspore/_checkparam.py +1066 -1001
  8. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
  9. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
  10. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
  11. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
  12. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  13. mindspore/_extends/parse/__init__.py +5 -3
  14. mindspore/_extends/parse/namespace.py +16 -1
  15. mindspore/_extends/parse/parser.py +107 -22
  16. mindspore/_extends/parse/resources.py +0 -7
  17. mindspore/_extends/parse/standard_method.py +885 -413
  18. mindspore/amp.py +52 -57
  19. mindspore/boost/boost.py +2 -2
  20. mindspore/boost/boost_cell_wrapper.py +38 -20
  21. mindspore/boost/dim_reduce.py +3 -3
  22. mindspore/boost/group_loss_scale_manager.py +1 -1
  23. mindspore/common/__init__.py +4 -6
  24. mindspore/common/_decorator.py +2 -0
  25. mindspore/common/_register_for_adapter.py +55 -0
  26. mindspore/common/_stub_tensor.py +201 -0
  27. mindspore/common/_utils.py +41 -7
  28. mindspore/common/api.py +215 -141
  29. mindspore/common/dtype.py +8 -1
  30. mindspore/common/dump.py +2 -2
  31. mindspore/common/initializer.py +4 -2
  32. mindspore/common/jit_config.py +17 -13
  33. mindspore/common/mutable.py +33 -13
  34. mindspore/common/parameter.py +23 -21
  35. mindspore/common/seed.py +8 -24
  36. mindspore/common/sparse_tensor.py +62 -41
  37. mindspore/common/tensor.py +852 -1154
  38. mindspore/communication/__init__.py +2 -2
  39. mindspore/communication/_comm_helper.py +11 -4
  40. mindspore/communication/management.py +22 -21
  41. mindspore/config/op_info.config +501 -1008
  42. mindspore/context.py +201 -23
  43. mindspore/dataset/__init__.py +6 -6
  44. mindspore/dataset/audio/__init__.py +7 -7
  45. mindspore/dataset/audio/transforms.py +670 -30
  46. mindspore/dataset/audio/utils.py +47 -4
  47. mindspore/dataset/audio/validators.py +223 -1
  48. mindspore/dataset/callback/ds_callback.py +2 -2
  49. mindspore/dataset/core/config.py +210 -14
  50. mindspore/dataset/core/validator_helpers.py +2 -2
  51. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  52. mindspore/dataset/debug/debug_hook.py +65 -0
  53. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  54. mindspore/dataset/engine/__init__.py +7 -3
  55. mindspore/dataset/engine/cache_client.py +1 -1
  56. mindspore/dataset/engine/datasets.py +322 -66
  57. mindspore/dataset/engine/datasets_audio.py +80 -76
  58. mindspore/dataset/engine/datasets_standard_format.py +51 -38
  59. mindspore/dataset/engine/datasets_text.py +232 -118
  60. mindspore/dataset/engine/datasets_user_defined.py +41 -17
  61. mindspore/dataset/engine/datasets_vision.py +746 -225
  62. mindspore/dataset/engine/graphdata.py +75 -10
  63. mindspore/dataset/engine/iterators.py +45 -5
  64. mindspore/dataset/engine/offload.py +48 -28
  65. mindspore/dataset/engine/validators.py +117 -8
  66. mindspore/dataset/text/__init__.py +6 -5
  67. mindspore/dataset/text/transforms.py +86 -3
  68. mindspore/dataset/text/utils.py +6 -4
  69. mindspore/dataset/text/validators.py +25 -0
  70. mindspore/dataset/transforms/__init__.py +3 -2
  71. mindspore/dataset/transforms/c_transforms.py +1 -1
  72. mindspore/dataset/transforms/transforms.py +2 -2
  73. mindspore/dataset/utils/__init__.py +2 -1
  74. mindspore/dataset/utils/line_reader.py +121 -0
  75. mindspore/dataset/vision/__init__.py +2 -3
  76. mindspore/dataset/vision/c_transforms.py +9 -9
  77. mindspore/dataset/vision/py_transforms.py +5 -5
  78. mindspore/dataset/vision/py_transforms_util.py +2 -0
  79. mindspore/dataset/vision/transforms.py +160 -161
  80. mindspore/dataset/vision/utils.py +3 -3
  81. mindspore/experimental/map_parameter.py +38 -26
  82. mindspore/include/OWNERS +0 -1
  83. mindspore/include/api/callback/callback.h +9 -13
  84. mindspore/include/api/callback/ckpt_saver.h +2 -2
  85. mindspore/include/api/callback/loss_monitor.h +2 -2
  86. mindspore/include/api/callback/lr_scheduler.h +5 -5
  87. mindspore/include/api/callback/time_monitor.h +2 -2
  88. mindspore/include/api/callback/train_accuracy.h +4 -6
  89. mindspore/include/api/cfg.h +19 -6
  90. mindspore/include/api/context.h +44 -9
  91. mindspore/include/api/delegate.h +1 -1
  92. mindspore/include/api/metrics/accuracy.h +2 -2
  93. mindspore/include/api/metrics/metrics.h +4 -3
  94. mindspore/include/api/model.h +9 -4
  95. mindspore/include/api/model_parallel_runner.h +2 -2
  96. mindspore/include/api/net.h +12 -11
  97. mindspore/include/api/serialization.h +19 -3
  98. mindspore/include/api/types.h +3 -3
  99. mindspore/include/dataset/constants.h +7 -0
  100. mindspore/include/dataset/text.h +59 -0
  101. mindspore/jpeg62.dll +0 -0
  102. mindspore/log.py +1 -1
  103. mindspore/mindrecord/filereader.py +18 -0
  104. mindspore/mindrecord/filewriter.py +197 -34
  105. mindspore/mindrecord/shardreader.py +9 -0
  106. mindspore/mindrecord/shardwriter.py +1 -1
  107. mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
  108. mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
  109. mindspore/mindrecord/tools/csv_to_mr.py +3 -3
  110. mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
  111. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  112. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  113. mindspore/mindspore_backend.dll +0 -0
  114. mindspore/mindspore_common.dll +0 -0
  115. mindspore/mindspore_core.dll +0 -0
  116. mindspore/mindspore_glog.dll +0 -0
  117. mindspore/mindspore_shared_lib.dll +0 -0
  118. mindspore/nn/__init__.py +0 -4
  119. mindspore/nn/cell.py +204 -132
  120. mindspore/nn/dynamic_lr.py +1 -1
  121. mindspore/nn/grad/cell_grad.py +7 -6
  122. mindspore/nn/layer/__init__.py +5 -4
  123. mindspore/nn/layer/activation.py +40 -89
  124. mindspore/nn/layer/basic.py +255 -624
  125. mindspore/nn/layer/channel_shuffle.py +7 -6
  126. mindspore/nn/layer/combined.py +1 -1
  127. mindspore/nn/layer/container.py +41 -4
  128. mindspore/nn/layer/conv.py +64 -28
  129. mindspore/nn/layer/dense.py +9 -8
  130. mindspore/nn/layer/embedding.py +27 -25
  131. mindspore/nn/layer/image.py +53 -46
  132. mindspore/nn/layer/math.py +97 -105
  133. mindspore/nn/layer/normalization.py +117 -86
  134. mindspore/nn/layer/padding.py +185 -95
  135. mindspore/nn/layer/pooling.py +817 -414
  136. mindspore/nn/layer/rnn_cells.py +10 -15
  137. mindspore/nn/layer/rnns.py +37 -38
  138. mindspore/nn/layer/thor_layer.py +11 -12
  139. mindspore/nn/layer/timedistributed.py +5 -5
  140. mindspore/nn/layer/transformer.py +701 -0
  141. mindspore/nn/learning_rate_schedule.py +8 -8
  142. mindspore/nn/loss/__init__.py +5 -4
  143. mindspore/nn/loss/loss.py +334 -199
  144. mindspore/nn/optim/ada_grad.py +6 -6
  145. mindspore/nn/optim/adadelta.py +2 -3
  146. mindspore/nn/optim/adafactor.py +4 -5
  147. mindspore/nn/optim/adam.py +126 -62
  148. mindspore/nn/optim/adamax.py +3 -4
  149. mindspore/nn/optim/adasum.py +6 -6
  150. mindspore/nn/optim/asgd.py +2 -2
  151. mindspore/nn/optim/ftrl.py +67 -38
  152. mindspore/nn/optim/lamb.py +4 -5
  153. mindspore/nn/optim/lars.py +2 -2
  154. mindspore/nn/optim/lazyadam.py +43 -4
  155. mindspore/nn/optim/momentum.py +6 -5
  156. mindspore/nn/optim/optimizer.py +3 -1
  157. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  158. mindspore/nn/optim/rmsprop.py +1 -1
  159. mindspore/nn/optim/rprop.py +8 -9
  160. mindspore/nn/optim/sgd.py +19 -13
  161. mindspore/nn/optim/thor.py +10 -15
  162. mindspore/nn/probability/__init__.py +0 -2
  163. mindspore/nn/probability/bijector/bijector.py +4 -4
  164. mindspore/nn/probability/bijector/invert.py +1 -1
  165. mindspore/nn/probability/bijector/softplus.py +2 -2
  166. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  167. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  168. mindspore/nn/probability/distribution/_utils/utils.py +9 -15
  169. mindspore/nn/probability/distribution/bernoulli.py +3 -3
  170. mindspore/nn/probability/distribution/beta.py +1 -1
  171. mindspore/nn/probability/distribution/categorical.py +5 -7
  172. mindspore/nn/probability/distribution/cauchy.py +3 -3
  173. mindspore/nn/probability/distribution/distribution.py +2 -2
  174. mindspore/nn/probability/distribution/exponential.py +2 -2
  175. mindspore/nn/probability/distribution/gamma.py +3 -3
  176. mindspore/nn/probability/distribution/geometric.py +1 -1
  177. mindspore/nn/probability/distribution/gumbel.py +3 -3
  178. mindspore/nn/probability/distribution/half_normal.py +15 -11
  179. mindspore/nn/probability/distribution/laplace.py +16 -13
  180. mindspore/nn/probability/distribution/logistic.py +2 -2
  181. mindspore/nn/probability/distribution/normal.py +1 -1
  182. mindspore/nn/probability/distribution/poisson.py +1 -1
  183. mindspore/nn/probability/distribution/student_t.py +20 -15
  184. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  185. mindspore/nn/probability/distribution/uniform.py +2 -2
  186. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  187. mindspore/nn/reinforcement/tensor_array.py +2 -2
  188. mindspore/nn/sparse/sparse.py +2 -2
  189. mindspore/nn/wrap/cell_wrapper.py +27 -10
  190. mindspore/nn/wrap/grad_reducer.py +2 -2
  191. mindspore/nn/wrap/loss_scale.py +40 -24
  192. mindspore/numpy/array_creations.py +33 -22
  193. mindspore/numpy/array_ops.py +35 -30
  194. mindspore/numpy/logic_ops.py +6 -27
  195. mindspore/numpy/math_ops.py +22 -19
  196. mindspore/numpy/utils.py +1 -1
  197. mindspore/numpy/utils_const.py +108 -58
  198. mindspore/opencv_core452.dll +0 -0
  199. mindspore/opencv_imgcodecs452.dll +0 -0
  200. mindspore/opencv_imgproc452.dll +0 -0
  201. mindspore/ops/_constants.py +0 -6
  202. mindspore/ops/_grad/__init__.py +2 -1
  203. mindspore/ops/_grad/grad_array_ops.py +86 -117
  204. mindspore/ops/_grad/grad_base.py +23 -1
  205. mindspore/ops/_grad/grad_clip_ops.py +2 -3
  206. mindspore/ops/_grad/grad_comm_ops.py +34 -24
  207. mindspore/ops/_grad/grad_implementations.py +9 -45
  208. mindspore/ops/_grad/grad_inner_ops.py +47 -4
  209. mindspore/ops/_grad/grad_math_ops.py +142 -117
  210. mindspore/ops/_grad/grad_nn_ops.py +71 -165
  211. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  212. mindspore/ops/_grad/grad_sparse.py +7 -6
  213. mindspore/ops/_grad_experimental/__init__.py +1 -0
  214. mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
  215. mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
  216. mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
  217. mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
  218. mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
  219. mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
  220. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  221. mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
  222. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  223. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
  224. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  225. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  226. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
  227. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  228. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  229. mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
  230. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  231. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
  232. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  233. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  234. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  235. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  236. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  237. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  238. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  239. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  240. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  241. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  242. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  243. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  244. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  245. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  246. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  247. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  248. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  249. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  250. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
  251. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  252. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  253. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  254. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  255. mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
  256. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  257. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
  258. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  259. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  260. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  261. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  262. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  263. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
  264. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  265. mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
  266. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  267. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  268. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
  269. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  270. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  271. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
  272. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  273. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  274. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  275. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  276. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  277. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  278. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  279. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  280. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  281. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
  282. mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
  283. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  284. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  285. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  286. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  287. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  288. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  289. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  290. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  291. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  292. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  293. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  294. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  295. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  296. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  297. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  298. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  299. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  300. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  301. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  302. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  303. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  304. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  305. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  306. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  307. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  308. mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
  309. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
  310. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  311. mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
  312. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  313. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  314. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  315. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  316. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  317. mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
  318. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
  319. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  320. mindspore/ops/_op_impl/tbe/__init__.py +27 -611
  321. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  322. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  323. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  324. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
  325. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  326. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  327. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  328. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  329. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  330. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  331. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  332. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  333. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  334. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
  335. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  336. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  337. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  338. mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
  339. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  340. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  341. mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
  342. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
  343. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  344. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  345. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  346. mindspore/ops/_register_for_op.py +1 -0
  347. mindspore/ops/_utils/__init__.py +1 -2
  348. mindspore/ops/_utils/utils.py +19 -40
  349. mindspore/ops/_vmap/vmap_array_ops.py +116 -38
  350. mindspore/ops/_vmap/vmap_base.py +16 -9
  351. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  352. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  353. mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
  354. mindspore/ops/_vmap/vmap_image_ops.py +12 -5
  355. mindspore/ops/_vmap/vmap_math_ops.py +46 -5
  356. mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
  357. mindspore/ops/_vmap/vmap_random_ops.py +1 -1
  358. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  359. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  360. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  361. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  362. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  363. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  364. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  365. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  366. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
  367. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  368. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  369. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  370. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  371. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  372. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  373. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  374. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  375. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  376. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  377. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
  378. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
  379. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  380. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  381. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  382. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  383. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  384. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  385. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
  386. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  387. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
  388. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
  389. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  390. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  391. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  392. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  393. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  394. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  395. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  396. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  397. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  398. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  399. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  400. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  401. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  402. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  403. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  404. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  405. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  406. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  407. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  408. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
  409. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  410. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  411. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  412. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  413. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  414. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  415. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  416. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
  417. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
  418. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  419. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
  420. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  421. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  422. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  423. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  424. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  425. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  426. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  427. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  428. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  429. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
  430. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
  431. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  432. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  433. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  434. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  435. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
  436. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  437. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  438. mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
  439. mindspore/ops/composite/__init__.py +7 -8
  440. mindspore/ops/composite/base.py +101 -47
  441. mindspore/ops/composite/math_ops.py +188 -158
  442. mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
  443. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
  444. mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
  445. mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
  446. mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
  447. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  448. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  449. mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
  450. mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
  451. mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
  452. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
  453. mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
  454. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  455. mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
  456. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
  457. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
  458. mindspore/ops/function/__init__.py +152 -8
  459. mindspore/ops/function/array_func.py +2555 -674
  460. mindspore/ops/function/clip_func.py +209 -13
  461. mindspore/ops/function/debug_func.py +2 -2
  462. mindspore/ops/function/grad/__init__.py +2 -1
  463. mindspore/ops/function/grad/grad_func.py +147 -62
  464. mindspore/ops/function/image_func.py +54 -38
  465. mindspore/ops/function/linalg_func.py +167 -16
  466. mindspore/ops/function/math_func.py +4849 -1492
  467. mindspore/ops/function/nn_func.py +2573 -988
  468. mindspore/ops/function/other_func.py +115 -0
  469. mindspore/ops/function/parameter_func.py +3 -3
  470. mindspore/ops/function/random_func.py +790 -73
  471. mindspore/ops/function/sparse_func.py +98 -78
  472. mindspore/ops/function/sparse_unary_func.py +54 -53
  473. mindspore/ops/function/spectral_func.py +27 -24
  474. mindspore/ops/function/vmap_func.py +22 -2
  475. mindspore/ops/functional.py +97 -37
  476. mindspore/ops/op_info_register.py +70 -28
  477. mindspore/ops/operations/__init__.py +47 -14
  478. mindspore/ops/operations/_csr_ops.py +7 -7
  479. mindspore/ops/operations/_embedding_cache_ops.py +5 -5
  480. mindspore/ops/operations/_grad_ops.py +276 -187
  481. mindspore/ops/operations/_inner_ops.py +319 -113
  482. mindspore/ops/operations/_ms_kernel.py +10 -8
  483. mindspore/ops/operations/_ocr_ops.py +9 -9
  484. mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
  485. mindspore/ops/operations/_quant_ops.py +137 -102
  486. mindspore/ops/operations/_rl_inner_ops.py +121 -60
  487. mindspore/ops/operations/_scalar_ops.py +466 -0
  488. mindspore/ops/operations/_sequence_ops.py +1004 -2
  489. mindspore/ops/operations/_tensor_array.py +10 -11
  490. mindspore/ops/operations/_thor_ops.py +1 -1
  491. mindspore/ops/operations/array_ops.py +801 -466
  492. mindspore/ops/operations/comm_ops.py +51 -49
  493. mindspore/ops/operations/control_ops.py +2 -2
  494. mindspore/ops/operations/custom_ops.py +123 -44
  495. mindspore/ops/operations/debug_ops.py +24 -24
  496. mindspore/ops/operations/image_ops.py +240 -153
  497. mindspore/ops/operations/inner_ops.py +34 -50
  498. mindspore/ops/operations/linalg_ops.py +31 -9
  499. mindspore/ops/operations/math_ops.py +988 -757
  500. mindspore/ops/operations/nn_ops.py +965 -819
  501. mindspore/ops/operations/other_ops.py +51 -40
  502. mindspore/ops/operations/random_ops.py +204 -122
  503. mindspore/ops/operations/rl_ops.py +8 -9
  504. mindspore/ops/operations/sparse_ops.py +254 -93
  505. mindspore/ops/operations/spectral_ops.py +35 -3
  506. mindspore/ops/primitive.py +111 -9
  507. mindspore/parallel/_auto_parallel_context.py +189 -83
  508. mindspore/parallel/_offload_context.py +185 -0
  509. mindspore/parallel/_parallel_serialization.py +99 -7
  510. mindspore/parallel/_ps_context.py +9 -5
  511. mindspore/parallel/_recovery_context.py +1 -1
  512. mindspore/parallel/_tensor.py +7 -1
  513. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  514. mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
  515. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  516. mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
  517. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  518. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
  519. mindspore/parallel/_utils.py +1 -2
  520. mindspore/parallel/algo_parameter_config.py +1 -1
  521. mindspore/parallel/checkpoint_transform.py +37 -34
  522. mindspore/parallel/shard.py +17 -18
  523. mindspore/profiler/common/validator/validate_path.py +2 -2
  524. mindspore/profiler/envprofiling.py +69 -47
  525. mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
  526. mindspore/profiler/parser/base_timeline_generator.py +49 -56
  527. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
  528. mindspore/profiler/parser/hwts_log_parser.py +1 -1
  529. mindspore/profiler/parser/integrator.py +15 -14
  530. mindspore/profiler/parser/minddata_analyzer.py +2 -2
  531. mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
  532. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  533. mindspore/profiler/parser/optime_parser.py +17 -18
  534. mindspore/profiler/parser/profiler_info.py +2 -1
  535. mindspore/profiler/profiling.py +218 -186
  536. mindspore/rewrite/__init__.py +3 -1
  537. mindspore/rewrite/api/node.py +1 -114
  538. mindspore/rewrite/api/node_type.py +3 -0
  539. mindspore/rewrite/api/pattern_engine.py +31 -1
  540. mindspore/rewrite/api/scoped_value.py +4 -4
  541. mindspore/rewrite/api/symbol_tree.py +3 -78
  542. mindspore/rewrite/api/tree_node_helper.py +1 -1
  543. mindspore/rewrite/ast_creator_register.py +1 -0
  544. mindspore/rewrite/ast_helpers/__init__.py +2 -2
  545. mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
  546. mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
  547. mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
  548. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
  549. mindspore/rewrite/namespace.py +0 -2
  550. mindspore/rewrite/node.py +157 -11
  551. mindspore/rewrite/parsers/assign_parser.py +231 -53
  552. mindspore/rewrite/parsers/class_def_parser.py +187 -109
  553. mindspore/rewrite/parsers/for_parser.py +24 -14
  554. mindspore/rewrite/parsers/function_def_parser.py +21 -4
  555. mindspore/rewrite/parsers/if_parser.py +6 -2
  556. mindspore/rewrite/sparsify/__init__.py +0 -0
  557. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  558. mindspore/rewrite/sparsify/sparsify.py +109 -0
  559. mindspore/rewrite/sparsify/utils.py +173 -0
  560. mindspore/rewrite/symbol_tree.py +256 -133
  561. mindspore/rewrite/symbol_tree_builder.py +38 -1
  562. mindspore/run_check/_check_version.py +69 -63
  563. mindspore/run_check/run_check.py +2 -1
  564. mindspore/tinyxml2.dll +0 -0
  565. mindspore/train/__init__.py +1 -1
  566. mindspore/train/_utils.py +28 -5
  567. mindspore/train/amp.py +273 -102
  568. mindspore/train/callback/_backup_and_restore.py +5 -5
  569. mindspore/train/callback/_callback.py +2 -2
  570. mindspore/train/callback/_checkpoint.py +3 -3
  571. mindspore/train/callback/_early_stop.py +3 -3
  572. mindspore/train/callback/_lambda_callback.py +2 -2
  573. mindspore/train/callback/_landscape.py +29 -31
  574. mindspore/train/callback/_loss_monitor.py +3 -3
  575. mindspore/train/callback/_on_request_exit.py +3 -3
  576. mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
  577. mindspore/train/callback/_summary_collector.py +23 -16
  578. mindspore/train/callback/_time_monitor.py +3 -3
  579. mindspore/train/checkpoint_pb2.py +68 -8
  580. mindspore/train/data_sink.py +15 -3
  581. mindspore/train/dataset_helper.py +10 -15
  582. mindspore/train/loss_scale_manager.py +8 -11
  583. mindspore/train/metrics/__init__.py +1 -1
  584. mindspore/train/metrics/bleu_score.py +1 -1
  585. mindspore/train/metrics/confusion_matrix.py +1 -1
  586. mindspore/train/metrics/cosine_similarity.py +1 -1
  587. mindspore/train/metrics/dice.py +2 -2
  588. mindspore/train/metrics/fbeta.py +1 -1
  589. mindspore/train/metrics/hausdorff_distance.py +4 -3
  590. mindspore/train/metrics/mean_surface_distance.py +2 -2
  591. mindspore/train/metrics/occlusion_sensitivity.py +1 -1
  592. mindspore/train/metrics/perplexity.py +1 -1
  593. mindspore/train/metrics/precision.py +1 -1
  594. mindspore/train/metrics/recall.py +1 -1
  595. mindspore/train/metrics/roc.py +2 -2
  596. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  597. mindspore/train/mind_ir_pb2.py +116 -37
  598. mindspore/train/model.py +45 -28
  599. mindspore/train/serialization.py +295 -188
  600. mindspore/train/summary/_summary_adapter.py +1 -1
  601. mindspore/train/summary/summary_record.py +43 -13
  602. mindspore/train/train_thor/convert_utils.py +2 -2
  603. mindspore/train/train_thor/dataset_helper.py +3 -3
  604. mindspore/turbojpeg.dll +0 -0
  605. mindspore/version.py +1 -1
  606. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
  607. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
  608. mindspore/compression/__init__.py +0 -19
  609. mindspore/compression/common/constant.py +0 -124
  610. mindspore/compression/export/__init__.py +0 -19
  611. mindspore/compression/export/quant_export.py +0 -515
  612. mindspore/compression/quant/__init__.py +0 -28
  613. mindspore/compression/quant/qat.py +0 -634
  614. mindspore/compression/quant/quant_utils.py +0 -462
  615. mindspore/compression/quant/quantizer.py +0 -68
  616. mindspore/nn/layer/quant.py +0 -1868
  617. mindspore/nn/layer/rnn_utils.py +0 -90
  618. mindspore/nn/probability/dpn/__init__.py +0 -22
  619. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  620. mindspore/nn/probability/dpn/vae/cvae.py +0 -140
  621. mindspore/nn/probability/dpn/vae/vae.py +0 -124
  622. mindspore/nn/probability/infer/__init__.py +0 -22
  623. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  624. mindspore/nn/probability/infer/variational/svi.py +0 -84
  625. mindspore/nn/probability/toolbox/__init__.py +0 -22
  626. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  627. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
  628. mindspore/nn/probability/transforms/__init__.py +0 -22
  629. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  630. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  631. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  632. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  633. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  634. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  635. mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
  636. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  637. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
  638. mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
  639. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
  640. mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
  641. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
  642. mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
  643. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
  644. mindspore/ops/composite/array_ops.py +0 -241
  645. mindspore/ops/composite/clip_ops.py +0 -134
  646. mindspore/ops/composite/random_ops.py +0 -426
  647. mindspore/ops/composite/vmap_ops.py +0 -38
  648. mindspore/parallel/nn/__init__.py +0 -42
  649. mindspore/parallel/nn/loss.py +0 -22
  650. mindspore/parallel/nn/moe.py +0 -21
  651. mindspore/parallel/nn/op_parallel_config.py +0 -22
  652. mindspore/parallel/nn/transformer.py +0 -31
  653. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  654. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  655. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/ops/operations/array_ops.py

@@ -15,7 +15,6 @@
 
 """Operators for array."""
 import copy
-import functools
 import itertools
 import numbers
 
@@ -28,8 +27,7 @@ from mindspore.ops import signature as sig
 from mindspore.ops._utils import get_broadcast_shape
 from mindspore.common._utils import is_shape_unknown, is_dim_unknown
 from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
-from mindspore._checkparam import Rel
-from mindspore._checkparam import Validator as validator
+from mindspore import _checkparam as validator
 from mindspore._checkparam import _check_3d_int_or_tuple
 from mindspore.common import dtype as mstype
 from mindspore.common._decorator import deprecated
@@ -76,19 +74,19 @@ class _ScatterOp(PrimitiveWithInfer):
 
 class UnravelIndex(Primitive):
     """
-    Converts an array of flat indices into a tuple of coordinate arrays.
+    Transforms an array consisting of flattened indices into a tuple that contains coordinate arrays.
 
     Inputs:
-        - **indices** (Tensor) - Input Tensor whose elements are indices converting into
-          the flattened version of an array of dimensions dims.
+        - **indices** (Tensor) - The input Tensor, containing indices that will be transformed
+          into the flattened form of an array with dimensions specified by `dims`.
           The dimension of `indices` must be 0-D or 1-D.
           Must be one of the following types: int32, int64.
         - **dims** (Tensor) - The shape of the array to use for unraveling indices.
           The dimension of `dims` must be 1-D. Must have the same type as `indices`.
 
     Outputs:
-        - **y** (Tensor) - Has the same type as `indices`.
-          The dimension of `y` can be 2-D or 1-D(if `indices` is 0D).
+        - **y** (Tensor) - Tensor, it should be 2-D or 1-D(if `indices` is 0D)
+          and has the same type as `indices`.
 
     Raises:
         TypeError: If the data type of `indices` and `dims` are different.
@@ -97,7 +95,7 @@ class UnravelIndex(Primitive):
         ValueError: If `indices` contains negative elements.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> indices = Tensor(np.array([2, 5]), mindspore.int32)
@@ -159,7 +157,7 @@ class _ScatterNdOp(_ScatterOp):
 
     def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):
         validator.check('the dimension of x', len(x_shape),
-                        'the dimension of indices', indices_shape[-1], Rel.GE)
+                        'the dimension of indices', indices_shape[-1], validator.GE)
         if indices_shape[:-1] + x_shape[indices_shape[-1]:] != updates_shape:
             raise ValueError(f"For '{prim_name}', updates_shape = "
                              f"indices_shape[:-1] + x_shape[indices_shape[-1]:], but got x_shape: {x_shape}, "
@@ -176,7 +174,10 @@ def _check_infer_attr_reduce(axis, keep_dims, prim_name):
 
 class Expand(Primitive):
     """
-    Returns a new view of the self tensor with singleton dimensions expanded to a larger size.
+    Expands the Tensor along singleton dimensions(dim with size 1) to match given desired shape.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
 
     Refer to :func:`mindspore.ops.expand` for more details.
 
@@ -236,7 +237,7 @@ class DType(Primitive):
     Returns the data type of the input tensor as mindspore.dtype.
 
     Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        - **input_x** (Tensor) - Input Tensor.
 
     Outputs:
         mindspore.dtype, the data type of a tensor.
@@ -267,17 +268,17 @@
 
 class CheckNumerics(Primitive):
     """
-    Checks a tensor for NaN and Inf values.
+    Checks a tensor for NaN and Inf values. A runtime error is raised if input has NaN or Inf values.
 
     Inputs:
         - **x** (Tensor) - Input Tensor of any dimension. The data type is float16, float32 or float64.
 
     Outputs:
-        Tensor, has the same shape and data type as `x` if `x` has no nan or inf values.
+        Tensor, has the same shape and data type as `x` if `x` has no NaN or Inf values.
 
     Raises:
         TypeError: If `x` data type is not float16, float32, float64.
-        RuntimeError: If `x` has nan or inf values.
+        RuntimeError: If `x` has NaN or Inf values.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -297,7 +298,7 @@ class CheckNumerics(Primitive):
         self.init_prim_io_names(inputs=['x'], outputs=['y'])
 
 
-class Cast(PrimitiveWithInfer):
+class Cast(PrimitiveWithCheck):
     """
     Returns a tensor with the new specified data type.
 
@@ -330,7 +331,6 @@
 
     @prim_attr_register
     def __init__(self):
-        # if primitive need setattr in __infer__ need add this flag
         """Initialize Cast"""
         self.init_prim_io_names(inputs=['x', 'dst_type'], outputs=['output'])
 
@@ -348,35 +348,26 @@
             return (True, Tensor(x, dtype=dtype))
         return (False, None)
 
-    def __infer__(self, x, t):
-        src_type = x['dtype']
-        dst_type = t['value']
-
-        validator.check_subclass("input_x", src_type, [mstype.tensor, mstype.number], self.name)
+    def infer_value(self, x, dst_type):
+        if x is None:
+            return None
+        src_type = mstype.get_py_obj_dtype(x)
+        validator.check_subclass("input_x", src_type,
+                                 [mstype.tensor, mstype.number], self.name)
         validator.check_subclass("type", dst_type, mstype.number, self.name)
 
         if isinstance(src_type, type(mstype.tensor)):
-            src_type = x['dtype'].element_type()
+            src_type = src_type.element_type()
         if isinstance(dst_type, type(mstype.tensor)):
            dst_type = dst_type.element_type()
-        self.add_prim_attr('DstT', dst_type)
-        self.add_prim_attr('SrcT', src_type)
-        self.add_prim_attr('dst_type', dst_type)
 
         value = None
-        if x['value'] is not None:
-            np_dst_type = mstype.dtype_to_nptype(dst_type)
-            if isinstance(x['value'], (int, float)):
-                value = Tensor(np.array(x['value']).astype(np_dst_type))
-            else:
-                value = Tensor(x['value'].asnumpy().astype(np_dst_type))
-
-        out = {'shape': x['shape'],
-               'dtype': mstype.tensor_type(t['value']),
-               'value': value}
-        if 'shape_value' in x:
-            out['shape_value'] = tuple(np.array(x['shape_value']).astype(np.int64))
-        return out
+        np_dst_type = mstype.dtype_to_nptype(dst_type)
+        if isinstance(x, (int, float)):
+            value = Tensor(np.array(x).astype(np_dst_type))
+        else:
+            value = Tensor(x.asnumpy().astype(np_dst_type))
+        return value
 
 
 class Im2Col(Primitive):
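`Cast` moves from `PrimitiveWithInfer` to `PrimitiveWithCheck`: the old `__infer__` computed shape, dtype and value together (and set the `DstT`/`SrcT` attributes), while the new `infer_value` only folds known constant inputs and returns `None` otherwise. A rough NumPy-only sketch of that folding branch (the function name `fold_cast` is illustrative, not MindSpore API):

import numpy as np

def fold_cast(x, np_dst_type):
    """Sketch of Cast.infer_value's constant folding under the rc1 design."""
    if x is None:
        return None  # value unknown until runtime; no folding possible
    if isinstance(x, (int, float)):
        return np.array(x).astype(np_dst_type)  # Python scalar constant
    return np.asarray(x).astype(np_dst_type)    # tensor-like constant

print(fold_cast(3, np.float16))    # 3.0 (float16)
print(fold_cast(None, np.int32))   # None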
@@ -418,17 +409,8 @@ class Im2Col(Primitive):
             for height and width. If type is int, it means that height equal with width. Default: 1.
         dilations (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
             for height and width. If type is int, it means that height equal with width. Default: 1.
-        padding_mode (str, optional): The optional value for pad mode, support "CALCULATED", "SAME" and "VALID".
-            Default: "CALCULATED".
-
-            - "SAME", the width and height of the output are the same as the value of the width and height of
-              the input divided by 'strides' rounded up.
-            - "VALID", return a valid calculated output without padding. Excess pixels that do not satisfy
-              the calculation are discarded.
-            - "CALCULATED", pads the input. Padding 'pads' size of zero on both sides of the input.
-
         pads (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be a tuple of
-            one or two or four `int` for height and width. Default: 0.
+            one or two `int` for height and width. Default: 0.
 
             - If one int, :math:`pad\_height = pad\_width`.
             - If two int, :math:`pad\_height = pads[0]`, :math:`pad\_width = pads[1]`.
@@ -445,70 +427,58 @@
         TypeError: If `ksizes` data type is not in Union[int, tuple[int], list[int]].
         TypeError: If `strides` data type is not in Union[int, tuple[int], list[int]].
         TypeError: If `dilations` data type is not in Union[int, tuple[int], list[int]].
-        TypeError: If `padding_mode` data type is not str.
         TypeError: If `pads` data type isnot in Union[int, tuple[int], list[int]].
-            when `padding_mode` is "CALCULATED".
         ValueError: If `ksizes` value is not greater than zero or elements number more than 2.
         ValueError: If `strides` value is not greater than zero or elements number more than 2.
         ValueError: If `dilations` value is not greater than zero or elements number more than 2.
-        ValueError: If `padding_mode` value is not in ["SAME", "VALID", "CALCULATED"].
         ValueError: If `pads` value is not greater than zero.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> x = Tensor(input_data=np.random.rand(4, 4, 32, 32), dtype=mstype.float16)
+        >>> x = Tensor(input_data=np.random.rand(4, 4, 32, 32), dtype=mstype.float64)
         >>> im2col = ops.Im2Col(ksizes=3, strides=1, dilations=1)
         >>> y = im2col(x)
         >>> print(y.shape)
-        (4, 36, 30, 30)
+        (4, 4, 9, 900)
     """
 
     @prim_attr_register
-    def __init__(self, ksizes, strides=1, dilations=1, padding_mode="CALCULATED", pads=0):
+    def __init__(self, ksizes, strides=1, dilations=1, pads=0):
         """Initialize Im2Col."""
         self.init_prim_io_names(inputs=['x'], outputs=['y'])
 
         validator.check_value_type('ksizes', ksizes, [int, tuple, list], self.name)
         validator.check_value_type('strides', strides, [int, tuple, list], self.name)
         validator.check_value_type('dilations', dilations, [int, tuple, list], self.name)
-        validator.check_value_type('padding_mode', padding_mode, [str], self.name)
         validator.check_value_type('pads', pads, [int, tuple, list], self.name)
 
-        self.padding_mode = validator.check_string(
-            padding_mode.upper(), ["SAME", "VALID", "CALCULATED"], 'padding_mode', self.name)
         self.ksizes = (ksizes, ksizes) if isinstance(ksizes, int) else ksizes
         self.strides = (strides, strides) if isinstance(strides, int) else strides
         self.dilations = (dilations, dilations) if isinstance(dilations, int) else dilations
-        if isinstance(pads, (list, tuple)):
-            if len(pads) == 2:
-                self.pads = (pads[0], pads[0], pads[1], pads[1])
-            else:
-                self.pads = pads
-        if isinstance(pads, int):
-            self.pads = (pads, pads, pads, pads)
+        self.pads = (pads, pads) if isinstance(pads, int) else pads
 
-        validator.check("ksizes size", len(self.ksizes), "", [1, 2], Rel.IN, self.name)
+        validator.check("ksizes size", len(self.ksizes), "", [1, 2], validator.IN, self.name)
         validator.check_positive_int_sequence(self.ksizes, "ksizes", self.name)
-        validator.check("strides size", len(self.strides), "", [1, 2], Rel.IN, self.name)
+        validator.check("strides size", len(self.strides), "", [1, 2], validator.IN, self.name)
         validator.check_positive_int_sequence(self.strides, "strides", self.name)
-        validator.check("dilations size", len(self.dilations), "", [1, 2], Rel.IN, self.name)
+        validator.check("dilations size", len(self.dilations), "", [1, 2], validator.IN, self.name)
         validator.check_positive_int_sequence(self.dilations, "dilations", self.name)
-        if self.padding_mode == "CALCULATED":
-            validator.check("pads size", len(self.pads), "", [1, 2, 4], Rel.IN, self.name)
-            validator.check_non_negative_int_sequence(self.pads, "pads", self.pads)
+        validator.check("pads size", len(self.pads), "", [1, 2], validator.IN, self.name)
+        validator.check_non_negative_int_sequence(self.pads, "pads", self.name)
 
         self.add_prim_attr('ksizes', self.ksizes)
         self.add_prim_attr('strides', self.strides)
         self.add_prim_attr('dilations', self.dilations)
         self.add_prim_attr('pads', self.pads)
-        self.add_prim_attr('padding_mode', self.padding_mode)
+        self.add_prim_attr('padding_mode', "CALCULATED")
 
 
 class Col2Im(Primitive):
     r"""
-    Combines an array of sliding local blocks into a large containing tensor.
+    Combines an array of sliding local blocks into a large containing tensor. It is
+    usually used to reconstruct an image from a set of image patches(or sliding local blocks).
 
     Consider a batched :attr:`input` tensor containing sliding local blocks,
     e.g., patches of images, of shape :math:`(N, C, \prod(\text{kernel_size}), L)`,
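`Im2Col` drops the `padding_mode` argument in rc1 (the exported attribute is pinned to "CALCULATED"), restricts `pads` to one or two ints, and changes the documented output layout from `(N, C*k_h*k_w, out_h, out_w)` to `(N, C, k_h*k_w, L)`. A sketch of the updated call, assuming the rc1 wheel and the shapes from the docstring example above:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.random.rand(4, 4, 32, 32), ms.float64)

# Any 2.0.0a0 call that passed padding_mode="SAME"/"VALID"/"CALCULATED"
# must drop the argument; only zero-padding via `pads` remains.
im2col = ops.Im2Col(ksizes=3, strides=1, dilations=1, pads=0)
y = im2col(x)
print(y.shape)  # (4, 4, 9, 900) per the rc1 docstring example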
@@ -521,34 +491,22 @@ class Col2Im(Primitive):
 
     .. math::
         L = \prod_d \left\lfloor\frac{\text{output_size}[d] + 2 \times \text{padding}[d] %
-            - \text{dilation}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
-
-    where :math:`d` is over all spatial dimensions.
+            - \text{dilation}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor
 
-    :attr:`output_size` describes the spatial shape of the large containing
-    tensor of the sliding local blocks. It is useful to resolve the ambiguity
-    when multiple input shapes map to same number of sliding blocks, e.g.,
-    with ``stride > 0``.
-
-    The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
-    how the sliding blocks are retrieved.
+    where :math:`d` is over all spatial dimensions. The `padding`, `stride`
+    and `dilation` arguments specify how the sliding blocks are retrieved.
 
-    :attr:`stride` controls the stride for the sliding blocks.
-
-    :attr:`padding` controls the amount of implicit zero-paddings on both
-    sides for :attr:`padding` number of points for each dimension before
-    reshaping.
-
-    :attr:`dilation` controls the spacing between the kernel points.
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
 
     Args:
         kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two positive int
             for height and width. If type is int, it means that height equal with width. Must be specified.
-        dilation (Union[int, tuple[int], list[int]]): The size of the dilation, should be two positive int
+        dilation (Union[int, tuple[int], list[int]], optional): The size of the dilation, should be two positive int
             for height and width. If type is int, it means that height equal with width. Default: 1.
-        padding (Union[int, tuple[int], list[int]]): The size of the padding, should be two int
+        padding (Union[int, tuple[int], list[int]], optional): The size of the padding, should be two int
             for height and width. If type is int, it means that height equal with width. Default: 0.
-        stride (Union[int, tuple[int], list[int]]): The size of the stride, should be two positive int
+        stride (Union[int, tuple[int], list[int]], optional): The size of the stride, should be two positive int
             for height and width. If type is int, it means that height equal with width. Default: 1.
 
     Inputs:
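The block count `L` in the formula above is the standard convolution output-size computation taken per spatial dimension. A quick numeric check (the parameter values are illustrative):

def num_blocks(output_size, kernel_size, dilation, padding, stride):
    """L = prod over d of floor((output_size[d] + 2*padding[d]
       - dilation[d]*(kernel_size[d] - 1) - 1) / stride[d] + 1)."""
    total = 1
    for d in range(len(output_size)):
        total *= (output_size[d] + 2 * padding[d]
                  - dilation[d] * (kernel_size[d] - 1) - 1) // stride[d] + 1
    return total

# A 16x16 output, 2x2 kernel, dilation 2, padding 2, stride 2:
print(num_blocks((16, 16), (2, 2), (2, 2), (2, 2), (2, 2)))  # 9 * 9 = 81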
@@ -567,7 +525,7 @@
         ValueError: If x.shape[3] does not match the calculated number of sliding blocks.
 
     Supported Platforms:
-        ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> import numpy as np
@@ -595,13 +553,13 @@
         self.padding = (padding, padding) if isinstance(padding, int) else padding
         self.stride = (stride, stride) if isinstance(stride, int) else stride
 
-        validator.check("kernel_size size", len(self.kernel_size), "", 2, Rel.EQ, self.name)
+        validator.check("kernel_size size", len(self.kernel_size), "", 2, validator.EQ, self.name)
         validator.check_positive_int_sequence(self.kernel_size, "kernel_size", self.name)
-        validator.check("dilation size", len(self.dilation), "", 2, Rel.EQ, self.name)
+        validator.check("dilation size", len(self.dilation), "", 2, validator.EQ, self.name)
         validator.check_positive_int_sequence(self.dilation, "dilation", self.name)
-        validator.check("padding size", len(self.padding), "", 2, Rel.EQ, self.name)
+        validator.check("padding size", len(self.padding), "", 2, validator.EQ, self.name)
         validator.check_non_negative_int_sequence(self.padding, "padding", self.name)
-        validator.check("stride size", len(self.stride), "", 2, Rel.EQ, self.name)
+        validator.check("stride size", len(self.stride), "", 2, validator.EQ, self.name)
         validator.check_positive_int_sequence(self.stride, "stride", self.name)
 
         self.add_prim_attr('kernel_size', self.kernel_size)
@@ -637,9 +595,9 @@ class Reshape(PrimitiveWithCheck):
     def infer_value(self, x, shape):
         """infer value"""
         # for shape is not constant
-        if shape is None or x is None:
+        if shape is None or self.none_in_tuple_or_list(shape) or x is None:
             return None
-        if isinstance(shape, Tensor_):
+        if isinstance(shape, (Tensor, Tensor_)):
             validator.check_tensor_dtype_valid("shape", mstype.tensor_type(shape.dtype),
                                                [mstype.int32, mstype.int64], self.name)
             shape = shape.asnumpy().tolist()
@@ -676,10 +634,13 @@ class Reshape(PrimitiveWithCheck):
             out = Tensor(x.asnumpy().reshape(shape))
         return out
 
+    def none_in_tuple_or_list(self, x):
+        return isinstance(x, (tuple, list)) and None in x
+
 
 
 class Shape(Primitive):
     """
-    Returns the shape of the input tensor. And it used to be static shape.
+    Returns the shape of the input tensor.
 
     Refer to :func:`mindspore.ops.shape` for more details.
@@ -816,10 +777,8 @@ class ConjugateTranspose(Primitive):
     """
     Calculate the conjugate matrix of input x which has been transposed according to input perm.
 
-    The type and rank of the output y is the same as the input x. And the shape and value of the input x
-    and the output y satisfy:
-    y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]
-    y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])
+    .. math::
+        y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])
 
     Inputs:
         - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -829,7 +788,12 @@ class ConjugateTranspose(Primitive):
829
788
 
830
789
  Outputs:
831
790
  Tensor, the type of output tensor is the same as `x` and the shape of output tensor is decided by the
832
- shape of `x` and the value of `Conj(perm)`.
791
+ shape of `x` and the value of `Conj(perm)`:
792
+
793
+ .. math::
794
+ y.shape[i] = x.shape[perm[i]]
795
+
796
+ where :math:`i` is in range :math:`[0, rank(x) - 1]`.
833
797
 
834
798
  Raises:
835
799
  TypeError: If `perm` is not a tuple.
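
A cross-check of the shape rule above, using NumPy's conjugate-plus-transpose as the reference semantics (illustrative only):

    import numpy as np

    x = np.ones((2, 3, 4), dtype=np.complex64)
    perm = (2, 0, 1)
    y = np.conj(np.transpose(x, perm))
    print(y.shape)                          # (4, 2, 3)
    print(tuple(x.shape[p] for p in perm))  # (4, 2, 3) -- y.shape[i] == x.shape[perm[i]]
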
@@ -837,7 +801,7 @@ class ConjugateTranspose(Primitive):
837
801
  ValueError: If the same element exists in `perm`.
838
802
 
839
803
  Supported Platforms:
840
- ``Ascend`` ``CPU``
804
+ ``Ascend`` ``GPU`` ``CPU``
841
805
 
842
806
  Examples:
843
807
  >>> x = Tensor(np.array([[1 + 1j,2 + 2j], [3 + 3j, 4 + 4j]]), mindspore.complex64)
@@ -920,18 +884,17 @@ class UniqueConsecutive(Primitive):
920
884
  """
921
885
  Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
922
886
 
887
+ .. warning::
888
+ This is an experimental API that is subject to change or deletion.
889
+
923
890
  Refer to :func:`mindspore.ops.unique_consecutive` for more details.
924
891
 
925
892
  Supported Platforms:
926
- ``Ascend`` ``GPU``
893
+ ``Ascend`` ``GPU`` ``CPU``
927
894
 
928
895
  Examples:
929
- >>> import numpy as np
930
- >>> from mindspore import Tensor
931
- >>> from mindspore import dtype as mstype
932
- >>> from mindspore.ops import UniqueConsecutive
933
896
  >>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int32)
934
- >>> unique_consecutive = UniqueConsecutive(True, True, None)
897
+ >>> unique_consecutive = ops.UniqueConsecutive(True, True, None)
935
898
  >>> output, idx, counts = unique_consecutive(x)
936
899
  >>> print(output)
937
900
  [1 2 3 1 2]
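
The same collapsing of consecutive duplicates can be sketched with `itertools.groupby`, which makes the expected `output` and `counts` easy to verify by hand:

    import itertools

    x = [1, 1, 2, 2, 3, 1, 1, 2]
    output = [k for k, _ in itertools.groupby(x)]
    counts = [len(list(g)) for _, g in itertools.groupby(x)]
    print(output)  # [1, 2, 3, 1, 2]
    print(counts)  # [2, 2, 1, 2, 1]
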
@@ -943,6 +906,7 @@ class UniqueConsecutive(Primitive):
943
906
 
944
907
  @prim_attr_register
945
908
  def __init__(self, return_idx=False, return_counts=False, axis=None):
909
+ """Initialize UniqueConsecutive"""
946
910
  self.init_prim_io_names(inputs=['x'], outputs=['output'])
947
911
  validator.check_value_type("return_idx", return_idx, [bool], self.name)
948
912
  validator.check_value_type("return_counts", return_counts, [bool], self.name)
@@ -991,20 +955,22 @@ class Gather(Primitive):
991
955
  >>> print(output)
992
956
  [[1. 2. 3. 4.]
993
957
  [9. 10. 11. 12.]]
994
- >>> # case4: input_indices is a Tensor with shape (2, ). input_params is a Tensor with shape (3, 4) and axis is 1.
958
+ >>> # case4: input_indices is a Tensor with shape (3, ).
959
+ >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
995
960
  >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
996
- >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
961
+ >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
997
962
  >>> axis = 1
998
- >>> output = ops.Gather()(input_params, input_indices, axis)
963
+ >>> batch_dims = 1
964
+ >>> output = ops.Gather(batch_dims)(input_params, input_indices, axis)
999
965
  >>> print(output)
1000
- [[1. 3.]
1001
- [5. 7.]
1002
- [9. 11.]]
966
+ [ 1. 7. 10.]
1003
967
  """
1004
968
 
1005
969
  @prim_attr_register
1006
- def __init__(self):
970
+ def __init__(self, batch_dims=0):
1007
971
  """Initialize Gather"""
972
+ validator.check_value_type("batch_dims", batch_dims, [int], self.name)
973
+ self.add_prim_attr("batch_dims", batch_dims)
1008
974
  self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
1009
975
 
1010
976
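
A NumPy cross-check of the new `batch_dims=1` example above: with `axis=1`, each batch row `i` selects `params[i, indices[i]]`, which reproduces the printed `[ 1. 7. 10.]` (illustrative only).

    import numpy as np

    params = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], np.float32)
    indices = np.array([0, 2, 1])
    out = params[np.arange(3), indices]  # per-batch gather along axis 1
    print(out)  # [ 1.  7. 10.]
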
 
@@ -1018,6 +984,7 @@ class GatherV2(PrimitiveWithCheck):
1018
984
  @prim_attr_register
1019
985
  def __init__(self):
1020
986
  """Initialize GatherV2"""
987
+ self.add_prim_attr("batch_dims", 0)
1021
988
  self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
1022
989
 
1023
990
  def __check__(self, params, indices, axis):
@@ -1027,7 +994,7 @@ class GatherV2(PrimitiveWithCheck):
1027
994
  axis_v = axis['value']
1028
995
  validator.check_value_type('axis', axis_v, [int], self.name)
1029
996
  rank = len(params['shape'])
1030
- validator.check_int_range(axis_v, -rank, rank, Rel.INC_LEFT, "axis", self.name)
997
+ validator.check_int_range(axis_v, -rank, rank, validator.INC_LEFT, "axis", self.name)
1031
998
 
1032
999
 
1033
1000
  class SparseGatherV2(PrimitiveWithCheck):
@@ -1074,7 +1041,7 @@ class SparseGatherV2(PrimitiveWithCheck):
1074
1041
  axis_v = axis['value']
1075
1042
  validator.check_value_type('axis', axis_v, [int], self.name)
1076
1043
  rank = len(params['shape'])
1077
- validator.check_int_range(axis_v, -rank, rank, Rel.INC_LEFT, "axis", self.name)
1044
+ validator.check_int_range(axis_v, -rank, rank, validator.INC_LEFT, "axis", self.name)
1078
1045
 
1079
1046
 
1080
1047
  class Padding(Primitive):
@@ -1139,6 +1106,17 @@ class Split(Primitive):
1139
1106
 
1140
1107
  Refer to :func:`mindspore.ops.split` for more details.
1141
1108
 
1109
+ Args:
1110
+ axis (int): Index of the split position. Default: 0.
1111
+ output_num (int): The number of output tensors. Must be a positive int. Default: 1.
1112
+
1113
+ Inputs:
1114
+ - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
1115
+
1116
+ Outputs:
1117
+ tuple[Tensor], the shape of each output tensor is the same, which is
1118
+ :math:`(y_1, y_2, ..., y_S)`. And the data type is the same as `input_x`.
1119
+
1142
1120
  Supported Platforms:
1143
1121
  ``Ascend`` ``GPU`` ``CPU``
1144
1122
 
@@ -1180,7 +1158,7 @@ class Split(Primitive):
1180
1158
  self.add_prim_attr('num_split', self.output_num)
1181
1159
 
1182
1160
 
1183
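
A short usage sketch for the documented `axis`/`output_num` arguments (assumes the public `mindspore.ops.Split` interface): a (2, 4) tensor split along axis 1 into two (2, 2) tensors.

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
    out = ops.Split(axis=1, output_num=2)(x)
    print(out[0])
    # [[1 1]
    #  [2 2]]
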
- class Rank(PrimitiveWithInfer):
1161
+ class Rank(Primitive):
1184
1162
  """
1185
1163
  Returns the rank of a tensor.
1186
1164
 
@@ -1203,20 +1181,13 @@ class Rank(PrimitiveWithInfer):
1203
1181
  def __init__(self):
1204
1182
  """Initialize Rank"""
1205
1183
 
1206
- def __infer__(self, x):
1207
- validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
1208
- out = {'shape': None,
1209
- 'dtype': None,
1210
- 'value': len(x['shape'])}
1211
- return out
1212
-
1213
1184
  def __call__(self, x):
1214
1185
  if not isinstance(x, (Tensor, Tensor_)):
1215
1186
  raise TypeError("the input x must be Tensor!")
1216
1187
  return len(x.shape)
1217
1188
 
1218
1189
 
1219
- class Size(PrimitiveWithInfer):
1190
+ class Size(Primitive):
1220
1191
  r"""
1221
1192
  Returns a Scalar of type int that represents the size of the input Tensor, that is, the total number of elements in the
1222
1193
  Tensor.
@@ -1238,23 +1209,13 @@ class Size(PrimitiveWithInfer):
1238
1209
  def __init__(self):
1239
1210
  """Initialize Size"""
1240
1211
 
1241
- def __infer__(self, x):
1242
- size = 1
1243
- validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
1244
- shp = x['shape']
1245
- if not shp:
1246
- size = 0
1247
- else:
1248
- size = functools.reduce(lambda x, y: x * y, x['shape'])
1249
- out = {'shape': None,
1250
- 'dtype': mstype.int64,
1251
- 'value': size}
1252
- return out
1253
-
1254
1212
 
1255
1213
  class MatrixDiagV3(Primitive):
1256
1214
  """
1257
- Returns a batched diagonal tensor with given batched diagonal values.
1215
+ Constructs a diagonal matrix or a batch of diagonal matrices from a given input Tensor.
1216
+
1217
+ .. warning::
1218
+ This is an experimental API that is subject to change or deletion.
1258
1219
 
1259
1220
  Refer to :func:`mindspore.ops.matrix_diag` for more details.
1260
1221
 
@@ -1291,6 +1252,9 @@ class MatrixDiagPartV3(Primitive):
1291
1252
  r"""
1292
1253
  Returns the diagonal part of a tensor.
1293
1254
 
1255
+ .. warning::
1256
+ This is an experimental API that is subject to change or deletion.
1257
+
1294
1258
  Refer to :func:`mindspore.ops.matrix_diag_part` for more details.
1295
1259
 
1296
1260
  Supported Platforms:
@@ -1323,47 +1287,62 @@ class MatrixDiagPartV3(Primitive):
1323
1287
 
1324
1288
  class MatrixSetDiagV3(Primitive):
1325
1289
  r"""
1326
- Returns a batched matrix tensor with new batched diagonal values.
1327
- Given x and diagonal, this operation returns a tensor with the same shape and values as x, except for the specified
1328
- diagonals of the innermost matrices. These will be overwritten by the values in diagonal. Some diagonals are shorter
1329
- than `max_diag_len` and need to be padded, where `max_diag_len` is the longest diagonal value.
1330
- The diagonal.shape[-2] must be equal to num_diags calculated by :math:`k[1] - k[0] + 1` .
1331
- The diagonal.shape[-1] must be
1332
- equal to the longest diagonal value `max_diag_len` calculated
1333
- by :math:`min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))` .
1334
- Let x have r + 1 dimensions [I, J, ..., L, M, N].
1335
- The diagonal tensor has rank r with shape :math:`[I, J, ..., L, max\_diag\_len]`
1336
- when k is an integer or :math:`k[0] == k[1]` . Otherwise, it has rank r + 1
1337
- with shape :math:`[I, J, ..., L, num\_diags, max\_diag\_len]` .
1290
+ Updates the diagonal part of a batched tensor.
1291
+ It takes a Tensor `x` and a Tensor `diagonal` as input and returns a Tensor in which
1292
+ the specified diagonal values in the innermost matrices will be replaced
1293
+ by the values in the `diagonal`.
1294
+
1295
+ Diagonals shorter than `max_diag_len` need to be padded, where `max_diag_len` is the
1296
+ longest diagonal value.
1297
+ The dimension :math:`shape[-2]` of `diagonal` must be equal to num_diags, calculated by
1298
+ :math:`num\_diags = k[1] - k[0] + 1`.
1299
+ The dimension :math:`shape[-1]` of `diagonal` must be equal to the longest diagonal value `max_diag_len`,
1300
+ calculated by :math:`max\_diag\_len = min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))`.
1301
+
1302
+ Assume `x` is an n-D Tensor with shape :math:`(d_1, d_2, ..., d_{n-2}, d_{n-1}, d_n)`.
1303
+ If `k` is an integer or :math:`k[0] == k[1]`, `diagonal` is an (n-1)-D Tensor with
1304
+ shape :math:`(d_1, d_2, ..., d_{n-2}, max\_diag\_len)`.
1305
+ Otherwise, it has the same rank as `x`
1306
+ with shape :math:`(d_1, d_2, ..., d_{n-2}, num\_diags, max\_diag\_len)`.
1307
+
1308
+ .. warning::
1309
+ This is an experimental API that is subject to change or deletion.
1338
1310
 
1339
1311
  Args:
1340
- align (str, optional): An optional string from: "RIGHT_LEFT", "LEFT_RIGHT", "LEFT_LEFT", "RIGHT_RIGHT".
1341
- Align is a string specifying how superdiagonals and subdiagonals should be aligned, respectively.
1312
+ align (str, optional): Specifies how superdiagonals and subdiagonals should be aligned.
1313
+ Supported values: "RIGHT_LEFT", "LEFT_RIGHT", "LEFT_LEFT", "RIGHT_RIGHT".
1342
1314
  Default: "RIGHT_LEFT".
1343
1315
 
1344
- - "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left
1345
- (right-pads the row).
1346
- - "LEFT_RIGHT" aligns superdiagonals to the left (right-pads the row) and subdiagonals to the right
1347
- (left-pads the row).
1348
- - "LEFT_LEFT" aligns superdiagonals to the left (right-pads the row) and subdiagonals to the left
1349
- (right-pads the row).
1350
- - "RIGHT_RIGHT" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the right
1351
- (left-pads the row).
1316
+ - When set to "RIGHT_LEFT", the alignment of superdiagonals will be towards the right side
1317
+ (padding the row on the left), while subdiagonals will be towards the left side
1318
+ (padding the row on the right).
1319
+ - When set to "LEFT_RIGHT", the alignment of superdiagonals will be towards the left side
1320
+ (padding the row on the right), while subdiagonals will be towards the right side
1321
+ (padding the row on the left).
1322
+ - When set to "LEFT_LEFT", the alignment of both superdiagonals and subdiagonals will be towards
1323
+ the left side (padding the row on the right).
1324
+ - When set to "RIGHT_RIGHT", the alignment of both superdiagonals and subdiagonals will be towards
1325
+ the right side (padding the row on the left).
1352
1326
 
1353
1327
  Inputs:
1354
- - **x** (Tensor) - Rank r + 1, where r >= 1.
1355
- - **diagonal** (Tensor) - A Tensor. Have the same dtype as x. Rank r when k is an integer or k[0] == k[1].
1356
- Otherwise, it has rank r + 1.
1357
- - **k** (Tensor) - A Tensor of type int32. Diagonal offset(s). Positive value means superdiagonal, 0 refers to
1358
- the main diagonal, and negative value means subdiagonals. k can be a single integer (for a single diagonal) or
1359
- a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]` .
1360
- The value of `k` has restructions, meaning value of k must be in (-x.shape[-2], x.shape[-1]).
1361
- Input k must be const Tensor when taking Graph mode.
1328
+ - **x** (Tensor) - An n-D Tensor, where :math:`n >= 2`.
1329
+ - **diagonal** (Tensor) - A Tensor with the same dtype as `x`. Its rank depends on `k`.
1330
+ If `k` is an integer or :math:`k[0] == k[1]`, its dimension is :math:`n-1`.
1331
+ Otherwise, it has dimension :math:`n`.
1332
+ - **k** (Tensor) - Diagonal offset(s), Tensor of type int32.
1333
+ `k` can either be a single integer, which represents a single diagonal,
1334
+ or a pair of integers that specify the low and high ends of a matrix band.
1335
+ In this case, `k[0]` should not be greater than `k[1]`.
1336
+ The value of `k` is restricted, which means that the value of `k` must be in the range
1337
+ :math:`(-x.shape[-2], x.shape[-1])`.
1338
+ Input `k` must be const Tensor when taking Graph mode.
1339
+
1340
+ - `k > 0` refers to a superdiagonal.
1341
+ - `k = 0` refers to the main diagonal.
1342
+ - `k < 0` refers to subdiagonals.
1362
1343
 
1363
1344
  Outputs:
1364
- Tensor. The same type as x.
1365
- Let x has r+1 dimensions :math:`[I, J, ..., L, M, N]` .
1366
- The output is a tensor of rank r+1 with dimensions :math:`[I, J, ..., L, M, N]` , the same as input x.
1345
+ Tensor. The same type and shape as `x`.
1367
1346
 
1368
1347
  Raises:
1369
1348
  TypeError: If any input is not Tensor.
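
A worked instance of the `num_diags`/`max_diag_len` formulas above (plain Python, illustrative): for a 3x4 `x` and band `k = (-1, 1)`, `diagonal` must end in shape `(num_diags, max_diag_len)`.

    x_rows, x_cols = 3, 4
    k = (-1, 1)
    num_diags = k[1] - k[0] + 1                                        # 3 diagonals in the band
    max_diag_len = min(x_rows + min(k[1], 0), x_cols + min(-k[0], 0))  # 3
    print(num_diags, max_diag_len)  # 3 3 -> diagonal.shape[-2:] == (3, 3)
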
@@ -1376,13 +1355,13 @@ class MatrixSetDiagV3(Primitive):
1376
1355
  ValueError: If `k[1]` is not greater than or equal to `k[0]` in case the size of `k` is 2.
1377
1356
  ValueError: If the `diagonal` rank size does not match the input `x` rank size.
1378
1357
  ValueError: If the `diagonal` shape value does not match the input `x` shape value.
1379
- ValueError: If the diagonal.shape[-2] is not equal to num_diags calculated by :math:`k[1] - k[0] + 1` .
1380
- ValueError: If the value of `k` is not in (-x.shape[-2], x.shape[-1]).
1381
- ValueError: If the diagonal.shape[-1] is not equal to the max_diag_len calculated by
1358
+ ValueError: If the diagonal :math:`shape[-2]` is not equal to num_diags calculated by :math:`k[1] - k[0] + 1` .
1359
+ ValueError: If the value of `k` is not in :math:`(-x.shape[-2], x.shape[-1])`.
1360
+ ValueError: If the diagonal :math:`shape[-1]` is not equal to the max_diag_len calculated by
1382
1361
  :math:`min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))` .
1383
1362
 
1384
1363
  Supported Platforms:
1385
- ``GPU`` ``CPU``
1364
+ ``Ascend`` ``GPU`` ``CPU``
1386
1365
 
1387
1366
  Examples:
1388
1367
  >>> x = Tensor(np.array([[7, 7, 7, 7],
@@ -1402,6 +1381,11 @@ class MatrixSetDiagV3(Primitive):
1402
1381
  >>> print(output.shape)
1403
1382
  (3, 4)
1404
1383
  """
1384
+ __mindspore_signature__ = (
1385
+ sig.make_sig('x', dtype=sig.sig_dtype.T1),
1386
+ sig.make_sig('diagonal', dtype=sig.sig_dtype.T1),
1387
+ sig.make_sig('k', dtype=sig.sig_dtype.T2)
1388
+ )
1405
1389
 
1406
1390
  @prim_attr_register
1407
1391
  def __init__(self, align="RIGHT_LEFT"):
@@ -1414,12 +1398,13 @@ class MatrixSetDiagV3(Primitive):
1414
1398
 
1415
1399
  class MatrixBandPart(Primitive):
1416
1400
  r"""
1417
- Copy a tensor setting everything outside a central band in each innermost matrix to zero.
1401
+ Extracts the central diagonal band of each matrix in a tensor, with all values outside
1402
+ the central band set to zero.
1418
1403
 
1419
1404
  Refer to :func:`mindspore.ops.matrix_band_part` for more details.
1420
1405
 
1421
1406
  Supported Platforms:
1422
- ``GPU`` ``CPU``
1407
+ ``Ascend`` ``GPU`` ``CPU``
1423
1408
 
1424
1409
  Examples:
1425
1410
  >>> matrix_band_part = ops.MatrixBandPart()
@@ -1444,24 +1429,10 @@ class MatrixBandPart(Primitive):
1444
1429
 
1445
1430
  class Fill(PrimitiveWithCheck):
1446
1431
  """
1447
- Create a Tensor of the specified shape and fill it with the specified value.
1448
-
1449
- Refer to :func:`mindspore.ops.fill` for more details.
1432
+ The `Fill` interface is deprecated. Please use :class:`mindspore.ops.FillV2` instead.
1450
1433
 
1451
1434
  Supported Platforms:
1452
- ``Ascend`` ``GPU`` ``CPU``
1453
-
1454
- Examples:
1455
- >>> fill = ops.Fill()
1456
- >>> output = fill(mindspore.float32, (2, 2), 1)
1457
- >>> print(output)
1458
- [[1. 1.]
1459
- [1. 1.]]
1460
- >>> output = fill(mindspore.float32, (3, 3), 0)
1461
- >>> print(output)
1462
- [[0. 0. 0.]
1463
- [0. 0. 0.]
1464
- [0. 0. 0.]]
1435
+ Deprecated
1465
1436
  """
1466
1437
 
1467
1438
  @prim_attr_register
@@ -1469,9 +1440,26 @@ class Fill(PrimitiveWithCheck):
1469
1440
  """Initialize Fill"""
1470
1441
  self.init_prim_io_names(inputs=['type', 'shape', 'value'], outputs=['y'])
1471
1442
 
1443
+ def __call__(self, dtype, dims, x):
1444
+ if dtype not in mstype.all_types and dtype not in [mstype.uint16, mstype.uint32, mstype.uint64]:
1445
+ raise TypeError(
1446
+ f"For \'{self.name}\', the supported data type is ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', "
1447
+ "'uint16', 'uint32', 'uint64','float16', 'float32', 'float64'], but got an invalid dtype!.")
1448
+ x_nptype = mstype.dtype_to_nptype(dtype)
1449
+ if not isinstance(dims, Tensor) and not isinstance(dims, tuple):
1450
+ raise TypeError(f"For \'{self.name}\', input[1] must be tensor.")
1451
+ if not isinstance(x, Tensor) and not isinstance(x, float) and not isinstance(x, int):
1452
+ raise TypeError(f"For \'{self.name}\', the value input only takes scalar or scalar within a tensor!.")
1453
+ if isinstance(dims, Tensor):
1454
+ dims = dims.asnumpy()
1455
+ if isinstance(x, Tensor):
1456
+ x = x.asnumpy()
1457
+ ret = np.full(dims, x, x_nptype)
1458
+ return Tensor(ret)
1459
+
1472
1460
  def infer_value(self, dtype, dims, x):
1473
1461
  x_nptype = mstype.dtype_to_nptype(dtype)
1474
- if dims is not None and not is_shape_unknown(dims) and x is not None:
1462
+ if dims is not None and None not in dims and x is not None:
1475
1463
  if isinstance(dims, Tensor):
1476
1464
  dims = dims.asnumpy()
1477
1465
  if isinstance(x, Tensor):
@@ -1483,12 +1471,11 @@ class Fill(PrimitiveWithCheck):
1483
1471
 
1484
1472
  class Fills(Primitive):
1485
1473
  """
1486
- Create a tensor of the same shape and type as the input tensor and fill it with specified value.
1487
-
1488
- Refer to :func:`mindspore.ops.fills` for more details.
1474
+ The `Fills` primitive is deprecated.
1475
+ Please use :func:`mindspore.ops.fill` instead.
1489
1476
 
1490
1477
  Supported Platforms:
1491
- ``GPU``
1478
+ Deprecated
1492
1479
 
1493
1480
  Examples:
1494
1481
  >>> import numpy as np
@@ -1507,15 +1494,68 @@ class Fills(Primitive):
1507
1494
  self.init_prim_io_names(inputs=['x', 'value'], outputs=['y'])
1508
1495
 
1509
1496
 
1497
+ class FillV2(PrimitiveWithCheck):
1498
+ """
1499
+ Creates a tensor with shape described by `shape` and fills it with values in `value` .
1500
+
1501
+ Inputs:
1502
+ - **shape** (Union[Tuple[int], Tensor[int]]) - 1-D Tensor or Tuple, specifying the shape
1503
+ of output tensor. Its dtype must be int32 or int64.
1504
+ - **value** (Tensor) - A 0-D Tensor, the value to fill the output tensor `y` .
1505
+
1506
+ Outputs:
1507
+ - **y** (Tensor) - A tensor, its shape and value are described above.
1508
+
1509
+ Raises:
1510
+ TypeError: If `shape` is not a 1-D tensor or tuple.
1511
+ TypeError: If the data type of `shape` is not int32 or int64.
1512
+ ValueError: If `value` is not a 0-D Tensor.
1513
+
1514
+ Supported Platforms:
1515
+ ``Ascend`` ``GPU`` ``CPU``
1516
+
1517
+ Examples:
1518
+ >>> fillV2 = ops.FillV2()
1519
+ >>> output = fillV2(Tensor([2, 3], mindspore.int32), Tensor(1, mindspore.float32))
1520
+ >>> print(output)
1521
+ [[1. 1. 1.]
1522
+ [1. 1. 1.]]
1523
+ >>> output = fillV2(Tensor([3, 3], mindspore.int64), Tensor(0, mindspore.int32))
1524
+ >>> print(output)
1525
+ [[0 0 0]
1526
+ [0 0 0]
1527
+ [0 0 0]]
1528
+ """
1529
+
1530
+ @prim_attr_register
1531
+ def __init__(self):
1532
+ """Initialize FillV2"""
1533
+ self.init_prim_io_names(inputs=['shape', 'value'], outputs=['y'])
1534
+
1535
+ def infer_value(self, dims, x):
1536
+ if isinstance(dims, (Tensor, Tensor_)):
1537
+ dims = dims.asnumpy()
1538
+ if isinstance(x, (Tensor, Tensor_)):
1539
+ x = x.asnumpy()
1540
+ if dims is not None and None not in dims and x is not None:
1541
+ ret = np.full(dims, x)
1542
+ return Tensor(ret)
1543
+ return None
1544
+
1545
+
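
A short sketch contrasting the two interfaces: the deprecated `Fill` takes `(dtype, shape, value)`, while `FillV2` takes the shape as a tuple/Tensor and the fill value as a 0-D Tensor (usage as documented above).

    import mindspore
    from mindspore import Tensor, ops

    out = ops.FillV2()(Tensor([2, 3], mindspore.int32), Tensor(7.0, mindspore.float32))
    print(out)
    # [[7. 7. 7.]
    #  [7. 7. 7.]]
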
1510
1546
  class Ones(Primitive):
1511
1547
  r"""
1512
1548
  Creates a tensor filled with value ones.
1513
1549
 
1514
- Creates a tensor with shape described by the first argument and
1515
- fills it with value ones in type of the second argument.
1516
-
1517
1550
  Refer to :func:`mindspore.ops.ones` for more details.
1518
1551
 
1552
+ Inputs:
1553
+ - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.
1554
+ - **type** (:class:`mindspore.dtype`) - The specified type of output tensor.
1555
+
1556
+ Outputs:
1557
+ Tensor, with shape given by `shape` and data type given by `type`.
1558
+
1519
1559
  Supported Platforms:
1520
1560
  ``Ascend`` ``GPU`` ``CPU``
1521
1561
 
@@ -1539,6 +1579,8 @@ class Ones(Primitive):
1539
1579
 
1540
1580
  class Zeros(Primitive):
1541
1581
  r"""
1582
+ `Zeros` will be deprecated in the future. Please use :func:`mindspore.ops.zeros` instead.
1583
+
1542
1584
  Creates a tensor filled with value zeros.
1543
1585
 
1544
1586
  Creates a tensor with shape described by the first argument and
@@ -1546,8 +1588,7 @@ class Zeros(Primitive):
1546
1588
 
1547
1589
  Inputs:
1548
1590
  - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.
1549
- Only constant positive int is allowed.
1550
- - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.
1591
+ - **type** (mindspore.dtype) - The specified type of output tensor.
1551
1592
 
1552
1593
  Outputs:
1553
1594
  Tensor, with shape given by `shape` and data type given by `type`.
@@ -1557,7 +1598,7 @@ class Zeros(Primitive):
1557
1598
  TypeError: If `shape` is a tuple whose elements are not all int.
1558
1599
 
1559
1600
  Supported Platforms:
1560
- ``Ascend`` ``GPU`` ``CPU``
1601
+ Deprecated
1561
1602
 
1562
1603
  Examples:
1563
1604
  >>> zeros = ops.Zeros()
@@ -1654,7 +1695,7 @@ class TupleToArray(PrimitiveWithInfer):
1654
1695
 
1655
1696
  def infer_value(self, x):
1656
1697
  validator.check_value_type("x", x, [tuple], self.name)
1657
- validator.check("size of x", len(x), '', 0, Rel.GT, self.name)
1698
+ validator.check("size of x", len(x), '', 0, validator.GT, self.name)
1658
1699
  dtype = type(x[0])
1659
1700
  for i, item in enumerate(x):
1660
1701
  validator.check_value_type(f"x[{i}]", item, [numbers.Number], self.name)
@@ -1675,6 +1716,24 @@ class TupleToArray(PrimitiveWithInfer):
1675
1716
  return _run_op(self, self.name, args)
1676
1717
 
1677
1718
 
1719
+ class ScalarToArray(PrimitiveWithInfer):
1720
+ """
1721
+ The `ScalarToArray` primitive is deprecated. Please use :class:`mindspore.ops.ScalarToTensor` instead.
1722
+ """
1723
+ @deprecated("2.0", "ops.scalar_to_tensor", False)
1724
+ @prim_attr_register
1725
+ def __init__(self):
1726
+ pass
1727
+
1728
+ def infer_value(self, x):
1729
+ validator.check_value_type("x", x, [int, float], self.name)
1730
+ if isinstance(x, int):
1731
+ ret = np.array(x, np.int32)
1732
+ else:
1733
+ ret = np.array(x, np.float32)
1734
+ return Tensor(ret)
1735
+
1736
+
1678
1737
  class ScalarToTensor(PrimitiveWithInfer):
1679
1738
  """
1680
1739
  Converts a scalar to a `Tensor`, and converts the data type to the specified type.
@@ -1694,10 +1753,10 @@ class ScalarToTensor(PrimitiveWithInfer):
1694
1753
 
1695
1754
  @prim_attr_register
1696
1755
  def __init__(self):
1697
- pass
1756
+ self.init_prim_io_names(inputs=['input_scalar', 'dtype'], outputs=['output_data'])
1698
1757
 
1699
- def infer_value(self, x, dtype=mstype.float32):
1700
- validator.check_value_type("x", x, [int, float], self.name)
1758
+ def __call__(self, x, dtype=mstype.float32):
1759
+ validator.check_value_type("x", x, [bool, int, float], self.name)
1701
1760
  validator.check_subclass("dtype", dtype, mstype.number, self.name)
1702
1761
  data_type = mstype.dtype_to_nptype(dtype)
1703
1762
  return Tensor(np.array(x, data_type))
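
With the `__call__` above, the conversion runs eagerly; a minimal usage sketch (assumes the public `mindspore.ops.ScalarToTensor`):

    import mindspore
    from mindspore import ops

    out = ops.ScalarToTensor()(2.5, mindspore.float32)
    print(out)  # 2.5
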
@@ -1768,13 +1827,13 @@ class InvertPermutation(PrimitiveWithInfer):
1768
1827
  if z[i - 1] == z[i]:
1769
1828
  raise ValueError(f"For '{self.name}', the 'input_x' can not contain duplicate values, "
1770
1829
  f"but got duplicated {z[i]} in the 'input_x'.")
1771
- validator.check(f'value min', min(x_value), '', 0, Rel.EQ, self.name)
1772
- validator.check(f'value max', max(x_value), '', len(x_value) - 1, Rel.EQ, self.name)
1830
+ validator.check(f'value min', min(x_value), '', 0, validator.EQ, self.name)
1831
+ validator.check(f'value max', max(x_value), '', len(x_value) - 1, validator.EQ, self.name)
1773
1832
 
1774
1833
  y = [None] * len(x_value)
1775
1834
  for i, value in enumerate(x_value):
1776
1835
  validator.check_value_type("input[%d]" % i, value, [int], self.name)
1777
- validator.check(f'value', z[i], f'index', i, Rel.EQ, self.name)
1836
+ validator.check(f'value', z[i], f'index', i, validator.EQ, self.name)
1778
1837
  y[value] = i
1779
1838
  z.append(value)
1780
1839
  return {'shape': x_shp,
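
The loop above builds the inverse permutation via `y[x[i]] = i`; a plain-Python sketch of the same rule:

    x = (3, 4, 0, 2, 1)
    y = [0] * len(x)
    for i, v in enumerate(x):
        y[v] = i  # record the position of value v in x
    print(y)  # [2, 4, 3, 0, 1]
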
@@ -1788,6 +1847,22 @@ class Argmax(Primitive):
1788
1847
 
1789
1848
  Refer to :func:`mindspore.ops.argmax` for more details.
1790
1849
 
1850
+ Args:
1851
+ axis (int): Axis on which the Argmax operation is applied. Default: -1.
1852
+ output_type (:class:`mindspore.dtype`): An optional data type for the output Tensor.
1853
+ Default: `mindspore.dtype.int32`.
1854
+
1855
+ Inputs:
1856
+ - **input_x** (Tensor) - Input tensor of shape :math:`(N, *)`, where :math:`*` means any number of additional dimensions.
1857
+ The supported data types are as follows:
1858
+
1859
+ - Ascend: Float16, Float32.
1860
+ - GPU: Float16, Float32.
1861
+ - CPU: Float16, Float32, Float64.
1862
+
1863
+ Outputs:
1864
+ Tensor, indices of the max value of input tensor across the axis.
1865
+
1791
1866
  Supported Platforms:
1792
1867
  ``Ascend`` ``GPU`` ``CPU``
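
A short usage sketch for the documented arguments (assumes the public `mindspore.ops.Argmax`), mirroring `np.argmax` along the last axis:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1.0, 20.0, 5.0], [67.0, 8.0, 9.0]]), mindspore.float32)
    out = ops.Argmax(axis=-1, output_type=mindspore.int32)(x)
    print(out)  # [1 0]
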
1793
1868
 
@@ -1822,7 +1897,7 @@ class Argmin(Primitive):
1822
1897
 
1823
1898
  Inputs:
1824
1899
  - **input_x** (Tensor) - Input tensor.
1825
- The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
1900
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
1826
1901
 
1827
1902
  - Ascend: Float16, Float32, Float64, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64.
1828
1903
 
@@ -1914,7 +1989,7 @@ class ArgminV2(Primitive):
1914
1989
 
1915
1990
  class ArgMaxWithValue(Primitive):
1916
1991
  """
1917
- Calculates the maximum value along with the given axis for the input tensor and, returns the maximum values and
1992
+ Calculates the maximum value along with the given axis for the input tensor, and returns the maximum values and
1918
1993
  indices.
1919
1994
 
1920
1995
  Note:
@@ -2121,11 +2196,15 @@ class Tile(PrimitiveWithInfer):
2121
2196
 
2122
2197
  def __infer__(self, x, multiples):
2123
2198
  multiples_v = multiples['value']
2124
- if multiples_v is None:
2199
+ if multiples_v is None or None in multiples_v:
2125
2200
  if 'max_value' not in multiples or 'min_value' not in multiples:
2126
- if len(multiples['shape']) != 1:
2201
+ if multiples_v is not None:
2202
+ shape = [len(multiples['shape'])]
2203
+ else:
2204
+ shape = multiples['shape']
2205
+ if len(shape) != 1:
2127
2206
  raise ValueError(f'For \'{self.name}\', the dim of multiples must be 1.')
2128
- rank = max(len(x['shape']), multiples['shape'][0])
2207
+ rank = max(len(x['shape']), shape[0])
2129
2208
  out_shape = [-1] * rank
2130
2209
  return {
2131
2210
  'shape': out_shape,
@@ -2218,20 +2297,22 @@ class UnsortedSegmentMin(PrimitiveWithCheck):
2218
2297
  def __check__(self, x, segment_ids, num_segments):
2219
2298
  x_shape = x['shape']
2220
2299
  segment_ids_shape = segment_ids['shape']
2221
- valid_type = [mstype.float16, mstype.float32, mstype.int32]
2300
+ valid_type = [mstype.float16, mstype.float32, mstype.int32, mstype.int8, mstype.uint8,
2301
+ mstype.int16, mstype.uint16, mstype.uint32, mstype.int64, mstype.uint64, mstype.float64]
2222
2302
  validator.check_tensor_dtype_valid("x", x['dtype'], valid_type, self.name)
2223
- validator.check_tensor_dtype_valid("segment_ids", segment_ids['dtype'], [mstype.int32], self.name)
2303
+ validator.check_tensor_dtype_valid("segment_ids", segment_ids['dtype'], [mstype.int32, mstype.int64], self.name)
2224
2304
 
2225
2305
  # support vmap : segment_ids_shape support batch rank
2226
2306
  if not hasattr(self, 'batch_rank'):
2227
- validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
2307
+ if not is_dim_unknown(x_shape) and not is_dim_unknown(segment_ids_shape):
2308
+ validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
2228
2309
 
2229
2310
  num_segments_type = num_segments['dtype']
2230
2311
  validator.check_subclass("num_segments", num_segments_type, [mstype.number], self.name)
2231
2312
  if not is_shape_unknown(x_shape) and not is_shape_unknown(segment_ids_shape):
2232
2313
  # only validate when both shapes fully known
2233
2314
  validator.check(f'first shape of input_x', x_shape[0],
2234
- 'length of segments_id', segment_ids_shape[0], Rel.EQ, self.name)
2315
+ 'length of segments_id', segment_ids_shape[0], validator.EQ, self.name)
2235
2316
  num_segments_v = num_segments['value']
2236
2317
  validator.check_value_type('num_segments', num_segments_v, [int], self.name)
2237
2318
  validator.check_positive_int(num_segments_v, "num_segments", self.name)
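
What the checks above guard: `segment_ids` is a 1-D map from rows of `x` into `num_segments` buckets, and the op takes a per-bucket minimum. A usage sketch (assumes the public `mindspore.ops.UnsortedSegmentMin`):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]), mindspore.float32)
    segment_ids = Tensor(np.array([0, 1, 1]), mindspore.int32)
    out = ops.UnsortedSegmentMin()(x, segment_ids, 2)
    print(out)
    # [[1. 2. 3.]
    #  [4. 2. 1.]]
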
@@ -2325,21 +2406,23 @@ class UnsortedSegmentMax(PrimitiveWithCheck):
2325
2406
  def __check__(self, x, segment_ids, num_segments):
2326
2407
  x_shape = x['shape']
2327
2408
  segment_ids_shape = segment_ids['shape']
2328
- valid_type = [mstype.float16, mstype.float32, mstype.int32]
2409
+ valid_type = [mstype.float16, mstype.float32, mstype.int32, mstype.int8, mstype.uint8,
2410
+ mstype.int16, mstype.uint16, mstype.uint32, mstype.int64, mstype.uint64, mstype.float64]
2329
2411
  validator.check_tensor_dtype_valid("x", x['dtype'], valid_type, self.name)
2330
2412
  validator.check_tensors_dtypes_same_and_valid({"segment_ids": segment_ids['dtype']},
2331
2413
  [mstype.int32, mstype.int64], self.name)
2332
2414
 
2333
2415
  # support vmap : segment_ids_shape support batch rank
2334
2416
  if not hasattr(self, 'batch_rank'):
2335
- validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
2417
+ if not is_dim_unknown(x_shape) and not is_dim_unknown(segment_ids_shape):
2418
+ validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
2336
2419
 
2337
2420
  num_segments_type = num_segments['dtype']
2338
2421
  validator.check_subclass("num_segments", num_segments_type, [mstype.number], self.name)
2339
2422
  if not is_shape_unknown(x_shape) and not is_shape_unknown(segment_ids_shape):
2340
2423
  # only validate when both shapes fully known
2341
2424
  validator.check(f'first shape of input_x', x_shape[0],
2342
- 'length of segments_id', segment_ids_shape[0], Rel.EQ, self.name)
2425
+ 'length of segments_id', segment_ids_shape[0], validator.EQ, self.name)
2343
2426
  num_segments_v = num_segments['value']
2344
2427
  validator.check_value_type('num_segments', num_segments_v, [int], self.name)
2345
2428
  validator.check_positive_int(num_segments_v, "num_segments", self.name)
@@ -2520,12 +2603,12 @@ class ParallelConcat(Primitive):
2520
2603
  def _get_stack_shape(value, x_shape, x_type, axis, prim_name):
2521
2604
  """for stack output shape"""
2522
2605
  validator.check_value_type("shape", x_shape, [tuple, list], prim_name)
2523
- validator.check_int(len(x_shape), 1, Rel.GE, "len of input_x", prim_name)
2606
+ validator.check_int(len(x_shape), 1, validator.GE, "len of input_x", prim_name)
2524
2607
  validator.check_subclass("input_x[0]", x_type[0], mstype.tensor, prim_name)
2525
2608
 
2526
2609
  out_n = len(x_shape)
2527
2610
  for i in range(1, out_n):
2528
- validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], Rel.EQ, prim_name, TypeError)
2611
+ validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], validator.EQ, prim_name, TypeError)
2529
2612
 
2530
2613
  new_x_shape = []
2531
2614
  for i, shp in enumerate(x_shape):
@@ -2543,14 +2626,14 @@ def _get_stack_shape(value, x_shape, x_type, axis, prim_name):
2543
2626
  rank_base = len(new_x_shape[0]["shape"])
2544
2627
  for i in range(1, n):
2545
2628
  validator.check('len of x_shape[%d]' % new_x_shape[i]["id"], len(new_x_shape[i]["shape"]),
2546
- 'len of x_shape[0]', rank_base, Rel.EQ, prim_name, ValueError)
2629
+ 'len of x_shape[0]', rank_base, validator.EQ, prim_name, ValueError)
2547
2630
  for j in range(0, rank_base):
2548
2631
  if new_x_shape[i]["shape"][j] != new_x_shape[0]["shape"][j] and \
2549
2632
  new_x_shape[i]["shape"][j] != -1 and new_x_shape[0]["shape"][j] != -1:
2550
2633
  raise ValueError("For \'{}\' element {} shape in input can not pack with first element".format(
2551
2634
  prim_name, new_x_shape[i]['id']))
2552
2635
 
2553
- validator.check_int_range(axis, -rank_base - 1, rank_base, Rel.INC_BOTH, 'axis', prim_name)
2636
+ validator.check_int_range(axis, -rank_base - 1, rank_base, validator.INC_BOTH, 'axis', prim_name)
2554
2637
  if axis < 0:
2555
2638
  axis = axis + rank_base + 1
2556
2639
 
@@ -2669,7 +2752,7 @@ class Unpack(PrimitiveWithInfer):
2669
2752
  validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
2670
2753
  x_shape = list(x['shape'])
2671
2754
  dim = len(x_shape)
2672
- validator.check_int_range(self.axis, -dim, dim, Rel.INC_LEFT, 'axis value', self.name)
2755
+ validator.check_int_range(self.axis, -dim, dim, validator.INC_LEFT, 'axis value', self.name)
2673
2756
  if self.axis < 0:
2674
2757
  self.axis = self.axis + dim
2675
2758
  output_num = x_shape[self.axis]
@@ -2677,7 +2760,7 @@ class Unpack(PrimitiveWithInfer):
2677
2760
  validator.check_positive_int(output_num, "output_num", self.name)
2678
2761
  self.add_prim_attr('num', output_num)
2679
2762
  output_valid_check = x_shape[self.axis] - output_num
2680
- validator.check_int(output_valid_check, 0, Rel.EQ,
2763
+ validator.check_int(output_valid_check, 0, validator.EQ,
2681
2764
  "The dimension which to unstack divides output_num", self.name)
2682
2765
  out_shapes = []
2683
2766
  out_dtypes = []
@@ -2786,7 +2869,7 @@ class Coalesce(Primitive):
2786
2869
 
2787
2870
  Inputs:
2788
2871
  - **x_indices** (Tensor) - A 2-D Tensor, represents the indices of the nonzero elements of the sparse tensor.
2789
- Supported data type is int64. It's elements should be non-negative. The shape is :math:`(y, x)`.
2872
+ Supported data type is int64. Its elements should be non-negative. The shape is :math:`(y, x)`.
2790
2873
  - **x_values** (Tensor) - A 1-D Tensor, represents the values corresponding to the indices in `x_indices`.
2791
2874
  Supported data types are float16 and float32. The shape is :math:`(x,)`.
2792
2875
  - **x_shape** (Tensor) - A 1-D Tensor, specifies the shape of the sparse tensor.
@@ -2848,7 +2931,7 @@ class ReverseV2(Primitive):
2848
2931
 
2849
2932
  Inputs:
2850
2933
  - **input_x** (Tensor) - The target tensor. The data type is Number except float64.
2851
- The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2934
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
2852
2935
 
2853
2936
  Outputs:
2854
2937
  Tensor, has the same shape and type as `input_x`.
@@ -2856,6 +2939,7 @@ class ReverseV2(Primitive):
2856
2939
  Raises:
2857
2940
  TypeError: If `axis` is neither list nor tuple.
2858
2941
  TypeError: If element of `axis` is not an int.
2942
+ ValueError: If there are multiple identical axes in `axis`.
2859
2943
 
2860
2944
  Supported Platforms:
2861
2945
  ``Ascend`` ``GPU`` ``CPU``
@@ -2925,7 +3009,7 @@ class Rint(Primitive):
2925
3009
  class Select(Primitive):
2926
3010
  r"""
2927
3011
  The conditional tensor determines whether the corresponding element in the output must be
2928
- selected from :math:`x` (if True) or :math:`y` (if False) based on the value of each
3012
+ selected from `x` (if True) or `y` (if False) based on the value of each
2929
3013
  element.
2930
3014
 
2931
3015
  It can be defined as:
@@ -3126,25 +3210,32 @@ class StridedSlice(PrimitiveWithInfer):
3126
3210
  if len(tuple(filter(lambda x: x == '1', bin(ellipsis_mask)[-1:1:-1]))) > 1:
3127
3211
  raise ValueError(f"For '{self.name}', only support one ellipsis in the index, but got {ellipsis_mask}.")
3128
3212
  validator.check_non_negative_int(new_axis_mask, 'new_axis_mask', self.name)
3129
- validator.check_non_negative_int(shrink_axis_mask, 'shrink_axis_mask', self.name)
3213
+ validator.check_non_negative_int(shrink_axis_mask, 'shrink_axis_mask',
3214
+ self.name)
3130
3215
 
3131
3216
  def __infer__(self, x, begin, end, strides):
3132
3217
  begin_v, begin_len = self._check_and_get_value(begin, 'begin')
3133
3218
  end_v, end_len = self._check_and_get_value(end, 'end')
3134
3219
  strides_v, strides_len = self._check_and_get_value(strides, 'strides')
3135
3220
 
3136
- if None in (begin_v['value'], end_v['value'], strides_v['value']) or is_shape_unknown(x['shape']):
3221
+ is_dynamic_tuple = (self._is_none_in_tuple(begin_v['value'])
3222
+ or self._is_none_in_tuple(end_v['value'])
3223
+ or self._is_none_in_tuple(strides_v['value']))
3224
+ is_dynamic = None in (begin_v['value'], end_v['value'], strides_v['value'])
3225
+
3226
+ if not is_dynamic and (begin_len != strides_len or end_len != strides_len):
3227
+ raise ValueError(
3228
+ f"For '{self.name}', 'begin', 'end' and 'strides' must be the same length, but got "
3229
+ f"'begin' length: {begin_len}, 'end' length: {end_len}, 'strides' length: {strides_len}."
3230
+ )
3231
+
3232
+ if is_dynamic or is_dynamic_tuple or is_shape_unknown(x['shape']):
3137
3233
  ret_shape = self._compute_dynamic_slicing_shape(x, begin_v, end_v, strides_v, begin_len)
3138
3234
  rets = {'shape': ret_shape,
3139
3235
  'dtype': x['dtype'],
3140
3236
  'value': None}
3141
-
3142
3237
  return rets
3143
3238
 
3144
- if begin_len != strides_len or end_len != strides_len:
3145
- raise ValueError(f"For '{self.name}', 'begin', 'end' and 'strides' must be the same length, but got "
3146
- f"'begin' length: {begin_len}, 'end' length: {end_len}, 'strides' length: {strides_len}.")
3147
-
3148
3239
  ret_shape = self._compute_slicing_shape(x['shape'], begin_v['value'], end_v['value'], strides_v['value'])
3149
3240
  if all(ret_shape):
3150
3241
  value = None
@@ -3183,7 +3274,7 @@ class StridedSlice(PrimitiveWithInfer):
3183
3274
  # When slicing forward, if begin >= end, the length of the slicing is 0.
3184
3275
  slicing_length = 0
3185
3276
  else:
3186
- slicing_length = 1 + (end - 1 - begin) // stride
3277
+ slicing_length = -1
3187
3278
  return slicing_length
3188
3279
  # When slicing forward, convert begin and end to positive numbers.
3189
3280
  if begin >= x_dim or end < -x_dim:
@@ -3214,7 +3305,7 @@ class StridedSlice(PrimitiveWithInfer):
3214
3305
  if begin <= end:
3215
3306
  slicing_length = 0
3216
3307
  else:
3217
- slicing_length = 1 + (end + 1 - begin) // stride
3308
+ slicing_length = -1
3218
3309
  return slicing_length
3219
3310
  # When slicing backward, convert begin and end to negative numbers.
3220
3311
  if begin < -x_dim or end >= x_dim:
@@ -3250,6 +3341,10 @@ class StridedSlice(PrimitiveWithInfer):
3250
3341
  strides_value = strides_v['shape_value']
3251
3342
  return begin_value, end_value, strides_value
3252
3343
 
3344
+ @staticmethod
3345
+ def _is_none_in_tuple(x):
3346
+ return isinstance(x, tuple) and None in x
3347
+
3253
3348
  def _compute_slicing_length(self, begin, end, stride, x_dim):
3254
3349
  """Computes the length of the slicing."""
3255
3350
  if stride > 0:
@@ -3357,7 +3452,7 @@ class StridedSlice(PrimitiveWithInfer):
3357
3452
  def _compute_dynamic_slicing_length(self, begin, end, stride, x_dim):
3358
3453
  """Computes the length of the slicing for dynamic shape."""
3359
3454
  slicing_length = -1
3360
- if -1 in (begin, end, stride):
3455
+ if None in (begin, end, stride) or -1 in (begin, end, stride):
3361
3456
  return slicing_length
3362
3457
  slicing_length = self._compute_slicing_length(begin, end, stride, x_dim)
3363
3458
  return slicing_length
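
A minimal sketch of the sentinel convention above: `None` or `-1` anywhere in `(begin, end, stride)` means the bound is unknown at compile time, so the dimension length is reported as `-1` (dynamic) instead of being computed. The helper `slice_len` below is a simplified stand-in for the full length computation.

    def dynamic_slicing_length(begin, end, stride, x_dim, compute):
        if None in (begin, end, stride) or -1 in (begin, end, stride):
            return -1  # dimension stays dynamic
        return compute(begin, end, stride, x_dim)

    def slice_len(b, e, s, d):
        # Plain slice length for fully known, non-negative forward bounds.
        return max(0, (min(e, d) - b + s - 1) // s)

    print(dynamic_slicing_length(0, 5, 2, 10, slice_len))   # 3 (fully known)
    print(dynamic_slicing_length(0, -1, 1, 10, slice_len))  # -1 (dynamic)
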
@@ -3375,8 +3470,12 @@ class StridedSlice(PrimitiveWithInfer):
3375
3470
  ret_shape = []
3376
3471
  i, j = 0, 0
3377
3472
  slice_has_special_value = False
3378
- begin_value, end_value, strides_value = self._get_slice_value(begin_v, end_v, strides_v)
3379
- if None in (begin_v['value'], end_v['value'], strides_v['value']):
3473
+ begin_value, end_value, strides_value = self._get_slice_value(
3474
+ begin_v, end_v, strides_v)
3475
+ is_dynamic_tuple = (self._is_none_in_tuple(begin_value)
3476
+ or self._is_none_in_tuple(end_value)
3477
+ or self._is_none_in_tuple(strides_value))
3478
+ if None in (begin_v['value'], end_v['value'], strides_v['value']) or is_dynamic_tuple:
3380
3479
  slice_has_special_value = True
3381
3480
  while i < x_rank or j < slice_len:
3382
3481
  slicing_length = -1
@@ -3436,14 +3535,14 @@ class StridedSlice(PrimitiveWithInfer):
3436
3535
  }
3437
3536
  return slices, slice_shape[0]
3438
3537
 
3439
- if isinstance(slice_value, Tensor_):
3538
+ if isinstance(slice_value, (Tensor, Tensor_)):
3440
3539
  validator.check_tensor_dtype_valid(name, slice_input['dtype'], [mstype.int64], self.name)
3441
3540
  slice_value = slice_value.asnumpy().tolist()
3442
3541
  elif not isinstance(slice_value, tuple):
3443
3542
  raise TypeError(f"For '{self.name}', both the 'begin', 'end', and 'strides' must be a tuple or Tensor, "
3444
3543
  f"but got '{name}': {slice_value}.")
3445
3544
 
3446
- if tuple(filter(lambda x: not isinstance(x, int), slice_value)):
3545
+ if tuple(filter(lambda x: x is not None and not isinstance(x, int), slice_value)):
3447
3546
  raise TypeError(f"For '{self.name}', the elements of 'begin', 'end', and 'strides' must be int, "
3448
3547
  f"but got {name}: {slice_value}.")
3449
3548
 
@@ -3465,6 +3564,9 @@ class Diag(PrimitiveWithCheck):
3465
3564
 
3466
3565
  Constructs a diagonal tensor with a given diagonal values.
3467
3566
 
3567
+ .. warning::
3568
+ This is an experimental API that is subject to change or deletion.
3569
+
3468
3570
  Refer to :func:`mindspore.ops.diag` for more details.
3469
3571
 
3470
3572
  Supported Platforms:
@@ -3498,11 +3600,11 @@ class Diag(PrimitiveWithCheck):
3498
3600
  class DiagPart(PrimitiveWithCheck):
3499
3601
  r"""
3500
3602
 
3501
- Extracts the diagonal part from given tensor.
3603
+ Extracts the diagonal elements from the given Tensor.
3502
3604
 
3503
- Assume input has dimensions :math:`[D_1,..., D_k, D_1,..., D_k]`, the output is a tensor
3504
- of rank k with dimensions :math:`[D_1,..., D_k]` where:
3505
- :math:`output[i_1,..., i_k] = input[i_1,..., i_k, i_1,..., i_k]`.
3605
+ If the input_x is a Tensor of shape :math:`[D_1,..., D_k, D_1,..., D_k]`, then the
3606
+ output will be a Tensor of rank k of shape :math:`[D_1,..., D_k]` where:
3607
+ :math:`output[i_1,..., i_k] = input_x[i_1,..., i_k, i_1,..., i_k]`.
3506
3608
 
3507
3609
  Inputs:
3508
3610
  - **input_x** (Tensor) - The rank of input tensor is 2k(k > 0).
@@ -3545,12 +3647,15 @@ class DiagPart(PrimitiveWithCheck):
3545
3647
 
3546
3648
  class Mvlgamma(Primitive):
3547
3649
  r"""
3548
- Computes the multivariate log-gamma function with dimension `p` element-wise.
3650
+ Calculates the multivariate log-gamma function element-wise for a given dimension `p`.
3651
+
3652
+ .. warning::
3653
+ This is an experimental API that is subject to change or deletion.
3549
3654
 
3550
3655
  Refer to :func:`mindspore.ops.mvlgamma` for more details.
3551
3656
 
3552
3657
  Supported Platforms:
3553
- ``GPU`` ``CPU``
3658
+ ``Ascend`` ``GPU`` ``CPU``
3554
3659
 
3555
3660
  Examples:
3556
3661
  >>> x = Tensor(np.array([[3, 4, 5], [4, 2, 6]]), mindspore.float32)
@@ -3575,6 +3680,17 @@ class Eye(Primitive):
3575
3680
 
3576
3681
  Refer to :func:`mindspore.ops.eye` for more details.
3577
3682
 
3683
+ Inputs:
3684
+ - **n** (int) - The number of rows of returned tensor. Constant value only.
3685
+ - **m** (int) - The number of columns of returned tensor. Constant value only.
3686
+ - **t** (mindspore.dtype) - MindSpore's dtype, the data type of the returned tensor.
3687
+ The data type can be bool or Number.
3688
+ Default: None, the data type of the returned tensor is mindspore.float32.
3689
+
3690
+ Outputs:
3691
+ Tensor, a tensor with ones on the diagonal and zeros elsewhere. The shape of `output` depends on
3692
+ the inputs `n` and `m`, and its data type depends on the input `t`.
3693
+
3578
3694
  Supported Platforms:
3579
3695
  ``Ascend`` ``GPU`` ``CPU``
3580
3696
 
@@ -3769,7 +3885,7 @@ class ResizeNearestNeighborV2(Primitive):
3769
3885
  ValueError: If attr `half_pixel_centers` and `align_corners` are True at the same time.
3770
3886
 
3771
3887
  Supported Platforms:
3772
- ``CPU``
3888
+ ``Ascend`` ``GPU`` ``CPU``
3773
3889
 
3774
3890
  Examples:
3775
3891
  >>> input_tensor = Tensor(np.ones((1, 4, 4, 1)), mstype.float32)
@@ -3830,7 +3946,6 @@ class ScatterUpdate(Primitive):
3830
3946
  for each `i, ..., j` in `indices.shape`:
3831
3947
 
3832
3948
  .. math::
3833
-
3834
3949
  \text{input_x}[\text{indices}[i, ..., j], :] = \text{updates}[i, ..., j, :]
3835
3950
 
3836
3951
  Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
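
The update rule above in NumPy terms (illustrative): rows of `input_x` named by `indices` are overwritten by the corresponding rows of `updates`.

    import numpy as np

    input_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], np.float32)
    indices = np.array([0, 1])
    updates = np.array([[2.0, 1.2, 1.0], [3.0, 1.2, 1.0]], np.float32)
    input_x[indices, :] = updates  # input_x[indices[i], :] = updates[i, :]
    print(input_x)
    # [[2.  1.2 1. ]
    #  [3.  1.2 1. ]]
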
@@ -3842,7 +3957,7 @@ class ScatterUpdate(Primitive):
3842
3957
 
3843
3958
  Inputs:
3844
3959
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
3845
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
3960
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
3846
3961
  - **indices** (Tensor) - The index of input tensor. With int32 data type.
3847
3962
  If there are duplicates in indices, the order for updating is undefined.
3848
3963
  - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
@@ -3911,7 +4026,7 @@ class ScatterNdUpdate(Primitive):
3911
4026
 
3912
4027
  Inputs:
3913
4028
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
3914
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
4029
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
3915
4030
  - **indices** (Tensor) - The index of input tensor, with int32 or int64 data type.
3916
4031
  - **updates** (Tensor) - An N-D (2-D or 3-D) Tensor, the tensor to be updated to the input tensor,
3917
4032
  has the same type as input. The shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
@@ -3978,7 +4093,7 @@ class ScatterMax(_ScatterOpDynamic):
3978
4093
 
3979
4094
  Inputs:
3980
4095
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
3981
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
4096
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
3982
4097
  - **indices** (Tensor) - The index to do max operation whose data type must be mindspore.int32 or
3983
4098
  mindspore.int64.
3984
4099
  - **updates** (Tensor) - The tensor that performs the maximum operation with `input_x`,
@@ -3997,7 +4112,7 @@ class ScatterMax(_ScatterOpDynamic):
3997
4112
  and `updates` is greater than 8 dimensions.
3998
4113
 
3999
4114
  Supported Platforms:
4000
- ``Ascend`` ``CPU`` ``GPU``
4115
+ ``Ascend`` ``GPU`` ``CPU``
4001
4116
 
4002
4117
  Examples:
4003
4118
  >>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32),
@@ -4036,7 +4151,7 @@ class ScatterMin(_ScatterOpDynamic):
4036
4151
 
4037
4152
  Inputs:
4038
4153
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
4039
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
4154
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
4040
4155
  - **indices** (Tensor) - The index to do min operation whose data type must be mindspore.int32 or
4041
4156
  mindspore.int64.
4042
4157
  - **updates** (Tensor) - The tensor doing the min operation with `input_x`,
@@ -4097,7 +4212,7 @@ class ScatterAdd(Primitive):
4097
4212
 
4098
4213
  Inputs:
4099
4214
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
4100
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
4215
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
4101
4216
  - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32 or
4102
4217
  mindspore.int64.
4103
4218
  - **updates** (Tensor) - The tensor doing the add operation with `input_x`,
@@ -4213,7 +4328,7 @@ class ScatterSub(Primitive):
4213
4328
 
4214
4329
  Inputs:
4215
4330
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
4216
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
4331
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
4217
4332
  - **indices** (Tensor) - The index to do sub operation whose data type must be mindspore.int32 or
4218
4333
  mindspore.int64.
4219
4334
  - **updates** (Tensor) - The tensor doing the sub operation with `input_x`,
@@ -4230,7 +4345,7 @@ class ScatterSub(Primitive):
4230
4345
  is required when data type conversion of Parameter is not supported.
4231
4346
 
4232
4347
  Supported Platforms:
4233
- ``Ascend`` ``CPU`` ``GPU``
4348
+ ``Ascend`` ``GPU`` ``CPU``
4234
4349
 
4235
4350
  Examples:
4236
4351
  >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]), mindspore.float32), name="x")
@@ -4309,16 +4424,17 @@ class ScatterSub(Primitive):
4309
4424
 
4310
4425
  class Triu(Primitive):
4311
4426
  """
4312
- Returns the higher triangular part of a single Tensor,
4313
- the other elements of the result tensor out are set to 0.
4314
- The higher triangular part of the matrix is defined as the elements on and above the diagonal.
4427
+ Returns the upper triangular portion of the 2-D matrix or the set of matrices
4428
+ in a batch. The remaining elements of the resulting Tensor are assigned a value of 0.
4429
+ The upper triangular section of the matrix comprises the
4430
+ elements present on and above the main diagonal.
4315
4431
 
4316
4432
  Args:
4317
4433
  diagonal (int, optional): The index of diagonal. Default: 0, indicating the main diagonal.
4318
4434
 
4319
4435
  Inputs:
4320
- - **x** (Tensor) - The input tensor with shape :math:`(N,∗)`
4321
- where means any number of additional dimensions. The data type is Number.
4436
+ - **x** (Tensor) - The input tensor with shape :math:`(N, *)`
4437
+ where :math:`*` means any number of additional dimensions. The data type is Number.
4322
4438
 
4323
4439
  Outputs:
4324
4440
  - **y** (Tensor) - A tensor has the same shape and data type as input.
@@ -4329,7 +4445,7 @@ class Triu(Primitive):
4329
4445
  ValueError: If length of shape of x is less than 1.
4330
4446
 
4331
4447
  Supported Platforms:
4332
- ``Ascend`` ``GPU`` ``CPU``
4448
+ ``GPU`` ``CPU``
4333
4449
 
4334
4450
  Examples:
4335
4451
  >>> x = Tensor(np.array([[ 1, 2, 3, 4],
@@ -4397,7 +4513,7 @@ class ScatterMul(_ScatterOpDynamic):
4397
4513
 
4398
4514
  Inputs:
4399
4515
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
4400
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
4516
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
4401
4517
  - **indices** (Tensor) - The index to do multiply operation whose data type must be mstype.int32 or
4402
4518
  mstype.int64.
4403
4519
  - **updates** (Tensor) - The tensor doing the multiply operation with `input_x`,
@@ -4417,10 +4533,6 @@ class ScatterMul(_ScatterOpDynamic):
4417
4533
  ``Ascend`` ``GPU`` ``CPU``
4418
4534
 
4419
4535
  Examples:
4420
- >>> from mindspore import Tensor
4421
- >>> from mindspore import dtype as mstype
4422
- >>> import mindspore.ops as ops
4423
- >>> import numpy as np
4424
4536
  >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
4425
4537
  >>> indices = Tensor(np.array([0, 1]), mstype.int32)
4426
4538
  >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mstype.float32)
@@ -4490,7 +4602,7 @@ class ScatterDiv(_ScatterOpDynamic):
4490
4602
  Using given values to update tensor value through the div operation, along with the input indices.
4491
4603
  This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
4492
4604
 
4493
- for each `i, ..., j` in `indices.shape`:
4605
+ for each :math:`i, ..., j` in `indices.shape`:
4494
4606
 
4495
4607
  .. math::
4496
4608
 
@@ -4506,7 +4618,7 @@ class ScatterDiv(_ScatterOpDynamic):
4506
4618
 
4507
4619
  Inputs:
4508
4620
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
4509
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
4621
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
4510
4622
  - **indices** (Tensor) - The index to do divide operation whose data type must be mstype.int32 or
4511
4623
  mstype.int64.
4512
4624
  - **updates** (Tensor) - The tensor doing the divide operation with `input_x`,
@@ -4721,6 +4833,9 @@ class ScatterNdMul(_ScatterNdOp):
4721
4833
  Using given values to update parameter value through the multiplication operation, along with the input indices.
4722
4834
  This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
4723
4835
 
4836
+ .. warning::
4837
+ This is an experimental API that is subject to change or deletion.
4838
+
4724
4839
  Refer to :func:`mindspore.ops.scatter_nd_mul` for more details.
4725
4840
 
4726
4841
  Supported Platforms:
@@ -4767,6 +4882,9 @@ class ScatterNdDiv(_ScatterNdOp):
4767
4882
  Using given values to update tensor value through the division operation, along with the input indices.
4768
4883
  This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
4769
4884
 
4885
+ .. warning::
4886
+ This is an experimental API that is subject to change or deletion.
4887
+
4770
4888
  Refer to :func:`mindspore.ops.scatter_nd_div` for more details.
4771
4889
 
4772
4890
  Supported Platforms:
@@ -4854,6 +4972,11 @@ class ScatterNdMax(_ScatterNdOp):
4854
4972
  [1 1 1 1]]]
4855
4973
  """
4856
4974
 
4975
+ @prim_attr_register
4976
+ def __init__(self, use_locking=False):
4977
+ """Initialize ScatterNdMax"""
4978
+ super().__init__(use_locking)
4979
+
4857
4980
 
4858
4981
  class ScatterNdMin(_ScatterNdOp):
4859
4982
  r"""
@@ -4902,6 +5025,11 @@ class ScatterNdMin(_ScatterNdOp):
4902
5025
  [10 10 10 10]]]
4903
5026
  """
4904
5027
 
5028
+ @prim_attr_register
5029
+ def __init__(self, use_locking=False):
5030
+ """Initialize ScatterNdMin"""
5031
+ super().__init__(use_locking)
5032
+
4905
5033
 
4906
5034
  class ScatterNonAliasingAdd(Primitive):
4907
5035
  """
@@ -4977,7 +5105,7 @@ class SpaceToDepth(Primitive):
4977
5105
 
4978
5106
  Outputs:
4979
5107
  Tensor, the same data type as `x`. It must be a 4-D tensor. Tensor of shape
4980
- :math:`(N, ( C_{in} * \text{block_size} * 2), H_{in} / \text{block_size}, W_{in} / \text{block_size})`.
5108
+ :math:`(N, (C_{in} * \text{block_size} * 2), H_{in} / \text{block_size}, W_{in} / \text{block_size})`.
4981
5109
 
4982
5110
  Raises:
4983
5111
  TypeError: If `block_size` is not an int.
@@ -5000,7 +5128,7 @@ class SpaceToDepth(Primitive):
5000
5128
  def __init__(self, block_size):
5001
5129
  """Initialize SpaceToDepth"""
5002
5130
  validator.check_value_type('block_size', block_size, [int], self.name)
5003
- validator.check('block_size', block_size, self.name, 2, Rel.GE)
5131
+ validator.check('block_size', block_size, self.name, 2, validator.GE)
5004
5132
  self.block_size = block_size
5005
5133
  self.add_prim_attr("data_format", "NCHW")
5006
5134
  self.init_prim_io_names(inputs=['x'], outputs=['y'])
@@ -5053,7 +5181,7 @@ class DepthToSpace(Primitive):
5053
5181
  def __init__(self, block_size):
5054
5182
  """Initialize DepthToSpace"""
5055
5183
  validator.check_value_type('block_size', block_size, [int], self.name)
5056
- validator.check('block_size', block_size, '', 2, Rel.GE, self.name)
5184
+ validator.check('block_size', block_size, '', 2, validator.GE, self.name)
5057
5185
  self.block_size = block_size
5058
5186
  self.add_prim_attr("data_format", "NCHW")
5059
5187
  self.init_prim_io_names(inputs=['x'], outputs=['y'])
@@ -5119,9 +5247,9 @@ class SpaceToBatch(Primitive):
5119
5247
  logger.warning("WARN_DEPRECATED: The usage of SpaceToBatch is deprecated."
5120
5248
  " Please use SpaceToBatchND.")
5121
5249
  validator.check_value_type('block_size', block_size, [int], self.name)
5122
- validator.check('block_size', block_size, self.name, 2, Rel.GE, self.name)
5250
+ validator.check('block_size', block_size, self.name, 2, validator.GE, self.name)
5123
5251
  self.block_size = block_size
5124
- validator.check('paddings shape', np.array(paddings).shape, self.name, (2, 2), Rel.EQ, self.name)
5252
+ validator.check('paddings shape', np.array(paddings).shape, self.name, (2, 2), validator.EQ, self.name)
5125
5253
  for elem in itertools.chain(*paddings):
5126
5254
  validator.check_non_negative_int(elem, 'paddings element', self.name)
5127
5255
  validator.check_value_type('paddings element', elem, [int], self.name)
@@ -5149,8 +5277,8 @@ class BatchToSpace(PrimitiveWithInfer):
5149
5277
  product of `block_shape`. The data type is float16 or float32.
5150
5278
 
5151
5279
  Outputs:
5152
- Tensor, the output tensor with the same type as input. Assume input shape is (n, c, h, w) with block_size
5153
- and crops. The output shape will be (n', c', h', w'), where
5280
+ Tensor, the output tensor with the same type as input. Assume input shape is :math:`(n, c, h, w)` with
5281
+ block_size and crops. The output shape will be :math:`(n', c', h', w')`, where
5154
5282
 
5155
5283
  :math:`n' = n//(block\_size*block\_size)`
5156
5284
 
@@ -5186,7 +5314,7 @@ class BatchToSpace(PrimitiveWithInfer):
5186
5314
  logger.warning("WARN_DEPRECATED: The usage of BatchToSpace is deprecated."
5187
5315
  " Please use BatchToSpaceND.")
5188
5316
  validator.check_value_type('block_size', block_size, [int], self.name)
5189
- validator.check('block_size', block_size, '', 2, Rel.GE, self.name)
5317
+ validator.check('block_size', block_size, '', 2, validator.GE, self.name)
5190
5318
  self.block_size = block_size
5191
5319
  validator.check_value_type('crops type', crops, [list, tuple], self.name)
5192
5320
  validator.check('crops shape', np.array(crops).shape, self.name, (2, 2))
@@ -5205,7 +5333,7 @@ class BatchToSpace(PrimitiveWithInfer):
5205
5333
  for i in range(2):
5206
5334
  x_block_prod = out_shape[i + 2] * self.block_size
5207
5335
  crops_sum = self.crops[i][0] + self.crops[i][1]
5208
- validator.check("x block shape prod", x_block_prod, 'crops sum', crops_sum, Rel.GT, self.name)
5336
+ validator.check("x block shape prod", x_block_prod, 'crops sum', crops_sum, validator.GT, self.name)
5209
5337
  out_shape[i + 2] = x_block_prod - crops_sum
5210
5338
  block_size_prod = self.block_size * self.block_size
5211
5339
  if out_shape[0] % block_size_prod != 0:
@@ -5249,9 +5377,11 @@ class SpaceToBatchND(Primitive):
5249
5377
  The shape of the output tensor will be :math:`(n', c_1, ... c_k, w'_1, ..., w'_M)`,
5250
5378
  where
5251
5379
 
5252
- :math:`n' = n*(block\_shape[0]*...*block\_shape[M-1])`
5253
-
5254
- :math:`w'_i = (w_i+paddings[i-1][0]+paddings[i-1][1])//block\_shape[i-1]`
5380
+ .. math::
5381
+ \begin{array}{ll} \\
5382
+ n' = n*(block\_shape[0]*...*block\_shape[M-1]) \\
5383
+ w'_i = (w_i+paddings[i-1][0]+paddings[i-1][1])//block\_shape[i-1]
5384
+ \end{array}
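As a quick sanity check on the formulas above, the output shape can be computed by hand; a NumPy sketch (illustrative values only, not this operator's implementation):

>>> import numpy as np
>>> block_shape, paddings = [2, 2], [[0, 0], [0, 0]]
>>> n, c, h, w = 1, 3, 4, 4
>>> n_out = n * int(np.prod(block_shape))
>>> h_out = (h + sum(paddings[0])) // block_shape[0]
>>> w_out = (w + sum(paddings[1])) // block_shape[1]
>>> print((n_out, c, h_out, w_out))
(4, 3, 2, 2)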
5255
5385
 
5256
5386
  Raises:
5257
5387
  TypeError: If `block_shape` is not one of list, tuple, int.
@@ -5282,24 +5412,25 @@ class SpaceToBatchND(Primitive):
5282
5412
  def __init__(self, block_shape, paddings):
5283
5413
  """Initialize SpaceToBatchND"""
5284
5414
  validator.check_value_type('paddings type', paddings, [list, tuple], self.name)
5285
- validator.check('paddings length', len(paddings), '', 1, Rel.GE, self.name)
5415
+ validator.check('paddings length', len(paddings), '', 1, validator.GE, self.name)
5286
5416
 
5287
5417
  if isinstance(block_shape, int):
5288
5418
  block_shape = (block_shape,) * np.array(paddings).shape[0]
5289
5419
 
5290
5420
  self.add_prim_attr("block_shape", block_shape)
5291
5421
  validator.check_value_type('block_shape type', block_shape, [list, tuple], self.name)
5292
- validator.check('block_shape shape', len(np.array(block_shape).shape), 'default value', 1, Rel.EQ, self.name)
5422
+ validator.check('block_shape shape', len(np.array(block_shape).shape),
5423
+ 'default value', 1, validator.EQ, self.name)
5293
5424
  block_rank = len(block_shape)
5294
5425
  if context.get_context("device_target") == "Ascend":
5295
- validator.check('block_shape length', block_rank, 'default value', 2, Rel.EQ, self.name)
5426
+ validator.check('block_shape length', block_rank, 'default value', 2, validator.EQ, self.name)
5296
5427
  for elem in block_shape:
5297
- validator.check('block_shape element', elem, 'min value', 1, Rel.GE, self.name)
5428
+ validator.check('block_shape element', elem, 'min value', 1, validator.GE, self.name)
5298
5429
  validator.check_value_type('block_shape element', elem, [int], self.name)
5299
5430
  self.block_shape = block_shape
5300
5431
 
5301
5432
  validator.check(
5302
- 'paddings shape', np.array(paddings).shape, 'default value', (block_rank, 2), Rel.EQ, self.name)
5433
+ 'paddings shape', np.array(paddings).shape, 'default value', (block_rank, 2), validator.EQ, self.name)
5303
5434
  for elem in itertools.chain(*paddings):
5304
5435
  validator.check_non_negative_int(elem, 'paddings element', self.name)
5305
5436
  validator.check_value_type('paddings element', elem, [int], self.name)
@@ -5308,9 +5439,8 @@ class SpaceToBatchND(Primitive):
5308
5439
 
5309
5440
  class BatchToSpaceND(Primitive):
5310
5441
  r"""
5311
- Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
5312
-
5313
- Refer to :func:`mindspore.ops.batch_to_space_nd` for more details.
5442
+ `ops.BatchToSpaceND` is deprecated from version 2.0 and will be removed in a future version,
5443
+ use `ops.batch_to_space_nd` instead.
5314
5444
 
5315
5445
  Supported Platforms:
5316
5446
  ``Ascend`` ``GPU`` ``CPU``
@@ -5326,6 +5456,7 @@ class BatchToSpaceND(Primitive):
5326
5456
  [3. 4.]]]]
5327
5457
  """
5328
5458
 
5459
+ @deprecated("2.0", "ops.batch_to_space_nd", False)
5329
5460
  @prim_attr_register
5330
5461
  def __init__(self, block_shape, crops):
5331
5462
  """Initialize BatchToSpaceND"""
@@ -5333,18 +5464,18 @@ class BatchToSpaceND(Primitive):
5333
5464
  block_shape = (block_shape,) * np.array(crops).shape[0]
5334
5465
  self.add_prim_attr("block_shape", block_shape)
5335
5466
  validator.check_value_type('block_shape type', block_shape, [list, tuple], self.name)
5336
- validator.check('block_shape shape', len(np.array(block_shape).shape), '', 1, Rel.EQ, self.name)
5467
+ validator.check('block_shape shape', len(np.array(block_shape).shape), '', 1, validator.EQ, self.name)
5337
5468
  block_rank = len(block_shape)
5338
5469
  if context.get_context("device_target") == "Ascend":
5339
- validator.check('block_shape length', block_rank, '', 2, Rel.EQ, self.name)
5470
+ validator.check('block_shape length', block_rank, '', 2, validator.EQ, self.name)
5340
5471
  for elem in block_shape:
5341
- validator.check('block_shape element', elem, '', 1, Rel.GE, self.name)
5472
+ validator.check('block_shape element', elem, '', 1, validator.GE, self.name)
5342
5473
  validator.check_value_type('block_shape element', elem, [int], self.name)
5343
5474
  self.block_shape = block_shape
5344
5475
 
5345
5476
  validator.check_value_type('crops type', crops, [list, tuple], self.name)
5346
- validator.check('crops length', len(crops), '', 1, Rel.GE, self.name)
5347
- validator.check('crops shape', np.array(crops).shape, '', (block_rank, 2), Rel.EQ, self.name)
5477
+ validator.check('crops length', len(crops), '', 1, validator.GE, self.name)
5478
+ validator.check('crops shape', np.array(crops).shape, '', (block_rank, 2), validator.EQ, self.name)
5348
5479
  for elem in itertools.chain(*crops):
5349
5480
  validator.check_non_negative_int(elem, 'crops element', self.name)
5350
5481
  validator.check_value_type('crops element', elem, [int], self.name)
@@ -5356,6 +5487,9 @@ class BatchToSpaceNDV2(Primitive):
5356
5487
  Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
5357
5488
 
5358
5489
  Refer to :func:`mindspore.ops.batch_to_space_nd` for more details.
5490
+
5491
+ Supported Platforms:
5492
+ ``Ascend``
5359
5493
  """
5360
5494
 
5361
5495
  @prim_attr_register
@@ -5365,7 +5499,7 @@ class BatchToSpaceNDV2(Primitive):
5365
5499
  self.add_prim_attr('origin_format', 'NHWC')
5366
5500
 
5367
5501
 
5368
- class BroadcastTo(Primitive):
5502
+ class BroadcastTo(PrimitiveWithCheck):
5369
5503
  """
5370
5504
  Broadcasts input tensor to a given shape.
5371
5505
 
@@ -5394,24 +5528,41 @@ class BroadcastTo(Primitive):
5394
5528
  def __init__(self, shape):
5395
5529
  """Initialize BroadcastTo"""
5396
5530
  validator.check_value_type("shape", shape, (tuple), self.name)
5397
- validator.check("dimension of x", len(shape), "", 0, Rel.GT, self.name)
5531
+ validator.check("dimension of x", len(shape), "", 0, validator.GT, self.name)
5398
5532
  for ix, i in enumerate(shape):
5399
5533
  validator.check_value_type('target shape index -> ' + str(ix), i, [int], self.name)
5400
- validator.check("shape element", i, "shape element min limit", -1, Rel.GE, self.name)
5534
+ validator.check("shape element", i, "shape element min limit", -1, validator.GE, self.name)
5401
5535
  self.shape = shape
5402
5536
 
5537
+ def infer_value(self, x):
5538
+ if x is None:
5539
+ return None
5540
+ return Tensor(np.broadcast_to(x.asnumpy(), self.shape))
5541
+
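The constant-folding path added above defers to NumPy broadcasting; for reference, a minimal sketch of the same semantics:

>>> import numpy as np
>>> print(np.broadcast_to(np.array([1, 2, 3]), (2, 3)))
[[1 2 3]
 [1 2 3]]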
5403
5542
 
5404
5543
  class Meshgrid(PrimitiveWithInfer):
5405
5544
  """
5406
5545
  Generates coordinate matrices from given coordinate tensors.
5407
5546
 
5408
- Given N one-dimensional coordinate tensors, returns a tuple outputs of N N-D
5409
- coordinate tensors for evaluating expressions on an N-D grid.
5410
-
5411
5547
  Refer to :func:`mindspore.ops.meshgrid` for more details.
5412
5548
 
5549
+ Args:
5550
+ indexing (str, optional): Cartesian ('xy', default) or
5551
+ matrix ('ij') indexing of output. Valid options: 'xy' or 'ij'. In the 2-D case with
5552
+ inputs of length `M` and `N`, the outputs are of shape `(N, M)`
5553
+ for 'xy' indexing and `(M, N)` for 'ij' indexing. In the 3-D
5554
+ case with inputs of length `M`, `N` and `P`, outputs are of shape
5555
+ `(N, M, P)` for 'xy' indexing and `(M, N, P)` for 'ij' indexing.
5556
+
5557
+ Inputs:
5558
+ - **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects.
5559
+ The length of input should be greater than 1. The data type is Number.
5560
+
5561
+ Outputs:
5562
+ Tensors, a tuple of N N-D Tensor objects. The data type is the same as the inputs.
5563
+
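The shape convention matches NumPy's; a small sketch of the 'xy' versus 'ij' difference (NumPy used purely for illustration):

>>> import numpy as np
>>> a, b = np.arange(2), np.arange(3)  # lengths M=2, N=3
>>> print(np.meshgrid(a, b, indexing='xy')[0].shape)
(3, 2)
>>> print(np.meshgrid(a, b, indexing='ij')[0].shape)
(2, 3)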
5413
5564
  Supported Platforms:
5414
- ``Ascend`` ``CPU`` ``GPU``
5565
+ ``Ascend`` ``GPU`` ``CPU``
5415
5566
 
5416
5567
  Examples:
5417
5568
  >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
@@ -5471,11 +5622,11 @@ class Meshgrid(PrimitiveWithInfer):
5471
5622
 
5472
5623
  def infer_shape(self, x_shape):
5473
5624
  validator.check_value_type("shape", x_shape, [tuple], self.name)
5474
- validator.check_int(len(x_shape), 2, Rel.GE, "len of input", self.name)
5625
+ validator.check_int(len(x_shape), 2, validator.GE, "len of input", self.name)
5475
5626
  n = len(x_shape)
5476
5627
  shape_0 = []
5477
5628
  for s in x_shape:
5478
- validator.check_int(len(s), 1, Rel.EQ, 'each input rank', self.name)
5629
+ validator.check_int(len(s), 1, validator.EQ, 'each input rank', self.name)
5479
5630
  shape_0.append(s[0])
5480
5631
  if self.indexing == "xy":
5481
5632
  shape_0[0], shape_0[1] = shape_0[1], shape_0[0]
@@ -5486,7 +5637,7 @@ class Meshgrid(PrimitiveWithInfer):
5486
5637
  validator.check_subclass("input[0]", x_type[0], mstype.tensor, self.name)
5487
5638
  n = len(x_type)
5488
5639
  for i in range(1, n):
5489
- validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], Rel.EQ, self.name, TypeError)
5640
+ validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], validator.EQ, self.name, TypeError)
5490
5641
  return x_type
5491
5642
 
5492
5643
 
@@ -5581,7 +5732,7 @@ class EditDistance(Primitive):
5581
5732
  \end{array}\right. &
5582
5733
  \end{array}\right.
5583
5734
 
5584
- Where the :math:`a` indicates the hypothesis and the :math:`a` indicates the truth. For ease of understanding,
5735
+ Where the :math:`a` indicates the hypothesis and the :math:`b` indicates the truth. For ease of understanding,
5585
5736
  i and j herein may be considered as the lengths of a and b.
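For intuition, a minimal pure-Python Levenshtein distance between two sequences (a reference sketch of the recurrence above, not this operator's implementation):

>>> def levenshtein(a, b):
...     prev = list(range(len(b) + 1))
...     for i, ca in enumerate(a, 1):
...         cur = [i]
...         for j, cb in enumerate(b, 1):
...             cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
...         prev = cur
...     return prev[-1]
...
>>> print(levenshtein("kitten", "sitting"))
3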
5586
5737
 
5587
5738
  .. warning::
@@ -5695,7 +5846,7 @@ class Sort(Primitive):
5695
5846
 
5696
5847
  Outputs:
5697
5848
  - **y1** (Tensor) - A tensor whose values are the sorted values, with the same shape and data type as input.
5698
- - **y2** (Tensor) - The indices of the elements in the original input tensor. Data type is int32.
5849
+ - **y2** (Tensor) - the indices of the elements in the original input tensor. Data type is int32.
5699
5850
 
5700
5851
  Raises:
5701
5852
  TypeError: If `axis` is not an int.
@@ -5754,7 +5905,7 @@ class EmbeddingLookup(PrimitiveWithCheck):
5754
5905
  ValueError: If length of shape of `input_params` is greater than 2.
5755
5906
 
5756
5907
  Supported Platforms:
5757
- ``Ascend`` ``CPU`` ``GPU``
5908
+ ``Ascend`` ``GPU`` ``CPU``
5758
5909
 
5759
5910
  Examples:
5760
5911
  >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
@@ -5848,11 +5999,11 @@ class IdentityN(Primitive):
5848
5999
  Return a tuple of tensors with the same shapes and contents as the input.
5849
6000
 
5850
6001
  This op can be used to override the gradient for complicated functions. For
5851
- example, suppose y = f(x) and we wish to apply a custom function g for backprop
5852
- such that dx = g(dy).
6002
+ example, suppose :math:`y = f(x)` and we wish to apply a custom function g for backprop
6003
+ such that :math:`dx=g(dy)`.
5853
6004
 
5854
6005
  Inputs:
5855
- - **x** (Tensors) - tuple(Tensor) or List(Tensor). The data type is RealNumber.
6006
+ - **x** (Union[tuple[Tensor], list[Tensor]]) - Input, the data type is RealNumber.
5856
6007
 
5857
6008
  Outputs:
5858
6009
  Tensors - tuple(Tensor), the shape of tensor and the data type are the same as input `x`.
@@ -5862,7 +6013,7 @@ class IdentityN(Primitive):
5862
6013
  TypeError: If input `x` type is not RealNumber.
5863
6014
 
5864
6015
  Supported Platforms:
5865
- ``GPU`` ``CPU``
6016
+ ``Ascend`` ``GPU`` ``CPU``
5866
6017
 
5867
6018
  Examples:
5868
6019
  >>> x = [Tensor(np.array([1, 2, 3, 4]), mstype.int64), Tensor(np.array([4, 3, 1, 1]), mstype.int64)]
@@ -5883,14 +6034,30 @@ class IdentityN(Primitive):
5883
6034
 
5884
6035
  class Range(PrimitiveWithCheck):
5885
6036
  r"""
5886
- Creates a sequence of numbers that begins at `start` and extends by increments of
5887
- `delta` up to but not including `limit`. Length of the created sequence can not exceed `maxlen`.
5888
- The default value of `maxlen` is 1000000.
6037
+ Creates a sequence of numbers that begins at `start` and extends by increments of
6038
+ `delta` up to but not including `limit`.
5889
6039
 
5890
6040
  Refer to :func:`mindspore.ops.range` for more details.
5891
6041
 
6042
+ Args:
6043
+ maxlen (int, optional): Memory that can fit `maxlen` many elements
6044
+ will be allocated for the output. Must be a positive integer. Default: 1000000.
6045
+ If the output has more than `maxlen` elements, a runtime error
6046
+ will occur.
6047
+
6048
+ Inputs:
6049
+ - **start** (Tensor) - A scalar Tensor. The first number in the sequence. Must have
6050
+ type: int32, int64, float32 or float64.
6051
+ - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive. Must
6052
+ have type: int32, int64, float32 or float64.
6053
+ - **delta** (Tensor) - A scalar Tensor. Number that increments `start`. Must have
6054
+ type: int32, int64, float32 or float64.
6055
+
6056
+ Outputs:
6057
+ A 1-D Tensor, with the same type as the inputs.
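The semantics mirror a half-open arithmetic range; a NumPy sketch for illustration:

>>> import numpy as np
>>> print(np.arange(0, 10, 4))  # start=0, limit=10 (exclusive), delta=4
[0 4 8]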
6058
+
5892
6059
  Supported Platforms:
5893
- ``Ascend`` ``GPU`` ``CPU``
6060
+ ``GPU`` ``CPU``
5894
6061
 
5895
6062
  Examples:
5896
6063
  >>> start = Tensor(0, mstype.int32)
@@ -5910,9 +6077,12 @@ class Range(PrimitiveWithCheck):
5910
6077
  self.add_prim_attr('maxlen', maxlen)
5911
6078
 
5912
6079
  def check_shape(self, start_shape, limit_shape, delta_shape):
5913
- validator.check("start_shape", len(start_shape), "", 0, Rel.EQ, self.name)
5914
- validator.check("limit_shape", len(limit_shape), "", 0, Rel.EQ, self.name)
5915
- validator.check("delta_shape", len(delta_shape), "", 0, Rel.EQ, self.name)
6080
+ if not is_shape_unknown(start_shape):
6081
+ validator.check("start_shape", len(start_shape), "", 0, validator.EQ, self.name)
6082
+ if not is_shape_unknown(limit_shape):
6083
+ validator.check("limit_shape", len(limit_shape), "", 0, validator.EQ, self.name)
6084
+ if not is_shape_unknown(delta_shape):
6085
+ validator.check("delta_shape", len(delta_shape), "", 0, validator.EQ, self.name)
5916
6086
 
5917
6087
  def check_dtype(self, start_dtype, limit_dtype, delta_dtype):
5918
6088
  valid_dtypes = [mstype.int32, mstype.float32, mstype.int64, mstype.float64]
@@ -6093,14 +6263,33 @@ class MaskedSelect(PrimitiveWithCheck):
6093
6263
 
6094
6264
  class SearchSorted(Primitive):
6095
6265
  """
6096
- Find the indices from the innermost dimension of `sorted_sequence` such that the order of the innermost dimension
6097
- within `sorted_sequence` would be preserved when the corresponding values in `values` were inserted before the
6098
- indices.
6266
+ Returns the indices corresponding to the positions where the given numbers in `values` should be inserted
6267
+ into `sorted_sequence` so that the order of the sequence is maintained.
6268
+
6269
+ .. warning::
6270
+ This is an experimental API that is subject to change or deletion.
6099
6271
 
6100
6272
  Refer to :func:`mindspore.ops.searchsorted` for more details.
6101
6273
 
6274
+ Args:
6275
+ dtype (:class:`mindspore.dtype`, optional): Output data type. An optional data type of
6276
+ `mstype.int32` and `mstype.int64`. Default: `mstype.int64`.
6277
+ right (bool, optional): Search Strategy. If True, return the last suitable index found;
6278
+ if False, return the first such index. Default: False.
6279
+
6280
+ Inputs:
6281
+ - **sorted_sequence** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_R)` or :math:`(x_1)`.
6282
+ It must contain a monotonically increasing sequence on the innermost dimension.
6283
+ - **values** (Tensor) - The value that should be inserted.
6284
+ The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_S)`.
6285
+
6286
+ Outputs:
6287
+ Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
6288
+ if the corresponding values in the `values` tensor were inserted before them, the order of `sorted_sequence` would be preserved,
6289
+ whose data type is set by the attr `dtype`, and whose shape is the same as the shape of `values`.
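The behaviour matches binary-search insertion; a NumPy sketch, where `side='left'` plays the role of `right=False` (illustrative only):

>>> import numpy as np
>>> seq = np.array([0, 1, 3, 5, 7])
>>> print(np.searchsorted(seq, [2, 5], side='left'))
[2 3]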
6290
+
6102
6291
  Supported Platforms:
6103
- ``Ascend`` ``CPU``
6292
+ ``Ascend`` ``GPU`` ``CPU``
6104
6293
 
6105
6294
  Examples:
6106
6295
  >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
@@ -6187,12 +6376,11 @@ class TensorScatterUpdate(_TensorScatterOp):
6187
6376
 
6188
6377
  Inputs:
6189
6378
  - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
6190
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
6379
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
6191
6380
  The data type is Number.
6192
6381
  - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
6193
6382
  The rank must be at least 2.
6194
6383
  - **update** (Tensor) - The tensor to update the input tensor, has the same type as input, and
6195
-
6196
6384
  :math:`update.shape = indices.shape[:-1]+input_x.shape[indices.shape[-1]:]`
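A plain-Python sketch of the update rule for the 2-D case (illustrative only, not this operator's implementation):

>>> import numpy as np
>>> x = np.array([[1.0, 2.0], [3.0, 4.0]])
>>> indices = np.array([[0, 0], [1, 1]])
>>> updates = np.array([9.0, 8.0])
>>> out = x.copy()
>>> for idx, val in zip(indices, updates):
...     out[tuple(idx)] = val
...
>>> print(out.tolist())
[[9.0, 2.0], [3.0, 8.0]]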
6197
6385
 
6198
6386
  Outputs:
@@ -6202,6 +6390,7 @@ class TensorScatterUpdate(_TensorScatterOp):
6202
6390
  TypeError: If dtype of `indices` is neither int32 nor int64.
6203
6391
  ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
6204
6392
  ValueError: If the value of `input_x` are not match with input `indices`.
6393
+ RuntimeError: If a value of `indices` is out of the bound of the shape of `input_x`.
6205
6394
 
6206
6395
  Supported Platforms:
6207
6396
  ``Ascend`` ``GPU`` ``CPU``
@@ -6219,7 +6408,6 @@ class TensorScatterUpdate(_TensorScatterOp):
6219
6408
 
6220
6409
  @prim_attr_register
6221
6410
  def __init__(self):
6222
- super().__init__("TensorScatterUpdate")
6223
6411
  self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
6224
6412
 
6225
6413
  def _infer_specified_value(self, input_x_value, indices_value, updates_value):
@@ -6289,7 +6477,6 @@ class TensorScatterMax(Primitive):
6289
6477
 
6290
6478
  @prim_attr_register
6291
6479
  def __init__(self):
6292
- super().__init__("TensorScatterMax")
6293
6480
  self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
6294
6481
 
6295
6482
 
@@ -6325,7 +6512,6 @@ class TensorScatterMin(Primitive):
6325
6512
 
6326
6513
  @prim_attr_register
6327
6514
  def __init__(self):
6328
- super().__init__("TensorScatterMin")
6329
6515
  self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
6330
6516
 
6331
6517
 
@@ -6363,7 +6549,6 @@ class TensorScatterSub(Primitive):
6363
6549
 
6364
6550
  @prim_attr_register
6365
6551
  def __init__(self):
6366
- super().__init__("TensorScatterSub")
6367
6552
  self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
6368
6553
 
6369
6554
 
@@ -6402,7 +6587,6 @@ class TensorScatterAdd(Primitive):
6402
6587
 
6403
6588
  @prim_attr_register
6404
6589
  def __init__(self):
6405
- super().__init__("TensorScatterAdd")
6406
6590
  self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
6407
6591
 
6408
6592
 
@@ -6440,7 +6624,6 @@ class TensorScatterMul(_TensorScatterOp):
6440
6624
 
6441
6625
  @prim_attr_register
6442
6626
  def __init__(self):
6443
- super().__init__("TensorScatterMul")
6444
6627
  self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
6445
6628
 
6446
6629
 
@@ -6478,43 +6661,42 @@ class TensorScatterDiv(_TensorScatterOp):
6478
6661
 
6479
6662
  @prim_attr_register
6480
6663
  def __init__(self):
6481
- super().__init__("TensorScatterDiv")
6482
6664
  self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
6483
6665
 
6484
6666
 
6485
6667
  class ListDiff(Primitive):
6486
- r"""Computes the difference between two lists of numbers.
6668
+ r"""
6669
+ This function computes the difference between two numerical lists.
6487
6670
 
6488
- Given a list `x` and a list `y`, this operation returns a list `out` that
6489
- represents all values that are in `x` but not in `y`. The returned list `out`
6490
- is sorted in the same order that the numbers appear in `x` (duplicates are
6491
- preserved). This operation also returns a list `idx` that represents the
6492
- position of each `out` element in `x`. In other words:
6671
+ It generates a list of all elements that are present in list `x` but not in list `y`.
6672
+ The output list `out` retains the same order as the original `x` including duplicate elements.
6673
+
6674
+ Additionally, this class outputs a list `idx` that identifies the position of each element
6675
+ in `out` within the original `x`. That is to say:
6493
6676
  :code:`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` .
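A NumPy sketch of the same semantics (illustrative only):

>>> import numpy as np
>>> x, y = np.array([1, 2, 3, 4, 5, 6]), np.array([1, 3, 5])
>>> mask = ~np.isin(x, y)
>>> print(x[mask].tolist(), np.flatnonzero(mask).tolist())
[2, 4, 6] [1, 3, 5]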
6494
6677
 
6495
6678
  Args:
6496
6679
  out_idx (:class:`mindspore.dtype`, optional): The dtype of `idx`,
6497
- an optioanal datatype of `mindspore.dtype.int32` and `mindspore.dtype.int64`.
6498
- Default: `mindspore.dtype.int32`.
6680
+ an optional data type of `mstype.int32` and `mstype.int64`.
6681
+ Default: `mstype.int32`.
6499
6682
 
6500
6683
  Inputs:
6501
- - **x** - A 1-D `Tensor`. Values to keep. type support list [float16, float32,
6502
- float64, uint8, uint16, int8, int16, int32, int64]
6503
- - **y** - A 1-D `Tensor`. Must have the same type as `x`. 1-D. Values to remove.
6684
+ - **x** - Values to keep. A 1-D `Tensor`.
6685
+ - **y** - Values to remove. A 1-D `Tensor`. Must have the same type as `x`.
6504
6686
 
6505
6687
  Outputs:
6506
- - **out** - A 1-D `Tensor`. Has the same type as `x`.
6507
- - **idx** - A 1-D `Tensor` of type `out_idx`.
6688
+ - **out** - The kept values. A 1-D `Tensor`. Has the same type as `x`.
6689
+ - **idx** - The original index of kept values. A 1-D `Tensor` of type `out_idx`.
6508
6690
 
6509
6691
  Raises:
6510
6692
  ValueError: If `x` or `y` shape is not 1D.
6511
6693
  TypeError: If `x` or `y` is not a Tensor.
6512
- TypeError: If `x` or `y` datetype not in support list.
6694
+ TypeError: If `x` or `y` data type is not int or uint.
6513
6695
  TypeError: If `x` has different data type with `y`.
6514
- TypeError: If attr `out_idx` not in [mindspore.dtype.int32, mindspore.dtype.int64].
6696
+ TypeError: If attr `out_idx` not in [mstype.int32, mstype.int64].
6515
6697
 
6516
6698
  Supported Platforms:
6517
- ``GPU`` ``CPU``
6699
+ ``Ascend`` ``GPU`` ``CPU``
6518
6700
 
6519
6701
  Examples:
6520
6702
  >>> x = Tensor(np.arange(1, 7, 1), dtype=mindspore.dtype.int32) # [1, 2, 3, 4, 5, 6]
@@ -6532,7 +6714,8 @@ class ListDiff(Primitive):
6532
6714
  """Initialize ListDiff"""
6533
6715
  self.init_prim_io_names(inputs=['x', 'y'], outputs=['out', 'idx'])
6534
6716
  validator.check_value_type("out_idx", out_idx, [mstype.Type], self.name)
6535
- validator.check("out_idx", out_idx, "", [mstype.int32, mstype.int64], Rel.IN, self.name, excp_cls=TypeError)
6717
+ validator.check("out_idx", out_idx, "", [mstype.int32, mstype.int64], validator.IN,
6718
+ self.name, excp_cls=TypeError)
6536
6719
  self.out_idx = out_idx
6537
6720
  self.add_prim_attr('out_idx', out_idx)
6538
6721
 
@@ -6545,7 +6728,7 @@ class SplitV(Primitive):
6545
6728
  by `size_splits` along the split dimension. This requires that `input_x.shape(split_dim)`
6546
6729
  is equal to the sum of `size_splits`.
6547
6730
 
6548
- The shape of `input_x` is :math:`(x_1, x_2, ..., x_M, ..., x_R)`. The rank of `input_x`
6731
+ The shape of `input_x` is :math:`(x_1, x_2, ..., x_M, ..., x_R)` whose rank
6549
6732
  is `R`. Set the given `split_dim` as M, and :math:`-R \le M < R`. Set the given `num_split`
6550
6733
  as `N`, the given `size_splits` as :math:`(x_{m_1}, x_{m_2}, ..., x_{m_N})`,
6551
6734
  :math:`x_M=\sum_{i=1}^Nx_{m_i}`. The output is a list of tensor objects, for the
@@ -6559,11 +6742,12 @@ class SplitV(Primitive):
6559
6742
  (x_1, x_2, ..., x_{m_N}, ..., x_R))
6560
6743
 
6561
6744
  Args:
6562
- size_splits (Union[tuple, list]): The list containing the sizes of each output tensor
6563
- along the split dimension. Must sum to the dimension of value along `split_dim`.
6564
- Can contain one -1 indicating that dimension is to be inferred.
6565
- split_dim (int): The dimension along which to split. Must be in the range [-len(input_x.shape),
6566
- len(input_x.shape)).
6745
+ size_splits (Union[tuple, list]): A tuple or list of sizes of each output tensor along the split
6746
+ dimension, and the sum of these sizes should equal the dimension of the
6747
+ input tensor along `split_dim`. The list may also contain a single instance of
6748
+ the value -1, which indicates that the size of that dimension should be inferred.
6749
+ split_dim (int): An int that indicates the dimension along which to split.
6750
+ Must be in the range [-len(input_x.shape), len(input_x.shape)).
6567
6751
  num_split (int): The number of output tensors. Must be positive int.
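A NumPy sketch of splitting by explicit sizes along one dimension (illustrative only):

>>> import numpy as np
>>> sizes = [2, 3, 5]
>>> parts = np.split(np.arange(10), np.cumsum(sizes)[:-1])
>>> print([p.tolist() for p in parts])
[[0, 1], [2, 3, 4, 5, 6], [7, 8, 9]]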
6568
6752
 
6569
6753
  Inputs:
@@ -6626,16 +6810,16 @@ class SplitV(Primitive):
6626
6810
 
6627
6811
  class TensorScatterElements(Primitive):
6628
6812
  """
6629
- Updates the value of the output tensor through the reduction operation.
6813
+ Updates the value of the input Tensor through the specified reduction operation.
6814
+
6630
6815
  Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details.
6631
6816
 
6632
6817
  .. warning::
6633
- The order in which updates are applied is nondeterministic, meaning that if there
6634
- are multiple index vectors in `indices` that correspond to the same position, the
6635
- value of that position in the output will be nondeterministic.
6818
+ If there are multiple index vectors in `indices` that correspond to the same position,
6819
+ the value of that position in the output will be nondeterministic.
6636
6820
 
6637
6821
  Supported Platforms:
6638
- ``Ascend`` ``CPU`` ``GPU``
6822
+ ``Ascend`` ``GPU`` ``CPU``
6639
6823
 
6640
6824
  Examples:
6641
6825
  >>> op = ops.TensorScatterElements(0, "none")
@@ -6684,7 +6868,7 @@ class ExtractVolumePatches(Primitive):
6684
6868
  padding (str): A string from: "SAME", "VALID". The type of padding algorithm to use.
6685
6869
 
6686
6870
  Inputs:
6687
- - **input_x** (Tensor) - A Tensor. 5-D Tensor with shape :math:`(x_n, x_c, x_d, x_h, x_w)`.
6871
+ - **input_x** (Tensor) - A Tensor. 5-D Tensor with shape :math:`(x_n, x_c, x_d, x_h, x_w)`.
6688
6872
 
6689
6873
  Outputs:
6690
6874
  Tensor, has the same type as input.
@@ -6703,11 +6887,11 @@ class ExtractVolumePatches(Primitive):
6703
6887
  ValueError: If input_x is not a tensor in dimension 5.
6704
6888
  ValueError: If input_x's shape has zero.
6705
6889
  ValueError: If one of kernel_size or strides' first two numbers is not 1.
6706
- ValueError: If padding = "VALID" and input - kernel_size is less than 0 in d, h or w dimension.
6890
+ ValueError: If padding = "VALID" and :math:`input\_x - kernel\_size` is less than 0 in d, h or w dimension.
6707
6891
  ValueError: If padding = "SAME" and :math:`padding\_needed = ((input\_x + strides - 1) / strides - 1) *
6708
- strides + kernel\_size - input` is less than 0 in d, h or w dimension.
6709
- ValueError: If x_h is not 1 or x_w is not 1 and x_w + padding_needed - k_w - s_w is less than 0.
6710
- ValueError: If x_d * x_h * x_w is greater than 2048.
6892
+ strides + kernel\_size - input\_x` is less than 0 in d, h or w dimension.
6893
+ ValueError: If x_h is not 1 or x_w is not 1 and :math:`x_w + padding\_needed - k_w - s_w` is less than 0.
6894
+ ValueError: If :math:`x_d * x_h * x_w` is greater than 2048.
6711
6895
 
6712
6896
  Supported Platforms:
6713
6897
  ``Ascend`` ``GPU`` ``CPU``
@@ -6729,13 +6913,13 @@ class ExtractVolumePatches(Primitive):
6729
6913
  if isinstance(kernel_size, (list, tuple)):
6730
6914
  kernel_size = tuple(kernel_size)
6731
6915
  if len(kernel_size) == 5:
6732
- validator.check_int(kernel_size[0], 1, Rel.EQ, "kernel_size[0]", self.name)
6733
- validator.check_int(kernel_size[1], 1, Rel.EQ, "kernel_size[1]", self.name)
6916
+ validator.check_int(kernel_size[0], 1, validator.EQ, "kernel_size[0]", self.name)
6917
+ validator.check_int(kernel_size[1], 1, validator.EQ, "kernel_size[1]", self.name)
6734
6918
  if isinstance(strides, (list, tuple)):
6735
6919
  strides = tuple(strides)
6736
6920
  if len(strides) == 5:
6737
- validator.check_int(strides[0], 1, Rel.EQ, "strides[0]", self.name)
6738
- validator.check_int(strides[1], 1, Rel.EQ, "strides[1]", self.name)
6921
+ validator.check_int(strides[0], 1, validator.EQ, "strides[0]", self.name)
6922
+ validator.check_int(strides[1], 1, validator.EQ, "strides[1]", self.name)
6739
6923
  self.kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.name,
6740
6924
  allow_five=True, ret_five=True, greater_zero=True)
6741
6925
  self.strides = _check_3d_int_or_tuple("strides", strides, self.name,
@@ -6749,31 +6933,11 @@ class ExtractVolumePatches(Primitive):
6749
6933
 
6750
6934
  class ScatterAddWithAxis(Primitive):
6751
6935
  """
6752
- The output of the operation is produced by creating a copy of the input input_x, and then
6753
- add updating its value to values specified by `updates` at specific index positions specified
6754
- by `indices`.
6755
-
6756
- Note:
6757
- The three inputs `input_x`, `updates` and `indices` must have the same rank r >= 1.
6758
-
6759
- Args:
6760
- axis (int, optional): Specifies which axis to do scatter add, default: 0.
6761
-
6762
- Inputs:
6763
- - **input_x** (Tensor) - The target tensor to be added.
6764
- - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
6765
- - **updates** (Tensor) - The Tensor to update the `input_x`, has the same type as `input_x`
6766
- and the same shape as `indices`.
6767
-
6768
- Outputs:
6769
- Tensor, the updated `input_x`, has the same shape and type as `input_x`.
6770
-
6771
- Raises:
6772
- TypeError: If dtype of `indices` is neither int32 nor int64.
6773
- ValueError: If the shape of `indices` is not equal to the shape of `updates`.
6936
+ `ops.ScatterAddWithAxis` is deprecated from version 2.0 and will be removed in a future version,
6937
+ use `ops.TensorScatterElements` instead.
6774
6938
 
6775
6939
  Supported Platforms:
6776
- ``CPU``
6940
+ Deprecated
6777
6941
 
6778
6942
  Examples:
6779
6943
  >>> op = ops.ScatterAddWithAxis(0)
@@ -6799,6 +6963,7 @@ class ScatterAddWithAxis(Primitive):
6799
6963
  sig.make_sig('updates', dtype=sig.sig_dtype.T)
6800
6964
  )
6801
6965
 
6966
+ @deprecated("2.0", "ops.TensorScatterElements", False)
6802
6967
  @prim_attr_register
6803
6968
  def __init__(self, axis=0):
6804
6969
  """Initialize ScatterAddWithAxis"""
@@ -6817,7 +6982,7 @@ class Lstsq(Primitive):
6817
6982
  .. math::
6818
6983
 
6819
6984
  \begin{array}{ll}
6820
- \min_y & \|xy-a\|_2.
6985
+ \min_y & \|xy-a\|_2
6821
6986
  \end{array}
6822
6987
 
6823
6988
  If :math:`m < n`, `Lstsq` solves the least-norm problem:
@@ -6825,7 +6990,7 @@ class Lstsq(Primitive):
6825
6990
  .. math::
6826
6991
 
6827
6992
  \begin{array}{llll}
6828
- \min_y & \|y\|_2 & \text{subject to} & xy = a.
6993
+ \min_y & \|y\|_2 & \text{subject to} & xy = a
6829
6994
  \end{array}
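For reference, NumPy's least-squares solver covers the overdetermined case above; a sketch (illustrative only, not this operator):

>>> import numpy as np
>>> x = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # m=3, n=2
>>> a = np.array([1.0, 1.0, 2.0])
>>> y, *_ = np.linalg.lstsq(x, a, rcond=None)
>>> print(np.round(y, 6).tolist())
[1.0, 1.0]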
6830
6995
 
6831
6996
  Args:
@@ -6834,14 +6999,14 @@ class Lstsq(Primitive):
6834
6999
  - If `fast` is True, then the solution is computed by solving
6835
7000
  the normal equations using Cholesky decomposition.
6836
7001
  - If `fast` is False, an algorithm based on numerically robust
6837
- completee orthogonal decomposition is used.
7002
+ complete orthogonal decomposition is used.
6838
7003
 
6839
7004
  l2_regularizer (float, optional): L2 regularization coefficient. Default: 0.0.
6840
7005
 
6841
7006
  Inputs:
6842
- - **x** (Tensor) - The m by n matrix `x`. The input tensor whose data type is
7007
+ - **x** (Tensor) - :math:`(m \times n)` matrix `x`. The input tensor whose data type is
6843
7008
  float16, float32 or float64.
6844
- - **a** (Tensor) - The m by k matrix `a`. The input tensor whose data type is
7009
+ - **a** (Tensor) - :math:`(m \times k)` matrix `a`. The input tensor whose data type is
6845
7010
  float16, float32 or float64.
6846
7011
 
6847
7012
  Outputs:
@@ -6881,8 +7046,7 @@ class Lstsq(Primitive):
6881
7046
 
6882
7047
  class LowerBound(Primitive):
6883
7048
  """
6884
- Returns a tensor that contains the index for finding the lower bound of the value
6885
- of the input values element in the input sorted_x.
7049
+ Finds the index of the lower bound of `values` in the sorted sequence `sorted_x` element-wise.
6886
7050
 
6887
7051
  Args:
6888
7052
  out_type (:class:`mindspore.dtype`, optional): An optional data type of
@@ -6910,7 +7074,7 @@ class LowerBound(Primitive):
6910
7074
  ValueError: If the first dimension of the shape of `sorted_x` is not equal to that of `values`.
6911
7075
 
6912
7076
  Supported Platforms:
6913
- ``CPU`` ``GPU``
7077
+ ``Ascend`` ``GPU`` ``CPU``
6914
7078
 
6915
7079
  Examples:
6916
7080
  >>> import mindspore
@@ -6963,7 +7127,7 @@ class UpperBound(Primitive):
6963
7127
  ValueError: If the number of rows of `sorted_x` is not consistent with that of `values`.
6964
7128
 
6965
7129
  Supported Platforms:
6966
- ``CPU`` ``GPU``
7130
+ ``Ascend`` ``GPU`` ``CPU``
6967
7131
 
6968
7132
  Examples:
6969
7133
  >>> import mindspore
@@ -7037,9 +7201,12 @@ class RightShift(Primitive):
7037
7201
  &out_{i} =x_{i} >> y_{i}
7038
7202
  \end{aligned}
7039
7203
 
7204
+ .. warning::
7205
+ This is an experimental API that is subject to change or deletion.
7206
+
7040
7207
  Inputs:
7041
7208
  - **input_x** (Tensor) - The target tensor, will be shifted to the right
7042
- by y in element-wise.
7209
+ by `input_y` bits element-wise.
7043
7210
  - **input_y** (Tensor) - Number of bits shifted, the tensor must have the same type as `input_x`.
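The elementwise rule is ordinary integer right shift; a NumPy sketch (illustrative only):

>>> import numpy as np
>>> print(np.right_shift(np.array([8, 9, 10]), np.array([1, 2, 3])).tolist())
[4, 2, 1]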
7044
7211
 
7045
7212
  Outputs:
@@ -7069,9 +7236,10 @@ class RightShift(Primitive):
7069
7236
 
7070
7237
  class LogSpace(Primitive):
7071
7238
  r"""
7072
- Returns a one-dimensional tensor of size steps whose values are evenly
7073
- spaced from :math:`base^{start}` to :math:`base^{end}` , inclusive,
7074
- on a logarithmic scale with base.
7239
+ Generates a 1-D Tensor with length `steps`. The tensor's
7240
+ values are uniformly distributed on a logarithmic scale, ranging from
7241
+ :math:`base^{start}` to :math:`base^{end}`, including both endpoints.
7242
+ The logarithmic scale is based on the specified `base`.
7075
7243
 
7076
7244
  .. math::
7077
7245
  \begin{aligned}
@@ -7079,33 +7247,36 @@ class LogSpace(Primitive):
7079
7247
  &output = [base^{start}, base^{start + 1 * step}, ... , base^{start + (steps-2) * step}, base^{end}]
7080
7248
  \end{aligned}
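A NumPy sketch of the same formula (illustrative only):

>>> import numpy as np
>>> print(np.logspace(0, 3, num=4, base=10).tolist())
[1.0, 10.0, 100.0, 1000.0]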
7081
7249
 
7250
+ .. warning::
7251
+ This is an experimental API that is subject to change or deletion.
7252
+
7082
7253
  Args:
7083
7254
  steps (int, optional): The steps must be a non-negative integer. Default: 10.
7084
7255
  base (int, optional): The base must be a non-negative integer. Default: 10.
7085
7256
  dtype (mindspore.dtype, optional): The dtype of output, include mindspore.float16,
7086
- mindspore.float32 or mindspore.float64(for GPU). Default: mindspore.float32.
7257
+ mindspore.float32 or mindspore.float64. Default: mindspore.float32.
7087
7258
 
7088
7259
 
7089
7260
  Inputs:
7090
7261
  - **start** (Tensor) - Start value of interval, with shape of 0-D,
7091
- dtype is float16, float32 or float64(for GPU).
7262
+ dtype is float16, float32 or float64.
7092
7263
  - **end** (Tensor) - End value of interval, with shape of 0-D,
7093
- dtype is float16, float32 or float64(for GPU).
7264
+ dtype is float16, float32 or float64.
7094
7265
 
7095
7266
  Outputs:
7096
- Tensor has the shape as (step, ). Its datatype is set by the attr 'dtype'.
7267
+ Tensor has the shape as :math:`(step, )`. Its datatype is set by the attr 'dtype'.
7097
7268
 
7098
7269
  Raises:
7099
7270
  TypeError: If `input` is not a Tensor.
7100
7271
  TypeError: If `steps` is not an int.
7101
7272
  TypeError: If `base` is not an int.
7102
7273
  TypeError: If `dtype` is not mindspore.float16, mindspore.float32 or
7103
- mindspore.float64(for GPU).
7274
+ mindspore.float64.
7104
7275
  ValueError: If `steps` is not a non-negative integer.
7105
7276
  ValueError: If `base` is not a non-negative integer.
7106
7277
 
7107
7278
  Supported Platforms:
7108
- ``GPU`` ``CPU``
7279
+ ``Ascend`` ``GPU`` ``CPU``
7109
7280
 
7110
7281
  Examples:
7111
7282
  >>> logspace = ops.LogSpace(steps = 10, base = 10, dtype=mindspore.float32)
@@ -7149,6 +7320,13 @@ class NonZero(Primitive):
7149
7320
  >>> print(output)
7150
7321
  [[0 0 0]
7151
7322
  [0 1 0]]
7323
+ >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
7324
+ >>> nonzero = ops.NonZero()
7325
+ >>> output = nonzero(x)
7326
+ >>> print(output)
7327
+ [[0]
7328
+ [2]
7329
+ [4]]
7152
7330
  """
7153
7331
 
7154
7332
  @prim_attr_register
@@ -7158,9 +7336,13 @@ class NonZero(Primitive):
7158
7336
 
7159
7337
  class Tril(Primitive):
7160
7338
  """
7161
- Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices input,
7162
- the other elements of the result tensor out are set to 0.
7163
- The lower triangular part of the matrix is defined as the elements on and below the diagonal.
7339
+ Returns the lower triangular portion of the 2-D matrix or the set of matrices
7340
+ in a batch. The remaining elements of the resulting Tensor are assigned a value of 0.
7341
+ The lower triangular section of the matrix comprises the
7342
+ elements present on and below the main diagonal.
7343
+
7344
+ .. warning::
7345
+ This is an experimental API that is subject to change or deletion.
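The masking rule matches NumPy's `tril`; a small sketch (illustrative only):

>>> import numpy as np
>>> print(np.tril(np.arange(1, 10).reshape(3, 3)).tolist())
[[1, 0, 0], [4, 5, 0], [7, 8, 9]]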
7164
7346
 
7165
7347
  Args:
7166
7348
  diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
@@ -7227,13 +7409,16 @@ class Tril(Primitive):
7227
7409
 
7228
7410
  class IndexFill(Primitive):
7229
7411
  """
7230
- Fills the elements under the dim dimension of the input Tensor with the input value
7231
- by selecting the indices in the order given in index.
7412
+ Fills the elements under the `dim` dimension of the input Tensor `x` with the input `value`
7413
+ by selecting the indices in the order given in `index`.
7414
+
7415
+ .. warning::
7416
+ This is an experimental API that is subject to change or deletion.
7232
7417
 
7233
7418
  Refer to :func:`mindspore.ops.index_fill` for more details.
7234
7419
 
7235
7420
  Supported Platforms:
7236
- ``GPU`` ``CPU``
7421
+ ``Ascend`` ``GPU`` ``CPU``
7237
7422
 
7238
7423
  Examples:
7239
7424
  >>> index_fill = ops.IndexFill()
@@ -7253,12 +7438,68 @@ class IndexFill(Primitive):
7253
7438
  self.init_prim_io_names(inputs=['x', 'dim', 'index', 'value'], outputs=['y'])
7254
7439
 
7255
7440
 
7441
+ class IndexPut(Primitive):
7442
+ r"""
7443
+ According to the indices in `indices`, replaces the corresponding values in `x1` with the values in `x2`.
7444
+
7445
+ Args:
7446
+ accumulate (int): If accumulate is 1, the elements in x2 are added to x1,
7447
+ otherwise the elements in x2 replace the corresponding elements in x1. Must be 0 or 1. Default: 0.
+
7448
+ Inputs:
7449
+ - **x1** (Tensor) - The assigned target tensor, 1-D or higher dimensional.
7450
+ - **x2** (Tensor) - 1-D Tensor of the same type as `x1`. If its size is 1, it will be broadcast.
7451
+ - **indices** (tuple[Tensor], list[Tensor]) - The indices of type int32 or int64, used to index into x1.
7452
+ The tensors in indices should be 1-D, the size of indices should be <= x1.rank, and the tensors in indices
7453
+ should be broadcastable.
7454
+
7455
+ Outputs:
7456
+ Tensor, with the same type and shape as `x1`.
7457
+
7458
+ Raises:
7459
+ TypeError: If the dtype of `x1` is not equal to the dtype of `x2`.
7460
+ TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
7461
+ TypeError: If the dtype of tensors in `indices` are not int32 or int64.
7462
+ TypeError: If the dtype of tensors in `indices` are inconsistent.
7463
+ TypeError: If the dtype of `accumulate` is not int.
7464
+ ValueError: If the rank of `x2` is not 1.
7465
+ ValueError: If size(x2) is not 1 or max size of the tensors in `indices` when rank(x1) == size(indices).
7466
+ ValueError: If size(x2) is not 1 or x1.shape[-1] when rank(x1) > size(indices).
7467
+ ValueError: If the rank of the tensors in `indices` is not 1.
7468
+ ValueError: If the tensors in `indices` are not broadcastable.
7469
+ ValueError: If size(indices) > rank(x1).
7470
+ ValueError: If `accumulate` is not equal to 0 or 1.
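A NumPy sketch of the `accumulate == 1` behaviour on the example below (illustrative only):

>>> import numpy as np
>>> x1 = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.add.at(x1, (np.array([0, 0]), np.array([0, 1])), 3)
>>> print(x1.tolist())
[[4, 5, 3], [4, 5, 6]]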
7471
+
7472
+ Supported Platforms:
7473
+ ``CPU``
7474
+
7475
+ Examples:
7476
+ >>> x1 = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
7477
+ >>> x2 = Tensor(np.array([3]).astype(np.int32))
7478
+ >>> indices = [Tensor(np.array([0, 0]).astype(np.int32)), Tensor(np.array([0, 1]).astype(np.int32))]
7479
+ >>> accumulate = 1
7480
+ >>> op = ops.IndexPut(accumulate = accumulate)
7481
+ >>> output = op(x1, x2, indices)
7482
+ >>> print(output)
7483
+ [[4 5 3]
7484
+ [4 5 6]]
7485
+ """
7486
+
7487
+ @prim_attr_register
7488
+ def __init__(self, accumulate=0):
7489
+ self.accumulate = accumulate
7490
+ validator.check_value_type('accumulate', accumulate, [int], self.name)
7491
+ self.init_prim_io_names(inputs=['x1', 'x2', 'indices'], outputs=['y'])
7492
+
7493
+
7256
7494
  class SegmentMax(Primitive):
7257
7495
  r"""
7258
- Computes the maximum along segments of a tensor.
7496
+ Computes the maximum along segments of a Tensor.
7259
7497
 
7260
- Computes a tensor such that :math:`output_i=max_j(input\_x_j)` where max is over :math:`j` such that
7261
- :math:`segment\_ids[j] == i`. If the max is empty for a given segment ID :math:`i`, :math:`output[i] = 0`.
7498
+ Specifically, it generates a new Tensor `output` such that :math:`output_i=max_j(input\_x_j)`
7499
+ in which the maximum value is obtained from all elements corresponding
7500
+ to :math:`j` that meets :math:`segment\_ids[j] == i`.
7501
+ If a segment contains no elements for a given segment :math:`i`,
7502
+ then the corresponding element in the output Tensor is set to zero: :math:`output[i] = 0`.
7262
7503
 
7263
7504
  Inputs:
7264
7505
  - **input_x** (Tensor) - The input tensor whose dtype is real number and whose rank is not less than 1.
@@ -7283,7 +7524,7 @@ class SegmentMax(Primitive):
7283
7524
  ValueError: If the values of `segment_ids` are not sorted in ascending order.
7284
7525
 
7285
7526
  Supported Platforms:
7286
- ``GPU`` ``CPU``
7527
+ ``Ascend`` ``GPU`` ``CPU``
7287
7528
 
7288
7529
  Examples:
7289
7530
  >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
@@ -7305,10 +7546,13 @@ class SegmentMax(Primitive):
7305
7546
 
7306
7547
  class SegmentMin(Primitive):
7307
7548
  r"""
7308
- Computes the minimum along segments of a tensor.
7549
+ Computes the minimum along segments of a Tensor.
7309
7550
 
7310
- Computes a tensor such that :math:`output_i=min_j(input\_x_j)` where :math:`min` is over :math:`j` such that
7311
- :math:`segment\_ids[j] == i`. If the min is empty for a given segment ID :math:`i`, :math:`output[i] = 0`.
7551
+ Specifically, it generates a new Tensor `output` such that :math:`output_i=min_j(input\_x_j)`
7552
+ in which the minimum value is obtained from all elements corresponding
7553
+ to :math:`j` that meets :math:`segment\_ids[j] == i`.
7554
+ If a segment contains no elements for a given segment :math:`i`,
7555
+ then the corresponding element in the output Tensor is set to zero: :math:`output[i] = 0`.
7312
7556
 
7313
7557
  Inputs:
7314
7558
  - **input_x** (Tensor) - The input tensor whose dtype is real number and whose rank is not less than 1.
@@ -7333,7 +7577,7 @@ class SegmentMin(Primitive):
7333
7577
  ValueError: If the values of `segment_ids` are not sorted in ascending order.
7334
7578
 
7335
7579
  Supported Platforms:
7336
- ``GPU`` ``CPU``
7580
+ ``Ascend`` ``GPU`` ``CPU``
7337
7581
 
7338
7582
  Examples:
7339
7583
  >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
@@ -7355,10 +7599,13 @@ class SegmentMin(Primitive):
7355
7599
 
7356
7600
  class SegmentSum(Primitive):
7357
7601
  r"""
7358
- Computes the sum along segments of a tensor.
7602
+ Computes the sum along segments of a Tensor.
7359
7603
 
7360
- Computes a tensor such that :math:`output_i = \sum_j input\_x_j` where sum is over :math:`j` such that
7361
- :math:`segment\_ids[j] == i`. If the sum is empty for a given segment ID :math:`i`, :math:`output[i] = 0`.
7604
+ Specifically, it generates a new Tensor `output` such that :math:`output_i = \sum_j input\_x_j`
7605
+ in which the sum is obtained from all elements corresponding
7606
+ to :math:`j` that meets :math:`segment\_ids[j] == i`.
7607
+ If a segment contains no elements for a given segment :math:`i`,
7608
+ then the corresponding element in the output Tensor is set to 0: :math:`output[i] = 0`.
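A NumPy sketch of the segment rule, including the empty-segment case (illustrative only):

>>> import numpy as np
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> seg = np.array([0, 0, 2, 2])
>>> out = np.zeros(int(seg.max()) + 1)
>>> np.add.at(out, seg, x)
>>> print(out.tolist())  # segment 1 is empty, hence 0
[3.0, 0.0, 7.0]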
7362
7609
 
7363
7610
  .. warning::
7364
7611
  If the dtype of `input_x` is complex number, the gradient can not be calculated.
@@ -7387,7 +7634,7 @@ class SegmentSum(Primitive):
7387
7634
  ValueError: If the values of `segment_ids` are not sorted in ascending order.
7388
7635
 
7389
7636
  Supported Platforms:
7390
- ``GPU`` ``CPU``
7637
+ ``Ascend`` ``GPU`` ``CPU``
7391
7638
 
7392
7639
  Examples:
7393
7640
  >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
@@ -7420,6 +7667,9 @@ class LeftShift(Primitive):
7420
7667
  &out_{i} =x_{i} << y_{i}
7421
7668
  \end{aligned}
7422
7669
 
7670
+ .. warning::
7671
+ This is an experimental API that is subject to change or deletion.
7672
+
7423
7673
  Inputs:
7424
7674
  - **x1** (Tensor) - The target tensor whose dtype supports int8, int16, int32, int64,
7425
7675
  uint8, uint16, uint32, uint64, will be shifted to the left by x2 in element-wise.
@@ -7437,7 +7687,7 @@ class LeftShift(Primitive):
7437
7687
  ValueError: If `x1` and `x2` could not be broadcast.
7438
7688
 
7439
7689
  Supported Platforms:
7440
- ``GPU`` ``CPU``
7690
+ ``Ascend`` ``GPU`` ``CPU``
7441
7691
 
7442
7692
  Examples:
7443
7693
  >>> left_shift = ops.LeftShift()
@@ -7456,20 +7706,23 @@ class LeftShift(Primitive):
7456
7706
 
7457
7707
  class FillDiagonal(Primitive):
7458
7708
  """
7459
- Fill the main diagonal of a tensor that has at least 2-dimensions.
7460
- When dims>2, all dimensions of input must be of equal length.
7461
- This function modifies the input tensor in-place, and returns the input tensor.
7709
+ Fills the main diagonal of a Tensor in-place with a specified value and returns the result.
7710
+ The input has at least 2 dimensions, and all dimensions of input must be equal in length
7711
+ when the dimension of input is greater than 2.
7462
7712
 
7463
7713
  Args:
7464
- fill_value (float): The fill value.
7465
- wrap (bool, optional): the diagonal `wrapped` after N columns for tall matrices. Default: False.
7714
+ fill_value (float): The value to fill the diagonal of `input_x`.
7715
+ wrap (bool, optional): Controls whether the diagonal elements continue onto the
7716
+ remaining rows in the case of a tall matrix (a matrix with more rows than columns).
7717
+ The example below demonstrates how it works on a tall matrix when `wrap` is set to True.
7718
+ Default: False.
7466
7719
 
7467
7720
  Inputs:
7468
7721
  - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
7469
7722
  The data type must be float32, int32 or int64.
7470
7723
 
7471
7724
  Outputs:
7472
- - **y** (Tensor) - Tensor, has the same shape and data type as the input `x`.
7725
+ - **y** (Tensor) - Tensor, has the same shape and data type as the input `input_x`.
7473
7726
 
7474
7727
  Raises:
7475
7728
  TypeError: If data type of `input_x` is not one of the following: float32, int32, int64.
@@ -7477,7 +7730,7 @@ class FillDiagonal(Primitive):
7477
7730
  ValueError: If the size of each dimension is not equal, when the dimension is greater than 2.
7478
7731
 
7479
7732
  Supported Platforms:
7480
- ``GPU`` ``CPU``
7733
+ ``Ascend`` ``GPU`` ``CPU``
7481
7734
 
7482
7735
  Examples:
7483
7736
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
@@ -7534,6 +7787,9 @@ class HammingWindow(Primitive):
7534
7787
 
7535
7788
  where :math:`N` is the full window size.
7536
7789
 
7790
+ .. warning::
7791
+ This is an experimental API that is subject to change or deletion.
7792
+
7537
7793
  Args:
7538
7794
  periodic (bool, optional): a flag determines whether the returned window trims off
7539
7795
  the last duplicate value from the symmetric window. Default: True.
@@ -7544,14 +7800,14 @@ class HammingWindow(Primitive):
7544
7800
 
7545
7801
  alpha (float, optional): The coefficient :math:`\alpha` in the equation above. Default: 0.54.
7546
7802
  beta (float, optional): The coefficient :math:`\beta` in the equation above. Default: 0.46.
7547
- dtype (:class:`mindspore.dtype`, optional): An optional data type of `mindspore.dtype.float16`,
7548
- `mindspore.dtype.float32` and `mindspore.dtype.float64`. Default: `mindspore.dtype.float32`.
7803
+ dtype (:class:`mindspore.dtype`, optional): An optional data type of `mstype.float16`,
7804
+ `mstype.float32` and `mstype.float64`. Default: `mstype.float32`.
7549
7805
 
7550
7806
  Inputs:
7551
7807
  - **length** (Tensor) - a positive integer tensor controlling the returned window size, must be 1D.
7552
7808
 
7553
7809
  Outputs:
7554
- Tensor, A 1-D tensor containing the window, whose shape is :math:`\text{length}`.
7810
+ Tensor, A 1-D tensor containing the window, whose shape is :math:`(\text{length},)`.
7555
7811
 
7556
7812
  Raises:
7557
7813
  TypeError: If `length` is not a Tensor.
@@ -7564,7 +7820,7 @@ class HammingWindow(Primitive):
7564
7820
  ValueError: If data of `length` is negative.
7565
7821
 
7566
7822
  Supported Platforms:
7567
- ``GPU`` ``CPU``
7823
+ ``Ascend`` ``GPU`` ``CPU``
7568
7824
 
7569
7825
  Examples:
7570
7826
  >>> # case 1: periodic=True.
@@ -7601,12 +7857,15 @@ class HammingWindow(Primitive):
7601
7857
 
7602
7858
  class AffineGrid(Primitive):
7603
7859
  r"""
7604
- Generates a 2D or 3D flow field (sampling grid), given a batch of affine matrices theta.
7860
+ Creates a 2D or 3D flow field (sampling grid) based on a batch of affine matrices `theta`.
7861
+
7862
+ .. warning::
7863
+ This is an experimental API that is subject to change or deletion.
7605
7864
 
7606
7865
  Refer to :func:`mindspore.ops.affine_grid` for more details.
7607
7866
 
7608
7867
  Supported Platforms:
7609
- ``GPU`` ``CPU``
7868
+ ``Ascend`` ``GPU`` ``CPU``
7610
7869
 
7611
7870
  Examples:
7612
7871
  >>> affinegrid = ops.AffineGrid(align_corners=False)
@@ -7631,10 +7890,13 @@ class AffineGrid(Primitive):
7631
7890
 
7632
7891
  class SegmentMean(Primitive):
7633
7892
  r"""
7634
- Computes the mean along segments of a tensor.
7893
+ Computes the mean along segments of a Tensor.
7635
7894
 
7636
- Computes a tensor such that :math:`output_i = mean_j(input\_x_j)` where mean is over :math:`j` such that
7637
- :math:`segment\_ids[j] == i`. If the mean is empty for a given segment ID :math:`i`, :math:`output[i] = 0`.
7895
+ Specifically, it generates a new Tensor `output` such that :math:`output_i=mean_j(input\_x_j)`
7896
+ in which the mean value is obtained from all elements corresponding
7897
+ to :math:`j` that meets :math:`segment\_ids[j] == i`.
7898
+ If a segment contains no elements for a given segment :math:`i`,
7899
+ then the corresponding element in the output Tensor is set to zero: :math:`output[i] = 0`.
7638
7900
 
7639
7901
  .. warning::
7640
7902
  If the dtype of `input_x` is complex number, the gradient can not be calculated.
@@ -7663,7 +7925,7 @@ class SegmentMean(Primitive):
7663
7925
  ValueError: If the values of `segment_ids` are not sorted in ascending order.
7664
7926
 
7665
7927
  Supported Platforms:
7666
- ``GPU`` ``CPU``
7928
+ ``Ascend`` ``GPU`` ``CPU``
7667
7929
 
7668
7930
  Examples:
7669
7931
  >>> x = Tensor([[1, 2, 3], [1, 2, 3], [7, 8, 9]], mstype.float64)
@@ -7685,10 +7947,13 @@ class SegmentMean(Primitive):
 
 class SegmentProd(Primitive):
     r"""
-    Computes the prod along segments of a tensor.
+    Computes the product along segments of a Tensor.
 
-    Computes a tensor such that :math:`output_i = \prod_j input\_x_j` where prod is over :math:`j` such that
-    :math:`segment\_ids[j] == i`. If the prod is empty for a given segment ID :math:`i`, :math:`output[i] = 0`.
+    Specifically, it generates a new Tensor `output` such that :math:`output_i = \prod_j input\_x_j`,
+    where the product is taken over all elements whose index :math:`j` satisfies
+    :math:`segment\_ids[j] == i`.
+    If segment :math:`i` contains no elements,
+    then the corresponding element in the output Tensor is set to 1: :math:`output[i] = 1`.
 
     .. warning::
         If the dtype of `input_x` is a complex number, the gradient cannot be calculated.
@@ -7717,7 +7982,7 @@ class SegmentProd(Primitive):
         ValueError: If the values of `segment_ids` are not sorted in ascending order.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
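The same sketch for SegmentProd, again with illustrative `segment_ids`; the only difference from SegmentMean above is the fill value for an empty segment:

    from mindspore import Tensor, ops
    from mindspore import dtype as mstype

    x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
    segment_ids = Tensor([0, 0, 2], mstype.int64)
    output = ops.SegmentProd()(x, segment_ids)
    # Expected, following the documented semantics:
    # [[ 4. 10. 18.]   element-wise product of rows 0 and 1
    #  [ 1.  1.  1.]   empty segment -> ones
    #  [ 7.  8.  9.]]  row 2 alone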
@@ -7763,7 +8028,44 @@ class TopK(Primitive):
     """
     Finds values and indices of the `k` largest entries along the last dimension.
 
-    Refer to :func:`mindspore.ops.top_k` for more details.
+    .. warning::
+        - If `sorted` is set to False, the aicpu operator will be used, which may reduce performance. In addition,
+          due to different memory layouts and traversal methods on different platforms, the display order of the
+          calculation results may be inconsistent when `sorted` is False.
+
+    If `input_x` is a one-dimensional Tensor, finds the `k` largest entries in the Tensor
+    and outputs their values and indices as Tensors: values[`k`] is the `k`-th largest item in `input_x`,
+    and its index is indices[`k`].
+
+    For a multi-dimensional matrix,
+    calculates the first `k` entries in each row (the corresponding vector along the last dimension), therefore:
+
+    .. math::
+
+        values.shape = indices.shape = input.shape[:-1] + [k].
+
+    If two compared elements are equal, the one with the smaller index value is returned first.
+
+    Args:
+        sorted (bool, optional): If True, the obtained elements will be sorted by value in descending order.
+            If False, the obtained elements will not be sorted. Default: True.
+
+    Inputs:
+        - **input_x** (Tensor) - Input to be computed, data type must be float16, float32 or int32 on CPU,
+          and float16 or float32 on GPU.
+        - **k** (int) - The number of top elements to be computed along the last dimension, constant input is needed.
+
+    Outputs:
+        A tuple consisting of `values` and `indices`.
+
+        - **values** (Tensor) - The `k` largest elements in each slice of the last dimension.
+        - **indices** (Tensor) - The indices of the values within the last dimension of the input.
+
+    Raises:
+        TypeError: If `sorted` is not a bool.
+        TypeError: If `input_x` is not a Tensor.
+        TypeError: If `k` is not an int.
+        TypeError: If dtype of `input_x` is not one of the following: float16, float32 or int32.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
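A brief sketch of the shape rule on a 1-D input; with `sorted=True` the values come back in descending order, and the indices shown are what the documented ordering and tie-breaking imply:

    from mindspore import Tensor, ops
    from mindspore import dtype as mstype

    input_x = Tensor([1.0, 3.0, 2.0, 5.0, 4.0], mstype.float16)
    k = 3
    values, indices = ops.TopK(sorted=True)(input_x, k)
    # values.shape == indices.shape == (3,), i.e. input.shape[:-1] + [k]
    # Expected: values = [5, 4, 3], indices = [3, 4, 1]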
@@ -7793,8 +8095,11 @@ class Bincount(Primitive):
     """
    Counts the number of occurrences of each value in an integer array.
 
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
     Inputs:
-        - **array** (Tensor) - A Tensor of type int32.
+        - **array** (Tensor) - A Tensor of type int32, whose values cannot be less than zero.
         - **size** (Tensor) - A non-negative Tensor of type int32.
         - **weights** (Tensor) - A Tensor with the same shape as `array`, or a length-0 Tensor, in which case it
          acts as if all weights were equal to 1. Must be one of the following types: int32, int64, float32, float64.
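A hedged sketch of the weighted counting described above; with unit weights this reduces to a plain histogram. That entries of `array` greater than or equal to `size` are dropped is an assumption carried over from the usual bincount semantics, not stated in this docstring:

    import mindspore as ms
    from mindspore import Tensor, ops

    array = Tensor([1, 2, 2, 3], ms.int32)
    size = Tensor(4, ms.int32)
    weights = Tensor([0.5, 1.0, 1.0, 2.0], ms.float32)
    bins = ops.Bincount()(array, size, weights)
    # Expected: [0.0, 0.5, 2.0, 2.0] -- bins[i] sums the weights at positions where array == i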
@@ -7827,3 +8132,33 @@ class Bincount(Primitive):
     def __init__(self):
         """Initialize Bincount"""
         self.init_prim_io_names(inputs=['array', 'size', 'weights'], outputs=['bins'])
+
+
+class CountNonZero(Primitive):
+    """
+    Calculates the total number of non-zero entries in the input tensor along the
+    specified dimensions.
+
+    Refer to :func:`mindspore.ops.count_nonzero` for more details.
+
+    Supported Platforms:
+        ``Ascend`` ``CPU``
+
+    Examples:
+        >>> x = Tensor([[0, 0, 1], [1, 1, 2], [0, 0, 1]], dtype=mindspore.int64)
+        >>> countnonzero = ops.CountNonZero(dims=[1])
+        >>> y = countnonzero(x)
+        >>> print(y)
+        [1 3 1]
+    """
+
+    @prim_attr_register
+    def __init__(self, dims=None):
+        """Initialize CountNonZero"""
+        # An unset `dims` is normalized to an empty list.
+        dims = [] if dims is None else dims
+        self.init_prim_io_names(inputs=['x'], outputs=['y'])
+        # `dims` must be an int or a list/tuple of ints.
+        validator.check_value_type('dims', dims, [int, list, tuple], "CountNonZero")
+        if isinstance(dims, (list, tuple)):
+            for i, each in enumerate(dims):
+                validator.check_value_type(f'dims[{i}]', each, [int], "CountNonZero")
+        self.dims = dims
+        self.add_prim_attr("dims", self.dims)
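Finally, a usage note on the default: `dims=None` is normalized to an empty list above, which presumably means reducing over all dimensions (an assumption, not stated in the docstring), so the count covers the whole tensor:

    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor([[0, 0, 1], [1, 1, 2], [0, 0, 1]], ms.int64)
    y = ops.CountNonZero()(x)  # dims defaults to [] -> reduce over every axis (assumed)
    # Expected: 5, the total number of non-zero entries in x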