mindspore 2.0.0a0__cp37-none-any.whl → 2.0.0rc1__cp37-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (693)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +9064 -0
  3. mindspore/__init__.py +4 -2
  4. mindspore/_akg/akg/composite/build_module.py +11 -0
  5. mindspore/_akg/akg/config/repository_cuda.json +11 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -3
  7. mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
  8. mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
  9. mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
  10. mindspore/_check_jit_forbidden_api.py +102 -0
  11. mindspore/_checkparam.py +1066 -1001
  12. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
  13. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
  14. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
  15. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
  16. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  17. mindspore/_extends/parse/__init__.py +5 -3
  18. mindspore/_extends/parse/namespace.py +16 -1
  19. mindspore/_extends/parse/parser.py +107 -22
  20. mindspore/_extends/parse/resources.py +0 -7
  21. mindspore/_extends/parse/standard_method.py +885 -413
  22. mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
  23. mindspore/amp.py +52 -57
  24. mindspore/bin/cache_admin +0 -0
  25. mindspore/bin/cache_server +0 -0
  26. mindspore/boost/boost.py +2 -2
  27. mindspore/boost/boost_cell_wrapper.py +38 -20
  28. mindspore/boost/dim_reduce.py +3 -3
  29. mindspore/boost/group_loss_scale_manager.py +1 -1
  30. mindspore/common/__init__.py +4 -6
  31. mindspore/common/_decorator.py +2 -0
  32. mindspore/common/_register_for_adapter.py +55 -0
  33. mindspore/common/_stub_tensor.py +201 -0
  34. mindspore/common/_utils.py +41 -7
  35. mindspore/common/api.py +215 -141
  36. mindspore/common/dtype.py +8 -1
  37. mindspore/common/dump.py +2 -2
  38. mindspore/common/initializer.py +4 -2
  39. mindspore/common/jit_config.py +17 -13
  40. mindspore/common/mutable.py +33 -13
  41. mindspore/common/parameter.py +23 -21
  42. mindspore/common/seed.py +8 -24
  43. mindspore/common/sparse_tensor.py +62 -41
  44. mindspore/common/tensor.py +852 -1154
  45. mindspore/communication/__init__.py +2 -2
  46. mindspore/communication/_comm_helper.py +11 -4
  47. mindspore/communication/management.py +22 -21
  48. mindspore/config/op_info.config +501 -1008
  49. mindspore/config/super_bar_config.json +512 -0
  50. mindspore/context.py +201 -23
  51. mindspore/dataset/__init__.py +6 -6
  52. mindspore/dataset/audio/__init__.py +7 -7
  53. mindspore/dataset/audio/transforms.py +670 -30
  54. mindspore/dataset/audio/utils.py +47 -4
  55. mindspore/dataset/audio/validators.py +223 -1
  56. mindspore/dataset/callback/ds_callback.py +2 -2
  57. mindspore/dataset/core/config.py +210 -14
  58. mindspore/dataset/core/validator_helpers.py +2 -2
  59. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  60. mindspore/dataset/debug/debug_hook.py +65 -0
  61. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  62. mindspore/dataset/engine/__init__.py +7 -3
  63. mindspore/dataset/engine/cache_client.py +1 -1
  64. mindspore/dataset/engine/datasets.py +322 -66
  65. mindspore/dataset/engine/datasets_audio.py +80 -76
  66. mindspore/dataset/engine/datasets_standard_format.py +51 -38
  67. mindspore/dataset/engine/datasets_text.py +232 -118
  68. mindspore/dataset/engine/datasets_user_defined.py +41 -17
  69. mindspore/dataset/engine/datasets_vision.py +746 -225
  70. mindspore/dataset/engine/graphdata.py +75 -10
  71. mindspore/dataset/engine/iterators.py +45 -5
  72. mindspore/dataset/engine/offload.py +48 -28
  73. mindspore/dataset/engine/validators.py +117 -8
  74. mindspore/dataset/text/__init__.py +6 -5
  75. mindspore/dataset/text/transforms.py +86 -3
  76. mindspore/dataset/text/utils.py +6 -4
  77. mindspore/dataset/text/validators.py +25 -0
  78. mindspore/dataset/transforms/__init__.py +3 -2
  79. mindspore/dataset/transforms/c_transforms.py +1 -1
  80. mindspore/dataset/transforms/transforms.py +2 -2
  81. mindspore/dataset/utils/__init__.py +2 -1
  82. mindspore/dataset/utils/line_reader.py +121 -0
  83. mindspore/dataset/vision/__init__.py +2 -3
  84. mindspore/dataset/vision/c_transforms.py +9 -9
  85. mindspore/dataset/vision/py_transforms.py +5 -5
  86. mindspore/dataset/vision/py_transforms_util.py +2 -0
  87. mindspore/dataset/vision/transforms.py +160 -161
  88. mindspore/dataset/vision/utils.py +3 -3
  89. mindspore/experimental/map_parameter.py +38 -26
  90. mindspore/include/OWNERS +0 -1
  91. mindspore/include/api/callback/callback.h +9 -13
  92. mindspore/include/api/callback/ckpt_saver.h +2 -2
  93. mindspore/include/api/callback/loss_monitor.h +2 -2
  94. mindspore/include/api/callback/lr_scheduler.h +5 -5
  95. mindspore/include/api/callback/time_monitor.h +2 -2
  96. mindspore/include/api/callback/train_accuracy.h +4 -6
  97. mindspore/include/api/cfg.h +19 -6
  98. mindspore/include/api/context.h +44 -9
  99. mindspore/include/api/delegate.h +1 -1
  100. mindspore/include/api/metrics/accuracy.h +2 -2
  101. mindspore/include/api/metrics/metrics.h +4 -3
  102. mindspore/include/api/model.h +9 -4
  103. mindspore/include/api/model_parallel_runner.h +2 -2
  104. mindspore/include/api/net.h +12 -11
  105. mindspore/include/api/serialization.h +19 -3
  106. mindspore/include/api/types.h +3 -3
  107. mindspore/include/dataset/constants.h +7 -0
  108. mindspore/include/dataset/text.h +59 -0
  109. mindspore/include/mindapi/base/type_id.h +1 -0
  110. mindspore/lib/libdnnl.so.2 +0 -0
  111. mindspore/lib/libicudata.so.69 +0 -0
  112. mindspore/lib/libicui18n.so.69 +0 -0
  113. mindspore/lib/libicuuc.so.69 +0 -0
  114. mindspore/lib/libmindspore.so +0 -0
  115. mindspore/lib/libmindspore_backend.so +0 -0
  116. mindspore/lib/libmindspore_common.so +0 -0
  117. mindspore/lib/libmindspore_core.so +0 -0
  118. mindspore/lib/libmindspore_glog.so.0 +0 -0
  119. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  120. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  121. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  122. mindspore/lib/libmindspore_shared_lib.so +0 -0
  123. mindspore/lib/libmpi_adapter.so +0 -0
  124. mindspore/lib/libmpi_collective.so +0 -0
  125. mindspore/lib/libnnacl.so +0 -0
  126. mindspore/lib/libopencv_core.so.4.5 +0 -0
  127. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  128. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  129. mindspore/lib/libps_cache.so +0 -0
  130. mindspore/lib/plugin/ascend/libakg.so +0 -0
  131. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  132. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  133. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  134. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  135. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  136. mindspore/lib/plugin/cpu/libakg.so +0 -0
  137. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  138. mindspore/lib/plugin/{libmindspore_ascend.so → libmindspore_ascend.so.2} +0 -0
  139. mindspore/log.py +1 -1
  140. mindspore/mindrecord/filereader.py +18 -0
  141. mindspore/mindrecord/filewriter.py +197 -34
  142. mindspore/mindrecord/shardreader.py +9 -0
  143. mindspore/mindrecord/shardwriter.py +1 -1
  144. mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
  145. mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
  146. mindspore/mindrecord/tools/csv_to_mr.py +3 -3
  147. mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
  148. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  149. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  150. mindspore/nn/__init__.py +0 -4
  151. mindspore/nn/cell.py +204 -132
  152. mindspore/nn/dynamic_lr.py +1 -1
  153. mindspore/nn/grad/cell_grad.py +7 -6
  154. mindspore/nn/layer/__init__.py +5 -4
  155. mindspore/nn/layer/activation.py +40 -89
  156. mindspore/nn/layer/basic.py +255 -624
  157. mindspore/nn/layer/channel_shuffle.py +7 -6
  158. mindspore/nn/layer/combined.py +1 -1
  159. mindspore/nn/layer/container.py +41 -4
  160. mindspore/nn/layer/conv.py +64 -28
  161. mindspore/nn/layer/dense.py +9 -8
  162. mindspore/nn/layer/embedding.py +27 -25
  163. mindspore/nn/layer/image.py +53 -46
  164. mindspore/nn/layer/math.py +97 -105
  165. mindspore/nn/layer/normalization.py +117 -86
  166. mindspore/nn/layer/padding.py +185 -95
  167. mindspore/nn/layer/pooling.py +817 -414
  168. mindspore/nn/layer/rnn_cells.py +10 -15
  169. mindspore/nn/layer/rnns.py +37 -38
  170. mindspore/nn/layer/thor_layer.py +11 -12
  171. mindspore/nn/layer/timedistributed.py +5 -5
  172. mindspore/nn/layer/transformer.py +701 -0
  173. mindspore/nn/learning_rate_schedule.py +8 -8
  174. mindspore/nn/loss/__init__.py +5 -4
  175. mindspore/nn/loss/loss.py +334 -199
  176. mindspore/nn/optim/ada_grad.py +6 -6
  177. mindspore/nn/optim/adadelta.py +2 -3
  178. mindspore/nn/optim/adafactor.py +4 -5
  179. mindspore/nn/optim/adam.py +126 -62
  180. mindspore/nn/optim/adamax.py +3 -4
  181. mindspore/nn/optim/adasum.py +6 -6
  182. mindspore/nn/optim/asgd.py +2 -2
  183. mindspore/nn/optim/ftrl.py +67 -38
  184. mindspore/nn/optim/lamb.py +4 -5
  185. mindspore/nn/optim/lars.py +2 -2
  186. mindspore/nn/optim/lazyadam.py +43 -4
  187. mindspore/nn/optim/momentum.py +6 -5
  188. mindspore/nn/optim/optimizer.py +3 -1
  189. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  190. mindspore/nn/optim/rmsprop.py +1 -1
  191. mindspore/nn/optim/rprop.py +8 -9
  192. mindspore/nn/optim/sgd.py +19 -13
  193. mindspore/nn/optim/thor.py +10 -15
  194. mindspore/nn/probability/__init__.py +0 -2
  195. mindspore/nn/probability/bijector/bijector.py +4 -4
  196. mindspore/nn/probability/bijector/invert.py +1 -1
  197. mindspore/nn/probability/bijector/softplus.py +2 -2
  198. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  199. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  200. mindspore/nn/probability/distribution/_utils/utils.py +9 -15
  201. mindspore/nn/probability/distribution/bernoulli.py +3 -3
  202. mindspore/nn/probability/distribution/beta.py +1 -1
  203. mindspore/nn/probability/distribution/categorical.py +5 -7
  204. mindspore/nn/probability/distribution/cauchy.py +3 -3
  205. mindspore/nn/probability/distribution/distribution.py +2 -2
  206. mindspore/nn/probability/distribution/exponential.py +2 -2
  207. mindspore/nn/probability/distribution/gamma.py +3 -3
  208. mindspore/nn/probability/distribution/geometric.py +1 -1
  209. mindspore/nn/probability/distribution/gumbel.py +3 -3
  210. mindspore/nn/probability/distribution/half_normal.py +15 -11
  211. mindspore/nn/probability/distribution/laplace.py +16 -13
  212. mindspore/nn/probability/distribution/logistic.py +2 -2
  213. mindspore/nn/probability/distribution/normal.py +1 -1
  214. mindspore/nn/probability/distribution/poisson.py +1 -1
  215. mindspore/nn/probability/distribution/student_t.py +20 -15
  216. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  217. mindspore/nn/probability/distribution/uniform.py +2 -2
  218. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  219. mindspore/nn/reinforcement/tensor_array.py +2 -2
  220. mindspore/nn/sparse/sparse.py +2 -2
  221. mindspore/nn/wrap/cell_wrapper.py +27 -10
  222. mindspore/nn/wrap/grad_reducer.py +2 -2
  223. mindspore/nn/wrap/loss_scale.py +40 -24
  224. mindspore/numpy/array_creations.py +33 -22
  225. mindspore/numpy/array_ops.py +35 -30
  226. mindspore/numpy/logic_ops.py +6 -27
  227. mindspore/numpy/math_ops.py +22 -19
  228. mindspore/numpy/utils.py +1 -1
  229. mindspore/numpy/utils_const.py +108 -58
  230. mindspore/ops/_constants.py +0 -6
  231. mindspore/ops/_grad/__init__.py +2 -1
  232. mindspore/ops/_grad/grad_array_ops.py +86 -117
  233. mindspore/ops/_grad/grad_base.py +23 -1
  234. mindspore/ops/_grad/grad_clip_ops.py +2 -3
  235. mindspore/ops/_grad/grad_comm_ops.py +34 -24
  236. mindspore/ops/_grad/grad_implementations.py +9 -45
  237. mindspore/ops/_grad/grad_inner_ops.py +47 -4
  238. mindspore/ops/_grad/grad_math_ops.py +142 -117
  239. mindspore/ops/_grad/grad_nn_ops.py +71 -165
  240. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  241. mindspore/ops/_grad/grad_sparse.py +7 -6
  242. mindspore/ops/_grad_experimental/__init__.py +1 -0
  243. mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
  244. mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
  245. mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
  246. mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
  247. mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
  248. mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
  249. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  250. mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
  251. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  252. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
  253. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  254. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  255. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
  256. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  257. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  258. mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
  259. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  260. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
  261. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  262. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  263. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  264. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  265. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  266. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  267. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  268. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  269. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  270. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  271. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  272. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  273. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  274. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  275. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  276. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  277. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  278. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  279. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
  280. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  281. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  282. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  283. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  284. mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
  285. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  286. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
  287. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  288. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  289. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  290. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  291. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  292. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
  293. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  294. mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
  295. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  296. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  297. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
  298. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  299. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  300. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
  301. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  302. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  303. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  304. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  305. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  306. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  307. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  308. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  309. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  310. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
  311. mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
  312. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  313. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  314. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  315. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  316. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  317. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  318. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  319. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  320. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  321. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  322. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  323. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  324. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  325. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  326. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  327. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  328. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  329. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  330. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  331. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  332. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  333. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  334. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  335. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  336. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  337. mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
  338. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
  339. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  340. mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
  341. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  342. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  343. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  344. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  345. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  346. mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
  347. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
  348. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  349. mindspore/ops/_op_impl/tbe/__init__.py +27 -611
  350. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  351. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  352. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  353. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
  354. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  355. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  356. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  357. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  358. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  359. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  360. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  361. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  362. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  363. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
  364. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  365. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  366. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  367. mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
  368. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  369. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  370. mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
  371. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
  372. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  373. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  374. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  375. mindspore/ops/_register_for_op.py +1 -0
  376. mindspore/ops/_utils/__init__.py +1 -2
  377. mindspore/ops/_utils/utils.py +19 -40
  378. mindspore/ops/_vmap/vmap_array_ops.py +116 -38
  379. mindspore/ops/_vmap/vmap_base.py +16 -9
  380. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  381. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  382. mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
  383. mindspore/ops/_vmap/vmap_image_ops.py +12 -5
  384. mindspore/ops/_vmap/vmap_math_ops.py +46 -5
  385. mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
  386. mindspore/ops/_vmap/vmap_random_ops.py +1 -1
  387. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  388. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  389. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  390. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  391. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  392. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  393. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  394. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  395. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
  396. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  397. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  398. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  399. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  400. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  401. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  402. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  403. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  404. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  405. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  406. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
  407. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
  408. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  409. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  410. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  411. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  412. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  413. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  414. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
  415. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  416. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
  417. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
  418. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  419. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  420. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  421. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  422. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  423. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  424. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  425. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  426. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  427. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  428. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  429. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  430. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  431. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  432. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  433. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  434. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  435. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  436. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  437. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
  438. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  439. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  440. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  441. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  442. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  443. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  444. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  445. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
  446. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
  447. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  448. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
  449. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  450. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  451. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  452. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  453. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  454. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  455. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  456. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  457. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  458. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
  459. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
  460. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  461. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  462. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  463. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  464. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
  465. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  466. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  467. mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
  468. mindspore/ops/composite/__init__.py +7 -8
  469. mindspore/ops/composite/base.py +101 -47
  470. mindspore/ops/composite/math_ops.py +188 -158
  471. mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
  472. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
  473. mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
  474. mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
  475. mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
  476. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  477. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  478. mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
  479. mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
  480. mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
  481. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
  482. mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
  483. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  484. mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
  485. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
  486. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
  487. mindspore/ops/function/__init__.py +152 -8
  488. mindspore/ops/function/array_func.py +2555 -674
  489. mindspore/ops/function/clip_func.py +209 -13
  490. mindspore/ops/function/debug_func.py +2 -2
  491. mindspore/ops/function/grad/__init__.py +2 -1
  492. mindspore/ops/function/grad/grad_func.py +147 -62
  493. mindspore/ops/function/image_func.py +54 -38
  494. mindspore/ops/function/linalg_func.py +167 -16
  495. mindspore/ops/function/math_func.py +4849 -1492
  496. mindspore/ops/function/nn_func.py +2573 -988
  497. mindspore/ops/function/other_func.py +115 -0
  498. mindspore/ops/function/parameter_func.py +3 -3
  499. mindspore/ops/function/random_func.py +790 -73
  500. mindspore/ops/function/sparse_func.py +98 -78
  501. mindspore/ops/function/sparse_unary_func.py +54 -53
  502. mindspore/ops/function/spectral_func.py +27 -24
  503. mindspore/ops/function/vmap_func.py +22 -2
  504. mindspore/ops/functional.py +97 -37
  505. mindspore/ops/op_info_register.py +70 -28
  506. mindspore/ops/operations/__init__.py +47 -14
  507. mindspore/ops/operations/_csr_ops.py +7 -7
  508. mindspore/ops/operations/_embedding_cache_ops.py +5 -5
  509. mindspore/ops/operations/_grad_ops.py +276 -187
  510. mindspore/ops/operations/_inner_ops.py +319 -113
  511. mindspore/ops/operations/_ms_kernel.py +10 -8
  512. mindspore/ops/operations/_ocr_ops.py +9 -9
  513. mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
  514. mindspore/ops/operations/_quant_ops.py +137 -102
  515. mindspore/ops/operations/_rl_inner_ops.py +121 -60
  516. mindspore/ops/operations/_scalar_ops.py +466 -0
  517. mindspore/ops/operations/_sequence_ops.py +1004 -2
  518. mindspore/ops/operations/_tensor_array.py +10 -11
  519. mindspore/ops/operations/_thor_ops.py +1 -1
  520. mindspore/ops/operations/array_ops.py +801 -466
  521. mindspore/ops/operations/comm_ops.py +51 -49
  522. mindspore/ops/operations/control_ops.py +2 -2
  523. mindspore/ops/operations/custom_ops.py +123 -44
  524. mindspore/ops/operations/debug_ops.py +24 -24
  525. mindspore/ops/operations/image_ops.py +240 -153
  526. mindspore/ops/operations/inner_ops.py +34 -50
  527. mindspore/ops/operations/linalg_ops.py +31 -9
  528. mindspore/ops/operations/math_ops.py +988 -757
  529. mindspore/ops/operations/nn_ops.py +965 -819
  530. mindspore/ops/operations/other_ops.py +51 -40
  531. mindspore/ops/operations/random_ops.py +204 -122
  532. mindspore/ops/operations/rl_ops.py +8 -9
  533. mindspore/ops/operations/sparse_ops.py +254 -93
  534. mindspore/ops/operations/spectral_ops.py +35 -3
  535. mindspore/ops/primitive.py +111 -9
  536. mindspore/parallel/_auto_parallel_context.py +189 -83
  537. mindspore/parallel/_offload_context.py +185 -0
  538. mindspore/parallel/_parallel_serialization.py +99 -7
  539. mindspore/parallel/_ps_context.py +9 -5
  540. mindspore/parallel/_recovery_context.py +1 -1
  541. mindspore/parallel/_tensor.py +7 -1
  542. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  543. mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
  544. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  545. mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
  546. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  547. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
  548. mindspore/parallel/_utils.py +1 -2
  549. mindspore/parallel/algo_parameter_config.py +1 -1
  550. mindspore/parallel/checkpoint_transform.py +37 -34
  551. mindspore/parallel/shard.py +17 -18
  552. mindspore/profiler/common/validator/validate_path.py +2 -2
  553. mindspore/profiler/envprofiling.py +69 -47
  554. mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
  555. mindspore/profiler/parser/base_timeline_generator.py +49 -56
  556. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
  557. mindspore/profiler/parser/hwts_log_parser.py +1 -1
  558. mindspore/profiler/parser/integrator.py +15 -14
  559. mindspore/profiler/parser/minddata_analyzer.py +2 -2
  560. mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
  561. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  562. mindspore/profiler/parser/optime_parser.py +17 -18
  563. mindspore/profiler/parser/profiler_info.py +2 -1
  564. mindspore/profiler/profiling.py +218 -186
  565. mindspore/rewrite/__init__.py +3 -1
  566. mindspore/rewrite/api/node.py +1 -114
  567. mindspore/rewrite/api/node_type.py +3 -0
  568. mindspore/rewrite/api/pattern_engine.py +31 -1
  569. mindspore/rewrite/api/scoped_value.py +4 -4
  570. mindspore/rewrite/api/symbol_tree.py +3 -78
  571. mindspore/rewrite/api/tree_node_helper.py +1 -1
  572. mindspore/rewrite/ast_creator_register.py +1 -0
  573. mindspore/rewrite/ast_helpers/__init__.py +2 -2
  574. mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
  575. mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
  576. mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
  577. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
  578. mindspore/rewrite/namespace.py +0 -2
  579. mindspore/rewrite/node.py +157 -11
  580. mindspore/rewrite/parsers/assign_parser.py +231 -53
  581. mindspore/rewrite/parsers/class_def_parser.py +187 -109
  582. mindspore/rewrite/parsers/for_parser.py +24 -14
  583. mindspore/rewrite/parsers/function_def_parser.py +21 -4
  584. mindspore/rewrite/parsers/if_parser.py +6 -2
  585. mindspore/rewrite/sparsify/__init__.py +0 -0
  586. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  587. mindspore/rewrite/sparsify/sparsify.py +109 -0
  588. mindspore/rewrite/sparsify/utils.py +173 -0
  589. mindspore/rewrite/symbol_tree.py +256 -133
  590. mindspore/rewrite/symbol_tree_builder.py +38 -1
  591. mindspore/run_check/_check_version.py +69 -63
  592. mindspore/run_check/run_check.py +2 -1
  593. mindspore/scipy/linalg.py +10 -114
  594. mindspore/scipy/ops.py +2 -2
  595. mindspore/scipy/ops_wrapper.py +1 -1
  596. mindspore/scipy/optimize/_bfgs.py +1 -1
  597. mindspore/scipy/optimize/_lagrange.py +200 -0
  598. mindspore/scipy/optimize/line_search.py +3 -2
  599. mindspore/scipy/optimize/minimize.py +41 -2
  600. mindspore/scipy/sparse/__init__.py +2 -2
  601. mindspore/scipy/sparse/linalg.py +4 -464
  602. mindspore/scipy/utils.py +1 -1
  603. mindspore/scipy/utils_const.py +7 -1
  604. mindspore/train/__init__.py +1 -1
  605. mindspore/train/_utils.py +28 -5
  606. mindspore/train/amp.py +273 -102
  607. mindspore/train/callback/_backup_and_restore.py +5 -5
  608. mindspore/train/callback/_callback.py +2 -2
  609. mindspore/train/callback/_checkpoint.py +3 -3
  610. mindspore/train/callback/_early_stop.py +3 -3
  611. mindspore/train/callback/_lambda_callback.py +2 -2
  612. mindspore/train/callback/_landscape.py +29 -31
  613. mindspore/train/callback/_loss_monitor.py +3 -3
  614. mindspore/train/callback/_on_request_exit.py +3 -3
  615. mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
  616. mindspore/train/callback/_summary_collector.py +23 -16
  617. mindspore/train/callback/_time_monitor.py +3 -3
  618. mindspore/train/checkpoint_pb2.py +68 -8
  619. mindspore/train/data_sink.py +15 -3
  620. mindspore/train/dataset_helper.py +10 -15
  621. mindspore/train/loss_scale_manager.py +8 -11
  622. mindspore/train/metrics/__init__.py +1 -1
  623. mindspore/train/metrics/bleu_score.py +1 -1
  624. mindspore/train/metrics/confusion_matrix.py +1 -1
  625. mindspore/train/metrics/cosine_similarity.py +1 -1
  626. mindspore/train/metrics/dice.py +2 -2
  627. mindspore/train/metrics/fbeta.py +1 -1
  628. mindspore/train/metrics/hausdorff_distance.py +4 -3
  629. mindspore/train/metrics/mean_surface_distance.py +2 -2
  630. mindspore/train/metrics/occlusion_sensitivity.py +1 -1
  631. mindspore/train/metrics/perplexity.py +1 -1
  632. mindspore/train/metrics/precision.py +1 -1
  633. mindspore/train/metrics/recall.py +1 -1
  634. mindspore/train/metrics/roc.py +2 -2
  635. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  636. mindspore/train/mind_ir_pb2.py +116 -37
  637. mindspore/train/model.py +45 -28
  638. mindspore/train/serialization.py +295 -188
  639. mindspore/train/summary/_summary_adapter.py +1 -1
  640. mindspore/train/summary/summary_record.py +43 -13
  641. mindspore/train/train_thor/convert_utils.py +2 -2
  642. mindspore/train/train_thor/dataset_helper.py +3 -3
  643. mindspore/version.py +1 -1
  644. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
  645. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +648 -574
  646. mindspore/compression/__init__.py +0 -19
  647. mindspore/compression/common/constant.py +0 -124
  648. mindspore/compression/export/__init__.py +0 -19
  649. mindspore/compression/export/quant_export.py +0 -515
  650. mindspore/compression/quant/__init__.py +0 -28
  651. mindspore/compression/quant/qat.py +0 -634
  652. mindspore/compression/quant/quant_utils.py +0 -462
  653. mindspore/compression/quant/quantizer.py +0 -68
  654. mindspore/nn/layer/quant.py +0 -1868
  655. mindspore/nn/layer/rnn_utils.py +0 -90
  656. mindspore/nn/probability/dpn/__init__.py +0 -22
  657. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  658. mindspore/nn/probability/dpn/vae/cvae.py +0 -140
  659. mindspore/nn/probability/dpn/vae/vae.py +0 -124
  660. mindspore/nn/probability/infer/__init__.py +0 -22
  661. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  662. mindspore/nn/probability/infer/variational/svi.py +0 -84
  663. mindspore/nn/probability/toolbox/__init__.py +0 -22
  664. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  665. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
  666. mindspore/nn/probability/transforms/__init__.py +0 -22
  667. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  668. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  669. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  670. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  671. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  672. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  673. mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
  674. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  675. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
  676. mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
  677. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
  678. mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
  679. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
  680. mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
  681. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
  682. mindspore/ops/composite/array_ops.py +0 -241
  683. mindspore/ops/composite/clip_ops.py +0 -134
  684. mindspore/ops/composite/random_ops.py +0 -426
  685. mindspore/ops/composite/vmap_ops.py +0 -38
  686. mindspore/parallel/nn/__init__.py +0 -42
  687. mindspore/parallel/nn/loss.py +0 -22
  688. mindspore/parallel/nn/moe.py +0 -21
  689. mindspore/parallel/nn/op_parallel_config.py +0 -22
  690. mindspore/parallel/nn/transformer.py +0 -31
  691. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  692. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  693. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
@@ -20,23 +20,23 @@ from __future__ import division
 import numpy as np
 
 from mindspore import context
+from mindspore import log as logger
 from mindspore.ops import signature as sig
-from mindspore._checkparam import Validator as validator
-from mindspore._checkparam import Rel
+from mindspore import _checkparam as validator
 from mindspore.common import dtype as mstype
 from mindspore.common.tensor import Tensor
 from mindspore.common._decorator import deprecated
 from mindspore.ops._utils import get_broadcast_shape
 from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
 from mindspore._c_expression import Tensor as Tensor_
-from mindspore.ops._utils import is_shape_unknown
+from mindspore.common._utils import is_shape_unknown
 
 
 def _infer_shape_reduce(x, axis, keep_dims, prim_name):
     """Common infer for reduce operator"""
 
     def reduce_one_axis(one_axis):
-        validator.check_int_range(one_axis, -dim, dim, Rel.INC_LEFT, 'axis', prim_name)
+        validator.check_int_range(one_axis, -dim, dim, validator.INC_LEFT, 'axis', prim_name)
         if one_axis < 0:
             one_axis += dim
         axis_reduce.add(one_axis)
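The hunk above (apparently from mindspore/ops/operations/math_ops.py, judging by the imports and the classes that follow) shows the rc1 migration from the `Validator` class and `Rel` enum to module-level helpers in `mindspore._checkparam`. A minimal sketch of the call-site change, assuming only what the diff itself shows; the wrapper function is illustrative:

```python
from mindspore import _checkparam as validator  # rc1-style import

def check_axis(one_axis, dim, prim_name):
    """Illustrative wrapper: validate that one_axis lies in [-dim, dim)."""
    # 2.0.0a0 spelled this as validator.check_int_range(..., Rel.INC_LEFT, ...)
    # after `from mindspore._checkparam import Validator as validator, Rel`.
    # In 2.0.0rc1 the range constant lives on the module itself.
    validator.check_int_range(one_axis, -dim, dim, validator.INC_LEFT, 'axis', prim_name)
```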
@@ -260,6 +260,7 @@ class Addcdiv(Primitive):
 
     Raises:
         TypeError: If dtype of `x1`, `x2`, `value`, `input_data` is not tensor.
+        TypeError: If dtype of `x1`, `x2`, `value`, `input_data` are not the same.
         ValueError: If `x1` could not be broadcast to `x2`.
         ValueError: If `value` could not be broadcast to `x1/x2`.
         ValueError: If `input_data` could not be broadcast to `value*(x1/x2)`.
@@ -303,9 +304,7 @@ class Addcmul(Primitive):
 
     Raises:
         TypeError: If dtype of `x1`, `x2`, `value`, `input_data` is not tensor.
-        TypeError: If dtype of `input_data` is not one of: float32, float16, int32.
-        TypeError: If dtype of `x1` or `x2` is not one of: float32, float16, int32.
-        TypeError: If dtype of `value` is not one of: float32, float16, int32.
+        TypeError: If dtype of `x1`, `x2`, `value`, `input_data` are not the same.
         ValueError: If `x1` could not be broadcast to `x2`.
         ValueError: If `value` could not be broadcast to `x1` * `x2`.
         ValueError: If `input_data` could not be broadcast to `value*(x1*x2)`.
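Both Raises sections above now require one shared dtype across `x1`, `x2`, `value` and `input_data`. As a rough reference for the broadcast rules they describe, here is a hedged NumPy sketch; the formulas (`input_data + value * x1 / x2` for Addcdiv, `input_data + value * x1 * x2` for Addcmul) are an assumption inferred from the error descriptions, not stated in this diff:

```python
import numpy as np

input_data = np.ones((1, 3), dtype=np.float32)
x1 = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)  # shape (1, 3)
x2 = np.array([[4.0], [5.0]], dtype=np.float32)     # shape (2, 1), broadcasts with x1
value = np.float32(2.0)

# All four operands share one dtype (float32), matching the new TypeError rule.
addcdiv_ref = input_data + value * (x1 / x2)  # value*(x1/x2) broadcast to (2, 3)
addcmul_ref = input_data + value * (x1 * x2)  # value*(x1*x2) broadcast to (2, 3)
```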
@@ -338,8 +337,9 @@ class AddV2(Primitive):
 
     Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are two tensors, the shapes of them should be the same.
+    When the inputs are two tensors, and the shapes of them can be broadcast.
     When the inputs are one tensor and one scalar, the scalar could only be a constant.
+    CPU/Ascend does not support broadcast for now.
 
     .. math::
 
@@ -358,14 +358,14 @@
     and the data type is the one with higher precision or higher digits among the two inputs.
 
     Raises:
-        TypeError: If neither `x` nor `y` is a Tensor .
+        TypeError: If neither `x` nor `y` is a Tensor.
         TypeError: If dtype of `x` or `y` is not in [float16, float32, float64,
             uint8, int8, int16, int32, int64, complex64, complex128].
-        ValueError: If the shape of 'x' and 'y' is not the same.
+        ValueError: If the shape of 'x' and 'y' is not the same for CPU and Ascend.
 
 
     Supported Platforms:
-        ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> from mindspore.ops.operations.math_ops import AddV2
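A quick NumPy illustration of the relaxed shape rule documented above, assuming standard NumPy-style broadcasting on GPU:

```python
import numpy as np

x = np.ones((2, 3), dtype=np.float32)
y = np.ones((1, 3), dtype=np.float32)
# Broadcastable shapes: allowed by AddV2 on GPU per the new docstring;
# CPU/Ascend would still require y.shape == x.shape.
print((x + y).shape)  # (2, 3)
```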
@@ -488,7 +488,7 @@ class _Reduce(PrimitiveWithCheck):
     Definition of base class of reduction class operators.
 
     Args:
-        keep_dims (bool, optional): If true, keep these reduced dimensions and the length is 1.
+        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.
     """
 
@@ -538,18 +538,17 @@
 
 class EuclideanNorm(Primitive):
     """
-    Computes the euclidean norm of elements across dimensions of a tensor.
-    Reduces input along the dimensions given in axis.
+    Calculates the Euclidean norm(aka L2 norm) of a Tensor along the specified axes.
+    The specified `axes` are removed by default.
 
     Args:
-        keep_dims (bool, optional): If true, the reduceed dimensions are retained with length 1.
-            If false, don't keep these dimensions. Default: False.
+        keep_dims (bool, optional): whether to retain the reduced dimensions. If true, retains them with length 1.
+            If false, these dimensions are removed. Default: False.
 
     Inputs:
-        - **x** (Tensor) - The input tensor. Must be one of the following types :float16, float32, float64, int8, int16,
-          int32, int64, complex64, complex128, uint8, uint16, uint32, uint64. The tensor to reduce.
-        - **axes** (Tensor) - The dimensions to reduce. Must be one of the following types: int32, int64.
-          Must be in the range [-rank(x), rank(x)).
+        - **x** (Tensor) - The input Tensor to reduce.
+        - **axes** (Tensor) - The axes to perform reduction on. Must be one of the following types: int32, int64.
+          It must be in range :math:`[-rank(x), rank(x))`.
 
     Outputs:
         Tensor, has the same type as the 'x'.
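A hedged NumPy reference for the EuclideanNorm semantics described above (reduce with an L2 norm over `axes`, dropping them unless `keep_dims` is set):

```python
import numpy as np

x = np.array([[3.0, 4.0], [6.0, 8.0]], dtype=np.float32)
out = np.sqrt(np.sum(np.square(x), axis=1))                      # [ 5. 10.], axis removed
out_kept = np.sqrt(np.sum(np.square(x), axis=1, keepdims=True))  # shape (2, 1)
```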
@@ -590,7 +589,7 @@ class ReduceMean(_Reduce):
 
     Inputs:
         - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
-          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+          :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
           Only constant value is allowed. Must be in the range [-r, r).
 
@@ -664,30 +663,24 @@
 
 class CumulativeLogsumexp(Primitive):
     """
-    Compute the cumulative log-sum-exp of the tensor `x` along `axis` .
-
-    When `exclusive` is set `False`, this operation performs an inclusive cumulative log-sum-exp, which means that the
-    first element of the input is identical to the first element of the output. For example, when takes a tensor
-    [a, b, c] as input, this operation outputs [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))]. When `reverse`
-    is set `True`, the cumulative log-sum-exp is performed in the opposite direction and thus get the output
-    [log(exp(a) + exp(b) + exp(c)), log(exp(b) + exp(c)), c].
-
-    When `exclusive` is set `True`, this operation performs an exclusive cumulative log-sum-exp instead. For example,
-    when takes a tensor [a, b, c] as input, this operation outputs [-inf, a, log(exp(a) * exp(b))]. Note that the
-    neutral element of the log-sum-exp operation is -inf, however, for performance reasons, the minimal value
-    representable by the floating point type is used instead. When `reverse` is set `True`, the cumulative log-sum-exp
-    is performed in the opposite direction and thus get the output [log(exp(b) * exp(c)), c, -inf].
+    Compute the cumulative log-sum-exp of the input tensor `x` along `axis` . For example, with all parameters at
+    default values, if the input `x` is a tensor [a, b, c], the output will be [a, log(exp(a) + exp(b)),
+    log(exp(a) + exp(b) + exp(c))].
 
     Args:
-        exclusive (bool, optional): If true, perform exclusive cumulative log-sum-exp.
-            If false, perform inclusive cumulative log-sum-exp. Default: False.
-        reverse (bool, optional): If true, the cumulative log-sum-exp is performed in the opposite direction.
-            If false, the cumulative log-sum-exp is performed in the forward direction.
-            Default: False.
+        exclusive (bool, optional): If true, the last element will be skipped during the calculation and thus an
+            exclusive cumulative log-sum-exp will be performed. For example, this operation
+            will output [-inf, a, log(exp(a) * exp(b))] with tensor [a, b, c] as the input.
+            Note that the minimal value -inf, for performance reasons, is representable by the
+            floating point type. Default: False.
+        reverse (bool, optional): If true, the function accumulation values will be calculated after the elements of
+            `x` on `axis` are flipped, and the calculation result will be flipped afterwards. For
+            example, this operation will output [log(exp(c) + exp(b) + exp(a)), log(exp(c) +
+            exp(b)), c] with tensor [a, b, c] as the input. Default: False.
 
     Inputs:
-        - **x** (Tensor) - The input tensor. Must be one of the following types: float16, float32, float64.
-          The dimension of `x` must greater than 0.
+        - **x** (Tensor) - The input tensor. Must be one of the following types: float16, float32, float64. The
+          dimension of `x` must greater than 0.
         - **axis** (Tensor) - A 0-D tensor describing the dimension to compute the cumulative product. Must be one of
           the following types: int64, int32, int16. Must be in the range [-rank(x), rank(x)). Default: 0.
 
@@ -703,7 +696,7 @@ class CumulativeLogsumexp(Primitive):
         RuntimeError: If `axis` is out of range [-rank(x), rank(x)).
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``CPU`` ``GPU``
 
     Examples:
         >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
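The rewritten docstring spreads the three modes across the `exclusive` and `reverse` flags. A NumPy sketch of all three, assuming log-sum-exp accumulation throughout (the docstring's `exp(a) * exp(b)` spelling reads like `exp(a) + exp(b)`, consistent with its inclusive examples):

```python
import numpy as np

def cum_logsumexp_ref(x, exclusive=False, reverse=False):
    """Reference for CumulativeLogsumexp over a 1-D array."""
    x = np.asarray(x, dtype=np.float64)
    if reverse:
        x = x[::-1]
    if exclusive:
        # Shift right, seeding with -inf, the neutral element of log-sum-exp.
        x = np.concatenate(([-np.inf], x[:-1]))
    out = np.logaddexp.accumulate(x)
    return out[::-1] if reverse else out

a = [1.0, 2.0, 3.0]
print(cum_logsumexp_ref(a))                  # [a, log(e^a+e^b), log(e^a+e^b+e^c)]
print(cum_logsumexp_ref(a, exclusive=True))  # [-inf, a, log(e^a+e^b)]
print(cum_logsumexp_ref(a, reverse=True))    # [log(e^a+e^b+e^c), log(e^b+e^c), c]
```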
@@ -752,7 +745,7 @@ class ReduceSum(PrimitiveWithCheck):
 
     Inputs:
         - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
-          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+          :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions
           when skip_mode is false. Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).
 
@@ -979,7 +972,7 @@ class ReduceMax(_Reduce):
 
     Inputs:
         - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
-          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+          :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
           Only constant value is allowed. Must be in the range [-r, r).
 
@@ -1063,7 +1056,7 @@ class ReduceMin(_Reduce):
 
     Inputs:
         - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
-          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+          :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
           Only constant value is allowed. Must be in the range [-r, r).
 
@@ -1131,7 +1124,7 @@ class ReduceMin(_Reduce):
 
 class Bucketize(Primitive):
     """
-    Bucketizes 'input' based on 'boundaries'.
+    Bucketizes `input` based on `boundaries`.
 
     Args:
         boundaries (list[float]): A sorted list of floats gives the boundary of the buckets, and no default value.
@@ -1147,7 +1140,7 @@ class Bucketize(Primitive):
         TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> class Bucketize(nn.Cell):
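For intuition about the bucketing rule, a rough NumPy analogue under the usual convention (left boundary inclusive, values above the last boundary mapped to `len(boundaries)`); MindSpore's exact edge handling should be confirmed against the operator itself:

```python
import numpy as np

boundaries = [1.0, 3.0, 5.0, 7.0, 9.0]
x = np.array([0.0, 2.0, 5.0, 10.0])
# searchsorted(..., side='right') puts each value in the first bucket whose
# upper boundary is strictly greater than it.
print(np.searchsorted(boundaries, x, side='right'))  # [0 1 3 5]
```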
@@ -1186,7 +1179,7 @@ class ReduceProd(_Reduce):
 
     Inputs:
         - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
-          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+          :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
           Only constant value is allowed. Must be in the range [-r, r).
 
@@ -1331,7 +1324,10 @@ class Lcm(Primitive):
     """
     Computes least common multiplier of input tensors element-wise.
     The shape of two inputs should be broadcastable, and data type of them should be
-    one of: int32, int64
+    one of: int32, int64.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
 
     Inputs:
         - **x1** (Tensor) - The first input tensor.
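NumPy ships the same element-wise operation, so the documented behaviour can be sanity-checked directly:

```python
import numpy as np

x1 = np.array([7, 8, 9], dtype=np.int64)
x2 = np.array([14, 6, 12], dtype=np.int64)
print(np.lcm(x1, x2))  # [14 24 36], element-wise with broadcasting
```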
@@ -1346,7 +1342,7 @@
         ValueError: If shape of two inputs are not broadcastable.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x1 = Tensor(np.array([7, 8, 9]))
@@ -1402,7 +1398,29 @@ class LpNorm(Primitive):
     .. math::
         output = sum(abs(input)**p)**(1/p)
 
-    Refer to :func:`mindspore.ops.norm` for more details.
+    Args:
+        axis(int,list,tuple): Specifies which dimension or dimensions of input to calculate the norm across.
+        p(int, optional): The order of norm. Default: 2.
+        keep_dims(bool, optional): Whether the output tensors have dim retained or not. Default: False.
+        epsilon(float, optional): A value added to the denominator for numerical stability. Default: 1e-12.
+
+    Inputs:
+        - **input** (Tensor) - Input tensor.
+
+    Outputs:
+        Tensor, has the same dtype as `input`, its shape depends on `axis`. For example, if the shape of input
+        is :math:`(2, 3, 4)`, `axis` is :math:`[0, 1]`, output shape will be :math:`(4,)`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not one of: float16, float32.
+        TypeError: If `p` is not an int.
+        TypeError: If `axis` is not an int, a tuple or a list.
+        TypeError: If `axis` is a tuple or a list, but the element of `axis` is not an int.
+        TypeError: If `keep_dims` is not a bool.
+        ValueError: If the element of `axis` is out of the range :math:`[-r, r)`,
+            where :math:`r` is the rank of `input`.
+        ValueError: If the length of shape of `axis` is bigger than the length of shape of `input`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
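A minimal NumPy sketch of the formula and the shape rule in the new Args/Outputs sections (the `epsilon` stabilizer is omitted for brevity):

```python
import numpy as np

def lp_norm_ref(x, axis, p=2, keep_dims=False):
    return np.power(np.sum(np.abs(x) ** p, axis=axis, keepdims=keep_dims), 1.0 / p)

x = np.ones((2, 3, 4), dtype=np.float32)
print(lp_norm_ref(x, axis=(0, 1)).shape)  # (4,), as in the docstring example
```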
@@ -1452,9 +1470,9 @@ class MatMul(PrimitiveWithCheck):
 
     Inputs:
         - **a** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
-          `transpose_a` is True, its shape must be :math:`(N, C)` after transpose.
+          `transpose_a` is True, its shape must be :math:`(C, N)` after transpose.
         - **b** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
-          `transpose_b` is True, its shape must be :math:`(C, M)` after transpose.
+          `transpose_b` is True, its shape must be :math:`(M, C)` after transpose.
 
     Outputs:
         Tensor, the shape of the output tensor is :math:`(N, M)`.
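A shape check of the corrected wording, reading it as: with `transpose_a=True` the caller passes `a` with shape :math:`(C, N)` and the op transposes it back to :math:`(N, C)` before multiplying:

```python
import numpy as np

N, C, M = 2, 3, 4
a = np.ones((C, N), dtype=np.float32)  # passed pre-transposed
b = np.ones((C, M), dtype=np.float32)
print((a.T @ b).shape)  # (2, 4), i.e. (N, M)
```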
@@ -1561,28 +1579,14 @@ class BatchMatMul(Primitive):
1561
1579
  >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
1562
1580
  >>> batmatmul = ops.BatchMatMul()
1563
1581
  >>> output = batmatmul(x, y)
1564
- >>> print(output)
1565
- [[[[3. 3. 3. 3.]]
1566
- [[3. 3. 3. 3.]]
1567
- [[3. 3. 3. 3.]]
1568
- [[3. 3. 3. 3.]]]
1569
- [[[3. 3. 3. 3.]]
1570
- [[3. 3. 3. 3.]]
1571
- [[3. 3. 3. 3.]]
1572
- [[3. 3. 3. 3.]]]]
1582
+ >>> print(output.shape)
1583
+ (2, 4, 1, 4)
1573
1584
  >>> x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
1574
1585
  >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
1575
1586
  >>> batmatmul = ops.BatchMatMul(transpose_a=True)
1576
1587
  >>> output = batmatmul(x, y)
1577
- >>> print(output)
1578
- [[[[3. 3. 3. 3.]]
1579
- [[3. 3. 3. 3.]]
1580
- [[3. 3. 3. 3.]]
1581
- [[3. 3. 3. 3.]]]
1582
- [[[3. 3. 3. 3.]]
1583
- [[3. 3. 3. 3.]]
1584
- [[3. 3. 3. 3.]]
1585
- [[3. 3. 3. 3.]]]]
1588
+ >>> print(output.shape)
1589
+ (2, 4, 1, 4)
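
The printed shapes follow from multiplying each :math:`1 \times 3` slice by the corresponding :math:`3 \times 4` slice over the :math:`2 \times 4` batch dimensions (with `transpose_a`, each :math:`3 \times 1` slice of `x` becomes :math:`1 \times 3`); plain numpy reproduces the same batched result shape:

>>> print(np.matmul(np.ones([2, 4, 1, 3]), np.ones([2, 4, 3, 4])).shape)
(2, 4, 1, 4)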
1586
1590
  """
1587
1591
 
1588
1592
  @prim_attr_register
@@ -1598,10 +1602,9 @@ class BatchMatMul(Primitive):
1598
1602
 
1599
1603
  class Betainc(Primitive):
1600
1604
  r"""
1601
- Computes the regularized incomplete beta integral
1602
- :math:`I_{x}(a, b)`.
1603
-
1604
- The regularized incomplete beta integral is defined as:
1605
+ Calculates the regularized incomplete beta function
1606
+ :math:`I_{x}(a, b)`. It is defined as the ratio of the incomplete beta function
1607
+ to the complete beta function:
1605
1608
 
1606
1609
  .. math::
1607
1610
 
@@ -1611,14 +1614,23 @@ class Betainc(Primitive):
1611
1614
 
1612
1615
  .. math::
1613
1616
 
1614
- B(x ; a, b)=\int_{0}^{x} t^{a-1}(1-t)^{b-1} d t
1617
+ B(x ; a, b)=\int_{0}^{x} t^{a-1}(1-t)^{b-1} dt
1618
+
1619
+ is the incomplete beta function and
1620
+
1621
+ .. math::
1622
+
1623
+ B(a, b) = \int_0^1 t^{a-1} (1-t)^{b-1} dt
1615
1624
 
1616
- is the incomplete beta function and B(a, b) is the complete beta function
1625
+ is the complete beta function.
1617
1626
 
1618
1627
  Inputs:
1619
- - **a** (Tensor) - A Tensor of types: float32, float64.
1620
- - **b** (Tensor) - A Tensor, must have the same dtype and shape as `a` .
1621
- - **x** (Tensor) - A Tensor, must have the same dtype and shape as `a` .
1628
+ - **a** (Tensor) - Peak location of the beta distribution.
1629
+ A Tensor of types: float32, float64.
1630
+ - **b** (Tensor) - Spread of the beta distribution.
1631
+ A Tensor, must have the same dtype and shape as `a` .
1632
+ - **x** (Tensor) - Upper limit of integration of the incomplete beta function.
1633
+ A Tensor, must have the same dtype and shape as `a` .
1622
1634
 
1623
1635
  Outputs:
1624
1636
  A Tensor, has the same dtype and shape as `a` .
@@ -1633,12 +1645,12 @@ class Betainc(Primitive):
1633
1645
  ``Ascend`` ``GPU`` ``CPU``
1634
1646
 
1635
1647
  Examples:
1636
- >>> a = Tensor(np.array([1, 1, 1]), mindspore.float32)
1637
- >>> b = Tensor(np.array([1, 1, 1]), mindspore.float32)
1638
- >>> x = Tensor(np.array([1, 1,1 ]), mindspore.float32)
1648
+ >>> a = Tensor(np.array([0.3, 0.1, 0.4]), mindspore.float32)
1649
+ >>> b = Tensor(np.array([0.4, 0.5, 0.9]), mindspore.float32)
1650
+ >>> x = Tensor(np.array([0.2, 0.6, 0.5]), mindspore.float32)
1639
1651
  >>> betainc = ops.Betainc()
1640
1652
  >>> print(betainc(a, b, x))
1641
- [1. 1. 1.]
1653
+ [0.41462693 0.8706035 0.7298298 ]
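
If SciPy is available, the same regularized function can be cross-checked with `scipy.special.betainc`, which takes the same `(a, b, x)` argument order:

>>> from scipy import special
>>> print(round(float(special.betainc(0.3, 0.4, 0.2)), 4))  # should agree with the first element above
0.4146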
1642
1654
  """
1643
1655
 
1644
1656
  @prim_attr_register
@@ -1768,7 +1780,7 @@ class AccumulateNV2(Primitive):
1768
1780
  Refer to :func:`mindspore.ops.accumulate_n` for more details.
1769
1781
 
1770
1782
  Supported Platforms:
1771
- ``Ascend``
1783
+ ``Ascend`` ``GPU``
1772
1784
 
1773
1785
  Examples:
1774
1786
  >>> class NetAccumulateNV2(nn.Cell):
@@ -1828,37 +1840,25 @@ class Neg(Primitive):
1828
1840
 
1829
1841
  class InplaceUpdateV2(Primitive):
1830
1842
  r"""
1831
- Updates specified rows with values in `v`.
1832
-
1833
- Note:
1834
- This operator only supports dynamic shape. As for static shape, please use operator 'InplaceUpdate' instead.
1843
+ Updates specified values in `x` to `v` according to `indices`.
1835
1844
 
1836
- Args:
1837
-
1838
- Inputs:
1839
- - **x** (Tensor) - A tensor which to be inplace updated. It can be one of the following data types:
1840
- float32, float16 and int32.
1841
- - **indices** (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of x
1842
- to update with v. It is an int or tuple, whose value is in [0, the first dimension size of x).
1843
- - **v** (Tensor) - A tensor with the same type as `x` and the same dimension size as `x` except
1844
- the first dimension, which must be the same as the size of `indices`.
1845
-
1846
- Outputs:
1847
- Tensor, with the same type and shape as the input `x`.
1845
+ .. warning::
1846
+ This is an experimental API that is subject to change or deletion.
1848
1847
 
1849
- Raises:
1850
- TypeError: If `indices` is neither int nor tuple.
1851
- TypeError: If `indices` is a tuple and its element is not an int.
1848
+ Refer to :func:`mindspore.ops.inplace_update` for more details.
1852
1849
 
1853
1850
  Supported Platforms:
1854
- ``Ascend``
1851
+ ``GPU`` ``CPU``
1855
1852
 
1856
1853
  Examples:
1854
+ >>> import numpy as np
1855
+ >>> import mindspore
1856
+ >>> from mindspore import Tensor, ops
1857
1857
  >>> indices = (0, 1)
1858
1858
  >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
1859
1859
  >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
1860
- >>> inplace_update = ops.InplaceUpdate(indices)
1861
- >>> output = inplace_update(x, v)
1860
+ >>> inplace_update_v2 = ops.InplaceUpdateV2()
1861
+ >>> output = inplace_update_v2(x, indices, v)
1862
1862
  >>> print(output)
1863
1863
  [[0.5 1. ]
1864
1864
  [1. 1.5]
@@ -1876,42 +1876,15 @@ class InplaceUpdateV2(Primitive):
1876
1876
  return output
1877
1877
 
1878
1878
 
1879
- class InplaceUpdate(PrimitiveWithInfer):
1879
+ class InplaceUpdate(Primitive):
1880
1880
  r"""
1881
- Updates specified rows with values in `v`.
1882
-
1883
- Args:
1884
- indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of x
1885
- to update with v. It is an int or tuple, whose value is in [0, the first dimension size of x).
1886
-
1887
- Inputs:
1888
- - **x** (Tensor) - A tensor which to be inplace updated. It can be one of the following data types:
1889
- float32, float16 and int32.
1890
- - **v** (Tensor) - A tensor with the same type as `x` and the same dimension size as `x` except
1891
- the first dimension, which must be the same as the size of `indices`.
1892
-
1893
- Outputs:
1894
- Tensor, with the same type and shape as the input `x`.
1895
-
1896
- Raises:
1897
- TypeError: If `indices` is neither int nor tuple.
1898
- TypeError: If `indices` is a tuple and its element is not an int.
1881
+ The InplaceUpdate interface is deprecated. Please use :class:`mindspore.ops.InplaceUpdateV2` instead.
1899
1882
 
1900
1883
  Supported Platforms:
1901
- ``Ascend`` ``GPU`` ``CPU``
1902
-
1903
- Examples:
1904
- >>> indices = (0, 1)
1905
- >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
1906
- >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
1907
- >>> inplace_update = ops.InplaceUpdate(indices)
1908
- >>> output = inplace_update(x, v)
1909
- >>> print(output)
1910
- [[0.5 1. ]
1911
- [1. 1.5]
1912
- [5. 6. ]]
1884
+ Deprecated
1913
1885
  """
1914
1886
 
1887
+ @deprecated("2.0", "ops.InplaceUpdateV2", False)
1915
1888
  @prim_attr_register
1916
1889
  def __init__(self, indices):
1917
1890
  """Initialize InplaceUpdate"""
@@ -1924,14 +1897,14 @@ class InplaceUpdate(PrimitiveWithInfer):
1924
1897
  validator.check_value_type("item of indices", item, [int], self.name)
1925
1898
 
1926
1899
 
1927
- class InplaceAdd(PrimitiveWithInfer):
1900
+ class InplaceAdd(Primitive):
1928
1901
  """
1929
1902
  Adds `v` into specified rows of `x`. Computes `y` = `x`; y[i,] += `v`.
1930
1903
 
1931
1904
  Refer to :func:`mindspore.ops.inplace_add` for more details.
1932
1905
 
1933
1906
  Supported Platforms:
1934
- ``Ascend`` ``CPU``
1907
+ ``Ascend`` ``GPU`` ``CPU``
1935
1908
 
1936
1909
  Examples:
1937
1910
  >>> import numpy as np
@@ -1959,39 +1932,21 @@ class InplaceAdd(PrimitiveWithInfer):
1959
1932
  for item in self.indices:
1960
1933
  validator.check_value_type("item of indices", item, [int], self.name)
1961
1934
 
1962
- def infer_dtype(self, x_dtype, v_dtype):
1963
- args = {'x': x_dtype, 'v': v_dtype}
1964
- valid_type = [mstype.int32, mstype.float16, mstype.float32]
1965
- validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
1966
- return x_dtype
1967
-
1968
- def infer_shape(self, x_shape, v_shape):
1969
- validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
1970
- validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
1971
- Rel.EQ, self.name)
1972
- for i in self.indices:
1973
- if i < 0 or i >= x_shape[0]:
1974
- raise ValueError(f"For '{self.name}', the value of 'indices' must be "
1975
- f"in [0, {x_shape[0]}), but got {i}.")
1976
- x_rank = len(x_shape)
1977
- for idx in range(x_rank)[1:]:
1978
- validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
1979
-
1980
- return x_shape
1981
-
1982
1935
 
1983
1936
  class InplaceIndexAdd(Primitive):
1984
1937
  """
1985
- Adds tensor `updates` to specified axis and indices of tensor `var`. The axis should be in [0, len(var.dim) - 1],
1986
- and indices should be in [0, the size of `var` - 1] at the axis dimension.
1938
+ Adds Tensor `updates` to specified axis and indices of Tensor `var` element-wise.
1939
+
1940
+ .. warning::
1941
+ This is an experimental API that is subject to change or deletion.
1987
1942
 
1988
1943
  Refer to :func:`mindspore.ops.inplace_index_add` for more details.
1989
1944
 
1990
1945
  Supported Platforms:
1991
- ``CPU``
1946
+ ``Ascend`` ``CPU``
1992
1947
 
1993
1948
  Examples:
1994
- >>> var = Parameter(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
1949
+ >>> var = Parameter(Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32))
1995
1950
  >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
1996
1951
  >>> updates = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
1997
1952
  >>> inplaceIndexAdd = ops.InplaceIndexAdd(axis=0)
@@ -2016,14 +1971,14 @@ class InplaceIndexAdd(Primitive):
2016
1971
  validator.check_value_type('axis', axis, [int], self.name)
2017
1972
 
2018
1973
 
2019
- class InplaceSub(PrimitiveWithInfer):
1974
+ class InplaceSub(Primitive):
2020
1975
  r"""
2021
1976
  Subtracts `v` from specified rows of `x`. Computes :math:`y = x`; :math:`y[i,] -= input\_v`.
2022
1977
 
2023
1978
  Refer to :func:`mindspore.ops.inplace_sub` for more details.
2024
1979
 
2025
1980
  Supported Platforms:
2026
- ``Ascend`` ``CPU``
1981
+ ``Ascend`` ``GPU`` ``CPU``
2027
1982
 
2028
1983
  Examples:
2029
1984
  >>> import numpy as np
@@ -2052,26 +2007,6 @@ class InplaceSub(PrimitiveWithInfer):
2052
2007
  validator.check_value_type("item of indices", item, [int], self.name)
2053
2008
  self.add_prim_attr("indices", self.indices)
2054
2009
 
2055
- def infer_dtype(self, x_dtype, v_dtype):
2056
- args = {'x': x_dtype, 'v': v_dtype}
2057
- valid_type = [mstype.int32, mstype.float16, mstype.float32]
2058
- validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
2059
- return x_dtype
2060
-
2061
- def infer_shape(self, x_shape, v_shape):
2062
- validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
2063
- validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
2064
- Rel.EQ, self.name)
2065
- for i in self.indices:
2066
- if i < 0 or i >= x_shape[0]:
2067
- raise ValueError(f"For '{self.name}', the value of 'indices' must be "
2068
- f"in [0, {x_shape[0]}), but got {i}.")
2069
- x_rank = len(x_shape)
2070
- for idx in range(x_rank)[1:]:
2071
- validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
2072
-
2073
- return x_shape
2074
-
2075
2010
 
2076
2011
  class Sub(_MathBinaryOp):
2077
2012
  r"""
@@ -2119,6 +2054,17 @@ class Mul(_MathBinaryOp):
2119
2054
  [ 4. 10. 18.]
2120
2055
  """
2121
2056
 
2057
+ # Let x/y use the same sig_dtype to enable implicit conversion for compatibility
2058
+ __mindspore_signature__ = (
2059
+ sig.make_sig('x', rw=sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
2060
+ sig.make_sig('y', rw=sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T)
2061
+ )
2062
+
2063
+ @prim_attr_register
2064
+ def __init__(self):
2065
+ """Initialize Xdivy."""
2066
+ self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
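
The shared `sig_dtype.T` in the signature above is what allows a plain Python scalar operand to be implicitly converted to the tensor's dtype; a brief sketch of the resulting behaviour:

>>> mul = ops.Mul()
>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> print(mul(x, 2))  # the int scalar 2 is implicitly converted to float32
[2. 4. 6.]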
2067
+
2122
2068
  @staticmethod
2123
2069
  def _infer_specified_mul_value(x, y):
2124
2070
  """Calculate min/max value for output of Mul op"""
@@ -2285,7 +2231,8 @@ class Sqrt(Primitive):
2285
2231
  out_{i} = \sqrt{x_{i}}
2286
2232
 
2287
2233
  Inputs:
2288
- - **x** (Tensor) - The input tensor with a dtype of Number, its rank must be in [0, 7] inclusive.
2234
+ - **x** (Tensor) - The input tensor with a dtype of Number, the shape is :math:`(N, *)`
2235
+ where :math:`*` means, any number of additional dimensions.
2289
2236
 
2290
2237
  Outputs:
2291
2238
  Tensor, has the same shape and data type as the `x`.
@@ -2429,21 +2376,22 @@ class Logit(Primitive):
2429
2376
  r"""
2430
2377
  Calculates the logit of a tensor element-wise. Elements in `x` are clamped to [eps, 1-eps].
2431
2378
 
2432
- .. math::
2433
- \begin{align}
2434
- y_{i} & = \ln(\frac{z_{i}}{1 - z_{i}}) \\
2435
- z_{i} & = \begin{cases}
2436
- x_{i} & \text{if eps is None} \\
2437
- \text{eps} & \text{if } x_{i} \lt \text{eps} \\
2438
- x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\
2439
- 1 - \text{eps} & \text{if } x_{i} \gt 1 - \text{eps}
2440
- \end{cases}
2441
- \end{align}
2379
+ .. warning::
2380
+ This is an experimental API that is subject to change or deletion.
2442
2381
 
2443
2382
  Refer to :func:`mindspore.ops.logit` for more details.
2444
2383
 
2384
+ Args:
2385
+ eps (float, optional): The epsilon. The input clamp bound is defined as [eps, 1-eps]. Default: -1.0.
2386
+
2387
+ Inputs:
2388
+ - **x** (Tensor) - The input tensor.
2389
+
2390
+ Outputs:
2391
+ Tensor, with the same shape and dtype as the `x`.
2392
+
2445
2393
  Supported Platforms:
2446
- ``GPU`` ``CPU``
2394
+ ``Ascend`` ``GPU`` ``CPU``
2447
2395
 
2448
2396
  Examples:
2449
2397
  >>> x = Tensor(np.array([0.1, 0.2, 0.3]).astype(np.float32))
@@ -2463,10 +2411,33 @@ class Logit(Primitive):
2463
2411
 
2464
2412
  class ReduceStd(Primitive):
2465
2413
  """
2466
- Returns the standard-deviation and mean of each row of the input tensor in the dimension `axis`.
2467
- If `axis` is a list of dimensions, reduce over all of them.
2414
+ Returns the standard-deviation and mean of the input Tensor along
2415
+ dimension(s) specified by `axis`.
2416
+
2417
+ Args:
2418
+ axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
2419
+ Default: (), reduce all dimensions. Only constant value is allowed.
2420
+ Let `r` be the rank of `input_x`; `axis` should be in the range :math:`[-r,r)`.
2421
+ unbiased (bool, optional): Whether to use Bessel’s correction.
2422
+ If True, the unbiased estimate (applying Bessel's correction) is used.
2423
+ If False, the biased estimate is used to calculate the standard deviation.
2424
+ Default: True.
2425
+ keep_dims (bool, optional): Whether the output Tensor has dim retained or not.
2426
+ If True, keep these reduced dimensions specified by `axis` and the length is 1.
2427
+ If False, don't keep these dimensions.
2428
+ Default: False.
2468
2429
 
2469
- Refer to :func:`mindspore.ops.std` for more details.
2430
+ Inputs:
2431
+ - **input_x** (Tensor[Number]) - The input Tensor, it has dtype Number with shape
2432
+ :math:`(N, *)` where :math:`*` means any number of additional dimensions.
2433
+
2434
+ Outputs:
2435
+ Tuple(output_std, output_mean) containing the standard deviation and mean.
2436
+
2437
+ Raises:
2438
+ TypeError: If `keep_dims` is not a bool.
2439
+ TypeError: If `input_x` is not a Tensor.
2440
+ ValueError: If `axis` is not one of the following: int, tuple or list.
2470
2441
 
2471
2442
  Supported Platforms:
2472
2443
  ``Ascend`` ``CPU``
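
A doctest-style sketch of the two outputs under the arguments described above (row-wise reduction with the unbiased estimate; values follow the standard sample-std formula):

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2, 3], [-1, 1, 4]]).astype(np.float32))
>>> output_std, output_mean = ops.ReduceStd(axis=1, unbiased=True, keep_dims=False)(input_x)
>>> print(output_std)
[1.        2.5166113]
>>> print(output_mean)
[2.        1.3333334]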
@@ -2685,7 +2656,7 @@ class Expm1(Primitive):
2685
2656
 
2686
2657
  class Histogram(Primitive):
2687
2658
  """
2688
- Computes the histogram of a tensor.
2659
+ Computes the histogram of Tensor element distribution.
2689
2660
 
2690
2661
  The elements are sorted into equal width bins between `min` and `max`.
2691
2662
  If `min` and `max` are both zero, the minimum and maximum values of the data are used.
@@ -2693,12 +2664,12 @@ class Histogram(Primitive):
2693
2664
  Elements lower than min and higher than max are ignored.
2694
2665
 
2695
2666
  Args:
2696
- bins (int, optional) : Number of histogram bins, optional. Default 100. If specified, must be positive.
2667
+ bins (int, optional): Number of histogram bins. If specified, must be positive. Default: 100.
2697
2668
  min (float, optional): An optional float of the lower end of the range (inclusive). Default value is 0.0.
2698
2669
  max (float, optional): An optional float of the upper end of the range (inclusive). Default value is 0.0.
2699
2670
 
2700
2671
  Inputs:
2701
- - **x** (Tensor) - the input tensor, type support list [float16, float32, int32]
2672
+ - **x** (Tensor) - the input tensor, type support list: [float16, float32, int32].
2702
2673
 
2703
2674
  Outputs:
2704
2675
  Tensor, 1-D Tensor with type int32.
@@ -2712,7 +2683,7 @@ class Histogram(Primitive):
2712
2683
  ValueError: If attr `bins` <= 0.
2713
2684
 
2714
2685
  Supported Platforms:
2715
- ``CPU``
2686
+ ``Ascend`` ``CPU``
2716
2687
 
2717
2688
  Examples:
2718
2689
  >>> x = Tensor([1., 2, 1])
@@ -2730,7 +2701,7 @@ class Histogram(Primitive):
2730
2701
  validator.check_value_type("min", min, [float], self.name)
2731
2702
  validator.check_value_type("max", max, [float], self.name)
2732
2703
  validator.check_positive_int(bins, 'bins', self.name)
2733
- validator.check('min', min, 'max', max, Rel.LE, self.name)
2704
+ validator.check('min', min, 'max', max, validator.LE, self.name)
2734
2705
 
2735
2706
 
2736
2707
  class HistogramFixedWidth(PrimitiveWithInfer):
@@ -2744,7 +2715,7 @@ class HistogramFixedWidth(PrimitiveWithInfer):
2744
2715
 
2745
2716
  Inputs:
2746
2717
  - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
2747
- - **range** (Tensor) - Must have the same data type as `x`, and the shape is (2,).
2718
+ - **range** (Tensor) - Must have the same data type as `x`, and the shape is :math:`(2,)`.
2748
2719
  x <= range[0] will be mapped to histogram[0], x >= range[1] will be mapped to histogram[-1].
2749
2720
 
2750
2721
  Outputs:
@@ -2771,7 +2742,7 @@ class HistogramFixedWidth(PrimitiveWithInfer):
2771
2742
  def __init__(self, nbins, dtype='int32'):
2772
2743
  """Initialize HistogramFixedWidth."""
2773
2744
  self.nbins = validator.check_value_type("nbins", nbins, [int], self.name)
2774
- validator.check_int(nbins, 1, Rel.GE, "nbins", self.name)
2745
+ validator.check_int(nbins, 1, validator.GE, "nbins", self.name)
2775
2746
  valid_values = ['int32']
2776
2747
  self.dtype = validator.check_string(dtype, valid_values, "dtype", self.name)
2777
2748
  self.init_prim_io_names(inputs=['x', 'range'], outputs=['y'])
@@ -2832,7 +2803,10 @@ class Hypot(Primitive):
2832
2803
  """
2833
2804
  Computes the hypotenuse of a right triangle element-wise, treating the two input tensors as its legs.
2834
2805
  The shape of two inputs should be broadcastable, and data type of them should be
2835
- one of: float32, float64
2806
+ one of: float32, float64.
2807
+
2808
+ .. warning::
2809
+ This is an experimental API that is subject to change or deletion.
2836
2810
 
2837
2811
  Inputs:
2838
2812
  - **x1** (Tensor) - The first input tensor.
@@ -2847,7 +2821,7 @@ class Hypot(Primitive):
2847
2821
  ValueError: If shape of two inputs are not broadcastable.
2848
2822
 
2849
2823
  Supported Platforms:
2850
- ``GPU`` ``CPU``
2824
+ ``Ascend`` ``GPU`` ``CPU``
2851
2825
 
2852
2826
  Examples:
2853
2827
  >>> x1 = Tensor(np.array([3., 5., 7.]))
@@ -2867,7 +2841,7 @@ class Hypot(Primitive):
2867
2841
 
2868
2842
  class Heaviside(Primitive):
2869
2843
  r"""
2870
- Computes the Heaviside step function for each element in input.
2844
+ Applies the Heaviside step function to input `x` element-wise.
2871
2845
 
2872
2846
  .. math::
2873
2847
  \text { heaviside }(\text { x, values })=\left\{\begin{array}{ll}
@@ -2876,14 +2850,16 @@ class Heaviside(Primitive):
2876
2850
  1, & \text { if x }>0
2877
2851
  \end{array}\right.
2878
2852
 
2853
+ .. warning::
2854
+ This is an experimental API that is subject to change or deletion.
2855
+
2879
2856
  Inputs:
2880
2857
  - **x** (Tensor) - The input tensor. With real number data type.
2881
2858
  - **values** (Tensor) - The values to use where `x` is zero.
2882
- Values can be broadcast with `x` . 'x' should have the same
2883
- dtype with 'values'.
2859
+ It should be broadcastable with `x` and have the same dtype as `x`.
2884
2860
 
2885
2861
  Outputs:
2886
- Tensor, has the same type as 'x' and 'values'.
2862
+ Tensor, has the same type as `x` and `values`.
2887
2863
 
2888
2864
  Raises:
2889
2865
  TypeError: If `x` or `values` is not Tensor.
@@ -2891,7 +2867,7 @@ class Heaviside(Primitive):
2891
2867
  ValueError: If shape of two inputs are not broadcastable.
2892
2868
 
2893
2869
  Supported Platforms:
2894
- ``GPU`` ``CPU``
2870
+ ``Ascend`` ``GPU`` ``CPU``
2895
2871
 
2896
2872
  Examples:
2897
2873
  >>> x = Tensor(np.array([-1.5, 0., 2.]))
@@ -3020,6 +2996,16 @@ class RealDiv(_MathBinaryOp):
3020
2996
 
3021
2997
  Refer to :func:`mindspore.ops.div` for more details.
3022
2998
 
2999
+ Inputs:
3000
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3001
+ a bool or a tensor whose data type is number or bool.
3002
+ - **y** (Union[Tensor, Number, bool]) - The second input is a number, or
3003
+ a bool when the first input is a tensor, or a tensor whose data type is number or bool.
3004
+
3005
+ Outputs:
3006
+ Tensor, the shape is the same as the one after broadcasting,
3007
+ and the data type is the one with higher precision or higher digits among the two inputs.
3008
+
3023
3009
  Supported Platforms:
3024
3010
  ``Ascend`` ``GPU`` ``CPU``
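
A minimal element-wise sketch of the broadcasting division described above:

>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> realdiv = ops.RealDiv()
>>> print(realdiv(x, y))
[0.25 0.4  0.5 ]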
3025
3011
 
@@ -3060,8 +3046,8 @@ class Div(_MathBinaryOp):
3060
3046
  Inputs:
3061
3047
  - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
3062
3048
  a bool or a tensor whose data type is
3063
- `number <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.html#mindspore.dtype>`_ or
3064
- `bool_ <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.html#mindspore.dtype>`_.
3049
+ `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ or
3050
+ `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_.
3065
3051
  - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
3066
3052
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
3067
3053
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -3152,8 +3138,9 @@ class DivNoNan(Primitive):
3152
3138
  Inputs:
3153
3139
  - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
3154
3140
  a bool or a tensor whose data type is
3155
- `number <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.html#mindspore.dtype>`_ or
3156
- `bool_ <https://www.mindspore.cn/docs/zh-CN/r2.0.0-alpha/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
3141
+ `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ or
3142
+ `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_.
3157
3144
  - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
3158
3145
  a bool when the first input is a bool or a tensor whose data type is number or bool\_.
3159
3146
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -3281,8 +3268,8 @@ class FloorDiv(Primitive):
3281
3268
 
3282
3269
  class TruncateDiv(Primitive):
3283
3270
  """
3284
- Divides the first input tensor by the second input tensor element-wise for integer types, negative numbers will
3285
- round fractional quantities towards zero.
3271
+ Divides the first input tensor by the second input tensor element-wise and rounds the results
3272
+ of division towards zero. Equivalent to C-style integer division.
3286
3273
 
3287
3274
  Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3288
3275
  The inputs must be two tensors or one tensor and one scalar.
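
To make the rounding behaviour concrete, a short sketch contrasting TruncateDiv with the existing ops.FloorDiv on a negative quotient (-7 / 2 = -3.5 truncates to -3 but floors to -4):

>>> x = Tensor(np.array([-7, 7]), mindspore.int32)
>>> y = Tensor(np.array([2, 2]), mindspore.int32)
>>> print(ops.TruncateDiv()(x, y))
[-3  3]
>>> print(ops.FloorDiv()(x, y))
[-4  3]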
@@ -3308,7 +3295,7 @@ class TruncateDiv(Primitive):
3308
3295
  TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
3309
3296
 
3310
3297
  Supported Platforms:
3311
- ``Ascend`` ``CPU`` ``GPU``
3298
+ ``Ascend`` ``GPU`` ``CPU``
3312
3299
 
3313
3300
  Examples:
3314
3301
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
@@ -3343,7 +3330,7 @@ class TruncateMod(Primitive):
3343
3330
  - When the elements of input exceed 2048, the accuracy of operator cannot guarantee the requirement of
3344
3331
  double thousandths in the mini form.
3345
3332
  - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
3346
- - If shape is expressed as (D1,D2... ,Dn), then D1\*D2... \*DN<=1000000,n<=8.
3333
+ - If shape is expressed as :math:`(D1, D2, ..., Dn)`, then :math:`D1*D2*...*Dn<=1000000` and :math:`n<=8`.
3347
3334
 
3348
3335
  Inputs:
3349
3336
  - **x** (Union[Tensor, numbers.Number, bool]) - The first input is a number, or a bool,
@@ -3361,7 +3348,7 @@ class TruncateMod(Primitive):
3361
3348
  ValueError: If the shape `x` and `y` cannot be broadcasted to each other.
3362
3349
 
3363
3350
  Supported Platforms:
3364
- ``Ascend`` ``CPU`` ``GPU``
3351
+ ``Ascend`` ``GPU`` ``CPU``
3365
3352
 
3366
3353
  Examples:
3367
3354
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
@@ -3700,26 +3687,13 @@ class Sinc(Primitive):
3700
3687
  r"""
3701
3688
  Computes the normalized sinc of input.
3702
3689
 
3703
- Refer to :func:`mindspore.ops.sinc` for more details.
3704
-
3705
- .. math::
3706
-
3707
- y_i = \begin{cases}1 & \text{ if } x_i= 0\\ \frac{sin(\pi x_i)}{x_i} &
3708
- \text{ otherwise } \end{cases}
3709
-
3710
- Inputs:
3711
- - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
3712
-
3713
- Outputs:
3714
- Tensor, has the same shape as the `x`. The dtype of output is float32 when dtype of `x` is in
3715
- [uint8, uint8, uint16, int16, uint32, int32, uint64, int64, bool]. Otherwise output has the
3716
- same dtype as the `x`.
3690
+ .. warning::
3691
+ This is an experimental API that is subject to change or deletion.
3717
3692
 
3718
- Raises:
3719
- TypeError: If `x` is not a Tensor.
3693
+ Refer to :func:`mindspore.ops.sinc` for more details.
3720
3694
 
3721
3695
  Supported Platforms:
3722
- ``GPU`` ``CPU``
3696
+ ``Ascend`` ``GPU`` ``CPU``
3723
3697
 
3724
3698
  Examples:
3725
3699
  >>> import mindspore
@@ -3776,6 +3750,42 @@ class _LogicBinaryOp(_BinaryOp):
3776
3750
  return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
3777
3751
 
3778
3752
 
3753
+ class Quantile(Primitive):
3754
+ r"""
3755
+ Computes the q-th quantiles of all elements in the input tensor, doing a linear interpolation when the
3756
+ q-th quantile lies between two data points.
3757
+
3758
+ Refer to :func:`mindspore.ops.quantile` and :func:`mindspore.ops.nanquantile` for more details.
3759
+
3760
+ Supported Platforms:
3761
+
3762
+
3763
+ Examples:
3764
+ >>> quantile = ops.Quantile()
3765
+ >>> input = Tensor(np.array([0.0700, -0.5446, 0.9214]), mindspore.float32)
3766
+ >>> q = Tensor(np.array([0, 0.5, 1]), mindspore.float32)
3767
+ >>> output = quantile(input, q)
3768
+ >>> print(output)
3769
+ [-0.5446 0.07 0.9214]
3770
+ """
3771
+
3772
+ @prim_attr_register
3773
+ def __init__(self, dim=None, keep_dims=False, ignore_nan=False):
3774
+ """Initialize Quantile"""
3775
+ if dim is not None:
3776
+ validator.check_value_type("dim", dim, [int], self.name)
3777
+ else:
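+ # 10000 appears to act as an out-of-range sentinel for "no dim given" (reduce over all elements).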
3778
+ self.add_prim_attr("dim", 10000)
3779
+ if keep_dims is not None:
3780
+ validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
3781
+ else:
3782
+ self.add_prim_attr("keep_dims", False)
3783
+ if ignore_nan is not None:
3784
+ validator.check_value_type("ignore_nan", ignore_nan, [bool], self.name)
3785
+ else:
3786
+ self.add_prim_attr("ignore_nan", False)
3787
+
3788
+
3779
3789
  class Equal(Primitive):
3780
3790
  r"""
3781
3791
  Computes the equivalence between two tensors element-wise.
@@ -3869,12 +3879,12 @@ class EqualCount(PrimitiveWithInfer):
3869
3879
  Inputs:
3870
3880
  - **x** (Tensor) - The first input tensor. If the data type and shape of `y` are determined, then `x`
3871
3881
  must be the same as `y`, and vice versa.
3872
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
3882
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
3873
3883
  - **y** (Tensor) - The second input tensor. If the data type and shape of `x` are determined, then `y`
3874
3884
  must be the same as `x`, and vice versa.
3875
3885
 
3876
3886
  Outputs:
3877
- Tensor, with the type same as input tensor and shape as (1,).
3887
+ Tensor, with the type same as input tensor and shape as :math:`(1,)`.
3878
3888
 
3879
3889
  Raises:
3880
3890
  TypeError: If `x` or `y` is not a Tensor.
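
For reference, a minimal sketch of the single-element output (a count of the positions where the two inputs agree):

>>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
>>> equal_count = ops.EqualCount()
>>> print(equal_count(x, y))
[2]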
@@ -3998,10 +4008,19 @@ class GreaterEqual(PrimitiveWithCheck):
3998
4008
 
3999
4009
  class Lerp(Primitive):
4000
4010
  """
4001
- Calculate the linear interpolation between two tensors based on the weight parameter.
4011
+ Performs a linear interpolation of two tensors `start` and `end` based on a float or tensor `weight`.
4002
4012
 
4003
4013
  Refer to :func:`mindspore.ops.lerp` for more details.
4004
4014
 
4015
+ Inputs:
4016
+ - **start** (Tensor) - The tensor with the starting points. Data type must be float16 or float32.
4017
+ - **end** (Tensor) - The tensor with the ending points. Data type must be the same as `start`.
4018
+ - **weight** (Union[float, Tensor]) - The weight for the interpolation formula. Must be a float
4019
+ or a scalar tensor with float16 or float32 data type.
4020
+
4021
+ Outputs:
4022
+ Tensor, has the same type and shape as input `start`.
4023
+
4005
4024
  Supported Platforms:
4006
4025
  ``Ascend`` ``GPU`` ``CPU``
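
The computation is output = start + weight * (end - start); a short sketch with a scalar float weight:

>>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
>>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
>>> lerp = ops.Lerp()
>>> print(lerp(start, end, 0.5))
[5.5 6.  6.5 7. ]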
4007
4026
 
@@ -4016,7 +4035,6 @@ class Lerp(Primitive):
4016
4035
 
4017
4036
  @prim_attr_register
4018
4037
  def __init__(self):
4019
- super().__init__("Lerp")
4020
4038
  self.init_prim_io_names(inputs=['start', 'end', 'weight'], outputs=['output'])
4021
4039
 
4022
4040
 
@@ -4024,7 +4042,10 @@ class Gcd(Primitive):
4024
4042
  """
4025
4043
  Computes greatest common divisor of input tensors element-wise.
4026
4044
  The shape of two inputs should be broadcastable, and data type of them should be
4027
- one of: int32, int64
4045
+ one of: int32, int64.
4046
+
4047
+ .. warning::
4048
+ This is an experimental API that is subject to change or deletion.
4028
4049
 
4029
4050
  Inputs:
4030
4051
  - **x1** (Tensor) - The first input tensor.
@@ -4032,14 +4053,14 @@ class Gcd(Primitive):
4032
4053
 
4033
4054
  Outputs:
4034
4055
  Tensor, the shape is the same as the one after broadcasting, and the data type is one
4035
- with higher digits in the two inputs.
4056
+ with higher precision in the two inputs.
4036
4057
 
4037
4058
  Raises:
4038
4059
  TypeError: If data type `x1` or `x2` is not int32 or int64.
4039
4060
  ValueError: If shape of two inputs are not broadcastable.
4040
4061
 
4041
4062
  Supported Platforms:
4042
- ``GPU`` ``CPU``
4063
+ ``Ascend`` ``GPU`` ``CPU``
4043
4064
 
4044
4065
  Examples:
4045
4066
  >>> x1 = Tensor(np.array([7, 8, 9]))
@@ -4186,24 +4207,13 @@ class LogicalXor(Primitive):
4186
4207
  r"""
4187
4208
  Computes the "logical XOR" of two tensors element-wise.
4188
4209
 
4189
- .. math::
4190
-
4191
- out_{i} = x_{i} \oplus y_{i}
4192
-
4193
- Inputs:
4194
- - **x** (Tensor) - The first input is a tensor whose data type is bool.
4195
- - **y** (Tensor) - The second input is a the tensor to compute XOR with the first input.
4196
- Datatype must be bool.
4197
-
4198
- Outputs:
4199
- Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4210
+ .. warning::
4211
+ This is an experimental API that is subject to change or deletion.
4200
4212
 
4201
- Raises:
4202
- TypeError: If neither `x` nor `y` is a Tensor whose data type is bool.
4203
- ValueError: If the shape of two inputs cannot be broadcast.
4213
+ Refer to :func:`mindspore.ops.logical_xor` for more details.
4204
4214
 
4205
4215
  Supported Platforms:
4206
- ``CPU``
4216
+ ``Ascend`` ``CPU``
4207
4217
 
4208
4218
  Examples:
4209
4219
  >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
@@ -4295,10 +4305,10 @@ class FloatStatus(Primitive):
4295
4305
 
4296
4306
  Inputs:
4297
4307
  - **x** (Tensor) - The input tensor. The data type must be float16, float32 or float64.
4298
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4308
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4299
4309
 
4300
4310
  Outputs:
4301
- Tensor, has the shape of `(1,)`, and the dtype is `mindspore.dtype.float32`.
4311
+ Tensor, has the shape of :math:`(1,)`, and the dtype is `mindspore.dtype.float32`.
4302
4312
 
4303
4313
  Raises:
4304
4314
  TypeError: If dtype of `x` is not in [float16, float32, float64].
@@ -4324,13 +4334,13 @@ class NPUAllocFloatStatus(Primitive):
4324
4334
  """
4325
4335
  Allocates a flag to store the overflow status.
4326
4336
 
4327
- The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
4337
+ The flag is a tensor whose shape is :math:`(8,)` and data type is `mindspore.dtype.float32`.
4328
4338
 
4329
4339
  Note:
4330
4340
  Please refer to the Examples of :class:`mindspore.ops.NPUGetFloatStatus`.
4331
4341
 
4332
4342
  Outputs:
4333
- Tensor, has the shape of `(8,)`.
4343
+ Tensor, has the shape of :math:`(8,)`.
4334
4344
 
4335
4345
  Supported Platforms:
4336
4346
  ``Ascend``
@@ -4345,16 +4355,17 @@ class NPUAllocFloatStatus(Primitive):
4345
4355
  @prim_attr_register
4346
4356
  def __init__(self):
4347
4357
  """Initialize NPUAllocFloatStatus"""
4358
+ logger.warning("The 'NPUAllocFloatStatus' operator will be deprecated in the future. Please don't use it.")
4348
4359
 
4349
4360
 
4350
4361
  class NPUGetFloatStatus(Primitive):
4351
4362
  """
4352
- :class:`mindspore.ops.NPUGetFloatStatus` updates the flag which is
4363
+ `mindspore.ops.NPUGetFloatStatus` updates the flag which is
4353
4364
  the output tensor of :class:`mindspore.ops.NPUAllocFloatStatus` with the latest overflow status.
4354
4365
 
4355
4366
 
4356
4367
  Note:
4357
- The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
4368
+ The flag is a tensor whose shape is :math:`(8,)` and data type is `mindspore.dtype.float32`.
4358
4369
  If the sum of the flag equals to 0, there is no overflow happened. If the sum of the
4359
4370
  flag is bigger than 0, there is overflow happened.
4360
4371
  In addition, there are strict sequencing requirements for use, i.e., before
@@ -4414,6 +4425,7 @@ class NPUGetFloatStatus(Primitive):
4414
4425
  @prim_attr_register
4415
4426
  def __init__(self):
4416
4427
  """Initialize NPUGetFloatStatus"""
4428
+ logger.warning("The 'NPUGetFloatStatus' operator will be deprecated in the future. Please don't use it.")
4417
4429
 
4418
4430
 
4419
4431
  class NPUClearFloatStatus(Primitive):
@@ -4477,61 +4489,226 @@ class NPUClearFloatStatus(Primitive):
4477
4489
  @prim_attr_register
4478
4490
  def __init__(self):
4479
4491
  """Initialize NPUClearFloatStatus"""
4492
+ logger.warning("The 'NPUClearFloatStatus' operator will be deprecated in the future. Please don't use it.")
4480
4493
 
4481
4494
 
4482
- class Cos(Primitive):
4483
- r"""
4484
- Computes cosine of input element-wise.
4485
-
4486
- Refer to :func:`mindspore.ops.cos` for more details.
4487
-
4488
- Supported Platforms:
4489
- ``Ascend`` ``GPU`` ``CPU``
4490
-
4491
- Examples:
4492
- >>> cos = ops.Cos()
4493
- >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
4494
- >>> output = cos(x)
4495
- >>> print(output)
4496
- [0.971338 0.6748758 0.95233357 0.9959527]
4495
+ class NPUGetFloatStatusV2(Primitive):
4497
4496
  """
4497
+ Gets the flag that stores the overflow status. This flag is located in a register at a
4498
+ fixed address on the `Ascend` device, and overflow information is automatically
4499
+ written to this register.
4500
+ The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
4501
+ If the value of the flag is zero, no overflow has occurred; otherwise, an overflow happened.
4502
+ When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
4503
+ reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
4504
+ status after the network execution is completed.
4498
4505
 
4499
- @prim_attr_register
4500
- def __init__(self):
4501
- """Initialize Cos"""
4506
+ Note:
4507
+ - In order to avoid mis-optimization by the compiler, additional input is added to
4508
+ this operator. The input is defined as a shape of: math:`(8,)` and data type of
4509
+ `mindspore.dtype.int32` Tensor, meaningless.
4510
+ - Since this op lacks contextual dependencies with parameters in the network,
4511
+ :class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
4502
4512
 
4513
+ Inputs:
4514
+ Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
4515
+ data type is `mindspore.dtype.int32`, and has no actual meaning.
4516
+ Usually use the output of `NPUClearFloatStatusV2`.
4503
4517
 
4504
- class ACos(Primitive):
4505
- r"""
4506
- Computes arccosine of input tensors element-wise.
4518
+ Outputs:
4519
+ Tensor, shape and data type are the same as input. If all are zero, it means no overflow, otherwise, overflow.
4507
4520
 
4508
- Refer to :func:`mindspore.ops.acos` for more details.
4521
+ Raises:
4522
+ TypeError: If `x` is not a Tensor.
4523
+ TypeError: If dtype of `x` is not int32.
4524
+ ValueError: If shape of `x` is not equal to :math:`(8,)`.
4509
4525
 
4510
4526
  Supported Platforms:
4511
- ``Ascend`` ``GPU`` ``CPU``
4527
+ ``Ascend``
4512
4528
 
4513
4529
  Examples:
4514
- >>> acos = ops.ACos()
4515
- >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
4516
- >>> output = acos(x)
4517
- >>> print(output)
4518
- [0.737726 1.5307857 1.2661036 0.9764105]
4530
+ >>> import mindspore as ms
4531
+ >>> import numpy as np
4532
+ >>> from mindspore import ops, nn, Tensor
4533
+ >>> from mindspore.ops.operations.math_ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
4534
+ >>> class Net(nn.Cell):
4535
+ ... def __init__(self):
4536
+ ... super().__init__()
4537
+ ... self.clear_status = NPUClearFloatStatusV2()
4538
+ ... self.get_status = NPUGetFloatStatusV2()
4539
+ ... self.sub = ops.Sub()
4540
+ ... self.neg = ops.Neg()
4541
+ ... self.not_equal = ops.NotEqual()
4542
+ ... self.reduce_any = ops.ReduceAny(keep_dims=False)
4543
+ ... self.base = Tensor([0], dtype=ms.int32)
4544
+ ...
4545
+ ... def construct(self, x):
4546
+ ... init = Tensor([0]*8, dtype=ms.int32)
4547
+ ... clear_status = self.clear_status(init)
4548
+ ... x = ops.depend(x, clear_status)
4549
+ ... res = self.sub(x, self.neg(x))
4550
+ ... init = ops.depend(init, res)
4551
+ ... get_status = self.get_status(init)
4552
+ ... flag = self.not_equal(self.base, get_status)
4553
+ ... overflow = self.reduce_any(flag)
4554
+ ... return overflow
4555
+ ...
4556
+ >>> value = 65504
4557
+ >>> data = np.full((2, 3), value, dtype=np.float16)
4558
+ >>> x = Tensor(data, dtype=ms.float16)
4559
+ >>> net = Net()
4560
+ >>> res = net(x)
4561
+ >>> print(res)
4562
+ True
4563
+ >>> value = 10
4564
+ >>> data = np.full((2, 3), value, dtype=np.float16)
4565
+ >>> x = Tensor(data, dtype=ms.float16)
4566
+ >>> net = Net()
4567
+ >>> res = net(x)
4568
+ >>> print(res)
4569
+ False
4519
4570
  """
4520
4571
 
4521
4572
  @prim_attr_register
4522
4573
  def __init__(self):
4523
- """Initialize ACos"""
4524
- self.init_prim_io_names(inputs=['x'], outputs=['y'])
4574
+ """Initialize NPUGetFloatStatusV2"""
4525
4575
 
4526
4576
 
4527
- class Sin(Primitive):
4528
- r"""
4529
- Computes sine of the input element-wise.
4530
4577
 
4531
- Refer to :func:`mindspore.ops.sin` for more details.
4578
+ class NPUClearFloatStatusV2(Primitive):
4579
+ """
4580
+ Clears the flag that stores the overflow status. This flag is located in a register at a
4581
+ fixed address on the `Ascend` device, and overflow information is automatically
4582
+ written to this register.
4583
+ The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
4584
+ If the value of the flag is zero, no overflow has occurred; otherwise, an overflow happened.
4585
+ When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
4586
+ reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
4587
+ status after the network execution is completed.
4532
4588
 
4533
- Supported Platforms:
4534
- ``Ascend`` ``GPU`` ``CPU``
4589
+ Note:
4590
+ - In order to avoid mis-optimization by the compiler, additional input and output are added to
4591
+ this operator. The input and output are defined as a shape of :math:`(8,)` and data type of
4592
+ `mindspore.dtype.int32` Tensor, meaningless.
4593
+ - Since this op lacks contextual dependencies with parameters in the network,
4594
+ :class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
4595
+
4596
+ Inputs:
4597
+ Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
4598
+ data type is `mindspore.dtype.int32`, and has no actual meaning.
4599
+
4600
+ Outputs:
4601
+ Tensor, shape and data type are the same as input, meaningless.
4602
+
4603
+ Raises:
4604
+ TypeError: If `x` is not a Tensor.
4605
+ TypeError: If dtype of `x` is not int32.
4606
+ ValueError: If shape of `x` is not equal to :math:`(8,)`.
4607
+
4608
+ Supported Platforms:
4609
+ ``Ascend``
4610
+
4611
+ Examples:
4612
+ >>> import mindspore as ms
4613
+ >>> import numpy as np
4614
+ >>> from mindspore import ops, nn, Tensor
4615
+ >>> from mindspore.ops.operations.math_ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
4616
+ >>> class Net(nn.Cell):
4617
+ ... def __init__(self):
4618
+ ... super().__init__()
4619
+ ... self.clear_status = NPUClearFloatStatusV2()
4620
+ ... self.get_status = NPUGetFloatStatusV2()
4621
+ ... self.sub = ops.Sub()
4622
+ ... self.neg = ops.Neg()
4623
+ ... self.not_equal = ops.NotEqual()
4624
+ ... self.reduce_any = ops.ReduceAny(keep_dims=False)
4625
+ ... self.base = Tensor([0], dtype=ms.int32)
4626
+ ...
4627
+ ... def construct(self, x):
4628
+ ... init = Tensor([0]*8, dtype=ms.int32)
4629
+ ... clear_status = self.clear_status(init)
4630
+ ... x = ops.depend(x, clear_status)
4631
+ ... res = self.sub(x, self.neg(x))
4632
+ ... init = ops.depend(init, res)
4633
+ ... get_status = self.get_status(init)
4634
+ ... flag = self.not_equal(self.base, get_status)
4635
+ ... overflow = self.reduce_any(flag)
4636
+ ... return overflow
4637
+ ...
4638
+ >>> value = 65504
4639
+ >>> data = np.full((2, 3), value, dtype=np.float16)
4640
+ >>> x = Tensor(data, dtype=ms.float16)
4641
+ >>> net = Net()
4642
+ >>> res = net(x)
4643
+ >>> print(res)
4644
+ True
4645
+ >>> value = 10
4646
+ >>> data = np.full((2, 3), value, dtype=np.float16)
4647
+ >>> x = Tensor(data, dtype=ms.float16)
4648
+ >>> net = Net()
4649
+ >>> res = net(x)
4650
+ >>> print(res)
4651
+ False
4652
+ """
4653
+
4654
+ @prim_attr_register
4655
+ def __init__(self):
4656
+ """Initialize NPUClearFloatStatusV2"""
4657
+
4658
+
4659
+ class Cos(Primitive):
4660
+ r"""
4661
+ Computes cosine of input element-wise.
4662
+
4663
+ Refer to :func:`mindspore.ops.cos` for more details.
4664
+
4665
+ Supported Platforms:
4666
+ ``Ascend`` ``GPU`` ``CPU``
4667
+
4668
+ Examples:
4669
+ >>> cos = ops.Cos()
4670
+ >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
4671
+ >>> output = cos(x)
4672
+ >>> print(output)
4673
+ [0.971338 0.6748758 0.95233357 0.9959527]
4674
+ """
4675
+
4676
+ @prim_attr_register
4677
+ def __init__(self):
4678
+ """Initialize Cos"""
4679
+
4680
+
4681
+ class ACos(Primitive):
4682
+ r"""
4683
+ Computes arccosine of input tensors element-wise.
4684
+
4685
+ Refer to :func:`mindspore.ops.acos` for more details.
4686
+
4687
+ Supported Platforms:
4688
+ ``Ascend`` ``GPU`` ``CPU``
4689
+
4690
+ Examples:
4691
+ >>> acos = ops.ACos()
4692
+ >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
4693
+ >>> output = acos(x)
4694
+ >>> print(output)
4695
+ [0.737726 1.5307857 1.2661036 0.9764105]
4696
+ """
4697
+
4698
+ @prim_attr_register
4699
+ def __init__(self):
4700
+ """Initialize ACos"""
4701
+ self.init_prim_io_names(inputs=['x'], outputs=['y'])
4702
+
4703
+
4704
+ class Sin(Primitive):
4705
+ r"""
4706
+ Computes sine of the input element-wise.
4707
+
4708
+ Refer to :func:`mindspore.ops.sin` for more details.
4709
+
4710
+ Supported Platforms:
4711
+ ``Ascend`` ``GPU`` ``CPU``
4535
4712
 
4536
4713
  Examples:
4537
4714
  >>> sin = ops.Sin()
@@ -4683,7 +4860,7 @@ class Sign(Primitive):
4683
4860
 
4684
4861
  Inputs:
4685
4862
  - **x** (Tensor) - The input tensor.
4686
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4863
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4687
4864
 
4688
4865
  Outputs:
4689
4866
  Tensor, has the same shape and dtype as the `x`.
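
A brief sketch of the mapping (negative, zero and positive elements map to -1, 0 and 1 respectively):

>>> sign = ops.Sign()
>>> x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
>>> print(sign(x))
[[ 1.  0. -1.]]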
@@ -4717,11 +4894,11 @@ class Round(Primitive):
4717
4894
  ``Ascend`` ``GPU`` ``CPU``
4718
4895
 
4719
4896
  Examples:
4720
- >>> x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
4721
- >>> round = ops.Round()
4722
- >>> output = round(x)
4723
- >>> print(output)
4724
- [ 1. 2. 2. 2. -4.]
4897
+ >>> x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
4898
+ >>> round = ops.Round()
4899
+ >>> output = round(x)
4900
+ >>> print(output)
4901
+ [ 1. 2. 2. 2. -4.]
4725
4902
  """
4726
4903
 
4727
4904
  @prim_attr_register
@@ -4781,7 +4958,7 @@ class Atanh(Primitive):
4781
4958
  Computes inverse hyperbolic tangent of the input element-wise.
4782
4959
 
4783
4960
  .. warning::
4784
- This is an experimental prototype that is subject to change and/or deletion.
4961
+ This is an experimental API that is subject to change or deletion.
4785
4962
 
4786
4963
  Refer to :func:`mindspore.ops.atanh` for more details.
4787
4964
 
@@ -4809,7 +4986,7 @@ class Atan2(_MathBinaryOp):
4809
4986
  Refer to :func:`mindspore.ops.atan2` for more details.
4810
4987
 
4811
4988
  Supported Platforms:
4812
- ``Ascend`` ``CPU`` ``GPU``
4989
+ ``Ascend`` ``GPU`` ``CPU``
4813
4990
 
4814
4991
  Examples:
4815
4992
  >>> x = Tensor(np.array([0, 1]), mindspore.float32)
@@ -4819,6 +4996,7 @@ class Atan2(_MathBinaryOp):
4819
4996
  >>> print(output)
4820
4997
  [0. 0.7853982]
4821
4998
  """
4999
+
4822
5000
  @prim_attr_register
4823
5001
  def __init__(self):
4824
5002
  """Initialize Atan2"""
@@ -4840,7 +5018,7 @@ class SquareSumAll(Primitive):
4840
5018
 
4841
5019
  Inputs:
4842
5020
  - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
4843
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5021
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4844
5022
  - **y** (Tensor) - The input tensor has the same type and shape as the `x`.
4845
5023
 
4846
5024
  Outputs:
@@ -4881,7 +5059,7 @@ class BitwiseAnd(_BitwiseBinaryOp):
4881
5059
  Refer to :func:`mindspore.ops.bitwise_and` for more details.
4882
5060
 
4883
5061
  Supported Platforms:
4884
- ``Ascend`` ``CPU`` ``GPU``
5062
+ ``Ascend`` ``GPU`` ``CPU``
4885
5063
 
4886
5064
  Examples:
4887
5065
  >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
@@ -4900,7 +5078,7 @@ class BitwiseOr(_BitwiseBinaryOp):
4900
5078
  Refer to :func:`mindspore.ops.bitwise_or` for more details.
4901
5079
 
4902
5080
  Supported Platforms:
4903
- ``Ascend`` ``CPU`` ``GPU``
5081
+ ``Ascend`` ``GPU`` ``CPU``
4904
5082
 
4905
5083
  Examples:
4906
5084
  >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
@@ -4919,7 +5097,7 @@ class BitwiseXor(_BitwiseBinaryOp):
4919
5097
  Refer to :func:`mindspore.ops.bitwise_xor` for more details.
4920
5098
 
4921
5099
  Supported Platforms:
4922
- ``Ascend`` ``CPU`` ``GPU``
5100
+ ``Ascend`` ``GPU`` ``CPU``
4923
5101
 
4924
5102
  Examples:
4925
5103
  >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
@@ -4935,16 +5113,10 @@ class BesselI0(Primitive):
4935
5113
  """
4936
5114
  Computes BesselI0 of input element-wise.
4937
5115
 
4938
- Inputs:
4939
- - **x** (Tensor) - The shape of tensor is
4940
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4941
- Data type must be float16, float32 or float64.
4942
-
4943
- Outputs:
4944
- Tensor, has the same shape as `x`.
5116
+ .. warning::
5117
+ This is an experimental API that is subject to change or deletion.
4945
5118
 
4946
- Raises:
4947
- TypeError: If `x` is not a Tensor of float16, float32 or float64.
5119
+ Refer to :func:`mindspore.ops.bessel_i0` for more details.
4948
5120
 
4949
5121
  Supported Platforms:
4950
5122
  ``GPU`` ``CPU``
@@ -4954,28 +5126,22 @@ class BesselI0(Primitive):
4954
5126
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
4955
5127
  >>> output = bessel_i0(x)
4956
5128
  >>> print(output)
4957
- [1.014452 1.179784 1.0241697 1.0020261]
5129
+ [1.0144521 1.1797839 1.0241698 1.0020262]
4958
5130
  """
4959
5131
 
4960
5132
  @prim_attr_register
4961
5133
  def __init__(self):
4962
- """Initialize BesselI0"""
5134
+ self.init_prim_io_names(inputs=['x'], outputs='y')
4963
5135
 
4964
5136
 
4965
5137
  class BesselI1(Primitive):
4966
5138
  """
4967
5139
  Computes BesselI1 of input element-wise.
4968
5140
 
4969
- Inputs:
4970
- - **x** (Tensor) - The shape of tensor is
4971
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4972
- Data type must be float16 or float32.
4973
-
4974
- Outputs:
4975
- Tensor, has the same shape as `x`.
5141
+ .. warning::
5142
+ This is an experimental API that is subject to change or deletion.
4976
5143
 
4977
- Raises:
4978
- TypeError: If `x` is not a Tensor of float16, float32 or float64.
5144
+ Refer to :func:`mindspore.ops.bessel_i1` for more details.
4979
5145
 
4980
5146
  Supported Platforms:
4981
5147
  ``GPU`` ``CPU``
@@ -5005,8 +5171,7 @@ class BesselI0e(Primitive):
5005
5171
  where bessel_i0 is Bessel function of the first kind with 0 order.
5006
5172
 
5007
5173
  Inputs:
5008
- - **x** (Tensor) - The shape of tensor is
5009
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5174
+ - **x** (Tensor) - The input tensor.
5010
5175
  Data type must be float16, float32 or float64.
5011
5176
 
5012
5177
  Outputs:
@@ -5045,8 +5210,7 @@ class BesselI1e(Primitive):
5045
5210
  where bessel_i1 is Bessel function of the first kind with 1 order.
5046
5211
 
5047
5212
  Inputs:
5048
- - **x** (Tensor) - The shape of tensor is
5049
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5213
+ - **x** (Tensor) - The input tensor.
5050
5214
  Data type must be float16, float32 or float64.
5051
5215
 
5052
5216
  Outputs:
@@ -5077,9 +5241,11 @@ class BesselK0(Primitive):
5077
5241
  r"""
5078
5242
  Computes BesselK0 of input element-wise.
5079
5243
 
5244
+ .. warning::
5245
+ This is an experimental API that is subject to change or deletion.
5246
+
5080
5247
  Inputs:
5081
- - **x** (Tensor) - The shape of tensor is
5082
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5248
+ - **x** (Tensor) - The input tensor.
5083
5249
  Data type must be float16, float32, float64.
5084
5250
 
5085
5251
  Outputs:
@@ -5108,9 +5274,11 @@ class BesselK1(Primitive):
5108
5274
  r"""
5109
5275
  Computes BesselK1 of input element-wise.
5110
5276
 
5277
+ .. warning::
5278
+ This is an experimental API that is subject to change or deletion.
5279
+
5111
5280
  Inputs:
5112
- - **x** (Tensor) - The shape of tensor is
5113
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5281
+ - **x** (Tensor) - The input tensor.
5114
5282
  Data type must be float16, float32, float64.
5115
5283
 
5116
5284
  Outputs:
@@ -5139,9 +5307,11 @@ class BesselK0e(Primitive):
5139
5307
  """
5140
5308
  Computes BesselK0e of input element-wise.
5141
5309
 
5310
+ .. warning::
5311
+ This is an experimental API that is subject to change or deletion.
5312
+
5142
5313
  Inputs:
5143
- - **x** (Tensor) - The shape of tensor is
5144
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5314
+ - **x** (Tensor) - The input tensor.
5145
5315
  Data type must be float16, float32, float64.
5146
5316
 
5147
5317
  Outputs:
@@ -5170,9 +5340,11 @@ class BesselK1e(Primitive):
5170
5340
  """
5171
5341
  Computes BesselK1e of input element-wise.
5172
5342
 
5343
+ .. warning::
5344
+ This is an experimental API that is subject to change or deletion.
5345
+
5173
5346
  Inputs:
5174
- - **x** (Tensor) - The shape of tensor is
5175
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5347
+ - **x** (Tensor) - The input tensor.
5176
5348
  Data type must be float16, float32, float64.
5177
5349
 
5178
5350
  Outputs:
@@ -5201,9 +5373,11 @@ class BesselJ0(Primitive):
5201
5373
  """
5202
5374
  Computes BesselJ0 of input element-wise.
5203
5375
 
5376
+ .. warning::
5377
+ This is an experimental API that is subject to change or deletion.
5378
+
5204
5379
  Inputs:
5205
- - **x** (Tensor) - The shape of tensor is
5206
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5380
+ - **x** (Tensor) - The input tensor.
5207
5381
  Data type must be float16, float32 or float64.
5208
5382
 
5209
5383
  Outputs:
@@ -5233,9 +5407,11 @@ class BesselJ1(Primitive):
5233
5407
  """
5234
5408
  Computes BesselJ1 of input element-wise.
5235
5409
 
5410
+ .. warning::
5411
+ This is an experimental API that is subject to change or deletion.
5412
+
5236
5413
  Inputs:
5237
- - **x** (Tensor) - The shape of tensor is
5238
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5414
+ - **x** (Tensor) - The input tensor.
5239
5415
  Data type must be float16, float32 or float64.
5240
5416
 
5241
5417
  Outputs:
@@ -5265,9 +5441,11 @@ class BesselY0(Primitive):
5265
5441
  """
5266
5442
  Computes BesselY0 of input element-wise.
5267
5443
 
5444
+ .. warning::
5445
+ This is an experimental API that is subject to change or deletion.
5446
+
5268
5447
  Inputs:
5269
- - **x** (Tensor) - The shape of tensor is
5270
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5448
+ - **x** (Tensor) - The input tensor.
5271
5449
  Data type must be float16, float32 or float64.
5272
5450
 
5273
5451
  Outputs:
@@ -5297,9 +5475,11 @@ class BesselY1(Primitive):
5297
5475
  """
5298
5476
  Computes BesselY1 of input element-wise.
5299
5477
 
5478
+ .. warning::
5479
+ This is an experimental API that is subject to change or deletion.
5480
+
5300
5481
  Inputs:
5301
- - **x** (Tensor) - The shape of tensor is
5302
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5482
+ - **x** (Tensor) - The input tensor.
5303
5483
  Data type must be float16, float32 or float64.
5304
5484
 
5305
5485
  Outputs:
@@ -5473,7 +5653,7 @@ class MatrixInverse(Primitive):
5473
5653
  ValueError: If the dimension of `x` is less than 2.
5474
5654
 
5475
5655
  Supported Platforms:
5476
- ``GPU`` ``CPU``
5656
+ ``Ascend`` ``GPU`` ``CPU``
5477
5657
 
5478
5658
  Examples:
5479
5659
  >>> x = Tensor(np.array([[[-0.710504 , -1.1207525],
@@ -5498,9 +5678,9 @@ class MatrixInverse(Primitive):
5498
5678
 
5499
5679
  class MatrixPower(Primitive):
5500
5680
  """
5501
- Computes the n-th power of a batch of square matrices.
5502
- If n = 0, it returns a batch of identity matrices. If n is negative, it
5503
- returns the inverse of each matrix (if invertible) raised to the power of abs(n).
5681
+ Calculates the n-th power of a batch of square matrices.
5682
+ When n equals 0, it returns a group of identity matrices. If n is negative,
5683
+ it computes the inverse of each matrix (if possible) raised to the power of abs(n).
5504
5684
 
5505
5685
  Args:
5506
5686
  n (int) : The exponent, a required int.
@@ -5521,7 +5701,7 @@ class MatrixPower(Primitive):
5521
5701
  ValueError: If n is negative but got input x has singular matrices.
5522
5702
 
5523
5703
  Supported Platforms:
5524
- ``Ascend`` ``CPU``
5704
+
5525
5705
 
5526
5706
  Examples:
5527
5707
  >>> x = Tensor([[[0, 1], [-1, 0]], [[1, 0], [0, -1]]], dtype=ms.float32)
@@ -5542,12 +5722,12 @@ class MatrixPower(Primitive):
5542
5722
 
5543
5723
  class MatrixDeterminant(Primitive):
5544
5724
  """
5545
- Computes the determinant of one or more square matrices.
5725
+ Calculates the value of the determinant for one or more square matrices.
5546
5726
 
5547
- Refer to :func:`mindspore.ops.matrix_determinant` for more details.
5727
+ Refer to :func:`mindspore.ops.det` for more details.
5548
5728
 
5549
5729
  Supported Platforms:
5550
- ``GPU`` ``CPU``
5730
+
5551
5731
 
5552
5732
  Examples:
5553
5733
  >>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
@@ -5566,12 +5746,12 @@ class MatrixDeterminant(Primitive):
5566
5746
 
5567
5747
  class LogMatrixDeterminant(Primitive):
5568
5748
  """
5569
- Computes the sign and the log of the absolute value of the determinant of one or more square matrices.
5749
+ Calculates the sign and logarithm of the determinant of one or more square matrices.
5570
5750
 
5571
- Refer to :func:`mindspore.ops.log_matrix_determinant` for more details.
5751
+ Refer to :func:`mindspore.ops.slogdet` for more details.
5572
5752
 
5573
5753
  Supported Platforms:
5574
- ``Ascend`` ``GPU`` ``CPU``
5754
+
5575
5755
 
5576
5756
  Examples:
5577
5757
  >>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
@@ -5599,13 +5779,13 @@ class MatrixLogarithm(Primitive):
5599
5779
  Must be one of the following types:complex64, complex128. And shape must be 2D-7D.
5600
5780
 
5601
5781
  Outputs:
5602
- - **y** (Tensor), has the same shape and type as input.
5782
+ - **y** (Tensor) - has the same shape and type as input.
5603
5783
 
5604
5784
  Raises:
5605
5785
  TypeError: If `x` is not a Tensor.
5606
5786
  TypeError: If dtype of `x` is not one of: complex64, complex128.
5607
5787
  ValueError: If the dimension of `x` is less to 2.
5608
- ValueError: If the inner two dimension is not equal.
5788
+ ValueError: If the size of last two dimensions are not equal.
5609
5789
 
5610
5790
  Supported Platforms:
5611
5791
  ``Ascend`` ``CPU``
@@ -5627,7 +5807,7 @@ class MatrixLogarithm(Primitive):
5627
5807
 
5628
5808
  class IndexAdd(Primitive):
5629
5809
  """
5630
- Adds tensor `y` to specified axis and indices of tensor `x`. The axis should be in [0, len(x.dim) - 1],
5810
+ Adds tensor `y` to specified axis and indices of tensor `x`. The axis should be in [-len(x.dim), len(x.dim) - 1],
5631
5811
  and indices should be in [0, the size of `x` - 1] at the axis dimension.
5632
5812
 
5633
5813
  Args:
@@ -5770,25 +5950,29 @@ class ComplexAbs(Primitive):
5770
5950
  r"""
5771
5951
  Returns a Tensor that contains the magnitudes of the input.
5772
5952
 
5773
- The complex numbers in input must be of the form a + bj, where a is the real part and b is the imaginary part.
5953
+ The complex numbers in input must be of the form :math:`a + bj`,
5954
+ where :math:`a` is the real part and :math:`b` is the imaginary part.
5774
5955
 
5775
5956
  .. math::
5776
5957
 
5777
- y = \sqrt{a^2+b^2}.
5958
+ y = \sqrt{a^2+b^2}
5959
+
5960
+ .. warning::
5961
+ This is an experimental API that is subject to change or deletion.
5778
5962
 
5779
5963
  Inputs:
5780
5964
  - **x** (Tensor) - A Tensor, types: complex64, complex128.
5781
5965
 
5782
5966
  Outputs:
5783
- - **y** (Tensor) - Tensor, has the same shape as x. If the type of x is complex64, the type of y is float32.
5784
- If the type of x is complex128, the type of y is float64.
5967
+ Tensor, has the same shape as x. If the type of x is complex64, the type of output is float32.
5968
+ If the type of x is complex128, the type of output is float64.
5785
5969
 
5786
5970
  Raises:
5787
5971
  TypeError: If the input is not a Tensor.
5788
5972
  TypeError: If the input type is not complex64 or complex128.
5789
5973
 
5790
5974
  Supported Platforms:
5791
- ``GPU`` ``CPU``
5975
+ ``Ascend`` ``GPU`` ``CPU``
5792
5976
 
5793
5977
  Examples:
5794
5978
  >>> x = Tensor(np.asarray(np.complex(3+4j)), mindspore.complex64)
@@ -5819,7 +6003,7 @@ class Real(Primitive):
5819
6003
  TypeError: If the input is not a Tensor.
5820
6004
 
5821
6005
  Supported Platforms:
5822
- ``GPU`` ``CPU``
6006
+ ``Ascend`` ``GPU`` ``CPU``
5823
6007
 
5824
6008
  Examples:
5825
6009
  >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
@@ -5839,6 +6023,9 @@ class Complex(Primitive):
5839
6023
  """
5840
6024
  Returns a complex Tensor from the real part and the imag part.
5841
6025
 
6026
+ .. warning::
6027
+ This is an experimental API that is subject to change or deletion.
6028
+
5842
6029
  Inputs:
5843
6030
  - **real** (Tensor) - The real input tensor. types: float32, float64.
5844
6031
  - **imag** (Tensor) - The imag input tensor. types: float32, float64.
@@ -5851,7 +6038,7 @@ class Complex(Primitive):
5851
6038
  TypeError: If the dtypes of two inputs are not same.
5852
6039
 
5853
6040
  Supported Platforms:
5854
- ``GPU`` ``CPU``
6041
+ ``Ascend`` ``GPU`` ``CPU``
5855
6042
 
5856
6043
  Examples:
5857
6044
  >>> real = Tensor(np.array([1]), mindspore.float32)
@@ -5883,7 +6070,7 @@ class Imag(Primitive):
5883
6070
  TypeError: If the input is not a Tensor.
5884
6071
 
5885
6072
  Supported Platforms:
5886
- ``GPU`` ``CPU``
6073
+ ``Ascend`` ``GPU`` ``CPU``
5887
6074
 
5888
6075
  Examples:
5889
6076
  >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
@@ -5902,21 +6089,14 @@ class Imag(Primitive):
5902
6089
  class Angle(Primitive):
5903
6090
  """
5904
6091
  Returns the element-wise argument of a complex tensor.
5905
- The elements in input are considered to be complex numbers of the form a+bj, where a is the real part and b
5906
- is the imaginary part. The argument returned by this function is of the form atan2(b,a).
5907
-
5908
- Inputs:
5909
- - **input** (Tensor) - The input tensor. types: complex64, complex128.
5910
6092
 
5911
- Outputs:
5912
- Tensor, has the float32 or float64 type and the same shape as input.
6093
+ .. warning::
6094
+ This is an experimental API that is subject to change or deletion.
5913
6095
 
5914
- Raises:
5915
- TypeError: If `input` is not a Tensor.
5916
- TypeError: If the dtype of input is not one of: complex64, complex128.
6096
+ Refer to :func:`mindspore.ops.angle` for more details.
5917
6097
 
5918
6098
  Supported Platforms:
5919
- ``CPU``
6099
+ ``Ascend`` ``GPU`` ``CPU``
5920
6100
 
5921
6101
  Examples:
5922
6102
  >>> input = Tensor([-1.5 + 7.8j, 3 + 5.75j], mindspore.complex64)
@@ -5961,18 +6141,18 @@ class TridiagonalMatMul(Primitive):
5961
6141
  Inputs:
5962
6142
  - **superdiag** (Tensor) - Superdiagonals of Tridiagonal Matrices to the left of multiplication.
5963
6143
  Data types must be: float16, float32, double, complex64, complex128.
5964
- The shape is [..., 1, M].
6144
+ The shape is :math:`(..., 1, M)`.
5965
6145
  Last element is ignored.
5966
6146
  - **maindiag** (Tensor) - Maindiagonals of Tridiagonal Matrices to the left of multiplication.
5967
6147
  Data types must be: float16, float32, double, complex64, complex128.
5968
- The shape is [..., 1, M].
6148
+ The shape is :math:`(..., 1, M)`.
5969
6149
  - **subdiag** (Tensor) - Subdiagonals of Tridiagonal Matrices to the left of multiplication.
5970
6150
  Data types must be: float16, float32, double, complex64, complex128.
5971
- The shape is [..., 1, M].
6151
+ The shape is :math:`(..., 1, M)`.
5972
6152
  First element is ignored.
5973
6153
  - **rhs** (Tensor) - MxN Matrices to the right of multiplication.
5974
6154
  Data types must be: float16, float32, double, complex64, complex128.
5975
- The shape is [..., M, N].
6155
+ The shape is :math:`(..., 1, M)`.
5976
6156
 
5977
6157
  Outputs:
5978
6158
  Tensor, with the same shape and data type as the `rhs`.
@@ -6022,44 +6202,19 @@ class TridiagonalMatMul(Primitive):
6022
6202
  class Igamma(Primitive):
6023
6203
  r"""
6024
6204
  Calculates lower regularized incomplete Gamma function.
6025
- The lower regularized incomplete Gamma function is defined as:
6026
-
6027
- .. math::
6028
- P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)
6029
-
6030
- where
6031
-
6032
- .. math::
6033
- gamma(a, x) = \int_0^x t^{a-1} \exp^{-t} dt
6034
-
6035
- is the lower incomplete Gamma function.
6036
-
6037
- Above :math:`Q(a, x)` is the upper regularized complete Gamma function.
6038
6205
 
6039
6206
  .. warning::
6040
- This is an experimental prototype that is subject to change and/or deletion.
6041
-
6042
- Inputs:
6043
- - **a** (Tensor) - The input tensor. With type of float32 or float64.
6044
- - **x** (Tensor) - The input tensor. With float32 or float64 type. `x` should have
6045
- the same dtype with `a`.
6046
-
6047
- Outputs:
6048
- Tensor, has the same dtype as `a` and `x`.
6207
+ This is an experimental API that is subject to change or deletion.
6049
6208
 
6050
- Raises:
6051
- TypeError: If a or x is not a Tensor.
6052
- TypeError: If dtype of input x and a is not float32 nor float64.
6053
- TypeError: If x has different dtype with a.
6054
- ValueError: If `a` could not be broadcast to a tensor with shape of `x`.
6209
+ Refer to :func:`mindspore.ops.igamma` for more details.
6055
6210
 
6056
6211
  Supported Platforms:
6057
- ``GPU`` ``CPU``
6212
+ ``Ascend`` ``GPU`` ``CPU``
6058
6213
 
6059
6214
  Examples:
6060
6215
  >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
6061
6216
  >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
6062
- >>> igamma = P.Igamma()
6217
+ >>> igamma = ops.Igamma()
6063
6218
  >>> output = igamma(a, x)
6064
6219
  >>> print (output)
6065
6220
  [0.593994 0.35276785 0.21486944 0.13337152]
@@ -6075,39 +6230,15 @@ class Igammac(Primitive):
6075
6230
  r"""
6076
6231
  Compute the upper regularized incomplete Gamma function Q(a, x).
6077
6232
 
6078
- The upper regularized incomplete Gamma function is defined as:
6079
- \(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\)
6080
- where
6081
- \(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\)
6082
-
6083
- is the upper incomplete Gama function.
6084
-
6085
- Note, above P(a, x) (Igamma) is the lower regularized complete Gamma function.
6086
-
6087
- .. warning::
6088
- This is an experimental prototype that is subject to change and/or deletion.
6089
-
6090
- Inputs:
6091
- - **a** (Tensor) - The input tensor of igammac. With float32 or float64 data type.
6092
- - **x** (Tensor) - The input tensor of igammac. With float32 or float64 type. `x` should have
6093
- the same type with `a`.
6094
-
6095
- Outputs:
6096
- A Tensor, has the same dtype as `a` and `x`.
6097
-
6098
- Raises:
6099
- TypeError: If dtype of input x and a is not float32 nor float64.
6100
- TypeError: If a or x is not a Tensor.
6101
- TypeError: If x has different dtype with a.
6102
- ValueError: If `a` could not be broadcast to a tensor with shape of `x`.
6233
+ Refer to :func:`mindspore.ops.igammac` for more details.
6103
6234
 
6104
6235
  Supported Platforms:
6105
- ``GPU`` ``CPU``
6236
+ ``Ascend`` ``GPU`` ``CPU``
6106
6237
 
6107
6238
  Examples:
6108
6239
  >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
6109
6240
  >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
6110
- >>> igammac = P.Igammac()
6241
+ >>> igammac = ops.Igammac()
6111
6242
  >>> output = igammac(a, x)
6112
6243
  >>> print (output)
6113
6244
  [0.40600586 0.6472318 0.7851304 0.8666283 ]
@@ -6121,7 +6252,8 @@ class Igammac(Primitive):
6121
6252
 
6122
6253
  class IsClose(Primitive):
6123
6254
  r"""
6124
- Returns a boolean Tensor where two tensors are element-wise equal within a tolerance.
6255
+ Returns a tensor of Boolean values indicating whether two input tensors
6256
+ are element-wise equal within a given tolerance.
6125
6257
 
6126
6258
  Refer to :func:`mindspore.ops.isclose` for more details.
6127
6259
 
@@ -6160,7 +6292,7 @@ class MatrixExp(Primitive):
6160
6292
  Refer to :func:`mindspore.ops.matrix_exp` for more details.
6161
6293
 
6162
6294
  Supported Platforms:
6163
- ``CPU``
6295
+
6164
6296
 
6165
6297
  Examples:
6166
6298
  >>> matrix_exp = ops.MatrixExp()
@@ -6181,19 +6313,19 @@ class MatrixSolve(Primitive):
6181
6313
  Solves systems of linear equations.
6182
6314
 
6183
6315
  Args:
6184
- adjoint (bool, optional): Indicating whether to solve with matrix or
6185
- its (block-wise) adjoint. Default: False.
6316
+ adjoint (bool, optional): Indicates whether the adjoint of the
6317
+ matrix is used during the computation. Default: False, use its transpose instead.
6186
6318
 
6187
6319
  Inputs:
6188
- - **matrix** (Tensor) - A tensor of shape :math:`[..., M, M]`,
6320
+ - **matrix** (Tensor) - A tensor of shape :math:`(..., M, M)`,
6189
6321
  is a matrix of coefficients for a system of linear equations.
6190
- - **rhs** (Tensor) - A tensor of shape :math:`[..., M, K]`,
6322
+ - **rhs** (Tensor) - A tensor of shape :math:`(..., M, K)`,
6191
6323
  is a matrix of the resulting values of a system of linear equations.
6192
- 'rhs' must have the same type as `matrix`.
6324
+ `rhs` must have the same type as `matrix`.
6193
6325
 
6194
6326
  Outputs:
6195
6327
  Tensor, a matrix composed of solutions to a system of linear equations,
6196
- which has the same type and shape as 'rhs'.
6328
+ which has the same type and shape as `rhs`.
6197
6329
 
6198
6330
  Raises:
6199
6331
  TypeError: If `adjoint` is not the type of bool.
@@ -6276,27 +6408,79 @@ class MatrixSolveLs(Primitive):
6276
6408
  validator.check_value_type('fast', fast, [bool], self.name)
6277
6409
 
6278
6410
 
6411
+ class Lu(Primitive):
6412
+ """
6413
+ Computes the LU decomposition of one or more square matrices.
6414
+
6415
+ Args:
6416
+ output_idx_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.
6417
+ Default: `mindspore.dtype.int32`.
6418
+
6419
+ Inputs:
6420
+ - **input** (Tensor) - A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
6421
+ matrices of size `[M, M]`, with data type float32, float64, complex64, complex128.
6422
+
6423
+ Outputs:
6424
+ - **lu** (Tensor) - A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the lower
6425
+ triangular factor `L` with unit diagonal. Upper triangular part denotes the upper triangular factor `U`.
6426
+ - **p** (Tensor) - Permutation of the rows encoded as a list of indices in `0..M-1`, shape is `[..., M]`.
6427
+
6428
+ Raises:
6429
+ TypeError: If the dtype of `input` is not one of the following dtype:
6430
+ float32, float64, complex64, complex128.
6431
+ TypeError: If `output_idx_type` is neither int32 nor int64.
6432
+ ValueError: If `input` rank is less than 2.
6433
+ ValueError: If input[-1] is not equal to input[-2].
6434
+
6435
+ Supported Platforms:
6436
+ ``GPU``
6437
+
6438
+ Examples:
6439
+ >>> input = Tensor(np.array([[2.5,3.1,3.5], [4.7,1.9,0.2], [1.1,3.6,2.0]]), mindspore.float32)
6440
+ >>> lu, p = ops.Lu(output_idx_type=mindspore.int32)(input)
6441
+ >>> print(lu)
6442
+ [[4.7 1.9 0.2 ]
6443
+ [0.23404257 3.155319 1.9531915 ]
6444
+ [0.5319149 0.6621713 2.1002696 ]]
6445
+ >>> print(p)
6446
+ [1 2 0]
6447
+ """
6448
+
6449
+ @prim_attr_register
6450
+ def __init__(self, output_idx_type):
6451
+ super().__init__(name="Lu")
6452
+ self.init_prim_io_names(inputs=['input'], outputs=['lu', 'p'])
6453
+ validator.check_type_name("output_idx_type", output_idx_type, [mstype.int32, mstype.int64], self.name)
6454
+ self.add_prim_attr('output_idx_type', output_idx_type)
6455
+
6456
+
6279
6457
  class LuSolve(Primitive):
6280
6458
  r"""
6281
- Return the solution of the linear equation :math:`Ax = b` .
6459
+ Computes the solution y to the system of linear equations :math:`Ay = b` ,
6460
+ given LU decomposition A and column vector b.
6461
+
6462
+ LU decomposition of a matrix can be generated from :func:`mindspore.scipy.linalg.lu` .
6282
6463
 
6283
6464
  Note:
6284
6465
  The batch dimensions of lu_pivots must match the batch dimensions of lu_data, the size of the dimension and the
6285
- number of each dimension must be the same. For example, lu_data is (3, 3, 2, 2) lu_pivots is (3, 3, 2),
6286
- lu_data's batch dimensions is (3, 3), lu_pivots's batch dimensions is (3, 3).
6466
+ number of each dimension must be the same. For example, lu_data is :math:`(3, 3, 2, 2)` lu_pivots is
6467
+ :math:`(3, 3, 2)`,
6468
+ lu_data's batch dimensions is :math:`(3, 3)`, lu_pivots's batch dimensions is :math:`(3, 3)`.
6287
6469
 
6288
6470
  The batch dimensions of lu_data must match the batch dimensions of x, the batch dimensions may have
6289
6471
  different sizes, from right to left, the corresponding dimensions must be equal. For example, lu_data
6290
- is (3, 3, 2, 2) x is (2, 3, 3, 2, 1), lu_data's batch dimensions is (3, 3), x's batch dimensions is (2, 3, 3).
6472
+ is :math:`(3, 3, 2, 2)` x is :math:`(2, 3, 3, 2, 1)`, lu_data's batch dimensions is
6473
+ :math:`(3, 3)`, x's batch dimensions is :math:`(2, 3, 3)`.
6291
6474
 
6292
6475
  Inputs:
6293
- - **x** (Tensor) - The input is a tensor of size `(*, m, k)`, where * is batch dimensions, with data type
6294
- float32, float16.
6295
- - **lu_data** (Tensor) - The input is a tensor of size `(*, m, m)`, where * is batch dimensions, that can
6296
- be decomposed into an upper
6297
- triangular matrix U and a lower triangular matrix L, with data type float32, float16.
6298
- - **lu_pivots** (Tensor) - The input is a tensor of size `(*, m)`, where * is batch dimensions, that can
6299
- be converted to a permutation matrix P, with data type int32.
6476
+ - **x** (Tensor) - Column vector `b` in the above equation. It has shape :math:`(*, m, k)`,
6477
+ where :math:`*` is batch dimensions, with data type float32, float16.
6478
+ - **lu_data** (Tensor) - LU decomposition. It has shape :math:`(*, m, m)`, where * is batch
6479
+ dimensions, that can be decomposed into an upper triangular matrix U and a lower triangular
6480
+ matrix L, with data type float32, float16.
6481
+ - **lu_pivots** (Tensor) - Permutation matrix P of LU decomposition. It has
6482
+ shape :math:`(*, m)`, where :math:`*` is batch dimensions, that can be converted
6483
+ to a permutation matrix P, with data type int32.
6300
6484
 
6301
6485
  Outputs:
6302
6486
  Tensor, the same data type as the x and lu_data.
@@ -6310,7 +6494,7 @@ class LuSolve(Primitive):
6310
6494
  ValueError: If `x` dimension less than 2, `lu_data` dimension less than 2 or `lu_pivots` dimension less than 1.
6311
6495
 
6312
6496
  Supported Platforms:
6313
- ``GPU`` ``CPU``
6497
+ ``Ascend`` ``GPU`` ``CPU``
6314
6498
 
6315
6499
  Examples:
6316
6500
  >>> x = Tensor(np.array([[1], [3], [3]]), mindspore.float32)
@@ -6331,41 +6515,18 @@ class LuSolve(Primitive):
6331
6515
 
6332
6516
  class LuUnpack(Primitive):
6333
6517
  """
6334
- Unpack the LU_data and LU_pivots from a LU factorization of a tensor.
6518
+ Converts `LU_data` and `LU_pivots` back into P, L and U matrices, where
6519
+ P is a permutation matrix, L is a lower triangular matrix, and U is an
6520
+ upper triangular matrix. Typically, `LU_data` and `LU_pivots` are generated
6521
+ from the LU decomposition of a matrix.
6335
6522
 
6336
- Args:
6337
- unpack_data (bool, optional): A flag indicating if the LU_data should be unpacked.
6338
- If False, then the returned L and U are None. Default: True.
6339
- unpack_pivots (bool, optional): A flag indicating if the LU_pivots should be unpacked
6340
- into a permutation matrix P. If False, then the returned P is None. Default: True.
6341
-
6342
- Inputs:
6343
- - **LU_data** (Tensor) - The packed LU factorization data. A tensor of size `[*, M, N]`,
6344
- where * is batch dimensions, with data type int8, uint8, int16, int32, int64, float16,
6345
- float32, float64. The dims of LU_data must be equal to or greater than 2.
6346
- - **LU_pivots** (Tensor) - The packed LU factorization pivots. A tensor of size `[*, min(M, N)]`,
6347
- where * is batch dimensions, with data type int8, uint8, int16, int32, int64.
6348
-
6349
- Outputs:
6350
- - **pivots** (Tensor) - The permutation matrix of LU factorization. The shape is `[*, M, M]`,
6351
- the dtype is same as `LU_data`.
6352
- - **L** (Tensor) - The L matrix of LU factorization. The dtype is the same as `LU_data`.
6353
- - **U** (Tensor) - The U matrix of LU factorization. The dtype is the same as `LU_data`.
6523
+ .. warning::
6524
+ This is an experimental API that is subject to change or deletion.
6354
6525
 
6355
- Raises:
6356
- TypeError: If the dtype of `LU_data` is not one of the following: int8, uint8, int16, int32,
6357
- int64, float16, float32, float64.
6358
- TypeError: If the dtype of `LU_pivots` is not one of the following: int8, uint8, int16, int32, int64.
6359
- ValueError: If the dimension of `LU_data` is less than 2.
6360
- ValueError: If the dimension of `LU_pivots` is less than 1.
6361
- ValueError: If the size of the last dimension of LU_pivots is not equal to the minimum of the sizes of
6362
- the last two dimensions of LU_data.
6363
- ValueError: If the batch dimensions of LU_data's does not match LU_pivots's batch dimensions.
6364
- ValueError: On the CPU platform, if the value of `LU_pivots` are out of range[1, LU_data.shape[-2]).
6365
- RuntimeError: On the Ascend platform, if the value of `LU_pivots` are out of range[1, LU_data.shape[-2]).
6526
+ Refer to :func:`mindspore.ops.lu_unpack` for more details.
6366
6527
 
6367
6528
  Supported Platforms:
6368
- ``CPU``
6529
+ ``GPU`` ``CPU``
6369
6530
 
6370
6531
  Examples:
6371
6532
  >>> LU_data = Tensor(np.array([[[-0.3806, -0.4872, 0.5536],
@@ -6377,10 +6538,31 @@ class LuUnpack(Primitive):
6377
6538
  >>> LU_pivots = Tensor(np.array([[1, 3, 3],
6378
6539
  ... [2, 3, 3]]), mstype.int32)
6379
6540
  >>> lu_unpack = ops.LuUnpack()
6380
- >>> pivots, L, U = lu_unpack(LU_data, LU_pivots, unpack_data, unpack_pivots)
6541
+ >>> pivots, L, U = lu_unpack(LU_data, LU_pivots)
6381
6542
  >>> print(pivots)
6543
+ [[[1. 0. 0.]
6544
+ [0. 0. 1.]
6545
+ [0. 1. 0.]]
6546
+ <BLANKLINE>
6547
+ [[0. 0. 1.]
6548
+ [1. 0. 0.]
6549
+ [0. 1. 0.]]]
6382
6550
  >>> print(L)
6551
+ [[[ 1. 0. 0. ]
6552
+ [-0.1287 1. 0. ]
6553
+ [ 0.2583 0.5239 1. ]]
6554
+ <BLANKLINE>
6555
+ [[ 1. 0. 0. ]
6556
+ [-0.6401 1. 0. ]
6557
+ [ 0.1015 -0.5363 1. ]]]
6383
6558
  >>> print(U)
6559
+ [[[-0.3806 -0.4872 0.5536]
6560
+ [ 0. 0.6508 -0.2396]
6561
+ [ 0. 0. 0.6902]]
6562
+ <BLANKLINE>
6563
+ [[ 0.6706 -1.1782 0.4574]
6564
+ [ 0. -0.4779 0.6701]
6565
+ [ 0. 0. 0.6165]]]
6384
6566
  """
6385
6567
 
6386
6568
  @prim_attr_register
@@ -6392,26 +6574,12 @@ class LuUnpack(Primitive):
6392
6574
 
6393
6575
  class Lgamma(Primitive):
6394
6576
  r"""
6395
- Computes the natural logarithm of the absolute value of the gamma function on `input`.
6396
-
6397
- .. math::
6398
- \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
6399
-
6400
- Args:
6401
- input (Tensor): the tensor to compute the lgamma function.
6402
-
6403
- Inputs:
6404
- - **x** (Tensor) - The input tensor, types: float16, float32, float64.
6577
+ Computes the natural logarithm of the absolute value of the gamma function on input.
6405
6578
 
6406
- Outputs:
6407
- Tensor, has the same dtype as `x`.
6408
-
6409
- Raises:
6410
- TypeError: If x is not a Tensor.
6411
- TypeError: If dtype of input x is not one of: float16, float32, float64.
6579
+ Refer to :func:`mindspore.ops.lgamma` for more details.
6412
6580
 
6413
6581
  Supported Platforms:
6414
- ``GPU``
6582
+ ``GPU`` ``CPU``
6415
6583
 
6416
6584
  Examples:
6417
6585
  >>> x = Tensor(np.array([0.5, 3.2, 8.5]), mindspore.float32)
@@ -6435,7 +6603,7 @@ class Digamma(Primitive):
6435
6603
  P(x) = grad(ln(gamma(x)))
6436
6604
 
6437
6605
  .. warning::
6438
- This is an experimental prototype that is subject to change and/or deletion.
6606
+ This is an experimental API that is subject to change or deletion.
6439
6607
 
6440
6608
  Inputs:
6441
6609
  - **x** (Tensor) - The input tensor. With type of float16 or float32 or float64.
@@ -6448,7 +6616,7 @@ class Digamma(Primitive):
6448
6616
  TypeError: If dtype of input x is not float16 or float32 or float64.
6449
6617
 
6450
6618
  Supported Platforms:
6451
- ``GPU``
6619
+ ``GPU`` ``CPU``
6452
6620
 
6453
6621
  Examples:
6454
6622
  >>> x = Tensor(np.array([1.5, 0.5, 9]).astype(np.float16))
@@ -6466,29 +6634,15 @@ class Digamma(Primitive):
6466
6634
 
6467
6635
  class Polygamma(Primitive):
6468
6636
  r"""
6469
- Computes the :math:`a^{th}` derivative of the polygamma function on `x`.
6470
-
6471
- .. math::
6472
- \psi^{(a)}(x) = \frac{d^{(a)}}{dx^{(a)}} \psi(x)
6473
-
6474
- Args:
6475
- :math:`a \geq 0`: the order of the polygamma function.
6476
- input (Tensor): the tensor to compute the polygamma function.
6637
+ Computes the :math:`a`th derivative of the polygamma function on `x`.
6477
6638
 
6478
- Inputs:
6479
- - **a** (Tensor) - The order of the polygamma function, types: int32, int64.
6480
- - **x** (Tensor) - The input tensor, types: float16, float32, float64.
6481
-
6482
- Outputs:
6483
- Tensor, has the same dtype as `x`.
6639
+ .. warning::
6640
+ This is an experimental API that is subject to change or deletion.
6484
6641
 
6485
- Raises:
6486
- TypeError: If x is not a Tensor.
6487
- TypeError: If dtype of input x is not one of: float16, float32, float64.
6488
- TypeError: If dtype of input a is not one of: int32, int64.
6642
+ Refer to :func:`mindspore.ops.polygamma` for more details.
6489
6643
 
6490
6644
  Supported Platforms:
6491
- ``GPU``
6645
+ ``GPU`` ``CPU``
6492
6646
 
6493
6647
  Examples:
6494
6648
  >>> x = Tensor(np.array([1.0, -0.5]), mindspore.float32)
@@ -6519,12 +6673,12 @@ class Polygamma(Primitive):
6519
6673
 
6520
6674
  class CholeskyInverse(Primitive):
6521
6675
  """
6522
- Returns the inverse of the positive definite matrix using cholesky matrix factorization.
6676
+ Returns the inverse of the positive definite matrix using cholesky matrix factorization given its Cholesky factor.
6523
6677
 
6524
6678
  Refer to :func:`mindspore.ops.cholesky_inverse` for more details.
6525
6679
 
6526
6680
  Supported Platforms:
6527
- ``GPU`` ``CPU``
6681
+ ``Ascend`` ``CPU``
6528
6682
 
6529
6683
  Examples:
6530
6684
  >>> x = Tensor(np.array([[2,0,0], [4,1,0], [-1,1,2]]), mindspore.float32)
@@ -6547,10 +6701,24 @@ class Cross(Primitive):
6547
6701
  """
6548
6702
  Returns the cross product of vectors in dimension `dim` of x1 and x2.
6549
6703
 
6704
+ .. warning::
6705
+ This is an experimental API that is subject to change or deletion.
6706
+
6550
6707
  Refer to :func:`mindspore.ops.cross` for more details.
6551
6708
 
6709
+ Args:
6710
+ dim (int): Spefcified dim along which to cumpute cross product with. Default: -65530.
6711
+
6712
+ Inputs:
6713
+ - **x1** (Tensor) - Input Tensor.
6714
+ - **x2** (Tensor) - Another input Tensor, must have the same shape and
6715
+ the same type as `x1`, and the size of their `dim` dimension should be 3.
6716
+
6717
+ Outputs:
6718
+ Tensor, has the same shape and type as inputs.
6719
+
6552
6720
  Supported Platforms:
6553
- ``CPU``
6721
+ ``Ascend`` ``CPU``
6554
6722
 
6555
6723
  Examples:
6556
6724
  >>> import mindspore
@@ -6576,17 +6744,17 @@ class RaggedRange(Primitive):
6576
6744
  """
6577
6745
  Returns a `RaggedTensor` containing the specified sequences of numbers.
6578
6746
 
6579
- Args:
6747
+ Args:
6580
6748
  Tsplits (mindspore.dtype): An mindspore.dtype from: mindspore.int32, mindspore.int64.
6581
6749
 
6582
- Inputs:
6750
+ Inputs:
6583
6751
  - **starts** (Tensor) - The starts of each range, whose type is int32, int64, float32 or float64,
6584
6752
  and shape is 0D or 1D.
6585
6753
  - **limits** (Tensor) - The limits of each range, whose type and shape should be same as input `starts`.
6586
6754
  - **deltas** (Tensor) - The deltas of each range, whose type and shape should be same as input `starts`,
6587
6755
  and each element in the tensor should not be equal to 0.
6588
6756
 
6589
- Outputs:
6757
+ Outputs:
6590
6758
  - **rt_nested_splits** (Tensor) - The nested splits of the return `RaggedTensor`,
6591
6759
  and type of the tensor is `Tsplits`,
6592
6760
  shape of the tensor is equal to shape of input `starts` plus 1.
@@ -6596,10 +6764,10 @@ class RaggedRange(Primitive):
6596
6764
 
6597
6765
  - if type of the input `starts`, input `limits` and input `deltas`
6598
6766
  are int32 or int64, shape of the output `rt_dense_values` is equal to
6599
- sum(abs(limits[i] - starts[i]) + abs(deltas[i]) - 1) / abs(deltas[i])),
6767
+ :math:`sum(abs(limits[i] - starts[i]) + abs(deltas[i] - 1) / abs(deltas[i]))`.
6600
6768
  - if type of the input `starts`, input `limits` and input `deltas`
6601
6769
  are float32 or float64, shape of the output `rt_dense_values` is equal to
6602
- sum(ceil(abs((limits[i] - starts[i]) / deltas[i]))).
6770
+ :math:`sum(ceil(abs((limits[i] - starts[i]) / deltas[i])))`.
6603
6771
 
6604
6772
  Raises:
6605
6773
  TypeError: If any input is not Tensor.
@@ -6611,7 +6779,7 @@ class RaggedRange(Primitive):
6611
6779
  ValueError: If the shape of `starts`, `limits` and `deltas` are not same.
6612
6780
 
6613
6781
  Supported Platforms:
6614
- ``CPU``
6782
+ ``Ascend`` ``GPU`` ``CPU``
6615
6783
 
6616
6784
  Examples:
6617
6785
  >>> raggedrange = ops.RaggedRange(Tsplits=mstype.int64)
@@ -6642,11 +6810,14 @@ class Trace(Primitive):
6642
6810
  Note:
6643
6811
  Input must be matrix, and complex number is not supported at present.
6644
6812
 
6813
+ .. warning::
6814
+ This is an experimental API that is subject to change or deletion.
6815
+
6645
6816
  Inputs:
6646
6817
  - **x** (Tensor) - A matrix to be calculated. The matrix must be two dimensional.
6647
6818
 
6648
6819
  Outputs:
6649
- Tensor, with the same data type as input `x`, and size equals to 1.
6820
+ Tensor, 0D Tensor with 1 element, it has the same data type as input `x`.
6650
6821
 
6651
6822
  Raises:
6652
6823
  TypeError: If `x` is not a Tensor.
@@ -6660,7 +6831,17 @@ class Trace(Primitive):
6660
6831
  >>> trace = ops.Trace()
6661
6832
  >>> output = trace(x)
6662
6833
  >>> print(output)
6663
- 15.
6834
+ 15.0
6835
+ >>> x = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
6836
+ >>> trace = ops.Trace()
6837
+ >>> output = trace(x)
6838
+ >>> print(output)
6839
+ 18.0
6840
+ >>> x = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
6841
+ >>> trace = ops.Trace()
6842
+ >>> output = trace(x)
6843
+ >>> print(output)
6844
+ 24.0
6664
6845
  """
6665
6846
 
6666
6847
  @prim_attr_register
@@ -6670,32 +6851,36 @@ class Trace(Primitive):
6670
6851
 
6671
6852
  class Median(Primitive):
6672
6853
  """
6673
- Computes the median of elements of input tensor in the `axis` dimension. If `global_median` is True, computes the
6674
- median of all elements of tensor.
6854
+ Computes the median and its corresponding indices of input tensor in the `axis` dimension.
6855
+ If `global_median` is True, computes the median of all elements of tensor.
6675
6856
 
6676
6857
  .. warning::
6677
6858
  When attr `global_median` is True, the value of the second output tensor `indices` is meaningless.
6678
6859
 
6679
6860
  Args:
6680
- global_median (bool): Whether the output tensor is the median of all input tensor elements or not.
6681
- axis (int): The dimension need to reduce. Default: 0.
6682
- keep_dims (bool): Whether the output tensor need to retain `axis` dimension or not. Default: False.
6861
+ global_median (bool, optional): Whether the output tensor is the median of all
6862
+ input tensor elements or not. Default: Fasle.
6863
+ axis (int, optional): The specified dimension to compute median. Default: 0.
6864
+ keep_dims (bool, optional): Whether the output tensor need to retain `axis` dimension or not. Default: False.
6683
6865
 
6684
6866
  Inputs:
6685
- - **x** (Tensor) - A Tensor, whose dtype is int16, int32, int64, float32 or float64.
6867
+ - **x** (Tensor) - A Tensor to calculate median with. Supported dtype:int16, int32, int64, float32 or float64.
6686
6868
 
6687
6869
  Outputs:
6688
- - **y** (Tensor) - A Tensor, Has the same dtype as the `x`. If `global_median` is true, the `y` has only one
6689
- element. If `keep_dims` is true, the `y` has the same shape as the `x` except the shape of `y` in dimension
6690
- `axis` is size 1. Otherwise, the `y` lacks `axis` dimension than input.
6691
- - **indices** (Tensor) - A Tensor, Has the same shape as the `y`, but dtype is int64.
6870
+ - **y** (Tensor) - Median, has the same dtype as the `x`.
6871
+
6872
+ - If `global_median` is True, the `y` has only one element.
6873
+ - If `keep_dims` is True, the `y` has the same shape as the `x` except the size
6874
+ of `y` in dimension `axis` is 1.
6875
+ - Otherwise, the `y` lacks `axis` dimension than input.
6876
+
6877
+ - **indices** (Tensor) - Indices, Has the same shape as the `y`, with dtype int64.
6692
6878
 
6693
6879
  Raises:
6694
- TypeError: If dtype of `x` is not one of the following: int16, int32, int64, float32, double.
6880
+ TypeError: If dtype of `x` is not one of the following: int16, int32, int64, float32, float64.
6695
6881
  TypeError: If input `x` is not a Tensor.
6696
- TypeError: If `global_median` is not a bool.
6697
- TypeError: If `axis` is not a int.
6698
- TypeError: If `keep_dims` is not a bool.
6882
+ TypeError: If `global_median` or `keep_dims` is assigned a nonboolean value.
6883
+ TypeError: If `axis` is not int.
6699
6884
  ValueError: If `axis` is not in range of [-x.dim, x.dim-1].
6700
6885
 
6701
6886
  Supported Platforms:
@@ -6703,20 +6888,18 @@ class Median(Primitive):
6703
6888
 
6704
6889
  Examples:
6705
6890
  >>> # case 1 : common median compute
6706
- >>> from mindspore import Tensor
6707
- >>> from mindspore.ops.operations.math_ops import Median
6891
+ >>> from mindspore import Tensor, ops
6708
6892
  >>> import numpy as np
6709
6893
  >>> x = Tensor(np.array([[5, 1, 2],[3, 5, 7], [1, 6, 4]]).astype(np.int64))
6710
- >>> median = Median(global_median=False, axis=0, keep_dims=False)
6894
+ >>> median = ops.Median(global_median=False, axis=0, keep_dims=False)
6711
6895
  >>> y = median(x)
6712
6896
  >>> print(y)
6713
6897
  (Tensor(shape=[3], dtype=Int64, value= [3, 5, 4]), Tensor(shape=[3], dtype=Int64, value= [1, 1, 2]))
6714
6898
  >>> # case 2 : global median compute
6715
- >>> from mindspore import Tensor
6716
- >>> from mindspore.ops.operations.math_ops import Median
6899
+ >>> from mindspore import Tensor, ops
6717
6900
  >>> import numpy as np
6718
6901
  >>> x = Tensor(np.array([[1, 7, 6],[5, 1, 3],[9, 17, 1]]).astype(np.int32))
6719
- >>> median = Median(global_median=True)
6902
+ >>> median = ops.Median(global_median=True)
6720
6903
  >>> y = median(x)
6721
6904
  >>> print(y)
6722
6905
  (Tensor(shape=[], dtype=Int32, value= 5), Tensor(shape=[], dtype=Int64, value= 0))
@@ -6762,15 +6945,14 @@ class SparseSegmentMean(Primitive):
6762
6945
 
6763
6946
 
6764
6947
  class Zeta(Primitive):
6765
- """
6948
+ r"""
6766
6949
  Compute the Hurwitz zeta function ζ(x,q) of input Tensor.
6767
6950
 
6768
6951
  .. warning::
6769
- This is an experimental prototype that is subject to change and/or deletion.
6952
+ This is an experimental API that is subject to change or deletion.
6770
6953
 
6771
6954
  .. math::
6772
-
6773
- \\zeta \\left ( x,q \\right )= \\textstyle \\sum_{n=0} ^ {\\infty} \\left ( q+n\\right )^{-x}
6955
+ \zeta \left ( x,q \right )= \textstyle \sum_{n=0} ^ {\infty} \left ( q+n\right )^{-x}
6774
6956
 
6775
6957
  Inputs:
6776
6958
  - **x** (Tensor) - A Tensor, types: float32, float64.
@@ -6806,25 +6988,28 @@ class Bernoulli(Primitive):
6806
6988
  """
6807
6989
  Randomly set the elements of output to 0 or 1 with the probability of P which follows the Bernoulli distribution.
6808
6990
 
6991
+ .. warning::
6992
+ This is an experimental API that is subject to change or deletion.
6993
+
6809
6994
  Refer to :func:`mindspore.ops.bernoulli` for more details.
6810
6995
 
6811
6996
  Supported Platforms:
6812
- ``GPU``
6997
+ ``GPU`` ``CPU``
6813
6998
 
6814
6999
  Examples:
6815
- >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int8)
6816
- >>> bernoulli = Bernoulli()
6817
- >>> output = bernoulli(input_x, 1.0)
7000
+ >>> input_x = Tensor([0.1, 0.2, 0.3], mindspore.float32)
7001
+ >>> bernoulli = ops.Bernoulli()
7002
+ >>> output = bernoulli(input_x, Tensor([1.0]))
6818
7003
  >>> print(output)
6819
- [1 1 1]
6820
- >>> input_p = Tensor(np.array([0.0, 1.0, 1.0]), mindspore.float32)
7004
+ [1. 1. 1.]
7005
+ >>> input_p = Tensor([0.0, 1.0, 1.0], mindspore.float32)
6821
7006
  >>> output = bernoulli(input_x, input_p)
6822
7007
  >>> print(output)
6823
- [0 1 1]
7008
+ [0. 1. 1.]
6824
7009
  """
6825
7010
 
6826
7011
  @prim_attr_register
6827
- def __init__(self, seed=-1):
7012
+ def __init__(self, seed=-1, offset=0):
6828
7013
  """Initialize Bernoulli"""
6829
7014
  self.init_prim_io_names(inputs=['x', 'p'], outputs=['y'])
6830
7015
  validator.check_value_type("seed", seed, [int], self.name)
@@ -6893,7 +7078,7 @@ class Renorm(Primitive):
6893
7078
  Refer to :func:`mindspore.ops.renorm` for more details.
6894
7079
 
6895
7080
  Supported Platforms:
6896
- ``Ascend`` ``CPU`` ``GPU``
7081
+ ``Ascend`` ``GPU`` ``CPU``
6897
7082
 
6898
7083
  Examples:
6899
7084
  >>> x = Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), mindspore.float32)
@@ -6918,13 +7103,16 @@ class Renorm(Primitive):
6918
7103
 
6919
7104
  class Cholesky(Primitive):
6920
7105
  """
6921
- Computes the Cholesky decomposition of a symmetric positive-definite matrix `A`
6922
- or for batches of symmetric positive-definite matrices.
7106
+ Performs the Cholesky decomposition on a single or a batch of
7107
+ symmetric positive-definite matrices.
7108
+
7109
+ .. warning::
7110
+ This is an experimental API that is subject to change or deletion.
6923
7111
 
6924
7112
  Refer to :func:`mindspore.ops.cholesky` for more details.
6925
7113
 
6926
7114
  Supported Platforms:
6927
- ``CPU``
7115
+ ``GPU`` ``CPU``
6928
7116
 
6929
7117
  Examples:
6930
7118
  >>> input_x = Tensor(np.array([[1.0, 1.0], [1.0, 2.0]]), mindspore.float32)
@@ -6944,25 +7132,16 @@ class Cholesky(Primitive):
6944
7132
 
6945
7133
  class STFT(Primitive):
6946
7134
  """
6947
- STFTs can be used as a way of quantifying the change of a nonstationary signal’s
6948
- frequency and phase content over time.
7135
+ Applies Short-time Fourier transform (STFT) on input signal.
6949
7136
 
6950
- Args:
6951
- n_fft (int): The size of Fourier transform.
6952
- hop_length (int): The distance between neighboring sliding window frames.
6953
- win_length (int): the size of window frame and STFT filter.
6954
- normalized (bool): controls whether to return the normalized STFT results.
6955
- onesided (bool): controls whether to return half of results to
6956
- avoid redundancy for real inputs.
6957
- return_complex (bool): If True, return a complex tensor. If False, return
6958
- a real tensor with an extra last dimension for the real and imaginary components.
7137
+ STFT segments the signal into narrow time intervals and takes the Fourier transform
7138
+ of each segment to quantify the change of a nonstationary signal’s frequency
7139
+ and phase content over time.
6959
7140
 
6960
- Inputs:
6961
- - **x** (Tensor) - Time sequence of stft, must be either a 1-D time tensor or a 2-D tensor.
6962
- - **window** (Tensor) - the optional window function.
7141
+ Refer to :func:`mindspore.ops.stft` for more details.
6963
7142
 
6964
- Outputs:
6965
- - **y** (Tensor) - A tensor containing the STFT result with shape described above.
7143
+ Supported Platforms:
7144
+ ``Ascend`` ``CPU``
6966
7145
 
6967
7146
  Examples:
6968
7147
  >>> import mindspore as ms
@@ -6990,21 +7169,22 @@ class STFT(Primitive):
6990
7169
 
6991
7170
  class CholeskySolve(Primitive):
6992
7171
  """
6993
- Given its Cholesky factor `u`, solves a linear system of equations with a positive definite matrix.
7172
+ Computes the solution of a set of linear equations with a positive definite matrix,
7173
+ according to its Cholesky decomposition factor `u` , and outputs the result as `c`.
6994
7174
 
6995
- If `upper` is `True`, `u` is upper triangular and `c` is returned such that:
7175
+ If `upper` is set to `True`, `u` is upper triangular and `c` is returned such that:
6996
7176
 
6997
7177
  .. math::
6998
7178
  c = (u^{T}u)^{{-1}}b
6999
7179
 
7000
- If `upper` is `False`, `u` is lower triangular and `c` is returned such that:
7180
+ If `upper` is set to `False`, `u` is lower triangular and `c` is returned such that:
7001
7181
 
7002
7182
  .. math::
7003
7183
  c = (uu^{T})^{{-1}}b
7004
7184
 
7005
7185
  Args:
7006
- upper (bool, optional): Flag which indicates whether to consider the Cholesky factor
7007
- as a lower or upper triangular matrix. Default: False.
7186
+ upper (bool, optional): A flag indicates whether to treat the Cholesky factor
7187
+ as an upper or a lower triangular matrix. Default: False.
7008
7188
 
7009
7189
  Inputs:
7010
7190
  - **x1** (Tensor) - Tensor of shape :math:`(*, N, M)`, indicating 2D or 3D matrices,
@@ -7027,7 +7207,7 @@ class CholeskySolve(Primitive):
7027
7207
  ValueError: If `x2` is not 2D or 3D square matrices.
7028
7208
 
7029
7209
  Supported Platforms:
7030
- ``GPU`` ``CPU``
7210
+ ``Ascend`` ``GPU`` ``CPU``
7031
7211
 
7032
7212
  Examples:
7033
7213
  >>> x1 = Tensor(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), mindspore.float32)
@@ -7079,21 +7259,33 @@ class FFTWithSize(Primitive):
7079
7259
  Args:
7080
7260
  signal_ndim (int): The number of dimensions in each signal, this controls how many dimensions
7081
7261
  of the fourier transform are realized, can only be 1, 2 or 3.
7082
- inverse (bool): Whether it is the inverse transformation, used to select FFT or IFFT and RFFT or
7083
- IRFFT. inverse=False means FFT or RFFT, inverse=True means IFFT or IRFFT.
7084
- real (bool): Whether it is the real transformation, used to select FFT/IFFT or RFFT/IRFFT.
7085
- real=False means FFT or IFFT, real=True means RFFT or IRFFT.
7262
+ inverse (bool): Whether it is the inverse transformation.
7263
+ real (bool): Whether it is the real transformation.
7264
+
7265
+ - "inverse:False real:False" corresponds to FFT.
7266
+ - "inverse:True real:False" corresponds to IFFT.
7267
+ - "inverse:False real:True" corresponds to RFFT.
7268
+ - "inverse:True real:True" corresponds to IRFFT.
7269
+
7086
7270
  norm (str, optional): The normalization, optional values: ["backward", "forward", "ortho"].
7087
7271
  Default value: "backward".
7088
7272
 
7089
- - "backward" has the direct (forward) transforms unscaled and the inverse (backward) transforms
7090
- scaled by 1/n, where n is the input x's element numbers.
7091
- - "ortho" has both direct and inverse transforms are scaled by :math:`1/\sqrt(n)` .
7092
- - "forward" has the direct transforms scaled by 1/n and the inverse transforms unscaled.
7273
+ - "backward" has the direct transforms unscaled and the inverse transforms scaled by :math:`1/n`,
7274
+ where n is the input x's element numbers.
7275
+ - "ortho" has both direct and inverse transforms are scaled by :math:`1/\sqrt n`.
7276
+ - "forward" has the direct transforms scaled by :math:`1/n` and the inverse transforms unscaled.
7093
7277
 
7094
7278
  onesided (bool, optional): Controls whether the input is halved to avoid redundancy. Default: True.
7095
- signal_sizes (list, optional): Size of the original signal (the signal before rfft, no batch dimension),
7096
- only in irfft mode and set onesided=true requires the parameter. Default: [].
7279
+ signal_sizes (tuple, optional): Size of the original signal (the signal before rfft, no batch dimension),
7280
+ only in IRFFT mode and set `onesided` to True requires the parameter, the following conditions must be
7281
+ satisfied. Default: ().
7282
+
7283
+ - The length of `signal_sizes` is equal to the signal_ndim of the IRFFT:
7284
+ :math:`len(signal_sizes)=signal_ndim`.
7285
+ - The last dimension of `signal_sizes` divided by 2 is equal to
7286
+ the last dimension of the IRFFT input: :math:`signal_size[-1]/2+1=x.shape[-1]`.
7287
+ - `signal_sizes` has exactly the same dimensions as the input shape
7288
+ except for the last dimension: :math:`signal_sizes[:-1]=x.shape[:-1]`.
7097
7289
 
7098
7290
  Inputs:
7099
7291
  - **x** (Tensor) - The dimension of the input tensor must be greater than or equal to signal_ndim.
@@ -7113,33 +7305,33 @@ class FFTWithSize(Primitive):
7113
7305
  ``GPU`` ``CPU``
7114
7306
 
7115
7307
  Examples:
7116
- >>> # case FFT: signal_ndim: 1, inverse: False, real: False.
7117
- >>> fft_in = Tensor(np.array([2, 1, 2]), mindspore.complex64)
7118
- >>> fft_net = math_ops.FFTWithSize(signal_ndim=1, inverse=False, real=False)
7119
- >>> fft_output = fft_net(fft_in)
7120
- >>> print(fft_output)
7121
- [5.0000005 +2.9802322e-08j 0.50000036+8.6602569e-01j
7122
- 0.49999955-8.6602527e-01j]
7123
- >>> # case IFFT: signal_ndim: 1, inverse: True, real: False.
7124
- >>> ifft_in = fft_output
7125
- >>> ifft_net = math_ops.FFTWithSize(signal_ndim=1, inverse=True, real=False)
7126
- >>> ifft_output = ifft_net(ifft_in)
7127
- >>> print(ifft_output)
7128
- [2. +1.291434e-07j 1.0000004+7.947286e-08j 2.0000005-7.947286e-08j]
7129
- >>> # case RFFT2D: signal_ndim: 2, inverse: False, real: True.
7130
- >>> rfft_in = Tensor(np.array([[2, 1, 2], [3, 1, 6]]), mindspore.float32)
7131
- >>> rfft_net = math_ops.FFTWithSize(signal_ndim=2, inverse=False, real=True)
7132
- >>> rfft_output = rfft_net(rfft_in)
7133
- >>> print(rfft_output)
7134
- [[ 1.5000001e+01+2.0954278e-07j 1.1920929e-06+5.1961541e+00j]
7135
- [-5.0000005e+00-5.9604645e-08j 9.9999934e-01-3.4641027e+00j]]
7136
- >>> # case IRFFT2D: signal_ndim: 2, inverse: True, real: True.
7137
- >>> irfft_in = rfft_output
7138
- >>> irfft_net = math_ops.FFTWithSize(signal_ndim=2, inverse=True, real=True, signal_sizes=rfft_in.shape)
7139
- >>> irfft_output = irfft_net(irfft_in)
7140
- >>> print(irfft_output)
7141
- [[2.0000002 0.99999976 2.0000005 ]
7142
- [3.0000007 0.999999 6.000002 ]]
7308
+ >>> # case FFT: signal_ndim: 1, inverse: False, real: False.
7309
+ >>> fft_in = Tensor(np.array([2, 1, 2]), mindspore.complex64)
7310
+ >>> fft_net = ops.FFTWithSize(signal_ndim=1, inverse=False, real=False)
7311
+ >>> fft_output = fft_net(fft_in)
7312
+ >>> print(fft_output)
7313
+ [5. +0.j 0.5 +0.86602545j 0.50000006-0.8660255j ]
7314
+ >>> # case IFFT: signal_ndim: 1, inverse: True, real: False.
7315
+ >>> ifft_in = fft_output
7316
+ >>> ifft_net = ops.FFTWithSize(signal_ndim=1, inverse=True, real=False)
7317
+ >>> ifft_output = ifft_net(ifft_in)
7318
+ >>> print(ifft_output)
7319
+ [2. -1.9868216e-08j 0.99999994+0.0000000e+00j
7320
+ 1.9999999 +7.9472862e-08j]
7321
+ >>> # case RFFT2D: signal_ndim: 2, inverse: False, real: True.
7322
+ >>> rfft_in = Tensor(np.array([[2, 1, 2], [3, 1, 6]]), mindspore.float32)
7323
+ >>> rfft_net = ops.FFTWithSize(signal_ndim=2, inverse=False, real=True)
7324
+ >>> rfft_output = rfft_net(rfft_in)
7325
+ >>> print(rfft_output)
7326
+ [[ 1.5000000e+01+1.1920929e-07j -2.3841858e-07+5.1961522e+00j]
7327
+ [-5.0000000e+00-2.9802322e-08j 9.9999988e-01-3.4641016e+00j]]
7328
+ >>> # case IRFFT2D: signal_ndim: 2, inverse: True, real: True.
7329
+ >>> irfft_in = rfft_output
7330
+ >>> irfft_net = ops.FFTWithSize(signal_ndim=2, inverse=True, real=True, signal_sizes=rfft_in.shape)
7331
+ >>> irfft_output = irfft_net(irfft_in)
7332
+ >>> print(irfft_output)
7333
+ [[2. 1. 2. ]
7334
+ [3. 0.99999994 5.9999995 ]]
7143
7335
  """
7144
7336
 
7145
7337
  @prim_attr_register
@@ -7155,34 +7347,12 @@ class FFTWithSize(Primitive):
7155
7347
 
7156
7348
  class Polar(Primitive):
7157
7349
  r"""
7158
- Returns a complex tensor whose elements are Cartesian coordinates corresponding to the polar
7159
- coordinates with absolute value and angle.
7160
-
7161
- .. math::
7162
-
7163
- y_{i} = abs_{i} * cos(angle_{i}) + abs_{i} * sin(angle_{i}) * j
7350
+ Converts polar coordinates to Cartesian coordinates.
7164
7351
 
7165
- Inputs:
7166
- - **abs** (Tensor) - The shape of tensor is
7167
- :math:`(N,*)` where :math:`N` means the batchsize of the input tensor,
7168
- math:`*` means, any number of additional dimensions.
7169
- Must be one of the following types: float32, float64.
7170
-
7171
- - **angle** (Tensor) - The shape of tensor is
7172
- the same as the input tensor abs.
7173
- Must be the same type as the input tensor abs.
7174
-
7175
- Outputs:
7176
- Tensor, has the same shape and data type as `abs`.
7177
-
7178
- Raises:
7179
- TypeError: If neither `abs` nor `angle` is a Tensor.
7180
- TypeError: If the dtype of input is not one of: float32, float64.
7181
- TypeError: If the dtypes of two inputs are not the same.
7182
- ValueError: If `abs`'s shape is not the same as `angle`.
7352
+ Refer to :func:`mindspore.ops.polar` for more details.
7183
7353
 
7184
7354
  Supported Platforms:
7185
- ``GPU``
7355
+ ``GPU`` ``CPU``
7186
7356
 
7187
7357
  Examples:
7188
7358
  >>> polar = ops.Polar()
@@ -7203,15 +7373,19 @@ class NextAfter(Primitive):
7203
7373
  """
7204
7374
  Returns the next representable floating-point value after `x1` towards `x2` element-wise.
7205
7375
 
7206
- Say there are two float32 numbers :math:`a`, :math:`b`, and let the
7376
+ Say there are two float32 numbers :math:`a, b`, and let the
7207
7377
  representable delta of float32 datatype is :math:`eps`. If :math:`a < b`,
7208
7378
  then the next representable of :math:`a` towards :math:`b` is :math:`a+eps`,
7379
+ If :math:`a > b`,
7209
7380
  the next representable of :math:`b` towards :math:`a` is :math:`b-eps`.
7210
7381
 
7211
7382
  .. math::
7212
7383
 
7213
7384
  out_{i} = nextafter({x1_{i}, x2_{i}})
7214
7385
 
7386
+ .. warning::
7387
+ This is an experimental API that is subject to change or deletion.
7388
+
7215
7389
  Inputs:
7216
7390
  - **x1** (Tensor) - The shape of tensor is
7217
7391
  :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
@@ -7231,7 +7405,7 @@ class NextAfter(Primitive):
7231
7405
  ValueError: If `x1`'s shape is not the same as `x2`.
7232
7406
 
7233
7407
  Supported Platforms:
7234
- ``GPU`` ``CPU``
7408
+ ``Ascend`` ``GPU`` ``CPU``
7235
7409
 
7236
7410
  Examples:
7237
7411
  >>> nextafter = ops.NextAfter()
@@ -7250,16 +7424,13 @@ class NextAfter(Primitive):
7250
7424
 
7251
7425
  class TrilIndices(Primitive):
7252
7426
  r"""
7253
- Returns the indices of the lower triangular part of a `row` -by- `col` matrix in a Tensor.
7254
- The Tensor has a shape :math:`(2, tril\_size)` where :math:`tril\_size` is the number of
7255
- elements in the lower triangular matrix. The first row contains row coordinates of
7256
- all indices and the second row contains column coordinates.
7257
- Indices are ordered based on rows and then columns.
7427
+ Calculates the indices of the lower triangular elements in a `row` * `col` matrix
7428
+ and returns them as a 2-by-N Tensor.
7258
7429
 
7259
- The lower triangular part of the matrix is defined as the elements on and below the diagonal.
7430
+ .. warning::
7431
+ This is an experimental API that is subject to change or deletion.
7260
7432
 
7261
- Note:
7262
- When running on CUDA, row * col must be less than 2^59 to prevent overflow during calculation.
7433
+ Refer to :func:`mindspore.ops.tril_indices` for more details.
7263
7434
 
7264
7435
  Args:
7265
7436
  row (int): number of rows in the 2-D matrix.
@@ -7273,13 +7444,8 @@ class TrilIndices(Primitive):
7273
7444
  The shape of output is :math:`(2, tril\_size)`, where :math:`tril\_size` is the number of elements in the
7274
7445
  lower triangular matrix.
7275
7446
 
7276
- Raises:
7277
- TypeError: If `row`, `col` or `offset` is not an int.
7278
- TypeError: If `dtype` is neither int32 nor int64.
7279
- ValueError: If `row` or `col` < 0.
7280
-
7281
7447
  Supported Platforms:
7282
- ``GPU`` ``CPU``
7448
+ ``Ascend`` ``GPU`` ``CPU``
7283
7449
 
7284
7450
  Examples:
7285
7451
  >>> net = ops.TrilIndices(4, 3, -1, mstype.int64)
@@ -7295,8 +7461,8 @@ class TrilIndices(Primitive):
7295
7461
  def __init__(self, row, col, offset=0, dtype=mstype.int32):
7296
7462
  """Initialize TrilIndices"""
7297
7463
  self.init_prim_io_names(inputs=[], outputs=['y'])
7298
- validator.check_int(row, 0, Rel.GE, "row", self.name)
7299
- validator.check_int(col, 0, Rel.GE, "col", self.name)
7464
+ validator.check_int(row, 0, validator.GE, "row", self.name)
7465
+ validator.check_int(col, 0, validator.GE, "col", self.name)
7300
7466
  validator.check_value_type("offset", offset, [int], self.name)
7301
7467
  valid_values = (mstype.int32, mstype.int64)
7302
7468
  validator.check_type_name("dtype", dtype, valid_values, self.name)
@@ -7304,14 +7470,17 @@ class TrilIndices(Primitive):
7304
7470
 
7305
7471
  class MatrixTriangularSolve(Primitive):
7306
7472
  r"""
7307
- Returns a new tensor with the solotion of a linear equation system with an
7473
+ Returns a new tensor with the solution of a linear equation system with an
7308
7474
  upper or lower triangular matrix.
7309
7475
 
7476
+ Note:
7477
+ Only GPU platforms now support the broadcast mechanism.
7478
+
7310
7479
  Args:
7311
7480
  lower (bool, optional): If True, the innermost matrices in `matrix` is
7312
7481
  are lower triangular. Default: True.
7313
- adjoint (bool, optional): If True, solve with the adjoint of `matrix`.
7314
- Default: False.
7482
+ adjoint (bool, optional): Indicates whether the adjoint of the
7483
+ matrix is used during the computation. Default: False, use its transpose instead.
7315
7484
 
7316
7485
  Inputs:
7317
7486
  - **matrix** (Tensor) - Tensor of shape :math:`(*, M, M)`,
@@ -7325,24 +7494,26 @@ class MatrixTriangularSolve(Primitive):
7325
7494
  Raises:
7326
7495
  TypeError: If `matrix` or `rhs` is not a Tensor.
7327
7496
  TypeError: If `lower` or `adjoint` is not bool.
7328
- ValueError: If the batch sizes of `matrix` and `rhs` are not equal.
7497
+ ValueError: For GPU platform, if the batch sizes of `matrix` and `rhs` do not satisfy broadcasting rules.
7498
+ For other platforms, if the batch sizes of `matrix` and `rhs` are not equal.
7329
7499
  ValueError: If the inner-most 2 dimensions of `matrix` are not equal.
7330
7500
  ValueError: If the second-last dimensions of `matrix` and `rhs` are not equal.
7331
7501
 
7332
7502
  Supported Platforms:
7333
- ``Ascend`` ``CPU``
7503
+ ``Ascend`` ``GPU`` ``CPU``
7334
7504
 
7335
7505
  Examples:
7336
7506
  >>> matrix_triangular_solve = ops.MatrixTriangularSolve(lower=True, adjoint=False)
7337
- >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
7338
- >>> b = np.array([[1, 0],[2, 2],[1, 5],[0, 3]])
7339
- >>> output = matrix_triangular_solve(Tensor(a, mindspore.float32), Tensor(b, mindspore.float32))
7507
+ >>> matrix = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
7508
+ >>> rhs = np.array([[1, 0],[2, 2],[1, 5],[0, 3]])
7509
+ >>> output = matrix_triangular_solve(Tensor(matrix, mindspore.float32), Tensor(rhs, mindspore.float32))
7340
7510
  >>> print(output)
7341
7511
  [[ 0.33333334 0. ]
7342
7512
  [ 1.3333333 2. ]
7343
7513
  [ 0.6666666 5. ]
7344
7514
  [-2.3333333 -4. ]]
7345
7515
  """
7516
+
7346
7517
  @prim_attr_register
7347
7518
  def __init__(self, lower=True, adjoint=False):
7348
7519
  """Initialize MatrixTriangularSolve"""
@@ -7356,7 +7527,8 @@ class CompareAndBitpack(Primitive):
7356
7527
 
7357
7528
  Each comparison returns a boolean true (if x_value > threshold) or and false otherwise.
7358
7529
 
7359
- Given an `x` shaped `[s0, s1, ..., s_n]`, the output is a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
7530
+ Given an `x` shaped :math:`(s_0, s_1, ..., s_n)`, the output is a `uint8`
7531
+ Tensor shaped :math:`(s_0, s_1, ..., s_n / 8)`.
7360
7532
 
7361
7533
  Inputs:
7362
7534
  - **x** (Tensor) - Input tensor. Values to compare against `threshold` and bitpack. The data type must be
@@ -7376,7 +7548,7 @@ class CompareAndBitpack(Primitive):
7376
7548
  ValueError: If the innermost dimension of `x`'s shape is not disvisible by 8.
7377
7549
 
7378
7550
  Supported Platforms:
7379
- ``Ascend`` ``GPU`` ``CPU``
7551
+ ``Ascend`` ``CPU``
7380
7552
 
7381
7553
  Examples:
7382
7554
  >>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32)
@@ -7394,29 +7566,13 @@ class CompareAndBitpack(Primitive):
 
 class NanToNum(Primitive):
     """
-    Replaces `NaN`, positive infinity, and negative infinity values in the `x` with the values
-    specified by `nan`, `posinf`, and `neginf`, respectively. By default, NaN is replaced by 0,
-    positive infinity is replaced by the largest finite value representable by the x dtype,
-    and negative infinity is replaced by the smallest finite value representable by the x dtype.
-
-    Args:
-        nan (float): The value to replace `NaN`. Default value is 0.0.
-        posinf (float): If a Number, the value to replace positive infinity values with. If None, positive
-            infinity values are replaced with the greatest finite value representable by `x`'s dtype.
-            Default value is None.
-        neginf (float): if a Number, the value to replace negative infinity values with. If None, negative
-            infinity values are replaced with the lowest finite value representable by `x`'s dtype.
-            Default value is None.
+    Replaces `NaN`, positive infinity and negative infinity values in the input Tensor with the values
+    specified by `nan`, `posinf` and `neginf` respectively.
 
-    Inputs:
-        - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. With float32 or float16 data type.
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
 
-    Outputs:
-        Tensor, has the same shape and dtype as the `x`.
-
-    Raises:
-        TypeError: If `x` is not a Tensor.
-        TypeError: If dtype of `x` is not float16 or float32.
+    Refer to :func:`mindspore.ops.nan_to_num` for more details.
 
     Supported Platforms:
         ``Ascend`` ``CPU``
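The replacement rules that `mindspore.ops.nan_to_num` documents have a direct NumPy counterpart; a minimal sketch using `np.nan_to_num` (the keyword arguments require NumPy >= 1.17):

```python
import numpy as np

x = np.array([np.nan, np.inf, -np.inf, 3.14], dtype=np.float32)

# Defaults: NaN -> 0, +inf -> float32 max, -inf -> float32 min.
print(np.nan_to_num(x))
# Explicit replacements for all three special values.
print(np.nan_to_num(x, nan=1.0, posinf=2.0, neginf=-2.0))  # [ 1.    2.   -2.    3.14]
```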
@@ -7448,12 +7604,16 @@ class NanToNum(Primitive):
 
 class Orgqr(Primitive):
     r"""
-    Computes the first :math:`N` columns of a product of Householder matrices.
+    Calculates the explicit representation of the orthogonal matrix :math:`Q`
+    returned by :class:`mindspore.ops.Geqrf`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
 
     Refer to :func:`mindspore.ops.orgqr` for more details.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62.]]), mindspore.float32)
@@ -7474,16 +7634,13 @@ class Orgqr(Primitive):
 
 class TriuIndices(Primitive):
     r"""
-    Returns the indices of the upper triangular part of a `row` -by- `col` matrix in a Tensor.
-    The Tensor has a shape :math:`(2, tril\_size)` where :math:`tril\_size` is the number of
-    elements in the upper triangular matrix. The first row contains row coordinates of
-    all indices and the second row contains column coordinates.
-    Indices are ordered based on rows and then columns.
+    Calculates the indices of the upper triangular elements in a `row` * `col` matrix
+    and returns them as a 2-by-N Tensor.
 
-    The upper triangular part of the matrix is defined as the elements on and above the diagonal.
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
 
-    Note:
-        When running on CUDA, row * col must be less than 2^59 to prevent overflow during calculation.
+    Refer to :func:`mindspore.ops.triu_indices` for more details.
 
     Args:
         row (int): number of rows in the 2-D matrix.
@@ -7497,13 +7654,8 @@ class TriuIndices(Primitive):
         The shape of output is :math:`(2, tril\_size)`, where :math:`tril\_size` is the number of elements in the
         lower triangular matrix.
 
-    Raises:
-        TypeError: If `row`, `col` or `offset` is not an int.
-        TypeError: If `dtype` is neither int32 nor int64.
-        ValueError: If `row` or `col` < 0.
-
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> net = ops.TriuIndices(5, 4, 2, mstype.int64)
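For the doctest above, `np.triu_indices` computes the same two rows of coordinates (row indices first, column indices second, ordered row-major); a quick sketch:

```python
import numpy as np

# row=5, col=4, offset=2, matching ops.TriuIndices(5, 4, 2, mstype.int64).
rows, cols = np.triu_indices(5, k=2, m=4)
print(np.stack([rows, cols]))
# [[0 0 1]
#  [2 3 3]]
```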
@@ -7519,21 +7671,80 @@ class TriuIndices(Primitive):
     def __init__(self, row, col, offset=0, dtype=mstype.int32):
         """Initialize TriuIndices"""
         self.init_prim_io_names(inputs=[], outputs=['y'])
-        validator.check_int(row, 0, Rel.GE, "row", self.name)
-        validator.check_int(col, 0, Rel.GE, "col", self.name)
+        validator.check_int(row, 0, validator.GE, "row", self.name)
+        validator.check_int(col, 0, validator.GE, "col", self.name)
         validator.check_value_type("offset", offset, [int], self.name)
         valid_values = (mstype.int32, mstype.int64)
         validator.check_type_name("dtype", dtype, valid_values, self.name)
 
 
+class Fmin(Primitive):
+    """
+    Computes the minimum of input tensors element-wise.
+
+    Refer to :func:`mindspore.ops.fmin` for more details.
+
+    Supported Platforms:
+
+
+    Examples:
+        >>> x1 = Tensor(np.array([1.0, 5.0, 3.0]), mstype.float32)
+        >>> x2 = Tensor(np.array([4.0, 2.0, 6.0]), mstype.float32)
+        >>> fmin = ops.Fmin()
+        >>> output = fmin(x1, x2)
+        >>> print(output)
+        [1. 2. 3.]
+    """
+
+    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize Fmin"""
+        self.add_prim_attr('ignore_nan', True)
+        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
+
+
+class Fmax(Primitive):
+    """
+    Computes the maximum of input tensors element-wise.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Refer to :func:`mindspore.ops.fmax` for more details.
+
+    Supported Platforms:
+        ``CPU``
+
+    Examples:
+        >>> x1 = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
+        >>> x2 = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
+        >>> fmax = ops.Fmax()
+        >>> output = fmax(x1, x2)
+        >>> print(output)
+        [4. 5. 6.]
+    """
+
+    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize Fmax"""
+        self.add_prim_attr('ignore_nan', True)
+        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
+
+
 class Eig(Primitive):
     """
     Computes the eigenvalues and eigenvectors of a square matrix (batch square matrices).
 
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
     Args:
         compute_v (bool, optional): If `True`, compute both eigenvalues and eigenvectors;
             If `False`, just eigenvalues will be computed. Default: False.
-
     Inputs:
         - **x** (Tensor) - Square matrices of shape :math:`(*, N, N)`,
           with float32, float64, complex64 or complex128 data type.
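The `ignore_nan` attribute both constructors set corresponds to the NaN-skipping semantics of NumPy's `fmin`/`fmax`: when exactly one operand is NaN, the non-NaN value is returned. A minimal NumPy sketch:

```python
import numpy as np

x1 = np.array([1.0, np.nan, 3.0], dtype=np.float32)
x2 = np.array([4.0, 2.0, np.nan], dtype=np.float32)

# NaNs are ignored wherever the other operand is a number.
print(np.fmin(x1, x2))   # [1. 2. 3.]
print(np.fmax(x1, x2))   # [4. 2. 3.]
```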
@@ -7611,6 +7822,7 @@ class SelfAdjointEig(Primitive):
         [[1. 0.]
          [0. 1.]]
     """
 
+
     @prim_attr_register
     def __init__(self, compute_v=True):
         """Initialize SelfAdjointEig."""
@@ -7623,13 +7835,16 @@ class Qr(Primitive):
     Returns the QR decomposition of one or more matrices. If `full_matrices` is true, compute full-sized q and r;
     if False (the default), compute the P columns of q where P is the minimum of the 2 innermost dimensions of x.
 
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
     Args:
         full_matrices (bool, optional): Whether compute full-sized QR decomposition. Default: False.
 
     Inputs:
         - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions.
           types: float16, float32, float64, complex64, complex128.
-          Define the shape of x as (..., m, n), p as the minimum values of m and n.
+          Define the shape of x as :math:`(..., m, n)`, p as the minimum value of m and n.
 
     Outputs:
         - **q** (Tensor) - The orthonormal matrices of x.
@@ -7645,20 +7860,20 @@ class Qr(Primitive):
         ValueError: If the dimension of `x` is less than 2.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> qr_op = ops.Qr(full_matrices=False)
-        >>> x = Tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]], mstype.float32)
+        >>> x = Tensor([[20., -31, 7], [4, 270, -90], [-8, 17, -32]], mstype.float32)
         >>> q, r = qr_op(x)
         >>> print(q)
-        [[-0.8571428   0.39428577  0.3314286 ]
-         [-0.42857143 -0.90285724 -0.03428572]
-         [ 0.2857143  -0.17142859  0.94285715]]
+        [[-0.912871    0.16366126  0.37400758]
+         [-0.18257418 -0.9830709  -0.01544376]
+         [ 0.36514837 -0.08238228  0.92729706]]
         >>> print(r)
-        [[ -14.        -21.000008   13.999999]
-         [   0.       -175.         70.000015]
-         [   0.          0.        -34.999996]]
+        [[ -21.908903  -14.788506   -1.6431675]
+         [   0.       -271.9031     92.25824  ]
+         [   0.           0.       -25.665514 ]]
     """
 
 
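The updated doctest values can be sanity-checked with `np.linalg.qr`, whose default 'reduced' mode matches `full_matrices=False`; since Q and R are only unique up to column/row signs, only reconstruction, orthonormality, and triangularity are checked here:

```python
import numpy as np

x = np.array([[20., -31., 7.],
              [4., 270., -90.],
              [-8., 17., -32.]], dtype=np.float32)

q, r = np.linalg.qr(x)  # 'reduced' mode, the NumPy default
print(np.allclose(q @ r, x, atol=1e-3))            # True: q @ r reconstructs x
print(np.allclose(q.T @ q, np.eye(3), atol=1e-5))  # True: orthonormal columns
print(np.allclose(r, np.triu(r)))                  # True: r is upper triangular
```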
@@ -7670,6 +7885,7 @@ class Qr(Primitive):
 class Cauchy(Primitive):
     r"""
     Create a tensor of shape `size` with random numbers drawn from Cauchy distribution.
+    It is defined as follows:
 
     .. math::
         f(x)= \frac{1}{\pi} \frac{\sigma}{(x-median)^2 +\sigma^2}
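Samples matching this density can be drawn by inverse-CDF sampling; a minimal NumPy sketch, where the `median`, `sigma`, and `size` names mirror the attributes this primitive is assumed to take:

```python
import numpy as np

# Inverse CDF of Cauchy(median, sigma): median + sigma * tan(pi * (u - 0.5)).
rng = np.random.default_rng(0)
median, sigma, size = 0.0, 1.0, (2, 3)

u = rng.random(size)                                  # u ~ U(0, 1)
samples = median + sigma * np.tan(np.pi * (u - 0.5))  # Cauchy(median, sigma)
print(samples.shape)                                  # (2, 3)
```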
@@ -7715,21 +7931,23 @@ class Ormqr(Primitive):
     r"""
     Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
     Multiplies a (m, n) matrix C (given by other) with a matrix Q, where Q is represented using Householder
-    reflectors (x, tau), which is the output of torch.geqrf().
+    reflectors (x, tau), which is the output of geqrf().
 
     Args:
         left (bool, optional): controls the order of multiplication. If true, compute op(Q)*C.
-        If false, compute C*op(Q). Default: True.
+            If false, compute C*op(Q). Default: True.
         transpose (bool, optional): controls whether the matrix Q is conjugate transposed or not. Default: False.
 
     Inputs:
-        - **x** (Tensor) - Tensor of shape: (*, mn, k) where mn equals to m or n depending on the left.
-          with float32, float64, complex64 and complex128 data type.
-        - **tau** (Tensor) - Tensor of shape (*, min(mn, k)) which have the same type as x.
-        - **other** (Tensor) - tensor of shape (*, m, n) where * is zero or more batch dimensions.
+        - **x** (Tensor) - Tensor of shape: (*, mn, k) where mn equals to m or n depending on the args of `left`,
+          and `*` is zero or more batch dimensions.
+        - **tau** (Tensor) - Tensor of shape (*, min(mn, k)) where `*` is zero or more batch dimensions,
+          and its type is the same as `x`.
+        - **other** (Tensor) - Tensor of shape (*, m, n) where `*` is zero or more batch dimensions,
+          and its type is the same as `x`.
 
     Outputs:
-        - **y** (Tensor) - the output Tensor.
+        - **y** (Tensor) - the output Tensor, has the same shape and data type as `other`.
 
     Raises:
         TypeError: If `x` or `tau` or `other` is not Tensor.
@@ -7759,6 +7977,7 @@ class Ormqr(Primitive):
          [ -53.659264  -28.157839  -70.42702 ]
          [ -79.54292    24.00183   -41.34253 ]]
     """
 
+
     @prim_attr_register
     def __init__(self, left=True, transpose=False):
         """Initialize Ormqr"""
@@ -7775,8 +7994,20 @@ class Roll(Primitive):
 
     Refer to :func:`mindspore.ops.roll` for more details.
 
+    Args:
+        shift (Union[list(int), tuple(int), int]): Specifies the number of places by which elements are shifted
+            positively (towards larger indices) along the specified dimension. Negative shifts will roll the elements
+            in the opposite direction.
+        axis (Union[list(int), tuple(int), int]): Specifies the dimension indexes of shape to be rolled.
+
+    Inputs:
+        - **input_x** (Tensor) - Input tensor.
+
+    Outputs:
+        Tensor, has the same shape and type as `input_x`.
+
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``GPU``
 
     Examples:
         >>> input_x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
@@ -7808,5 +8039,5 @@ class Roll(Primitive):
             validator.check_equal_int(len(axis), 1, "shift size", self.name)
             validator.check_equal_int(axis[0], 0, "axis", self.name)
         elif isinstance(shift, int) and isinstance(axis, int):
-            validator.check_equal_int(axis, 0, "axis", self.name)
+            validator.check_is_int(axis, "axis", self.name)
         self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
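`np.roll` follows the same convention the new docstring describes (positive shifts move elements toward larger indices and wrap around); a one-dimensional sketch matching the doctest input:

```python
import numpy as np

input_x = np.array([0, 1, 2, 3, 4], dtype=np.float32)

print(np.roll(input_x, 2))    # [3. 4. 0. 1. 2.]
print(np.roll(input_x, -1))   # [1. 2. 3. 4. 0.]
```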