mindspore-1.10.0-cp37-cp37m-win_amd64.whl → mindspore-2.0.0rc1-cp37-cp37m-win_amd64.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (966)
  1. mindspore/.commit_id +1 -1
  2. mindspore/ConcurrencyCheck.dll +0 -0
  3. mindspore/CppBuildInsights.dll +0 -0
  4. mindspore/CppCoreCheck.dll +0 -0
  5. mindspore/EnumIndex.dll +0 -0
  6. mindspore/EspXEngine.dll +0 -0
  7. mindspore/HResultCheck.dll +0 -0
  8. mindspore/KernelTraceControl.dll +0 -0
  9. mindspore/LocalESPC.dll +0 -0
  10. mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
  11. mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
  12. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  13. mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
  14. mindspore/Newtonsoft.Json.dll +0 -0
  15. mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
  16. mindspore/VariantClear.dll +0 -0
  17. mindspore/__init__.py +9 -4
  18. mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
  19. mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
  20. mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
  21. mindspore/_check_jit_forbidden_api.py +102 -0
  22. mindspore/_checkparam.py +1066 -1001
  23. mindspore/_extends/builtin_operations.py +32 -4
  24. mindspore/_extends/graph_kernel/model/graph_split.py +66 -222
  25. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +12 -9
  26. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +119 -26
  27. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -50
  28. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -6
  29. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -25
  30. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  31. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -27
  32. mindspore/_extends/parse/__init__.py +5 -3
  33. mindspore/_extends/parse/namespace.py +17 -2
  34. mindspore/_extends/parse/parser.py +193 -34
  35. mindspore/_extends/parse/resources.py +7 -8
  36. mindspore/_extends/parse/standard_method.py +1780 -435
  37. mindspore/_extends/parse/trope.py +3 -1
  38. mindspore/amp.py +53 -58
  39. mindspore/atlprov.dll +0 -0
  40. mindspore/boost/adasum.py +3 -2
  41. mindspore/boost/boost.py +2 -2
  42. mindspore/boost/boost_cell_wrapper.py +46 -26
  43. mindspore/boost/dim_reduce.py +6 -5
  44. mindspore/boost/grad_accumulation.py +2 -1
  45. mindspore/boost/group_loss_scale_manager.py +1 -1
  46. mindspore/c1.dll +0 -0
  47. mindspore/c1xx.dll +0 -0
  48. mindspore/c2.dll +0 -0
  49. mindspore/cfgpersist.dll +0 -0
  50. mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
  51. mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
  52. mindspore/common/__init__.py +11 -10
  53. mindspore/common/_decorator.py +2 -0
  54. mindspore/common/_register_for_adapter.py +55 -0
  55. mindspore/common/_stub_tensor.py +201 -0
  56. mindspore/common/_utils.py +57 -0
  57. mindspore/common/api.py +582 -297
  58. mindspore/common/dtype.py +66 -18
  59. mindspore/common/dump.py +2 -2
  60. mindspore/common/initializer.py +38 -1
  61. mindspore/common/jit_config.py +25 -13
  62. mindspore/common/mutable.py +53 -24
  63. mindspore/common/parameter.py +60 -37
  64. mindspore/common/seed.py +8 -24
  65. mindspore/common/sparse_tensor.py +927 -0
  66. mindspore/common/tensor.py +1627 -3900
  67. mindspore/communication/__init__.py +10 -5
  68. mindspore/communication/_comm_helper.py +78 -214
  69. mindspore/communication/_hccl_management.py +2 -1
  70. mindspore/communication/management.py +136 -47
  71. mindspore/config/op_info.config +501 -1008
  72. mindspore/context.py +291 -56
  73. mindspore/d3dcompiler_47.dll +0 -0
  74. mindspore/dataset/__init__.py +12 -8
  75. mindspore/dataset/audio/__init__.py +9 -9
  76. mindspore/dataset/audio/transforms.py +1090 -228
  77. mindspore/dataset/audio/utils.py +87 -39
  78. mindspore/dataset/audio/validators.py +223 -1
  79. mindspore/dataset/callback/ds_callback.py +17 -15
  80. mindspore/dataset/core/config.py +246 -17
  81. mindspore/dataset/core/py_util_helpers.py +4 -3
  82. mindspore/dataset/core/validator_helpers.py +10 -10
  83. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  84. mindspore/dataset/debug/debug_hook.py +65 -0
  85. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  86. mindspore/dataset/engine/__init__.py +7 -3
  87. mindspore/dataset/engine/cache_client.py +9 -9
  88. mindspore/dataset/engine/datasets.py +648 -477
  89. mindspore/dataset/engine/datasets_audio.py +165 -167
  90. mindspore/dataset/engine/datasets_standard_format.py +93 -67
  91. mindspore/dataset/engine/datasets_text.py +492 -342
  92. mindspore/dataset/engine/datasets_user_defined.py +85 -50
  93. mindspore/dataset/engine/datasets_vision.py +1224 -699
  94. mindspore/dataset/engine/graphdata.py +134 -69
  95. mindspore/dataset/engine/iterators.py +50 -9
  96. mindspore/dataset/engine/offload.py +52 -31
  97. mindspore/dataset/engine/samplers.py +27 -24
  98. mindspore/dataset/engine/serializer_deserializer.py +14 -15
  99. mindspore/dataset/engine/validators.py +213 -52
  100. mindspore/dataset/text/__init__.py +10 -8
  101. mindspore/dataset/text/transforms.py +152 -57
  102. mindspore/dataset/text/utils.py +98 -49
  103. mindspore/dataset/text/validators.py +25 -0
  104. mindspore/dataset/transforms/__init__.py +4 -2
  105. mindspore/dataset/transforms/c_transforms.py +11 -13
  106. mindspore/dataset/transforms/py_transforms.py +2 -2
  107. mindspore/dataset/transforms/py_transforms_util.py +10 -0
  108. mindspore/dataset/transforms/transforms.py +13 -15
  109. mindspore/dataset/transforms/validators.py +7 -7
  110. mindspore/dataset/utils/__init__.py +2 -1
  111. mindspore/dataset/utils/browse_dataset.py +13 -13
  112. mindspore/dataset/utils/line_reader.py +121 -0
  113. mindspore/dataset/vision/__init__.py +8 -7
  114. mindspore/dataset/vision/c_transforms.py +125 -126
  115. mindspore/dataset/vision/py_transforms.py +37 -37
  116. mindspore/dataset/vision/py_transforms_util.py +23 -20
  117. mindspore/dataset/vision/transforms.py +316 -315
  118. mindspore/dataset/vision/utils.py +313 -17
  119. mindspore/dataset/vision/validators.py +6 -6
  120. mindspore/default_config.py +0 -1
  121. mindspore/dpcmi.dll +0 -0
  122. mindspore/{compression → experimental}/__init__.py +6 -5
  123. mindspore/experimental/map_parameter.py +275 -0
  124. mindspore/include/OWNERS +0 -1
  125. mindspore/include/api/callback/callback.h +9 -13
  126. mindspore/include/api/callback/ckpt_saver.h +2 -2
  127. mindspore/include/api/callback/loss_monitor.h +2 -2
  128. mindspore/include/api/callback/lr_scheduler.h +5 -5
  129. mindspore/include/api/callback/time_monitor.h +2 -2
  130. mindspore/include/api/callback/train_accuracy.h +4 -6
  131. mindspore/include/api/cfg.h +19 -6
  132. mindspore/include/api/context.h +70 -9
  133. mindspore/include/api/delegate.h +8 -1
  134. mindspore/include/api/dual_abi_helper.h +8 -24
  135. mindspore/include/api/metrics/accuracy.h +2 -2
  136. mindspore/include/api/metrics/metrics.h +4 -3
  137. mindspore/include/api/model.h +9 -4
  138. mindspore/include/api/model_group.h +68 -0
  139. mindspore/include/api/model_parallel_runner.h +17 -17
  140. mindspore/include/api/net.h +12 -11
  141. mindspore/include/api/serialization.h +20 -4
  142. mindspore/include/api/status.h +7 -1
  143. mindspore/include/api/types.h +25 -21
  144. mindspore/include/api/visible.h +4 -0
  145. mindspore/include/c_api/model_c.h +5 -0
  146. mindspore/include/c_api/status_c.h +1 -1
  147. mindspore/include/dataset/config.h +1 -1
  148. mindspore/include/dataset/constants.h +14 -0
  149. mindspore/include/dataset/text.h +59 -0
  150. mindspore/include/dataset/vision.h +56 -117
  151. mindspore/include/dataset/vision_lite.h +102 -0
  152. mindspore/jpeg62.dll +0 -0
  153. mindspore/log.py +28 -28
  154. mindspore/mindrecord/common/exceptions.py +2 -4
  155. mindspore/mindrecord/filereader.py +19 -1
  156. mindspore/mindrecord/filewriter.py +250 -88
  157. mindspore/mindrecord/mindpage.py +13 -13
  158. mindspore/mindrecord/shardheader.py +15 -15
  159. mindspore/mindrecord/shardreader.py +9 -0
  160. mindspore/mindrecord/shardwriter.py +29 -29
  161. mindspore/mindrecord/tools/cifar100_to_mr.py +9 -9
  162. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -9
  163. mindspore/mindrecord/tools/csv_to_mr.py +4 -4
  164. mindspore/mindrecord/tools/imagenet_to_mr.py +70 -65
  165. mindspore/mindrecord/tools/mnist_to_mr.py +41 -41
  166. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  167. mindspore/{libmindspore_backend.dll → mindspore_backend.dll} +0 -0
  168. mindspore/mindspore_common.dll +0 -0
  169. mindspore/mindspore_core.dll +0 -0
  170. mindspore/mindspore_glog.dll +0 -0
  171. mindspore/mindspore_shared_lib.dll +0 -0
  172. mindspore/msobj140.dll +0 -0
  173. mindspore/mspdb140.dll +0 -0
  174. mindspore/mspdbcore.dll +0 -0
  175. mindspore/mspdbst.dll +0 -0
  176. mindspore/mspft140.dll +0 -0
  177. mindspore/msvcdis140.dll +0 -0
  178. mindspore/msvcp140_1.dll +0 -0
  179. mindspore/msvcp140_2.dll +0 -0
  180. mindspore/msvcp140_atomic_wait.dll +0 -0
  181. mindspore/msvcp140_codecvt_ids.dll +0 -0
  182. mindspore/nn/__init__.py +1 -5
  183. mindspore/nn/cell.py +297 -234
  184. mindspore/nn/dynamic_lr.py +1 -1
  185. mindspore/nn/grad/cell_grad.py +17 -42
  186. mindspore/nn/layer/__init__.py +7 -4
  187. mindspore/nn/layer/activation.py +131 -88
  188. mindspore/nn/layer/basic.py +313 -613
  189. mindspore/nn/layer/channel_shuffle.py +103 -0
  190. mindspore/nn/layer/combined.py +1 -1
  191. mindspore/nn/layer/container.py +52 -6
  192. mindspore/nn/layer/conv.py +112 -43
  193. mindspore/nn/layer/dense.py +10 -9
  194. mindspore/nn/layer/embedding.py +36 -34
  195. mindspore/nn/layer/image.py +123 -27
  196. mindspore/nn/layer/math.py +108 -107
  197. mindspore/nn/layer/normalization.py +212 -366
  198. mindspore/nn/layer/padding.py +370 -42
  199. mindspore/nn/layer/pooling.py +1443 -219
  200. mindspore/nn/layer/rnn_cells.py +11 -16
  201. mindspore/nn/layer/rnns.py +38 -39
  202. mindspore/nn/layer/thor_layer.py +24 -25
  203. mindspore/nn/layer/timedistributed.py +5 -5
  204. mindspore/nn/layer/transformer.py +701 -0
  205. mindspore/nn/learning_rate_schedule.py +8 -8
  206. mindspore/nn/loss/__init__.py +9 -6
  207. mindspore/nn/loss/loss.py +678 -142
  208. mindspore/nn/metrics.py +53 -0
  209. mindspore/nn/optim/_dist_optimizer_registry.py +2 -2
  210. mindspore/nn/optim/ada_grad.py +8 -8
  211. mindspore/nn/optim/adadelta.py +2 -3
  212. mindspore/nn/optim/adafactor.py +18 -14
  213. mindspore/nn/optim/adam.py +429 -87
  214. mindspore/nn/optim/adamax.py +5 -6
  215. mindspore/nn/optim/adasum.py +10 -8
  216. mindspore/nn/optim/asgd.py +7 -7
  217. mindspore/nn/optim/ftrl.py +81 -11
  218. mindspore/nn/optim/lamb.py +7 -8
  219. mindspore/nn/optim/lars.py +4 -4
  220. mindspore/nn/optim/lazyadam.py +82 -7
  221. mindspore/nn/optim/momentum.py +8 -7
  222. mindspore/nn/optim/optimizer.py +19 -10
  223. mindspore/nn/optim/proximal_ada_grad.py +6 -5
  224. mindspore/nn/optim/rmsprop.py +3 -3
  225. mindspore/nn/optim/rprop.py +20 -16
  226. mindspore/nn/optim/sgd.py +21 -15
  227. mindspore/nn/optim/thor.py +23 -21
  228. mindspore/nn/probability/__init__.py +0 -2
  229. mindspore/nn/probability/bijector/bijector.py +7 -6
  230. mindspore/nn/probability/bijector/invert.py +4 -2
  231. mindspore/nn/probability/bijector/softplus.py +2 -2
  232. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  233. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  234. mindspore/nn/probability/distribution/__init__.py +6 -0
  235. mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -2
  236. mindspore/nn/probability/distribution/_utils/utils.py +11 -17
  237. mindspore/nn/probability/distribution/bernoulli.py +6 -6
  238. mindspore/nn/probability/distribution/beta.py +1 -1
  239. mindspore/nn/probability/distribution/categorical.py +9 -9
  240. mindspore/nn/probability/distribution/cauchy.py +8 -8
  241. mindspore/nn/probability/distribution/distribution.py +12 -6
  242. mindspore/nn/probability/distribution/exponential.py +5 -5
  243. mindspore/nn/probability/distribution/gamma.py +3 -3
  244. mindspore/nn/probability/distribution/geometric.py +6 -5
  245. mindspore/nn/probability/distribution/gumbel.py +5 -5
  246. mindspore/nn/probability/distribution/half_normal.py +133 -0
  247. mindspore/nn/probability/distribution/laplace.py +128 -0
  248. mindspore/nn/probability/distribution/log_normal.py +0 -1
  249. mindspore/nn/probability/distribution/logistic.py +4 -5
  250. mindspore/nn/probability/distribution/normal.py +11 -15
  251. mindspore/nn/probability/distribution/poisson.py +6 -2
  252. mindspore/nn/probability/distribution/student_t.py +150 -0
  253. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  254. mindspore/nn/probability/distribution/uniform.py +5 -5
  255. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  256. mindspore/nn/reinforcement/tensor_array.py +2 -2
  257. mindspore/nn/sparse/sparse.py +8 -1
  258. mindspore/nn/wrap/cell_wrapper.py +55 -27
  259. mindspore/nn/wrap/grad_reducer.py +20 -11
  260. mindspore/nn/wrap/loss_scale.py +47 -30
  261. mindspore/numpy/array_creations.py +33 -22
  262. mindspore/numpy/array_ops.py +46 -42
  263. mindspore/numpy/logic_ops.py +6 -27
  264. mindspore/numpy/math_ops.py +26 -19
  265. mindspore/numpy/utils.py +1 -8
  266. mindspore/numpy/utils_const.py +112 -62
  267. mindspore/opencv_core452.dll +0 -0
  268. mindspore/opencv_imgcodecs452.dll +0 -0
  269. mindspore/opencv_imgproc452.dll +0 -0
  270. mindspore/ops/__init__.py +6 -3
  271. mindspore/ops/_constants.py +0 -6
  272. mindspore/ops/_grad/__init__.py +2 -1
  273. mindspore/ops/_grad/grad_array_ops.py +209 -152
  274. mindspore/ops/_grad/grad_base.py +55 -17
  275. mindspore/ops/_grad/grad_clip_ops.py +11 -3
  276. mindspore/ops/_grad/grad_comm_ops.py +58 -47
  277. mindspore/ops/_grad/grad_implementations.py +21 -61
  278. mindspore/ops/_grad/grad_inner_ops.py +48 -6
  279. mindspore/ops/_grad/grad_math_ops.py +306 -161
  280. mindspore/ops/_grad/grad_nn_ops.py +192 -181
  281. mindspore/ops/_grad/grad_other_ops.py +1 -1
  282. mindspore/ops/_grad/grad_quant_ops.py +5 -5
  283. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  284. mindspore/ops/_grad/grad_sparse.py +15 -9
  285. mindspore/ops/_grad_experimental/__init__.py +1 -0
  286. mindspore/ops/_grad_experimental/grad_array_ops.py +441 -55
  287. mindspore/ops/_grad_experimental/grad_image_ops.py +25 -7
  288. mindspore/ops/_grad_experimental/grad_inner_ops.py +3 -44
  289. mindspore/ops/_grad_experimental/grad_linalg_ops.py +16 -21
  290. mindspore/ops/_grad_experimental/grad_math_ops.py +979 -49
  291. mindspore/ops/_grad_experimental/grad_nn_ops.py +78 -8
  292. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  293. mindspore/ops/_grad_experimental/grad_sparse_ops.py +197 -13
  294. mindspore/ops/_op_impl/__init__.py +3 -3
  295. mindspore/ops/_op_impl/_custom_op/__init__.py +0 -1
  296. mindspore/ops/_op_impl/_custom_op/_basic.py +0 -1
  297. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  298. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +4 -2
  299. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  300. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  301. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +5 -5
  302. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  303. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  304. mindspore/ops/_op_impl/_custom_op/correction_mul.py +3 -3
  305. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  306. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +4 -8
  307. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  308. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  309. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  310. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  311. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  312. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  313. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  314. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  315. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  316. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  317. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  318. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  319. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  320. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  321. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  322. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  323. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  324. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  325. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +0 -1
  326. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -1
  327. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  328. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  329. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  330. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  331. mindspore/ops/_op_impl/aicpu/__init__.py +238 -3
  332. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  333. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  334. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  335. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  336. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  337. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  338. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  339. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  340. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  341. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  342. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  343. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  344. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  345. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  346. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  347. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  348. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  349. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  350. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  351. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  352. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +43 -0
  353. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  354. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/cauchy.py} +17 -10
  355. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  356. mindspore/ops/_op_impl/aicpu/cholesky.py +1 -1
  357. mindspore/ops/_op_impl/{cpu/bias_add.py → aicpu/choleskygrad.py} +9 -7
  358. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  359. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  360. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  361. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  362. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  363. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  364. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  365. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +2 -2
  366. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  367. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  368. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  369. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  370. mindspore/ops/_op_impl/{cpu/bias_add_grad.py → aicpu/digamma.py} +9 -7
  371. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  372. mindspore/ops/_op_impl/aicpu/fft_with_size.py +41 -0
  373. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  374. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  375. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  376. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  377. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  378. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  379. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  380. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  381. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  382. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  383. mindspore/ops/_op_impl/{tbe/scatter_add_ds.py → aicpu/inplace_index_add.py} +17 -21
  384. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  385. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  386. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  387. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  388. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  389. mindspore/ops/_op_impl/aicpu/lgamma.py +32 -0
  390. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  391. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  392. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  393. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  394. mindspore/ops/_op_impl/aicpu/masked_scatter.py +39 -0
  395. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  396. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  397. mindspore/ops/_op_impl/aicpu/matrix_power.py +32 -0
  398. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  399. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  400. mindspore/ops/_op_impl/aicpu/mirror_pad.py +2 -0
  401. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  402. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  403. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  404. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  405. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  406. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  407. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  408. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  409. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  410. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  411. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  412. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  413. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  414. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  415. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  416. mindspore/ops/_op_impl/aicpu/random_shuffle.py +3 -0
  417. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  418. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  419. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  420. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  421. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  422. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  423. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  424. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  425. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  426. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  427. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  428. mindspore/ops/_op_impl/aicpu/search_sorted.py +12 -6
  429. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  430. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  431. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  432. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  433. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  434. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  435. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  436. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  437. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  438. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  439. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  440. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  441. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  442. mindspore/ops/_op_impl/{tbe/slice_ds.py → aicpu/sparse_segment_sum.py} +16 -24
  443. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  444. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  445. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  446. mindspore/ops/_op_impl/aicpu/squared_difference.py +2 -0
  447. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
  448. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
  449. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  450. mindspore/ops/_op_impl/{tbe/gather_v2.py → aicpu/tile.py} +24 -24
  451. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  452. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  453. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  454. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  455. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  456. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  457. mindspore/ops/_op_impl/cpu/__init__.py +1 -2
  458. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  459. mindspore/ops/_op_impl/cpu/maximum_grad.py +2 -0
  460. mindspore/{compression/common/__init__.py → ops/_op_impl/cpu/pyexecute.py} +13 -8
  461. mindspore/ops/_op_impl/cpu/reduce_sum.py +8 -0
  462. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  463. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  464. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  465. mindspore/ops/_op_impl/tbe/__init__.py +27 -608
  466. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +42 -0
  467. mindspore/ops/_op_impl/tbe/addcmul_ds.py +44 -0
  468. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  469. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  470. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  471. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -1
  472. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  473. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  474. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +41 -0
  475. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +1 -0
  476. mindspore/ops/_op_impl/tbe/bias_add_grad.py +2 -0
  477. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  478. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +40 -0
  479. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  480. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  481. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  482. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  483. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  484. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -2
  485. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -2
  486. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  487. mindspore/ops/_op_impl/tbe/deformable_offsets.py +1 -0
  488. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +1 -1
  489. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  490. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  491. mindspore/ops/_op_impl/tbe/greater.py +2 -0
  492. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  493. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -1
  494. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  495. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  496. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -6
  497. mindspore/ops/_op_impl/tbe/{greater_ds.py → reduce_all_ds.py} +13 -16
  498. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +39 -0
  499. mindspore/ops/_op_impl/tbe/roi_align_ds.py +44 -0
  500. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +44 -0
  501. mindspore/ops/_op_impl/tbe/scatter_add.py +2 -0
  502. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +2 -2
  503. mindspore/ops/_op_impl/tbe/slice.py +26 -15
  504. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  505. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  506. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +1 -0
  507. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  508. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +1 -1
  509. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +2 -0
  510. mindspore/ops/_primitive_cache.py +3 -2
  511. mindspore/ops/_register_for_op.py +11 -0
  512. mindspore/ops/_utils/__init__.py +1 -1
  513. mindspore/ops/_utils/utils.py +20 -41
  514. mindspore/ops/_vmap/__init__.py +2 -2
  515. mindspore/ops/_vmap/vmap_array_ops.py +170 -78
  516. mindspore/ops/_vmap/vmap_base.py +24 -10
  517. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  518. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  519. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -9
  520. mindspore/ops/_vmap/vmap_image_ops.py +52 -0
  521. mindspore/ops/_vmap/vmap_math_ops.py +77 -6
  522. mindspore/ops/_vmap/vmap_nn_ops.py +78 -29
  523. mindspore/ops/_vmap/vmap_other_ops.py +3 -1
  524. mindspore/ops/_vmap/vmap_random_ops.py +55 -3
  525. mindspore/ops/_vmap/vmap_sparse_ops.py +1 -0
  526. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  527. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  528. mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +18 -19
  529. mindspore/ops/bprop_mindir/Argmax_bprop.mindir +13 -12
  530. mindspore/ops/bprop_mindir/Argmin_bprop.mindir +14 -13
  531. mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +17 -18
  532. mindspore/ops/bprop_mindir/Assign_bprop.mindir +16 -16
  533. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  534. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  535. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  536. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +13 -12
  537. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  538. mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +28 -0
  539. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  540. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  541. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +306 -0
  542. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +12 -8
  543. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  544. mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
  545. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  546. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  547. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  548. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  549. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  550. mindspore/ops/bprop_mindir/DType_bprop.mindir +12 -12
  551. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  552. mindspore/ops/bprop_mindir/Depend_bprop.mindir +12 -13
  553. mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +23 -0
  554. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  555. mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +15 -0
  556. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  557. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  558. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -24
  559. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -14
  560. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  561. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  562. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  563. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  564. mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +12 -12
  565. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  566. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  567. mindspore/ops/bprop_mindir/Equal_bprop.mindir +18 -19
  568. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +58 -0
  569. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  570. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +54 -0
  571. mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +18 -15
  572. mindspore/ops/bprop_mindir/GatherD_bprop.mindir +26 -0
  573. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +57 -0
  574. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  575. mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +17 -18
  576. mindspore/ops/bprop_mindir/Greater_bprop.mindir +18 -19
  577. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  578. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  579. mindspore/ops/bprop_mindir/IOU_bprop.mindir +18 -19
  580. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  581. mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +13 -12
  582. mindspore/ops/bprop_mindir/IsInf_bprop.mindir +13 -10
  583. mindspore/ops/bprop_mindir/IsNan_bprop.mindir +14 -11
  584. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  585. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  586. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  587. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  588. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  589. mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +18 -19
  590. mindspore/ops/bprop_mindir/Less_bprop.mindir +17 -18
  591. mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +22 -19
  592. mindspore/ops/bprop_mindir/Load_bprop.mindir +12 -13
  593. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  594. mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +17 -18
  595. mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +14 -13
  596. mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +21 -0
  597. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  598. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  599. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  600. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  601. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  602. mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
  603. mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
  604. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  605. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  606. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  607. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  608. mindspore/ops/bprop_mindir/NonZero_bprop.mindir +14 -0
  609. mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +18 -19
  610. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +25 -23
  611. mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +13 -13
  612. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  613. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  614. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  615. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  616. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  617. mindspore/ops/bprop_mindir/Range_bprop.mindir +21 -19
  618. mindspore/ops/bprop_mindir/Rank_bprop.mindir +11 -11
  619. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  620. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  621. mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +18 -17
  622. mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +18 -17
  623. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +19 -23
  624. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +60 -0
  625. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  626. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +89 -0
  627. mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +52 -0
  628. mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +22 -0
  629. mindspore/ops/bprop_mindir/Round_bprop.mindir +14 -13
  630. mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
  631. mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
  632. mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +22 -0
  633. mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +24 -0
  634. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +22 -0
  635. mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
  636. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  637. mindspore/ops/bprop_mindir/Select_bprop.mindir +30 -34
  638. mindspore/ops/bprop_mindir/Shape_bprop.mindir +12 -12
  639. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  640. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  641. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  642. mindspore/ops/bprop_mindir/Sign_bprop.mindir +13 -12
  643. mindspore/ops/bprop_mindir/Slice_bprop.mindir +26 -0
  644. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  645. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  646. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  647. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  648. mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
  649. mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +28 -0
  650. mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +23 -0
  651. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  652. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  653. mindspore/ops/bprop_mindir/Split_bprop.mindir +22 -0
  654. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +54 -0
  655. mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +95 -0
  656. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +98 -0
  657. mindspore/ops/bprop_mindir/Switch_bprop.mindir +28 -32
  658. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  659. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  660. mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +22 -0
  661. mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +29 -0
  662. mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +14 -0
  663. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  664. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  665. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +23 -0
  666. mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +18 -15
  667. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +11 -13
  668. mindspore/ops/bprop_mindir/Unique_bprop.mindir +16 -0
  669. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +22 -0
  670. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  671. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  672. mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +13 -12
  673. mindspore/ops/bprop_mindir/__init__.py +1 -4
  674. mindspore/ops/bprop_mindir/generate_mindir.py +32 -20
  675. mindspore/ops/composite/__init__.py +12 -13
  676. mindspore/ops/composite/base.py +261 -254
  677. mindspore/ops/composite/env_ops.py +41 -0
  678. mindspore/ops/composite/math_ops.py +197 -156
  679. mindspore/ops/composite/multitype_ops/_compile_utils.py +428 -176
  680. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +188 -87
  681. mindspore/ops/composite/multitype_ops/add_impl.py +23 -1
  682. mindspore/ops/composite/multitype_ops/div_impl.py +3 -3
  683. mindspore/ops/composite/multitype_ops/equal_impl.py +1 -0
  684. mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -1
  685. mindspore/ops/composite/multitype_ops/getitem_impl.py +52 -5
  686. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  687. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  688. mindspore/ops/composite/multitype_ops/in_impl.py +15 -3
  689. mindspore/ops/composite/multitype_ops/less_equal_impl.py +33 -2
  690. mindspore/ops/composite/multitype_ops/less_impl.py +33 -0
  691. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -2
  692. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  693. mindspore/ops/composite/multitype_ops/mod_impl.py +1 -1
  694. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -7
  695. mindspore/ops/composite/multitype_ops/not_in_impl.py +15 -3
  696. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  697. mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
  698. mindspore/ops/composite/multitype_ops/setitem_impl.py +62 -70
  699. mindspore/ops/composite/multitype_ops/sub_impl.py +3 -3
  700. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +41 -4
  701. mindspore/ops/function/__init__.py +323 -8
  702. mindspore/ops/function/array_func.py +3511 -780
  703. mindspore/ops/function/clip_func.py +329 -0
  704. mindspore/ops/function/debug_func.py +6 -6
  705. mindspore/ops/function/grad/__init__.py +5 -1
  706. mindspore/ops/function/grad/grad_func.py +736 -65
  707. mindspore/ops/function/image_func.py +270 -0
  708. mindspore/ops/function/linalg_func.py +268 -8
  709. mindspore/ops/function/math_func.py +8032 -3164
  710. mindspore/ops/function/nn_func.py +5619 -1855
  711. mindspore/ops/function/other_func.py +115 -0
  712. mindspore/ops/function/parameter_func.py +11 -10
  713. mindspore/ops/function/random_func.py +939 -77
  714. mindspore/ops/function/sparse_func.py +249 -84
  715. mindspore/ops/function/sparse_unary_func.py +2303 -0
  716. mindspore/ops/function/spectral_func.py +146 -0
  717. mindspore/ops/function/vmap_func.py +114 -0
  718. mindspore/ops/functional.py +182 -254
  719. mindspore/ops/op_info_register.py +79 -34
  720. mindspore/ops/operations/__init__.py +210 -118
  721. mindspore/ops/operations/_csr_ops.py +7 -7
  722. mindspore/ops/operations/_embedding_cache_ops.py +25 -15
  723. mindspore/ops/operations/_grad_ops.py +447 -322
  724. mindspore/ops/operations/_inner_ops.py +547 -176
  725. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  726. mindspore/ops/operations/_ms_kernel.py +29 -27
  727. mindspore/ops/operations/_ocr_ops.py +11 -11
  728. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  729. mindspore/ops/operations/_quant_ops.py +186 -101
  730. mindspore/ops/operations/_rl_inner_ops.py +122 -61
  731. mindspore/ops/operations/_scalar_ops.py +466 -0
  732. mindspore/ops/operations/_sequence_ops.py +1047 -0
  733. mindspore/ops/operations/_tensor_array.py +10 -11
  734. mindspore/ops/operations/_thor_ops.py +4 -4
  735. mindspore/ops/operations/array_ops.py +1428 -1226
  736. mindspore/ops/operations/comm_ops.py +180 -117
  737. mindspore/ops/operations/control_ops.py +4 -2
  738. mindspore/ops/operations/custom_ops.py +185 -98
  739. mindspore/ops/operations/debug_ops.py +92 -54
  740. mindspore/ops/operations/image_ops.py +406 -211
  741. mindspore/ops/operations/inner_ops.py +42 -53
  742. mindspore/ops/operations/linalg_ops.py +32 -29
  743. mindspore/ops/operations/math_ops.py +2076 -897
  744. mindspore/ops/operations/nn_ops.py +1282 -1252
  745. mindspore/ops/operations/other_ops.py +124 -278
  746. mindspore/ops/operations/random_ops.py +345 -178
  747. mindspore/ops/operations/rl_ops.py +8 -9
  748. mindspore/ops/operations/sparse_ops.py +502 -157
  749. mindspore/ops/operations/spectral_ops.py +107 -0
  750. mindspore/ops/primitive.py +192 -15
  751. mindspore/ops/vm_impl_registry.py +23 -2
  752. mindspore/parallel/__init__.py +6 -1
  753. mindspore/parallel/_auto_parallel_context.py +199 -92
  754. mindspore/parallel/_cell_wrapper.py +4 -2
  755. mindspore/parallel/_cost_model_context.py +3 -0
  756. mindspore/parallel/_dp_allreduce_fusion.py +2 -1
  757. mindspore/parallel/_offload_context.py +185 -0
  758. mindspore/parallel/_parallel_serialization.py +167 -28
  759. mindspore/parallel/_ps_context.py +9 -5
  760. mindspore/parallel/_recovery_context.py +1 -1
  761. mindspore/parallel/_tensor.py +9 -1
  762. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  763. mindspore/{nn/transformer → parallel/_transformer}/layers.py +59 -37
  764. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  765. mindspore/{nn/transformer → parallel/_transformer}/moe.py +160 -35
  766. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  767. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +235 -196
  768. mindspore/parallel/_utils.py +47 -7
  769. mindspore/parallel/algo_parameter_config.py +5 -1
  770. mindspore/parallel/checkpoint_transform.py +329 -0
  771. mindspore/parallel/shard.py +229 -0
  772. mindspore/perf_msvcbuildinsights.dll +0 -0
  773. mindspore/pgodb140.dll +0 -0
  774. mindspore/pgort140.dll +0 -0
  775. mindspore/profiler/__init__.py +2 -1
  776. mindspore/profiler/common/util.py +4 -3
  777. mindspore/profiler/common/validator/validate_path.py +2 -2
  778. mindspore/profiler/envprofiling.py +249 -0
  779. mindspore/profiler/parser/aicpu_data_parser.py +38 -39
  780. mindspore/profiler/parser/ascend_timeline_generator.py +497 -0
  781. mindspore/profiler/parser/base_timeline_generator.py +471 -0
  782. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
  783. mindspore/profiler/parser/framework_parser.py +42 -16
  784. mindspore/profiler/parser/hccl_parser.py +158 -158
  785. mindspore/profiler/parser/hwts_log_parser.py +7 -6
  786. mindspore/profiler/parser/integrator.py +18 -1579
  787. mindspore/profiler/parser/minddata_analyzer.py +8 -8
  788. mindspore/profiler/parser/msadvisor_analyzer.py +14 -27
  789. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  790. mindspore/profiler/parser/optime_parser.py +17 -18
  791. mindspore/profiler/parser/profiler_info.py +108 -0
  792. mindspore/profiler/parser/step_trace_parser.py +1 -1
  793. mindspore/profiler/profiling.py +396 -194
  794. mindspore/rewrite/__init__.py +6 -2
  795. mindspore/rewrite/api/node.py +51 -110
  796. mindspore/rewrite/api/node_type.py +10 -6
  797. mindspore/rewrite/api/pattern_engine.py +51 -7
  798. mindspore/rewrite/api/scoped_value.py +64 -53
  799. mindspore/rewrite/api/symbol_tree.py +108 -61
  800. mindspore/rewrite/api/tree_node_helper.py +2 -3
  801. mindspore/{compression/quant/__init__.py → rewrite/ast_creator_register.py} +20 -11
  802. mindspore/rewrite/ast_helpers/__init__.py +6 -3
  803. mindspore/rewrite/ast_helpers/ast_creator.py +115 -0
  804. mindspore/rewrite/ast_helpers/ast_finder.py +99 -1
  805. mindspore/rewrite/ast_helpers/ast_modifier.py +17 -4
  806. mindspore/rewrite/ast_helpers/ast_replacer.py +1 -1
  807. mindspore/rewrite/ast_transformers/__init__.py +0 -1
  808. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +46 -5
  809. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +6 -3
  810. mindspore/rewrite/common/__init__.py +2 -0
  811. mindspore/rewrite/common/event.py +1 -1
  812. mindspore/rewrite/common/observable.py +1 -1
  813. mindspore/rewrite/common/observer.py +1 -1
  814. mindspore/rewrite/common/rewrite_elog.py +35 -0
  815. mindspore/rewrite/namer.py +2 -2
  816. mindspore/rewrite/namespace.py +14 -4
  817. mindspore/rewrite/node.py +161 -13
  818. mindspore/rewrite/parser.py +0 -1
  819. mindspore/rewrite/parser_register.py +0 -1
  820. mindspore/rewrite/parsers/arguments_parser.py +3 -2
  821. mindspore/rewrite/parsers/assign_parser.py +267 -67
  822. mindspore/rewrite/parsers/attribute_parser.py +56 -0
  823. mindspore/rewrite/parsers/class_def_parser.py +191 -108
  824. mindspore/rewrite/parsers/constant_parser.py +101 -0
  825. mindspore/rewrite/parsers/container_parser.py +88 -0
  826. mindspore/rewrite/parsers/for_parser.py +28 -15
  827. mindspore/rewrite/parsers/function_def_parser.py +21 -5
  828. mindspore/rewrite/parsers/if_parser.py +11 -28
  829. mindspore/rewrite/parsers/module_parser.py +9 -6
  830. mindspore/rewrite/parsers/return_parser.py +3 -2
  831. mindspore/rewrite/sparsify/__init__.py +0 -0
  832. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  833. mindspore/rewrite/sparsify/sparsify.py +109 -0
  834. mindspore/rewrite/sparsify/utils.py +173 -0
  835. mindspore/rewrite/symbol_tree.py +322 -109
  836. mindspore/rewrite/symbol_tree_builder.py +45 -8
  837. mindspore/rewrite/symbol_tree_dumper.py +0 -1
  838. mindspore/rewrite/topological_manager.py +1 -2
  839. mindspore/run_check/_check_version.py +209 -112
  840. mindspore/run_check/run_check.py +2 -1
  841. mindspore/tbbmalloc.dll +0 -0
  842. mindspore/tinyxml2.dll +0 -0
  843. mindspore/train/__init__.py +6 -4
  844. mindspore/train/_utils.py +28 -5
  845. mindspore/train/amp.py +321 -50
  846. mindspore/train/callback/__init__.py +3 -1
  847. mindspore/train/callback/_backup_and_restore.py +120 -0
  848. mindspore/train/callback/_callback.py +8 -8
  849. mindspore/train/callback/_checkpoint.py +12 -9
  850. mindspore/train/callback/_early_stop.py +13 -7
  851. mindspore/train/callback/_history.py +8 -8
  852. mindspore/train/callback/_lambda_callback.py +6 -6
  853. mindspore/train/callback/_landscape.py +36 -38
  854. mindspore/train/callback/_loss_monitor.py +12 -6
  855. mindspore/train/callback/_lr_scheduler_callback.py +2 -4
  856. mindspore/train/callback/_on_request_exit.py +212 -0
  857. mindspore/train/callback/_reduce_lr_on_plateau.py +13 -7
  858. mindspore/train/callback/_summary_collector.py +27 -19
  859. mindspore/train/callback/_time_monitor.py +13 -7
  860. mindspore/train/checkpoint_pb2.py +68 -8
  861. mindspore/train/data_sink.py +122 -33
  862. mindspore/train/dataset_helper.py +28 -87
  863. mindspore/train/loss_scale_manager.py +4 -7
  864. mindspore/{nn → train}/metrics/__init__.py +20 -20
  865. mindspore/{nn → train}/metrics/accuracy.py +12 -10
  866. mindspore/{nn → train}/metrics/auc.py +4 -4
  867. mindspore/{nn → train}/metrics/bleu_score.py +4 -4
  868. mindspore/{nn → train}/metrics/confusion_matrix.py +10 -8
  869. mindspore/{nn → train}/metrics/cosine_similarity.py +4 -4
  870. mindspore/{nn → train}/metrics/dice.py +6 -5
  871. mindspore/{nn → train}/metrics/error.py +7 -5
  872. mindspore/{nn → train}/metrics/fbeta.py +9 -7
  873. mindspore/{nn → train}/metrics/hausdorff_distance.py +8 -6
  874. mindspore/{nn → train}/metrics/loss.py +4 -3
  875. mindspore/{nn → train}/metrics/mean_surface_distance.py +6 -5
  876. mindspore/{nn → train}/metrics/metric.py +6 -5
  877. mindspore/{nn → train}/metrics/occlusion_sensitivity.py +4 -3
  878. mindspore/{nn → train}/metrics/perplexity.py +5 -4
  879. mindspore/{nn → train}/metrics/precision.py +5 -4
  880. mindspore/{nn → train}/metrics/recall.py +5 -4
  881. mindspore/{nn → train}/metrics/roc.py +7 -6
  882. mindspore/{nn → train}/metrics/root_mean_square_surface_distance.py +6 -5
  883. mindspore/{nn → train}/metrics/topk.py +7 -5
  884. mindspore/train/mind_ir_pb2.py +339 -32
  885. mindspore/train/model.py +113 -84
  886. mindspore/train/serialization.py +547 -167
  887. mindspore/train/summary/_summary_adapter.py +1 -1
  888. mindspore/train/summary/summary_record.py +43 -12
  889. mindspore/train/train_thor/convert_utils.py +7 -1
  890. mindspore/train/train_thor/dataset_helper.py +3 -3
  891. mindspore/train/train_thor/model_thor.py +0 -4
  892. mindspore/turbojpeg.dll +0 -0
  893. mindspore/vcmeta.dll +0 -0
  894. mindspore/vcruntime140.dll +0 -0
  895. mindspore/vcruntime140_1.dll +0 -0
  896. mindspore/version.py +1 -1
  897. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +4 -3
  898. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +901 -660
  899. mindspore/compression/common/constant.py +0 -124
  900. mindspore/compression/export/__init__.py +0 -19
  901. mindspore/compression/export/quant_export.py +0 -514
  902. mindspore/compression/quant/qat.py +0 -636
  903. mindspore/compression/quant/quant_utils.py +0 -462
  904. mindspore/compression/quant/quantizer.py +0 -68
  905. mindspore/libatomic-1.dll +0 -0
  906. mindspore/libgcc_s_seh-1.dll +0 -0
  907. mindspore/libgfortran-4.dll +0 -0
  908. mindspore/libgomp-1.dll +0 -0
  909. mindspore/libjpeg-62.dll +0 -0
  910. mindspore/libmindspore.dll +0 -0
  911. mindspore/libmindspore_common.dll +0 -0
  912. mindspore/libmindspore_core.dll +0 -0
  913. mindspore/libmindspore_glog.dll +0 -0
  914. mindspore/libnnacl.dll +0 -0
  915. mindspore/libopencv_core452.dll +0 -0
  916. mindspore/libopencv_imgcodecs452.dll +0 -0
  917. mindspore/libopencv_imgproc452.dll +0 -0
  918. mindspore/libquadmath-0.dll +0 -0
  919. mindspore/libsqlite3.dll +0 -0
  920. mindspore/libssp-0.dll +0 -0
  921. mindspore/libstdc++-6.dll +0 -0
  922. mindspore/libtinyxml2.dll +0 -0
  923. mindspore/libturbojpeg.dll +0 -0
  924. mindspore/libwinpthread-1.dll +0 -0
  925. mindspore/nn/layer/quant.py +0 -1868
  926. mindspore/nn/layer/rnn_utils.py +0 -90
  927. mindspore/nn/probability/dpn/__init__.py +0 -22
  928. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  929. mindspore/nn/probability/dpn/vae/cvae.py +0 -138
  930. mindspore/nn/probability/dpn/vae/vae.py +0 -122
  931. mindspore/nn/probability/infer/__init__.py +0 -22
  932. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  933. mindspore/nn/probability/infer/variational/svi.py +0 -84
  934. mindspore/nn/probability/toolbox/__init__.py +0 -22
  935. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  936. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -363
  937. mindspore/nn/probability/transforms/__init__.py +0 -22
  938. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  939. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  940. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  941. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  942. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  943. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  944. mindspore/ops/_op_impl/tbe/bias_add_grad_ds.py +0 -52
  945. mindspore/ops/_op_impl/tbe/scatter_nd_add_ds.py +0 -43
  946. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -20
  947. mindspore/ops/bprop_mindir/Identity_bprop.mindir +0 -9
  948. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -20
  949. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -16
  950. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -17
  951. mindspore/ops/bprop_mindir/stop_gradient_bprop.mindir +0 -12
  952. mindspore/ops/composite/array_ops.py +0 -210
  953. mindspore/ops/composite/clip_ops.py +0 -238
  954. mindspore/ops/composite/random_ops.py +0 -426
  955. mindspore/ops/composite/vmap_ops.py +0 -38
  956. mindspore/ops/operations/sponge_ops.py +0 -3531
  957. mindspore/ops/operations/sponge_update_ops.py +0 -2546
  958. mindspore/parallel/nn/__init__.py +0 -42
  959. mindspore/parallel/nn/loss.py +0 -22
  960. mindspore/parallel/nn/moe.py +0 -21
  961. mindspore/parallel/nn/op_parallel_config.py +0 -22
  962. mindspore/parallel/nn/transformer.py +0 -31
  963. mindspore/run_check/_check_deps_version.py +0 -84
  964. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  965. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  966. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
@@ -14,29 +14,29 @@
  # ============================================================================

  """Operators for math."""
-
  from __future__ import absolute_import
  from __future__ import division

  import numpy as np
+
  from mindspore import context
+ from mindspore import log as logger
  from mindspore.ops import signature as sig
- from mindspore._checkparam import Validator as validator
- from mindspore._checkparam import Rel
+ from mindspore import _checkparam as validator
  from mindspore.common import dtype as mstype
  from mindspore.common.tensor import Tensor
  from mindspore.common._decorator import deprecated
  from mindspore.ops._utils import get_broadcast_shape
  from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
  from mindspore._c_expression import Tensor as Tensor_
- from mindspore.ops._utils import is_shape_unknown
+ from mindspore.common._utils import is_shape_unknown


  def _infer_shape_reduce(x, axis, keep_dims, prim_name):
  """Common infer for reduce operator"""

  def reduce_one_axis(one_axis):
- validator.check_int_range(one_axis, -dim, dim, Rel.INC_LEFT, 'axis', prim_name)
+ validator.check_int_range(one_axis, -dim, dim, validator.INC_LEFT, 'axis', prim_name)
  if one_axis < 0:
  one_axis += dim
  axis_reduce.add(one_axis)
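Beyond the import reshuffle, the hunk above records a package-wide refactor of argument validation: the `Validator` class and the `Rel` comparison constants from `mindspore._checkparam` are replaced by module-level functions and constants. A minimal before/after sketch of the call style, using only the names that appear in this diff:

    # Call-style sketch inferred from the hunks above; not a complete API listing.
    # mindspore 1.10.0:
    #   from mindspore._checkparam import Validator as validator
    #   from mindspore._checkparam import Rel
    #   validator.check_int_range(one_axis, -dim, dim, Rel.INC_LEFT, 'axis', prim_name)
    # mindspore 2.0.0rc1: same function name, but the constant now lives on the module:
    #   from mindspore import _checkparam as validator
    #   validator.check_int_range(one_axis, -dim, dim, validator.INC_LEFT, 'axis', prim_name)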
@@ -102,9 +102,9 @@ class _MathBinaryOp(_BinaryOp):
  (mstype.float64, mstype.complex128): mstype.tensor_type(mstype.complex128),
  }
  if (x_dtype.element_type(), y_dtype.element_type()) not in type_infer_dict.keys():
- raise TypeError('Complex math binary op expecting Tensor [complex64, complex64],'
- + '[complex64, float32], [float32, complex64], [complex128, complex128],'
- + '[complex128, float64], [float64, complex128],'
+ raise TypeError('Complex math binary op expecting Tensor [Complex64, Complex64],'
+ + '[Complex64, Float32], [Float32, Complex64], [Complex128, Complex128],'
+ + '[Complex128, Float64], [Float64, Complex128],'
  + f'but got : [{format(x_dtype)},{format(y_dtype)}].')
  return type_infer_dict.get((x_dtype.element_type(), y_dtype.element_type()))

@@ -150,7 +150,7 @@ class Ger(Primitive):
  shape :math:`(m,)` and `x2` is a 1D Tensor of shape :math:`(n,)`, then `output` must be a 2D Tensor of shape
  :math:`(m, n)`.

- Refer to :func:`mindspore.ops.ger` for more detail.
+ Refer to :func:`mindspore.ops.ger` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -177,7 +177,7 @@ class Add(_MathBinaryOp):
  r"""
  Adds two input tensors element-wise.

- Refer to :func:`mindspore.ops.add` for more detail.
+ Refer to :func:`mindspore.ops.add` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -218,14 +218,6 @@ class Add(_MathBinaryOp):
  return out
  return None

- def _infer_min_value(self, x, y):
- """Calculate min value for output for Add op"""
- return self._infer_specified_add_value(x, y)
-
- def _infer_max_value(self, x, y):
- """Calculate max value for output for Add op"""
- return self._infer_specified_add_value(x, y)
-
  def infer_value(self, x, y):
  if x is not None and y is not None:
  x = x.asnumpy()
@@ -235,6 +227,14 @@ class Add(_MathBinaryOp):
  return Tensor(out)
  return None

+ def _infer_min_value(self, x, y):
+ """Calculate min value for output for Add op"""
+ return self._infer_specified_add_value(x, y)
+
+ def _infer_max_value(self, x, y):
+ """Calculate max value for output for Add op"""
+ return self._infer_specified_add_value(x, y)
+
  def _infer_shape_value(self, x, y):
  shape_value = self._infer_specified_add_value(x, y)
  shape_value = self._convert_back_shape(shape_value, x)
@@ -243,8 +243,8 @@

  class Addcdiv(Primitive):
  r"""
- Performs the element-wise division of tensor x1 by tensor x2,
- multiply the result by the scalar value and add it to input_data.
+ Performs the element-wise division of tensor `x1` by tensor `x2`,
+ multiplies the result by the scalar `value` and adds it to `input_data`.

  .. math::
  y[i] = input\_data[i] + value[i] * (x1[i] / x2[i])
@@ -260,9 +260,10 @@ class Addcdiv(Primitive):

  Raises:
  TypeError: If dtype of `x1`, `x2`, `value`, `input_data` is not tensor.
- ValueError: If `x1` could not be broadcast to a tensor with shape of `x2`.
- ValueError: If `value` could not be broadcast to tensors with shapes of `x1/x2`.
- ValueError: If `input_data` could not be broadcast to tensors with shapes of `value*(x1/x2)`.
+ TypeError: If dtypes of `x1`, `x2`, `value` and `input_data` are not the same.
+ ValueError: If `x1` could not be broadcast to `x2`.
+ ValueError: If `value` could not be broadcast to `x1/x2`.
+ ValueError: If `input_data` could not be broadcast to `value*(x1/x2)`.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -286,8 +287,8 @@

  class Addcmul(Primitive):
  r"""
- Performs the element-wise product of tensor x1 and tensor x2,
- multiply the result by the scalar value and add it to input_data.
+ Performs the element-wise product of tensor `x1` and tensor `x2`,
+ multiplies the result by the scalar `value` and adds it to `input_data`.

  .. math::
  output[i] = input\_data[i] + value[i] * (x1[i] * x2[i])
@@ -303,15 +304,13 @@ class Addcmul(Primitive):

  Raises:
  TypeError: If dtype of `x1`, `x2`, `value`, `input_data` is not tensor.
- TypeError: If dtype of `input_data` is not one of: float32, float16, int32.
- TypeError: If dtype of `x1` or `x2` is not one of: float32, float16, int32.
- TypeError: If dtype of `value` is not one of: float32, float16, int32.
- ValueError: If `x1` could not be broadcast to a tensor with shape of `x2`.
- ValueError: If `value` could not be broadcast to tensors with shapes of `x1` * `x2`.
- ValueError: If `input_data` could not be broadcast to tensors with shapes of `value*(x1*x2)`.
+ TypeError: If dtypes of `x1`, `x2`, `value` and `input_data` are not the same.
+ ValueError: If `x1` could not be broadcast to `x2`.
+ ValueError: If `value` could not be broadcast to `x1` * `x2`.
+ ValueError: If `input_data` could not be broadcast to `value*(x1*x2)`.

  Supported Platforms:
- ``Ascend`` ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
@@ -338,8 +337,9 @@ class AddV2(Primitive):

  Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  The inputs must be two tensors or one tensor and one scalar.
- When the inputs are two tensors, the shapes of them should be the same.
+ When the inputs are two tensors, their shapes can be broadcast.
  When the inputs are one tensor and one scalar, the scalar could only be a constant.
+ CPU/Ascend does not support broadcast for now.

  .. math::

@@ -358,14 +358,14 @@
  and the data type is the one with higher precision or higher digits among the two inputs.

  Raises:
- TypeError: If neither `x` nor `y` is a Tensor .
+ TypeError: If neither `x` nor `y` is a Tensor.
  TypeError: If dtype of `x` or `y` is not in [float16, float32, float64,
  uint8, int8, int16, int32, int64, complex64, complex128].
- ValueError: If the shape of 'x' and 'y' is not the same.
+ ValueError: If the shapes of `x` and `y` are not the same for CPU and Ascend.


  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> from mindspore.ops.operations.math_ops import AddV2
@@ -410,7 +410,7 @@ class AssignAdd(Primitive):
  """
  Updates a `Parameter` by adding a value to it.

- Refer to :func:`mindspore.ops.assign_add` for more detail.
+ Refer to :func:`mindspore.ops.assign_add` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -448,7 +448,7 @@ class AssignSub(Primitive):
  """
  Updates a `Parameter` by subtracting a value from it.

- Refer to :func:`mindspore.ops.assign_sub` for more detail.
+ Refer to :func:`mindspore.ops.assign_sub` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -513,11 +513,12 @@ class _Reduce(PrimitiveWithCheck):
  value = None
  if input_x is not None and axis is not None:
  prim_map = {
- 'ReduceSum': np.sum,
  'ReduceMax': np.max,
  'ReduceMin': np.min,
  'ReduceProd': np.prod,
  'ReduceMean': np.mean,
+ 'ReduceAll': np.all,
+ 'ReduceAny': np.any,
  }
  np_reduce_func = prim_map.get(self.name, None)

@@ -535,6 +536,47 @@ class _Reduce(PrimitiveWithCheck):
  return value


+ class EuclideanNorm(Primitive):
+ """
+ Calculates the Euclidean norm (aka L2 norm) of a Tensor along the specified axes.
+ The specified `axes` are removed by default.
+
+ Args:
+ keep_dims (bool, optional): whether to retain the reduced dimensions. If true, retains them with length 1.
+ If false, these dimensions are removed. Default: False.
+
+ Inputs:
+ - **x** (Tensor) - The input Tensor to reduce.
+ - **axes** (Tensor) - The axes to perform reduction on. Must be one of the following types: int32, int64.
+ It must be in range :math:`[-rank(x), rank(x))`.
+
+ Outputs:
+ Tensor, has the same type as the `x`.
+
+ Raises:
+ TypeError: If `keep_dims` is not a bool.
+ TypeError: If `x` is not a Tensor.
+ ValueError: If `axes` is out of range.
+
+ Supported Platforms:
+ ``GPU``
+
+ Examples:
+ >>> x = Tensor(np.array([[3, 5], [4, 12]])).astype(np.int32)
+ >>> axes = Tensor([0])
+ >>> op = ops.EuclideanNorm(keep_dims=True)
+ >>> output = op(x, axes)
+ >>> print(output)
+ [[5 13]]
+ """
+
+ @prim_attr_register
+ def __init__(self, keep_dims=False):
+ """Initialize"""
+ self.init_prim_io_names(inputs=['x', 'axes'], outputs=['y'])
+ validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
+
+
  class ReduceMean(_Reduce):
  """
  Reduces a dimension of a tensor by averaging all elements in the dimension, by default. And also can reduce
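For intuition about the `EuclideanNorm` operator added above, its docstring example can be reproduced with plain NumPy (a sketch of the documented semantics, not the kernel's implementation):

    import numpy as np

    # Euclidean (L2) norm along axis 0 with the reduced dimension kept,
    # mirroring ops.EuclideanNorm(keep_dims=True)(x, axes=Tensor([0])).
    x = np.array([[3, 5], [4, 12]], dtype=np.int32)
    out = np.sqrt(np.sum(np.square(x), axis=0, keepdims=True)).astype(np.int32)
    print(out)  # [[ 5 13]]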
@@ -547,7 +589,7 @@ class ReduceMean(_Reduce):

  Inputs:
  - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  Only constant value is allowed. Must be in the range [-r, r).

@@ -619,7 +661,75 @@ class ReduceMean(_Reduce):
  super(ReduceMean, self).__init__(keep_dims)


- class ReduceSum(_Reduce):
+ class CumulativeLogsumexp(Primitive):
+ """
+ Computes the cumulative log-sum-exp of the input tensor `x` along `axis`. For example, with all parameters at
+ default values, if the input `x` is a tensor [a, b, c], the output will be [a, log(exp(a) + exp(b)),
+ log(exp(a) + exp(b) + exp(c))].
+
+ Args:
+ exclusive (bool, optional): If true, the last element will be skipped during the calculation and thus an
+ exclusive cumulative log-sum-exp will be performed. For example, this operation
+ will output [-inf, a, log(exp(a) + exp(b))] with tensor [a, b, c] as the input.
+ Note that, for performance reasons, the minimum representable value of the
+ floating point type is used in place of -inf. Default: False.
+ reverse (bool, optional): If true, the function accumulation values will be calculated after the elements of
+ `x` on `axis` are flipped, and the calculation result will be flipped afterwards. For
+ example, this operation will output [log(exp(c) + exp(b) + exp(a)), log(exp(c) +
+ exp(b)), c] with tensor [a, b, c] as the input. Default: False.
+
+ Inputs:
+ - **x** (Tensor) - The input tensor. Must be one of the following types: float16, float32, float64. The
+ dimension of `x` must be greater than 0.
+ - **axis** (Tensor) - A 0-D tensor describing the dimension along which the cumulative computation is
+ performed. Must be one of
+ the following types: int64, int32, int16. Must be in the range [-rank(x), rank(x)). Default: 0.
+
+ Outputs:
+ Tensor, has the same dtype and shape as the `x`.
+
+ Raises:
+ TypeError: If `x` or `axis` is not a Tensor.
+ TypeError: If dtype of `x` is not in [float16, float32, float64].
+ TypeError: If dtype of `axis` is not in [int16, int32, int64].
+ TypeError: If `exclusive` or `reverse` is not a bool.
+ ValueError: If the dimension of `x` is not greater than 0.
+ RuntimeError: If `axis` is out of range [-rank(x), rank(x)).
+
+ Supported Platforms:
+ ``Ascend`` ``CPU`` ``GPU``
+
+ Examples:
+ >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
+ >>> op = ops.CumulativeLogsumexp(exclusive=False, reverse=False)
+ >>> output = op(x, Tensor(0))
+ >>> print(output)
+ [1. 2.3132617 3.407606 ]
+ >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
+ >>> op = ops.CumulativeLogsumexp(exclusive=True, reverse=False)
+ >>> output = op(x, Tensor(0))
+ >>> print(output)
+ [-3.4028235e+38 1.0000000e+00 2.3132617e+00]
+ >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
+ >>> op = ops.CumulativeLogsumexp(exclusive=False, reverse=True)
+ >>> output = op(x, Tensor(0))
+ >>> print(output)
+ [3.407606 3.3132617 3. ]
+ >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
+ >>> op = ops.CumulativeLogsumexp(exclusive=True, reverse=True)
+ >>> output = op(x, Tensor(0))
+ >>> print(output)
+ [ 3.3132617e+00 3.0000000e+00 -3.4028235e+38]
+ """
+
+ @prim_attr_register
+ def __init__(self, exclusive=False, reverse=False):
+ """Initialize CumulativeLogsumexp"""
+ self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])
+ validator.check_bool(exclusive, "exclusive", self.name)
+ validator.check_bool(reverse, "reverse", self.name)
+
+
+ class ReduceSum(PrimitiveWithCheck):
  """
  Reduces a dimension of a tensor by summing all elements in the dimension, by default. And also can reduce a
  dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
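The non-exclusive, forward case of the `CumulativeLogsumexp` operator added above maps directly onto NumPy's `logaddexp` ufunc; a sketch reproducing the first docstring example:

    import numpy as np

    # Cumulative log-sum-exp via logaddexp.accumulate, matching
    # CumulativeLogsumexp(exclusive=False, reverse=False).
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(np.logaddexp.accumulate(x))  # [1. 2.3132617 3.407606 ]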
@@ -628,18 +738,24 @@ class ReduceSum(_Reduce):
  Args:
  keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
  If false, don't keep these dimensions. Default: False.
+ skip_mode (bool): If true and `axis` is an empty tuple or an empty list, the ReduceSum operation is not
+ performed and is skipped.
+ If true and `axis` is any other value, the ReduceSum calculation is performed normally.
+ If false, always do the reduction. Default: False.

  Inputs:
  - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
- Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+ - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions
+ when skip_mode is false. Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).

  Outputs:
  Tensor, has the same dtype as the `x`.

- - If axis is (), and keep_dims is False,
+ - If axis is (), keep_dims is False, and skip_mode is False,
  the output is a 0-D tensor representing the sum of all elements in the input tensor.
+ - If axis is (), and skip_mode is True,
+ the ReduceSum operation is not performed, output tensor is equal to the input tensor.
  - If axis is int, set as 2, and keep_dims is False,
  the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  - If axis is tuple(int) or list(int), set as (2, 3), and keep_dims is False,
@@ -647,6 +763,7 @@ class ReduceSum(_Reduce):

  Raises:
  TypeError: If `keep_dims` is not a bool.
+ TypeError: If `skip_mode` is not a bool.
  TypeError: If `x` is not a Tensor.
  ValueError: If `axis` is None.

@@ -694,12 +811,44 @@ class ReduceSum(_Reduce):
  [54.]]]
  """

+ __mindspore_signature__ = (
+ sig.make_sig('input_x'),
+ sig.make_sig('axis', default=())
+ )
+
  @prim_attr_register
- def __init__(self, keep_dims=False):
- """Initialize ReduceSum"""
- super(ReduceSum, self).__init__(keep_dims)
+ def __init__(self, keep_dims=False, skip_mode=False):
+ """Initialize Reduce"""
+ validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
+ validator.check_value_type('skip_mode', skip_mode, [bool], self.name)
+ self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])
+ self.keep_dims = keep_dims
+ self.skip_mode = skip_mode
  self.__setattr_flag__ = True

+ def __call__(self, x, axis=()):
+ args = [x, axis]
+ output = _run_op(self, self.name, args)
+ return output
+
+ def infer_value(self, input_x, axis):
+ """ return reduce op value"""
+ value = None
+ if input_x is not None and axis is not None:
+ value = input_x.asnumpy()
+ if isinstance(axis, int):
+ pass
+ elif axis:
+ axis = tuple(set(axis))
+ elif axis in ((), []) and self.skip_mode:
+ return input_x
+ else:
+ axis = tuple(range(len(value.shape)))
+ value = np.sum(value, axis, keepdims=self.keep_dims)
+ value = np.array(value)
+ value = Tensor(value)
+ return value
+

  class ReduceAll(_Reduce):
  """
@@ -823,7 +972,7 @@ class ReduceMax(_Reduce):

  Inputs:
  - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  Only constant value is allowed. Must be in the range [-r, r).

@@ -907,7 +1056,7 @@ class ReduceMin(_Reduce):

  Inputs:
  - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  Only constant value is allowed. Must be in the range [-r, r).

@@ -975,10 +1124,10 @@ class ReduceMin(_Reduce):

  class Bucketize(Primitive):
  """
- Bucketizes 'input' based on 'boundaries'.
+ Bucketizes `input` based on `boundaries`.

  Args:
- boundaries (list_float): A sorted list of floats gives the boundary of the buckets, and no default value.
+ boundaries (list[float]): A sorted list of floats giving the boundaries of the buckets; it has no default value.

  Inputs:
  - **input** (Tensor) - A tensor containing the search value(s).
@@ -991,13 +1140,13 @@ class Bucketize(Primitive):
  TypeError: If `input` is not a Tensor.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> class Bucketize(nn.Cell):
  ... def __init__(self, boundaries):
  ... super().__init__()
- ... self.bucketize = op.Bucketize(boundaries=boundaries)
+ ... self.bucketize = ops.Bucketize(boundaries=boundaries)
  ... def construct(self, input):
  ... return self.bucketize(input)
  >>> input = Tensor(np.array([[3, 6, 9], [3, 6, 9]]).astype(np.int32))
@@ -1026,11 +1175,11 @@ class ReduceProd(_Reduce):

  Args:
  keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default : False.
+ If false, don't keep these dimensions. Default: False.

  Inputs:
  - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  Only constant value is allowed. Must be in the range [-r, r).

@@ -1175,7 +1324,10 @@ class Lcm(Primitive):
  """
  Computes the least common multiple of input tensors element-wise.
  The shape of two inputs should be broadcastable, and data type of them should be
- one of: int32, int64
+ one of: int32, int64.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.

  Inputs:
  - **x1** (Tensor) - The first input tensor.
@@ -1190,7 +1342,7 @@ class Lcm(Primitive):
  ValueError: If shape of two inputs are not broadcastable.

  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> x1 = Tensor(np.array([7, 8, 9]))
@@ -1212,12 +1364,15 @@ class Cdist(Primitive):
  """
  Computes, in batch, the p-norm distance between each pair of row vectors from the two input collections.

- Refer to :func:`mindspore.ops.cdist` for more detail.
+ Refer to :func:`mindspore.ops.cdist` for more details.

  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
  >>> input_y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
  >>> op = ops.Cdist(p=2.0)
@@ -1243,7 +1398,29 @@ class LpNorm(Primitive):
  .. math::
  output = sum(abs(input)**p)**(1/p)

- Refer to :func:`mindspore.ops.norm` for more detail.
+ Args:
+ axis (int, list, tuple): Specifies which dimension or dimensions of `input` to calculate the norm across.
+ p (int, optional): The order of norm. Default: 2.
+ keep_dims (bool, optional): Whether the output tensors have dim retained or not. Default: False.
+ epsilon (float, optional): A value added to the denominator for numerical stability. Default: 1e-12.
+
+ Inputs:
+ - **input** (Tensor) - Input tensor.
+
+ Outputs:
+ Tensor, has the same dtype as `input`, its shape depends on `axis`. For example, if the shape of input
+ is :math:`(2, 3, 4)`, `axis` is :math:`[0, 1]`, output shape will be :math:`(4,)`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If dtype of `input` is not one of: float16, float32.
+ TypeError: If `p` is not an int.
+ TypeError: If `axis` is not an int, a tuple or a list.
+ TypeError: If `axis` is a tuple or a list, but the element of `axis` is not an int.
+ TypeError: If `keep_dims` is not a bool.
+ ValueError: If the element of `axis` is out of the range :math:`[-r, r)`,
+ where :math:`r` is the rank of `input`.
+ ValueError: If the length of shape of `axis` is bigger than the length of shape of `input`.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
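The formula documented for `LpNorm` is easy to sanity-check in NumPy (a sketch assuming float inputs):

    import numpy as np

    def lp_norm_sketch(x, axis, p=2, keep_dims=False):
        """LpNorm's documented formula: sum(abs(x)**p)**(1/p) over `axis`."""
        axis = tuple(axis) if isinstance(axis, (list, tuple)) else (axis,)
        return np.power(np.sum(np.abs(x) ** p, axis=axis, keepdims=keep_dims), 1.0 / p)

    x = np.ones((2, 3, 4), dtype=np.float32)
    print(lp_norm_sketch(x, axis=[0, 1]).shape)  # (4,) - as in the docstring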
@@ -1293,9 +1470,9 @@ class MatMul(PrimitiveWithCheck):

  Inputs:
  - **a** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
- `transpose_a` is True, its shape must be :math:`(N, C)` after transpose.
+ `transpose_a` is True, its shape must be :math:`(C, N)` after transpose.
  - **b** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
- `transpose_b` is True, its shape must be :math:`(C, M)` after transpose.
+ `transpose_b` is True, its shape must be :math:`(M, C)` after transpose.

  Outputs:
  Tensor, the shape of the output tensor is :math:`(N, M)`.
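One consistent reading of the corrected shape note: with `transpose_a=True`, `a` is supplied already transposed, i.e. as :math:`(C, N)`, so that the product still contracts over :math:`C`. A NumPy sketch of that contract:

    import numpy as np

    # MatMul contract with transpose_a=True: supply `a` as (C, N); it acts
    # as (N, C) in the product, and the output is (N, M).
    N, C, M = 2, 3, 4
    a = np.ones((C, N), dtype=np.float32)
    b = np.ones((C, M), dtype=np.float32)
    print(np.matmul(a.T, b).shape)  # (2, 4) == (N, M)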
@@ -1359,9 +1536,8 @@ class MatMul(PrimitiveWithCheck):

  def check_dtype(self, x1, x2):
  args = {"x1": x1, "x2": x2}
- validator.check_tensors_dtypes_same_and_valid(args,
- mstype.float_type + mstype.int_type + (mstype.complex64,),
- self.name)
+ validator.check_tensors_dtypes_same_and_valid(args, mstype.float_type + mstype.int_type
+ + (mstype.complex64, mstype.complex128), self.name)


  class BatchMatMul(Primitive):
@@ -1403,28 +1579,14 @@ class BatchMatMul(Primitive):
  >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  >>> batmatmul = ops.BatchMatMul()
  >>> output = batmatmul(x, y)
- >>> print(output)
- [[[[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]]
- [[[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]]]
+ >>> print(output.shape)
+ (2, 4, 1, 4)
  >>> x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
  >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  >>> batmatmul = ops.BatchMatMul(transpose_a=True)
  >>> output = batmatmul(x, y)
- >>> print(output)
- [[[[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]]
- [[[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]
- [[3. 3. 3. 3.]]]]
+ >>> print(output.shape)
+ (2, 4, 1, 4)
  """

  @prim_attr_register
@@ -1434,14 +1596,15 @@ class BatchMatMul(Primitive):
  cls_name = self.name
  validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
  validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)
+ self.add_prim_attr('adj_x1', self.transpose_a)
+ self.add_prim_attr('adj_x2', self.transpose_b)


  class Betainc(Primitive):
  r"""
- Computes the regularized incomplete beta integral
- :math:`I_{x}(a, b)`.
-
- The regularized incomplete beta integral is defined as:
+ Calculates the regularized incomplete beta function
+ :math:`I_{x}(a, b)`. It is defined as the ratio of the incomplete beta function
+ to the complete beta function:

  .. math::

@@ -1451,17 +1614,26 @@ class Betainc(Primitive):

  .. math::

- B(x ; a, b)=\int_{0}^{x} t^{a-1}(1-t)^{b-1} d t
+ B(x ; a, b)=\int_{0}^{x} t^{a-1}(1-t)^{b-1} dt
+
+ is the incomplete beta function and
+
+ .. math::
+
+ B(a, b) = \int_0^1 t^{a-1} (1-t)^{b-1} dt

- is the incomplete beta function and B(a, b) is the complete beta function
+ is the complete beta function.

  Inputs:
- - **a** (Tensor) - A Tensor of types: float32, float64.
- - **b** (Tensor) - A Tensor, must have the same dtype and shape as a.
- - **x** (Tensor) - A Tensor, must have the same dtype and shape as a.
+ - **a** (Tensor) - Peak location of beta distribution.
+ A Tensor of types: float32, float64.
+ - **b** (Tensor) - Spread of the beta distribution.
+ A Tensor, must have the same dtype and shape as `a`.
+ - **x** (Tensor) - Upper limit of integration of the incomplete beta function.
+ A Tensor, must have the same dtype and shape as `a`.

  Outputs:
- A Tensor, has the same dtype and shape as a.
+ A Tensor, has the same dtype and shape as `a`.

  Raises:
  TypeError: If dtype of `a` is not float32 nor float64.
@@ -1470,20 +1642,21 @@ class Betainc(Primitive):


  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

- Example:
- >>> a = Tensor(np.array([1, 1, 1]), mindspore.float32)
- >>> b = Tensor(np.array([1, 1, 1]), mindspore.float32)
- >>> x = Tensor(np.array([1, 1,1 ]), mindspore.float32)
+ Examples:
+ >>> a = Tensor(np.array([0.3, 0.1, 0.4]), mindspore.float32)
+ >>> b = Tensor(np.array([0.4, 0.5, 0.9]), mindspore.float32)
+ >>> x = Tensor(np.array([0.2, 0.6, 0.5]), mindspore.float32)
  >>> betainc = ops.Betainc()
  >>> print(betainc(a, b, x))
- [1. 1. 1.]
+ [0.41462693 0.8706035 0.7298298 ]
  """

  @prim_attr_register
  def __init__(self):
  """Initialize Betainc"""
+ self.init_prim_io_names(inputs=['a', 'b', 'x'], outputs=['output'])


  class CumSum(Primitive):
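The function documented in the Betainc hunks above is also available in SciPy, which makes the new example easy to cross-check (`scipy.special.betainc` computes the same regularized incomplete beta function :math:`I_{x}(a, b)`; requires SciPy):

    import numpy as np
    from scipy.special import betainc

    # Cross-check of the new Betainc docstring example.
    a = np.array([0.3, 0.1, 0.4], dtype=np.float32)
    b = np.array([0.4, 0.5, 0.9], dtype=np.float32)
    x = np.array([0.2, 0.6, 0.5], dtype=np.float32)
    print(betainc(a, b, x))  # approximately [0.4146 0.8706 0.7298]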
@@ -1563,7 +1736,7 @@ class AddN(Primitive):
  """
  Computes addition of all input tensors element-wise.

- Refer to :func:`mindspore.ops.addn` for more detail.
+ Refer to :func:`mindspore.ops.addn` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1604,26 +1777,10 @@ class AccumulateNV2(Primitive):
  """
  Computes accumulation of all input tensors element-wise.

- AccumulateNV2 is similar to AddN, but there is a significant difference
- among them: AccumulateNV2 will not wait for all of its inputs to be ready
- before summing. That is to say, AccumulateNV2 is able to save
- memory when inputs are ready at different time since the minimum temporary
- storage is proportional to the output size rather than the input size.
-
- Inputs:
- - **x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
- is made up of multiple tensors whose dtype is number to be added together.
- Each element of tuple or list should have the same shape.
-
- Outputs:
- Tensor, has the same shape and dtype as each entry of the `x`.
-
- Raises:
- TypeError: If `x` is neither tuple nor list.
- ValueError: If there is an input element with a different shape.
+ Refer to :func:`mindspore.ops.accumulate_n` for more details.

  Supported Platforms:
- ``Ascend``
+ ``Ascend`` ``GPU``

  Examples:
  >>> class NetAccumulateNV2(nn.Cell):
@@ -1662,7 +1819,7 @@ class Neg(Primitive):
  """
  Returns a tensor with negative values of the input tensor element-wise.

- Refer to :func:`mindspore.ops.neg` for more detail.
+ Refer to :func:`mindspore.ops.neg` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1683,37 +1840,25 @@ class Neg(Primitive):

  class InplaceUpdateV2(Primitive):
  r"""
- Updates specified rows with values in `v`.
-
- Note:
- This operator only supports dynamic shape. As for static shape, please use operator 'InplaceUpdate' instead.
+ Updates specified values in `x` to `v` according to `indices`.

- Args:
-
- Inputs:
- - **x** (Tensor) - A tensor which to be inplace updated. It can be one of the following data types:
- float32, float16 and int32.
- - **indices** (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of x
- to update with v. It is an int or tuple, whose value is in [0, the first dimension size of x).
- - **v** (Tensor) - A tensor with the same type as `x` and the same dimension size as `x` except
- the first dimension, which must be the same as the size of `indices`.
-
- Outputs:
- Tensor, with the same type and shape as the input `x`.
+ .. warning::
+ This is an experimental API that is subject to change or deletion.

- Raises:
- TypeError: If `indices` is neither int nor tuple.
- TypeError: If `indices` is a tuple and its element is not an int.
+ Refer to :func:`mindspore.ops.inplace_update` for more details.

  Supported Platforms:
- ``Ascend``
+ ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> indices = (0, 1)
  >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
  >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
- >>> inplace_update = ops.InplaceUpdate(indices)
- >>> output = inplace_update(x, v)
+ >>> inplace_update_v2 = ops.InplaceUpdateV2()
+ >>> output = inplace_update_v2(x, indices, v)
  >>> print(output)
  [[0.5 1. ]
  [1. 1.5]
@@ -1731,42 +1876,15 @@ class InplaceUpdateV2(Primitive):
  return output


- class InplaceUpdate(PrimitiveWithInfer):
+ class InplaceUpdate(Primitive):
  r"""
- Updates specified rows with values in `v`.
-
- Args:
- indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of x
- to update with v. It is an int or tuple, whose value is in [0, the first dimension size of x).
-
- Inputs:
- - **x** (Tensor) - A tensor which to be inplace updated. It can be one of the following data types:
- float32, float16 and int32.
- - **v** (Tensor) - A tensor with the same type as `x` and the same dimension size as `x` except
- the first dimension, which must be the same as the size of `indices`.
-
- Outputs:
- Tensor, with the same type and shape as the input `x`.
-
- Raises:
- TypeError: If `indices` is neither int nor tuple.
- TypeError: If `indices` is a tuple and its element is not an int.
+ The InplaceUpdate interface is deprecated. Please use the :class:`mindspore.ops.InplaceUpdateV2` instead.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> indices = (0, 1)
- >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
- >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
- >>> inplace_update = ops.InplaceUpdate(indices)
- >>> output = inplace_update(x, v)
- >>> print(output)
- [[0.5 1. ]
- [1. 1.5]
- [5. 6. ]]
+ Deprecated
  """

+ @deprecated("2.0", "ops.InplaceUpdateV2", False)
  @prim_attr_register
  def __init__(self, indices):
  """Initialize InplaceUpdate"""
@@ -1779,14 +1897,14 @@ class InplaceUpdate(PrimitiveWithInfer):
  validator.check_value_type("item of indices", item, [int], self.name)


- class InplaceAdd(PrimitiveWithInfer):
+ class InplaceAdd(Primitive):
  """
  Adds `v` into specified rows of `x`. Computes `y` = `x`; y[i,] += `v`.

- Refer to :func:`mindspore.ops.inplace_add` for more detail.
+ Refer to :func:`mindspore.ops.inplace_add` for more details.

  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import numpy as np
@@ -1814,35 +1932,53 @@ class InplaceAdd(PrimitiveWithInfer):
  for item in self.indices:
  validator.check_value_type("item of indices", item, [int], self.name)

- def infer_dtype(self, x_dtype, v_dtype):
- args = {'x': x_dtype, 'v': v_dtype}
- valid_type = [mstype.int32, mstype.float16, mstype.float32]
- validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
- return x_dtype

- def infer_shape(self, x_shape, v_shape):
- validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
- validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
- Rel.EQ, self.name)
- for i in self.indices:
- if i < 0 or i >= x_shape[0]:
- raise ValueError(f"For '{self.name}', the value of 'indices' must be "
- f"in [0, {x_shape[0]}), but got {i}.")
- x_rank = len(x_shape)
- for idx in range(x_rank)[1:]:
- validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
+ class InplaceIndexAdd(Primitive):
+ """
+ Adds Tensor `updates` to specified axis and indices of Tensor `var` element-wise.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Refer to :func:`mindspore.ops.inplace_index_add` for more details.
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> var = Parameter(Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32))
+ >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
+ >>> updates = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
+ >>> inplaceIndexAdd = ops.InplaceIndexAdd(axis=0)
+ >>> var = inplaceIndexAdd(var, indices, updates)
+ >>> print(var)
+ [[1.5 3. ]
+ [4. 5.5]
+ [5. 6. ]]
+ """
+
+ __mindspore_signature__ = (
+ sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
+ sig.make_sig('indices', dtype=sig.sig_dtype.T1),
+ sig.make_sig('updates', dtype=sig.sig_dtype.T)
+ )

- return x_shape
+ @prim_attr_register
+ def __init__(self, axis):
+ """Initialize InplaceIndexAdd"""
+ self.init_prim_io_names(inputs=['var', 'indices', 'updates'], outputs=['var'])
+ self.axis = axis
+ validator.check_value_type('axis', axis, [int], self.name)


- class InplaceSub(PrimitiveWithInfer):
+ class InplaceSub(Primitive):
  r"""
  Subtracts `v` into specified rows of `x`. Computes :math:`y = x`; :math:`y[i,] -= input\_v`.

- Refer to :func:`mindspore.ops.inplace_sub` for more detail.
+ Refer to :func:`mindspore.ops.inplace_sub` for more details.

  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import numpy as np
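For `axis=0`, the behavior documented for the new `InplaceIndexAdd` in the hunk above matches NumPy's unbuffered indexed add (a sketch reproducing its docstring example):

    import numpy as np

    # Indexed in-place add along axis 0, mirroring ops.InplaceIndexAdd(axis=0).
    var = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
    indices = np.array([0, 1])
    updates = np.array([[0.5, 1.0], [1.0, 1.5]], dtype=np.float32)
    np.add.at(var, indices, updates)  # var[indices] += updates, unbuffered
    print(var)
    # [[1.5 3. ]
    #  [4.  5.5]
    #  [5.  6. ]]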
@@ -1869,33 +2005,14 @@ class InplaceSub(PrimitiveWithInfer):
  self.indices = (indices,)
  for item in self.indices:
  validator.check_value_type("item of indices", item, [int], self.name)
-
- def infer_dtype(self, x_dtype, v_dtype):
- args = {'x': x_dtype, 'v': v_dtype}
- valid_type = [mstype.int32, mstype.float16, mstype.float32]
- validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
- return x_dtype
-
- def infer_shape(self, x_shape, v_shape):
- validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
- validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
- Rel.EQ, self.name)
- for i in self.indices:
- if i < 0 or i >= x_shape[0]:
- raise ValueError(f"For '{self.name}', the value of 'indices' must be "
- f"in [0, {x_shape[0]}), but got {i}.")
- x_rank = len(x_shape)
- for idx in range(x_rank)[1:]:
- validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
-
- return x_shape
+ self.add_prim_attr("indices", self.indices)


  class Sub(_MathBinaryOp):
  r"""
  Subtracts the second input tensor from the first input tensor element-wise.

- Refer to :func:`mindspore.ops.sub` for more detail.
+ Refer to :func:`mindspore.ops.sub` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1923,7 +2040,7 @@ class Mul(_MathBinaryOp):
  r"""
  Multiplies two tensors element-wise.

- Refer to :func:`mindspore.ops.mul` for more detail.
+ Refer to :func:`mindspore.ops.mul` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1937,6 +2054,17 @@ class Mul(_MathBinaryOp):
  [ 4. 10. 18.]
  """

+ # Let x and y share the same sig_dtype to enable implicit conversion for compatibility
+ __mindspore_signature__ = (
+ sig.make_sig('x', rw=sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
+ sig.make_sig('y', rw=sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T)
+ )
+
+ @prim_attr_register
+ def __init__(self):
+ """Initialize Mul."""
+ self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
+
  @staticmethod
  def _infer_specified_mul_value(x, y):
  """Calculate min/max value for output of Mul op"""
@@ -1991,10 +2119,9 @@ class SquaredDifference(Primitive):
  out_{i} = (x_{i} - y_{i}) * (x_{i} - y_{i}) = (x_{i} - y_{i})^2

  Inputs:
- - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
- or a tensor whose data type is float16, float32, int32 or bool.
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool, or a tensor.
  - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
- is a tensor or a tensor whose data type is float16, float32, int32 or bool.
+ is a tensor, or a tensor.

  Outputs:
  Tensor, the shape is the same as the one after broadcasting,
@@ -2104,7 +2231,8 @@ class Sqrt(Primitive):
  out_{i} = \sqrt{x_{i}}

  Inputs:
- - **x** (Tensor) - The input tensor with a dtype of Number, its rank must be in [0, 7] inclusive.
+ - **x** (Tensor) - The input tensor with a dtype of Number, the shape is :math:`(N, *)`
+ where :math:`*` means, any number of additional dimensions.

  Outputs:
  Tensor, has the same shape and data type as the `x`.
@@ -2129,7 +2257,7 @@ class Sqrt(Primitive):
  self.init_prim_io_names(inputs=['x'], outputs=['output'])


- class Reciprocal(PrimitiveWithInfer):
+ class Reciprocal(PrimitiveWithCheck):
  r"""
  Returns reciprocal of a tensor element-wise.

@@ -2167,13 +2295,6 @@ class Reciprocal(PrimitiveWithInfer):
  self.target = "OTHER"
  self.init_prim_io_names(inputs=['x'], outputs=['y'])

- def infer_shape(self, x):
- return x
-
- def infer_dtype(self, x):
- validator.check_subclass("x", x, mstype.tensor, self.name)
- return x
-
  def infer_value(self, x):
  if x is not None:
  x = x.asnumpy()
@@ -2187,7 +2308,7 @@ class Pow(Primitive):
  r"""
  Calculates the `y` power of each element in `x`.

- Refer to :func:`mindspore.ops.pow` for more detail.
+ Refer to :func:`mindspore.ops.pow` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -2229,7 +2350,7 @@ class Exp(Primitive):
  r"""
  Returns exponential of a tensor element-wise.

- Refer to :func:`mindspore.ops.exp` for more detail.
+ Refer to :func:`mindspore.ops.exp` for more details.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -2255,21 +2376,22 @@ class Logit(Primitive):
  r"""
  Calculate the logit of a tensor element-wise. Element in `x` is clamped to [eps, 1-eps].

- .. math::
- \begin{align}
- y_{i} & = \ln(\frac{z_{i}}{1 - z_{i}}) \\
- z_{i} & = \begin{cases}
- x_{i} & \text{if eps is None} \\
- \text{eps} & \text{if } x_{i} \lt \text{eps} \\
- x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\
- 1 - \text{eps} & \text{if } x_{i} \gt 1 - \text{eps}
- \end{cases}
- \end{align}
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Refer to :func:`mindspore.ops.logit` for more details.
+
+ Args:
+ eps (float, optional): The epsilon. The input clamp bound is defined as [eps, 1-eps]. Default: -1.0.

- Refer to :func:`mindspore.ops.logit` for more detail.
+ Inputs:
+ - **x** (Tensor) - The input tensor.
+
+ Outputs:
+ Tensor, with the same shape and dtype as the `x`.

  Supported Platforms:
- ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> x = Tensor(np.array([0.1, 0.2, 0.3]).astype(np.float32))
@@ -2289,10 +2411,33 @@ class Logit(Primitive):

  class ReduceStd(Primitive):
  """
- Returns the standard-deviation and mean of each row of the input tensor in the dimension `axis`.
- If `axis` is a list of dimensions, reduce over all of them.
+ Returns the standard deviation and mean of the input Tensor along
+ dimension(s) specified by `axis`.
+
+ Args:
+ axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
+ Default: (), reduce all dimensions. Only constant value is allowed.
+ Let `r` be the rank of `input_x`; it should be in the range :math:`[-r, r)`.
+ unbiased (bool, optional): Whether to use Bessel's correction.
+ If True, the unbiased estimation with Bessel's correction is used.
+ If False, the biased estimation is used to calculate the standard deviation.
+ Default: True.
+ keep_dims (bool, optional): Whether the output Tensor has dim retained or not.
+ If True, keep these reduced dimensions specified by `axis` and the length is 1.
+ If False, don't keep these dimensions.
+ Default: False.

- Refer to :func:`mindspore.ops.std` for more detail.
+ Inputs:
+ - **input_x** (Tensor[Number]) - The input Tensor, it has dtype Number with shape
+ :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+
+ Outputs:
+ Tuple(output_std, output_mean) containing the standard deviation and mean.
+
+ Raises:
+ TypeError: If `keep_dims` is not a bool.
+ TypeError: If `input_x` is not a Tensor.
+ ValueError: If `axis` is not one of the following: int, tuple or list.

  Supported Platforms:
  ``Ascend`` ``CPU``
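`unbiased` in the expanded `ReduceStd` docstring above is the usual delta-degrees-of-freedom switch; a NumPy sketch of the documented semantics:

    import numpy as np

    def reduce_std_sketch(x, axis=(), unbiased=True, keep_dims=False):
        """ReduceStd as documented: (std, mean) along `axis`, Bessel via ddof."""
        axis = tuple(range(x.ndim)) if axis == () else axis
        ddof = 1 if unbiased else 0  # Bessel's correction when unbiased
        std = np.std(x, axis=axis, ddof=ddof, keepdims=keep_dims)
        mean = np.mean(x, axis=axis, keepdims=keep_dims)
        return std, mean

    x = np.array([[1., 2., 3.], [4., 5., 6.]], dtype=np.float32)
    print(reduce_std_sketch(x, axis=1))  # per-row unbiased std and mean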
@@ -2324,12 +2469,13 @@ class ReduceStd(Primitive):

  class Einsum(Primitive):
  """
- This operator uses equation to represent a tuple of tensors operations,
- you can use this operator to perform diagonal/reducesum/transpose/matmul/mul/inner product operations, etc.
+ Sums the product of the elements of the input Tensor along
+ dimensions specified in a notation based on the Einstein summation convention (Einsum).
+ You can use this operator to perform diagonal/reducesum/transpose/matmul/mul/inner product operations, etc.

  The inputs must be a tuple of tensors.
  When the inputs are only one tensor, you can input (tensor, )
- dtypes of them should be float16/float32/float64
+ dtypes of them should be float16/float32/float64.

  Args:
  equation (str): An attribute, representing the operation you want to perform.
@@ -2423,21 +2569,83 @@ class Einsum(Primitive):
  self.init_prim_io_names(inputs=['inputs'], outputs=['output'])


- class Expm1(Primitive):
- r"""
- Returns exponential then minus 1 of a tensor element-wise.
+ class Diagonal(Primitive):
+ """
+ Creates a tensor with specific diagonal elements of the input. This operator extracts the diagonal elements
+ with offset from the 2-D sub-tensors specified by dim1 and dim2.
+ The shape of the output tensor can be determined by removing dim1 and dim2 from the shape of the input and
+ appending a dimension at the end. The size of the last dimension is the length of the diagonal.
+
+ Args:
+ offset (int): The offset of main diagonal, which controls which diagonal to consider. If :math:`offset=0`,
+ return the main diagonal elements with respect to dim1 and dim2. If :math:`offset>0`, return the
+ diagonal elements that are `offset` units upward from the main diagonal. If :math:`offset<0`, return the
+ diagonal elements that are `offset` units downward from the main diagonal. Default: 0.
+ dim1 (int): The first dimension with respect to which to take diagonal. Default: 0.
+ dim2 (int): The second dimension with respect to which to take diagonal. Default: 1.
+
+ Inputs:
+ - **x** (Tensor) - The input to take diagonal, with float32 or double data type.
+ The input must be at least 2-dimensional.
+ The shape is :math:`(N_{0}, N_{1}, *)` where :math:`*` means, any number of additional dimensions.
+
+ Outputs:
+ - **y** (Tensor) - A tensor whose values are diagonal of input, with the same data type as input.
+ The shape of the output is one dimension lower than the input.
+ If the shape of `x` is :math:`(d_{0}, d_{1}, ..., d_{n-1})`, the size of the `dim1` dimension
+ is :math:`d_{i}` and the size of the `dim2` dimension is :math:`d_{j}`, the shape of `y` is the same
+ as the input shape with `dim1` and `dim2` dimension removed and the diagonal dimension appended.
+ If the `offset` is nonnegative, the size of output's last dimension is
+ :math:`max(min(d_{i}, d_{j}-offset), 0)`. But if the `offset` is negative, the size of output's
+ last dimension is :math:`max(min(d_{i} + offset, d_{j}), 0)`.

- Refer to :func:`mindspore.ops.expm1` for more detail.
+ Raises:
+ TypeError: If dtype of `x` is neither float32 nor double.
+ TypeError: If `offset` is not an int.
+ TypeError: If `dim1` is not an int.
+ TypeError: If `dim2` is not an int.
+ ValueError: If the dimension of input is less than 2 dimensions.
+ ValueError: If `dim1` is not in range of [-len(x.shape), len(x.shape)).
+ ValueError: If `dim2` is not in range of [-len(x.shape), len(x.shape)).
+ ValueError: If `dim1` and `dim2` are identical.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend`` ``CPU``

  Examples:
- >>> x = Tensor(np.array([0.0, 2.0, 3.0, 5.0]), mindspore.float32)
- >>> expm1 = ops.Expm1()
- >>> output = expm1(x)
- >>> print(output)
- [ 0. 6.389056 19.085537 147.41316 ]
+ >>> x = Tensor(np.array([[[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]],
+ ... [[12., 13., 14., 15.], [16., 17., 18., 19.], [20., 21., 22., 23.]]]), mindspore.float32)
+ >>> diagonal_ops = ops.Diagonal(offset=1, dim1=-1, dim2=1)
+ >>> y = diagonal_ops(x)
+ >>> print(y)
+ [[ 4. 9.]
+ [16. 21.]]
+ """
+
+ @prim_attr_register
+ def __init__(self, offset=0, dim1=0, dim2=1):
+ """Initialize Diagonal"""
+ self.init_prim_io_names(inputs=['x'], outputs=['y'])
+ validator.check_is_int(offset, "offset", self.name)
+ validator.check_is_int(dim1, "dim1", self.name)
+ validator.check_is_int(dim2, "dim2", self.name)
+
+
+ class Expm1(Primitive):
+ r"""
+ Returns exponential then minus 1 of a tensor element-wise.
+
+ Refer to :func:`mindspore.ops.expm1` for more details.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> x = Tensor(np.array([0.0, 2.0, 3.0, 5.0]), mindspore.float32)
+ >>> expm1 = ops.Expm1()
+ >>> output = expm1(x)
+ >>> print(output)
+ [ 0. 6.389056 19.085537 147.41316 ]
  """

  @prim_attr_register
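The offset and shape rules of the `Diagonal` operator added above coincide with NumPy's `diagonal` (with `dim1`/`dim2` as `axis1`/`axis2`); a sketch reproducing its docstring example:

    import numpy as np

    # Same extraction as ops.Diagonal(offset=1, dim1=-1, dim2=1).
    x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
    print(np.diagonal(x, offset=1, axis1=-1, axis2=1))
    # [[ 4.  9.]
    #  [16. 21.]]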
@@ -2448,7 +2656,7 @@ class Expm1(Primitive):

  class Histogram(Primitive):
  """
- Computes the histogram of a tensor.
+ Computes the histogram of Tensor element distribution.

  The elements are sorted into equal width bins between `min` and `max`.
  If `min` and `max` are both zero, the minimum and maximum values of the data are used.
@@ -2456,12 +2664,12 @@ class Histogram(Primitive):
  Elements lower than min and higher than max are ignored.

  Args:
- bins (int) : Number of histogram bins, optional. Default 100. If specified, must be positive.
- min (float): An optional float of the lower end of the range (inclusive). Default value is 0.0.
- max (float): An optional float of the upper end of the range (inclusive). Default value is 0.0.
+ bins (int, optional): Number of histogram bins. Default: 100. If specified, must be positive.
+ min (float, optional): An optional float of the lower end of the range (inclusive). Default value is 0.0.
+ max (float, optional): An optional float of the upper end of the range (inclusive). Default value is 0.0.

  Inputs:
- - **x** (Tensor) - the input tensor, type support list [float16, float32, int32]
+ - **x** (Tensor) - the input tensor, type support list: [float16, float32, int32].

  Outputs:
  Tensor, 1-D Tensor with type int32.
@@ -2493,7 +2701,7 @@ class Histogram(Primitive):
  validator.check_value_type("min", min, [float], self.name)
  validator.check_value_type("max", max, [float], self.name)
  validator.check_positive_int(bins, 'bins', self.name)
- validator.check('min', min, 'max', max, Rel.LE, self.name)
+ validator.check('min', min, 'max', max, validator.LE, self.name)
@@ -2502,21 +2710,21 @@ class HistogramFixedWidth(PrimitiveWithInfer):
  width and determined by the inputs `range` and the arguments `nbins`.

  Args:
- dtype (str): An optional attribute. The dtype must be "int32". Default: "int32".
  nbins (int): The number of histogram bins, the type is a positive integer.
+ dtype (str, optional): The dtype of the output. Only "int32" is supported. Default: "int32".

  Inputs:
  - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
- - **range** (Tensor) - Must have the same data type as `x`, and the shape is (2,).
+ - **range** (Tensor) - Must have the same data type as `x`, and the shape is :math:`(2,)`.
  x <= range[0] will be mapped to histogram[0], x >= range[1] will be mapped to histogram[-1].

  Outputs:
- Tensor, the type is int32.
+ 1-D Tensor of length `nbins`, with dtype int32.

  Raises:
  TypeError: If `dtype` is not a str or `nbins` is not an int.
  ValueError: If `nbins` is less than 1.
- ValueError: If `dtype` is neither 'int32' nor 'int64'.
+ ValueError: If `dtype` is not 'int32'.

  Supported Platforms:
  ``Ascend`` ``GPU``
@@ -2534,7 +2742,7 @@ class HistogramFixedWidth(PrimitiveWithInfer):
2534
2742
  def __init__(self, nbins, dtype='int32'):
2535
2743
  """Initialize HistogramFixedWidth."""
2536
2744
  self.nbins = validator.check_value_type("nbins", nbins, [int], self.name)
2537
- validator.check_int(nbins, 1, Rel.GE, "nbins", self.name)
2745
+ validator.check_int(nbins, 1, validator.GE, "nbins", self.name)
2538
2746
  valid_values = ['int32']
2539
2747
  self.dtype = validator.check_string(dtype, valid_values, "dtype", self.name)
2540
2748
  self.init_prim_io_names(inputs=['x', 'range'], outputs=['y'])
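
A minimal doctest sketch of the clamping rule above (usual imports assumed): values below `range[0]` count toward the first bin and values at or above `range[1]` toward the last.

>>> x = Tensor(np.array([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0]), mindspore.float16)
>>> range_op = Tensor(np.array([0.0, 5.0]), mindspore.float16)
>>> hist = ops.HistogramFixedWidth(5)
>>> print(hist(x, range_op))
[2 1 1 0 2]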
@@ -2545,7 +2753,7 @@ class Log(Primitive):
2545
2753
  """
2546
2754
  Returns the natural logarithm of a tensor element-wise.
2547
2755
 
2548
- Refer to :func:`mindspore.ops.log` for more detail.
2756
+ Refer to :func:`mindspore.ops.log` for more details.
2549
2757
 
2550
2758
  Supported Platforms:
2551
2759
  ``Ascend`` ``GPU`` ``CPU``
@@ -2572,7 +2780,7 @@ class Log1p(Primitive):
2572
2780
  r"""
2573
2781
  Returns the natural logarithm of one plus the input tensor element-wise.
2574
2782
 
2575
- Refer to :func:`mindspore.ops.log1p` for more detail.
2783
+ Refer to :func:`mindspore.ops.log1p` for more details.
2576
2784
 
2577
2785
  Supported Platforms:
2578
2786
  ``Ascend`` ``GPU`` ``CPU``
@@ -2595,7 +2803,10 @@ class Hypot(Primitive):
2595
2803
  """
2596
2804
  Computes hypotenuse of input tensors element-wise as legs of a right triangle.
2597
2805
  The shape of two inputs should be broadcastable, and data type of them should be
2598
- one of: float32, float64
2806
+ one of: float32, float64.
2807
+
2808
+ .. warning::
2809
+ This is an experimental API that is subject to change or deletion.
2599
2810
 
2600
2811
  Inputs:
2601
2812
  - **x1** (Tensor) - The first input tensor.
@@ -2610,7 +2821,7 @@ class Hypot(Primitive):
2610
2821
  ValueError: If shape of two inputs are not broadcastable.
2611
2822
 
2612
2823
  Supported Platforms:
2613
- ``Ascend`` ``CPU``
2824
+ ``Ascend`` ``GPU`` ``CPU``
2614
2825
 
2615
2826
  Examples:
2616
2827
  >>> x1 = Tensor(np.array([3., 5., 7.]))
@@ -2630,7 +2841,7 @@ class Hypot(Primitive):
2630
2841
 
2631
2842
  class Heaviside(Primitive):
2632
2843
  r"""
2633
- Computes the Heaviside step function for each element in input.
2844
+ Applies the Heaviside step function for input `x` element-wise.
2634
2845
 
2635
2846
  .. math::
2636
2847
  \text { heaviside }(\text { x, values })=\left\{\begin{array}{ll}
@@ -2639,13 +2850,16 @@ class Heaviside(Primitive):
2639
2850
  1, & \text { if x }>0
2640
2851
  \end{array}\right.
2641
2852
 
2853
+ .. warning::
2854
+ This is an experimental API that is subject to change or deletion.
2855
+
2642
2856
  Inputs:
2643
2857
  - **x** (Tensor) - The input tensor. With real number data type.
2644
- - **values** (Tensor) - The values to use where x is zero. Values can be broadcast with x.
2645
- 'x' should have the same dtype with 'values'.
2858
+ - **values** (Tensor) - The values to use where `x` is zero.
2859
+ It should be able to broadcast with `x` and have the same dtype as `x`.
2646
2860
 
2647
2861
  Outputs:
2648
- Tensor, has the same type as 'x' and 'values'.
2862
+ Tensor, has the same type as `x` and `values`.
2649
2863
 
2650
2864
  Raises:
2651
2865
  TypeError: If `x` or `values` is not Tensor.
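
A minimal doctest sketch of the piecewise definition above (usual imports assumed): negative elements map to 0, zeros take the broadcast `values`, and positive elements map to 1.

>>> x = Tensor(np.array([-5., 1., 0., 2., 0.]), mindspore.float32)
>>> values = Tensor(np.array([3.]), mindspore.float32)
>>> heaviside = ops.Heaviside()
>>> print(heaviside(x, values))
[0. 1. 3. 1. 3.]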
@@ -2673,7 +2887,7 @@ class Erf(Primitive):
2673
2887
  r"""
2674
2888
  Computes the Gauss error function of `x` element-wise.
2675
2889
 
2676
- Refer to :func:`mindspore.ops.erf` for more detail.
2890
+ Refer to :func:`mindspore.ops.erf` for more details.
2677
2891
 
2678
2892
  Supported Platforms:
2679
2893
  ``Ascend`` ``GPU`` ``CPU``
@@ -2696,7 +2910,7 @@ class Erfc(Primitive):
2696
2910
  r"""
2697
2911
  Computes the complementary error function of `x` element-wise.
2698
2912
 
2699
- Refer to :func:`mindspore.ops.erfc` for more detail.
2913
+ Refer to :func:`mindspore.ops.erfc` for more details.
2700
2914
 
2701
2915
  Supported Platforms:
2702
2916
  ``Ascend`` ``GPU`` ``CPU``
@@ -2719,7 +2933,7 @@ class Minimum(_MathBinaryOp):
2719
2933
  r"""
2720
2934
  Computes the minimum of input tensors element-wise.
2721
2935
 
2722
- Refer to :func:`mindspore.ops.minimum` for more detail.
2936
+ Refer to :func:`mindspore.ops.minimum` for more details.
2723
2937
 
2724
2938
  Supported Platforms:
2725
2939
  ``Ascend`` ``GPU`` ``CPU``
@@ -2754,7 +2968,7 @@ class Maximum(_MathBinaryOp):
2754
2968
  """
2755
2969
  Computes the maximum of input tensors element-wise.
2756
2970
 
2757
- Refer to :func:`mindspore.ops.maximum` for more detail.
2971
+ Refer to :func:`mindspore.ops.maximum` for more details.
2758
2972
 
2759
2973
  Supported Platforms:
2760
2974
  ``Ascend`` ``GPU`` ``CPU``
@@ -2780,7 +2994,17 @@ class RealDiv(_MathBinaryOp):
2780
2994
  """
2781
2995
  Divides the first input tensor by the second input tensor in floating-point type element-wise.
2782
2996
 
2783
- Refer to :func:`mindspore.ops.div` for more detail.
2997
+ Refer to :func:`mindspore.ops.div` for more details.
2998
+
2999
+ Inputs:
3000
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3001
+ a bool or a tensor whose data type is number or bool.
3002
+ - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3003
+ a bool when the first input is a tensor or a tensor whose data type is number or bool.
3004
+
3005
+ Outputs:
3006
+ Tensor, the shape is the same as the one after broadcasting,
3007
+ and the data type is the one with higher precision or higher digits among the two inputs.
2784
3008
 
2785
3009
  Supported Platforms:
2786
3010
  ``Ascend`` ``GPU`` ``CPU``
@@ -2812,7 +3036,7 @@ class Div(_MathBinaryOp):
2812
3036
 
2813
3037
  out_{i} = \frac{x_i}{y_i}
2814
3038
 
2815
- .. note::
3039
+ Note:
2816
3040
  - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2817
3041
  - The inputs must be two tensors or one tensor and one scalar.
2818
3042
  - When the inputs are two tensors,
@@ -2822,8 +3046,8 @@ class Div(_MathBinaryOp):
2822
3046
  Inputs:
2823
3047
  - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
2824
3048
  a bool or a tensor whose data type is
2825
- `number <https://www.mindspore.cn/docs/en/r1.10/api_python/mindspore.html#mindspore.dtype>`_ or
2826
- `bool_ <https://www.mindspore.cn/docs/en/r1.10/api_python/mindspore.html#mindspore.dtype>`_.
3049
+ `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ or
3050
+ `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_.
2827
3051
  - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
2828
3052
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
2829
3053
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
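
A minimal doctest sketch of the element-wise rule above (usual imports assumed; both inputs share dtype and shape here, so no implicit conversion or broadcasting is triggered):

>>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
>>> div = ops.Div()
>>> print(div(x, y))
[-1.3333334  2.5        2.       ]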
@@ -2896,7 +3120,7 @@ class Div(_MathBinaryOp):
2896
3120
 
2897
3121
  class DivNoNan(Primitive):
2898
3122
  r"""
2899
- Computes a safe divide and returns 0 if the x2 is zero.
3123
+ Performs a safe division between `x1` and `x2` element-wise, returning 0 where the corresponding element of `x2` is zero.
2900
3124
 
2901
3125
  Inputs of `x1` and `x2` comply with the implicit type conversion rules to make the data types consistent.
2902
3126
  The inputs must be two tensors or one tensor and one scalar.
@@ -2914,10 +3138,11 @@ class DivNoNan(Primitive):
2914
3138
  Inputs:
2915
3139
  - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
2916
3140
  a bool or a tensor whose data type is
2917
- `number <https://www.mindspore.cn/docs/en/r1.10/api_python/mindspore.html#mindspore.dtype>`_ or
2918
- `bool_ <https://www.mindspore.cn/docs/en/r1.10/api_python/mindspore.html#mindspore.dtype>`_.
3141
+ `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ or
3142
+ `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_.
2919
3144
  - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
2920
- a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
3145
+ a bool when the first input is a tensor, or a tensor whose data type is number or bool\_.
2921
3146
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
2922
3147
 
2923
3148
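
A minimal doctest sketch of the safe-division rule above (usual imports assumed): every position where `x2` is zero yields 0 instead of inf or nan.

>>> x1 = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
>>> x2 = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
>>> div_no_nan = ops.DivNoNan()
>>> print(div_no_nan(x1, x2))
[0.  0.  0.  2.5 2. ]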
 
@@ -3020,7 +3245,7 @@ class FloorDiv(Primitive):
3020
3245
  """
3021
3246
  Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
3022
3247
 
3023
- Refer to :func:`mindspore.ops.floor_div` for more detail.
3248
+ Refer to :func:`mindspore.ops.floor_div` for more details.
3024
3249
 
3025
3250
  Supported Platforms:
3026
3251
  ``Ascend`` ``GPU`` ``CPU``
@@ -3043,8 +3268,8 @@ class FloorDiv(Primitive):
3043
3268
 
3044
3269
  class TruncateDiv(Primitive):
3045
3270
  """
3046
- Divides the first input tensor by the second input tensor element-wise for integer types, negative numbers will
3047
- round fractional quantities towards zero.
3271
+ Divides the first input tensor by the second input tensor element-wise and rounds the results
3272
+ of division towards zero. Equivalent to C-style integer division.
3048
3273
 
3049
3274
  Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3050
3275
  The inputs must be two tensors or one tensor and one scalar.
@@ -3070,7 +3295,7 @@ class TruncateDiv(Primitive):
3070
3295
  TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
3071
3296
 
3072
3297
  Supported Platforms:
3073
- ``Ascend`` ``CPU`` ``GPU``
3298
+ ``Ascend`` ``GPU`` ``CPU``
3074
3299
 
3075
3300
  Examples:
3076
3301
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
@@ -3102,10 +3327,10 @@ class TruncateMod(Primitive):
3102
3327
 
3103
3328
  .. warning::
3104
3329
  - The input data does not support 0.
3105
- - When the elements of input exceed 2048 , the accuracy of operator cannot guarantee the requirement of
3330
+ - When the elements of the input exceed 2048, the accuracy of the operator cannot guarantee the requirement of
3106
3331
  double thousandths in the mini form.
3107
3332
  - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
3108
- - If shape is expressed as (D1,D2... ,Dn), then D1\*D2... \*DN<=1000000,n<=8.
3333
+ - If shape is expressed as :math:`(D1, D2, ..., Dn)`, then :math:`D1*D2*...*Dn<=1000000` and :math:`n<=8`.
3109
3334
 
3110
3335
  Inputs:
3111
3336
  - **x** (Union[Tensor, numbers.Number, bool]) - The first input is a number, or a bool,
@@ -3123,7 +3348,7 @@ class TruncateMod(Primitive):
3123
3348
  ValueError: If the shape `x` and `y` cannot be broadcasted to each other.
3124
3349
 
3125
3350
  Supported Platforms:
3126
- ``Ascend`` ``CPU`` ``GPU``
3351
+ ``Ascend`` ``GPU`` ``CPU``
3127
3352
 
3128
3353
  Examples:
3129
3354
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
@@ -3202,7 +3427,7 @@ class Floor(Primitive):
3202
3427
  r"""
3203
3428
  Rounds a tensor down to the closest integer element-wise.
3204
3429
 
3205
- Refer to :func:`mindspore.ops.floor` for more detail.
3430
+ Refer to :func:`mindspore.ops.floor` for more details.
3206
3431
 
3207
3432
  Supported Platforms:
3208
3433
  ``Ascend`` ``GPU`` ``CPU``
@@ -3223,9 +3448,9 @@ class Floor(Primitive):
3223
3448
 
3224
3449
  class FloorMod(Primitive):
3225
3450
  r"""
3226
- Computes the remainder of division element-wise. It's a flooring divide.
3451
+ Computes the remainder of division element-wise, and it's a flooring divide.
3227
3452
 
3228
- Refer to :func:`mindspore.ops.floor_mod` for more detail.
3453
+ Refer to :func:`mindspore.ops.floor_mod` for more details.
3229
3454
 
3230
3455
  Supported Platforms:
3231
3456
  ``Ascend`` ``GPU`` ``CPU``
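
A minimal doctest sketch of the flooring behaviour (usual imports assumed): unlike a truncating mod, the result takes the sign of the divisor, so -1 mod 3 gives 2.

>>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> floor_mod = ops.FloorMod()
>>> print(floor_mod(x, y))
[2 1 2]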
@@ -3251,7 +3476,7 @@ class Ceil(PrimitiveWithInfer):
3251
3476
  r"""
3252
3477
  Rounds a tensor up to the closest integer element-wise.
3253
3478
 
3254
- Refer to :func:`mindspore.ops.ceil` for more detail.
3479
+ Refer to :func:`mindspore.ops.ceil` for more details.
3255
3480
 
3256
3481
  Supported Platforms:
3257
3482
  ``Ascend`` ``GPU`` ``CPU``
@@ -3366,7 +3591,7 @@ class Xlogy(Primitive):
3366
3591
  Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
3367
3592
  Returns zero when `x` is zero.
3368
3593
 
3369
- Refer to :func:`mindspore.ops.xlogy` for more detail.
3594
+ Refer to :func:`mindspore.ops.xlogy` for more details.
3370
3595
 
3371
3596
  Supported Platforms:
3372
3597
  ``Ascend`` ``GPU`` ``CPU``
@@ -3391,7 +3616,7 @@ class Acosh(Primitive):
3391
3616
  r"""
3392
3617
  Computes inverse hyperbolic cosine of the inputs element-wise.
3393
3618
 
3394
- Refer to :func:`mindspore.ops.acosh` for more detail.
3619
+ Refer to :func:`mindspore.ops.acosh` for more details.
3395
3620
 
3396
3621
  Supported Platforms:
3397
3622
  ``Ascend`` ``GPU`` ``CPU``
@@ -3417,7 +3642,7 @@ class Cosh(Primitive):
3417
3642
  r"""
3418
3643
  Computes hyperbolic cosine of input element-wise.
3419
3644
 
3420
- Refer to :func:`mindspore.ops.cosh` for more detail.
3645
+ Refer to :func:`mindspore.ops.cosh` for more details.
3421
3646
 
3422
3647
  Supported Platforms:
3423
3648
  ``Ascend`` ``GPU`` ``CPU``
@@ -3439,7 +3664,7 @@ class Asinh(Primitive):
3439
3664
  r"""
3440
3665
  Computes inverse hyperbolic sine of the input element-wise.
3441
3666
 
3442
- Refer to :func:`mindspore.ops.asinh` for more detail.
3667
+ Refer to :func:`mindspore.ops.asinh` for more details.
3443
3668
 
3444
3669
  Supported Platforms:
3445
3670
  ``Ascend`` ``GPU`` ``CPU``
@@ -3462,28 +3687,19 @@ class Sinc(Primitive):
3462
3687
  r"""
3463
3688
  Computes the normalized sinc of input.
3464
3689
 
3465
- Refer to :func:`mindspore.ops.sinc` for more detail.
3466
-
3467
- .. math::
3468
-
3469
- y_i = \begin{cases}1 & \text{ if } x_i= 0\\ \frac{sin(\pi x_i)}{x_i} &
3470
- \text{ otherwise } \end{cases}
3471
-
3472
- Inputs:
3473
- - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
3474
-
3475
- Outputs:
3476
- Tensor, has the same shape as the `x`. The dtype of output is float32 when dtype of `x` is in
3477
- [uint8, uint8, uint16, int16, uint32, int32, uint64, int64, bool]. Otherwise output has the
3478
- same dtype as the `x`.
3690
+ .. warning::
3691
+ This is an experimental API that is subject to change or deletion.
3479
3692
 
3480
- Raises:
3481
- TypeError: If `x` is not a Tensor.
3693
+ Refer to :func:`mindspore.ops.sinc` for more details.
3482
3694
 
3483
3695
  Supported Platforms:
3484
- ``Ascend`` ``CPU``
3696
+ ``Ascend`` ``GPU`` ``CPU``
3485
3697
 
3486
3698
  Examples:
3699
+ >>> import mindspore
3700
+ >>> import numpy as np
3701
+ >>> import mindspore.ops.operations.math_ops as ops
3702
+ >>> from mindspore import Tensor, dtype
3487
3703
  >>> sinc = ops.Sinc()
3488
3704
  >>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
3489
3705
  >>> output = sinc(x)
@@ -3500,7 +3716,7 @@ class Sinh(Primitive):
3500
3716
  r"""
3501
3717
  Computes hyperbolic sine of the input element-wise.
3502
3718
 
3503
- Refer to :func:`mindspore.ops.sinh` for more detail.
3719
+ Refer to :func:`mindspore.ops.sinh` for more details.
3504
3720
 
3505
3721
  Supported Platforms:
3506
3722
  ``Ascend`` ``GPU`` ``CPU``
@@ -3534,11 +3750,47 @@ class _LogicBinaryOp(_BinaryOp):
3534
3750
  return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
3535
3751
 
3536
3752
 
3753
+ class Quantile(Primitive):
3754
+ r"""
3755
+ Computes the q-th quantiles of all elements in the input tensor, doing a linear interpolation when the
3756
+ q-th quantile lies between two data points.
3757
+
3758
+ Refer to :func:`mindspore.ops.quantile` and :func:`mindspore.ops.nanquantile` for more details.
3759
+
3760
+ Supported Platforms:
3761
+
3762
+
3763
+ Examples:
3764
+ >>> quantile = ops.Quantile()
3765
+ >>> input = Tensor(np.array([0.0700, -0.5446, 0.9214]), mindspore.float32)
3766
+ >>> q = Tensor(np.array([0, 0.5, 1]), mindspore.float32)
3767
+ >>> output = quantile(input, q)
3768
+ >>> print(output)
3769
+ [-0.5446 0.07 0.9214]
3770
+ """
3771
+
3772
+ @prim_attr_register
3773
+ def __init__(self, dim=None, keep_dims=False, ignore_nan=False):
3774
+ """Initialize Quantile"""
3775
+ if dim is not None:
3776
+ validator.check_value_type("dim", dim, [int], self.name)
3777
+ else:
3778
+ self.add_prim_attr("dim", 10000)
3779
+ if keep_dims is not None:
3780
+ validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
3781
+ else:
3782
+ self.add_prim_attr("keep_dims", False)
3783
+ if ignore_nan is not None:
3784
+ validator.check_value_type("ignore_nan", ignore_nan, [bool], self.name)
3785
+ else:
3786
+ self.add_prim_attr("ignore_nan", False)
3787
+
3788
+
3537
3789
  class Equal(Primitive):
3538
3790
  r"""
3539
3791
  Computes the equivalence between two tensors element-wise.
3540
3792
 
3541
- Refer to :func:`mindspore.ops.equal` for more detail.
3793
+ Refer to :func:`mindspore.ops.equal` for more details.
3542
3794
 
3543
3795
  Supported Platforms:
3544
3796
  ``Ascend`` ``GPU`` ``CPU``
@@ -3627,12 +3879,12 @@ class EqualCount(PrimitiveWithInfer):
3627
3879
  Inputs:
3628
3880
  - **x** (Tensor) - The first input tensor. If the data type and shape of `y` are determined, then `x`
3629
3881
  must be the same as `y`, and vice versa.
3630
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
3882
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
3631
3883
  - **y** (Tensor) - The second input tensor. If the data type and shape of `x` are determined, then `y`
3632
3884
  must be the same as `x`, and vice versa.
3633
3885
 
3634
3886
  Outputs:
3635
- Tensor, with the type same as input tensor and shape as (1,).
3887
+ Tensor, with the type same as input tensor and shape as :math:`(1,)`.
3636
3888
 
3637
3889
  Raises:
3638
3890
  TypeError: If `x` or `y` is not a Tensor.
@@ -3655,22 +3907,12 @@ class EqualCount(PrimitiveWithInfer):
3655
3907
  """Initialize EqualCount"""
3656
3908
  self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
3657
3909
 
3658
- def infer_shape(self, x_shape, y_shape):
3659
- validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
3660
- output_shape = (1,)
3661
- return output_shape
3662
-
3663
- def infer_dtype(self, x_dtype, y_dtype):
3664
- args = {'x': x_dtype, 'y': y_dtype}
3665
- validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)
3666
- return x_dtype
3667
-
3668
3910
 
3669
3911
  class NotEqual(Primitive):
3670
3912
  """
3671
3913
  Computes the non-equivalence of two tensors element-wise.
3672
3914
 
3673
- Refer to :func:`mindspore.ops.ne` for more detail.
3915
+ Refer to :func:`mindspore.ops.ne` for more details.
3674
3916
 
3675
3917
  Supported Platforms:
3676
3918
  ``Ascend`` ``GPU`` ``CPU``
@@ -3701,7 +3943,7 @@ class Greater(PrimitiveWithCheck):
3701
3943
  r"""
3702
3944
  Compare the value of the input parameters :math:`x,y` element-wise, and the output result is a bool value.
3703
3945
 
3704
- Refer to :func:`mindspore.ops.gt` for more detail.
3946
+ Refer to :func:`mindspore.ops.gt` for more details.
3705
3947
 
3706
3948
  Supported Platforms:
3707
3949
  ``Ascend`` ``GPU`` ``CPU``
@@ -3712,7 +3954,7 @@ class Greater(PrimitiveWithCheck):
3712
3954
  >>> greater = ops.Greater()
3713
3955
  >>> output = greater(x, y)
3714
3956
  >>> print(output)
3715
- [False True False]
3957
+ [False True False]
3716
3958
  """
3717
3959
  __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
3718
3960
 
@@ -3736,7 +3978,7 @@ class GreaterEqual(PrimitiveWithCheck):
3736
3978
  r"""
3737
3979
  Computes the boolean value of :math:`x >= y` element-wise.
3738
3980
 
3739
- Refer to :func:`mindspore.ops.ge` for more detail.
3981
+ Refer to :func:`mindspore.ops.ge` for more details.
3740
3982
 
3741
3983
  Supported Platforms:
3742
3984
  ``Ascend`` ``GPU`` ``CPU``
@@ -3766,9 +4008,18 @@ class GreaterEqual(PrimitiveWithCheck):
3766
4008
 
3767
4009
  class Lerp(Primitive):
3768
4010
  """
3769
- Computes the minimum of input tensors element-wise.
4011
+ Performs a linear interpolation of two tensors `start` and `end` based on a float or tensor `weight`.
3770
4012
 
3771
- Refer to :func:`mindspore.ops.lerp` for more detail.
4013
+ Refer to :func:`mindspore.ops.lerp` for more details.
4014
+
4015
+ Inputs:
4016
+ - **start** (Tensor) - The tensor with the starting points. Data type must be float16 or float32.
4017
+ - **end** (Tensor) - The tensor with the ending points. Data type must be the same as `start`.
4018
+ - **weight** (Union[float, Tensor]) - The weight for the interpolation formula. Must be a float
4019
+ or a scalar tensor with float16 or float32 data type.
4020
+
4021
+ Outputs:
4022
+ Tensor, has the same type and shape as input `start`.
3772
4023
 
3773
4024
  Supported Platforms:
3774
4025
  ``Ascend`` ``GPU`` ``CPU``
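
A minimal doctest sketch of the interpolation formula `start + weight * (end - start)` (usual imports assumed):

>>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
>>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
>>> lerp = ops.Lerp()
>>> print(lerp(start, end, 0.5))
[5.5 6.  6.5 7. ]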
@@ -3784,7 +4035,6 @@ class Lerp(Primitive):
3784
4035
 
3785
4036
  @prim_attr_register
3786
4037
  def __init__(self):
3787
- super().__init__("Lerp")
3788
4038
  self.init_prim_io_names(inputs=['start', 'end', 'weight'], outputs=['output'])
3789
4039
 
3790
4040
 
@@ -3792,7 +4042,10 @@ class Gcd(Primitive):
3792
4042
  """
3793
4043
  Computes greatest common divisor of input tensors element-wise.
3794
4044
  The shape of two inputs should be broadcastable, and data type of them should be
3795
- one of: int32, int64
4045
+ one of: int32, int64.
4046
+
4047
+ .. warning::
4048
+ This is an experimental API that is subject to change or deletion.
3796
4049
 
3797
4050
  Inputs:
3798
4051
  - **x1** (Tensor) - The first input tensor.
@@ -3800,14 +4053,14 @@ class Gcd(Primitive):
3800
4053
 
3801
4054
  Outputs:
3802
4055
  Tensor, the shape is the same as the one after broadcasting, and the data type is one
3803
- with higher digits in the two inputs.
4056
+ with higher precision in the two inputs.
3804
4057
 
3805
4058
  Raises:
3806
4059
  TypeError: If data type `x1` or `x2` is not int32 or int64.
3807
4060
  ValueError: If shape of two inputs are not broadcastable.
3808
4061
 
3809
4062
  Supported Platforms:
3810
- ``Ascend`` ``CPU``
4063
+ ``Ascend`` ``GPU`` ``CPU``
3811
4064
 
3812
4065
  Examples:
3813
4066
  >>> x1 = Tensor(np.array([7, 8, 9]))
@@ -3829,7 +4082,7 @@ class Less(PrimitiveWithCheck):
3829
4082
  r"""
3830
4083
  Computes the boolean value of :math:`x < y` element-wise.
3831
4084
 
3832
- Refer to :func:`mindspore.ops.less` for more detail.
4085
+ Refer to :func:`mindspore.ops.less` for more details.
3833
4086
 
3834
4087
  Supported Platforms:
3835
4088
  ``Ascend`` ``GPU`` ``CPU``
@@ -3861,7 +4114,7 @@ class LessEqual(PrimitiveWithCheck):
3861
4114
  r"""
3862
4115
  Computes the boolean value of :math:`x <= y` element-wise.
3863
4116
 
3864
- Refer to :func:`mindspore.ops.le` for more detail.
4117
+ Refer to :func:`mindspore.ops.le` for more details.
3865
4118
 
3866
4119
  Supported Platforms:
3867
4120
  ``Ascend`` ``GPU`` ``CPU``
@@ -3893,7 +4146,7 @@ class LogicalNot(Primitive):
3893
4146
  """
3894
4147
  Computes the "logical NOT" of a tensor element-wise.
3895
4148
 
3896
- Refer to :func:`mindspore.ops.logical_not` for more detail.
4149
+ Refer to :func:`mindspore.ops.logical_not` for more details.
3897
4150
 
3898
4151
  Supported Platforms:
3899
4152
  ``Ascend`` ``GPU`` ``CPU``
@@ -3916,7 +4169,7 @@ class LogicalAnd(_LogicBinaryOp):
3916
4169
  r"""
3917
4170
  Computes the "logical AND" of two tensors element-wise.
3918
4171
 
3919
- Refer to :func:`mindspore.ops.logical_and` for more detail.
4172
+ Refer to :func:`mindspore.ops.logical_and` for more details.
3920
4173
 
3921
4174
  Supported Platforms:
3922
4175
  ``Ascend`` ``GPU`` ``CPU``
@@ -3935,7 +4188,7 @@ class LogicalOr(_LogicBinaryOp):
3935
4188
  """
3936
4189
  Computes the "logical OR" of two tensors element-wise.
3937
4190
 
3938
- Refer to :func:`mindspore.ops.logical_or` for more detail.
4191
+ Refer to :func:`mindspore.ops.logical_or` for more details.
3939
4192
 
3940
4193
  Supported Platforms:
3941
4194
  ``Ascend`` ``GPU`` ``CPU``
@@ -3954,24 +4207,13 @@ class LogicalXor(Primitive):
3954
4207
  r"""
3955
4208
  Computes the "logical XOR" of two tensors element-wise.
3956
4209
 
3957
- .. math::
3958
-
3959
- out_{i} = x_{i} \oplus y_{i}
3960
-
3961
- Inputs:
3962
- - **x** (Tensor) - The first input is a tensor whose data type is bool.
3963
- - **y** (Tensor) - The second input is a the tensor to compute XOR with the first input.
3964
- Datatype must be bool.
3965
-
3966
- Outputs:
3967
- Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4210
+ .. warning::
4211
+ This is an experimental API that is subject to change or deletion.
3968
4212
 
3969
- Raises:
3970
- TypeError: If neither `x` nor `y` is a Tensor whose data type is bool.
3971
- ValueError: If the shape of two inputs cannot be broadcast.
4213
+ Refer to :func:`mindspore.ops.logical_xor` for more details.
3972
4214
 
3973
4215
  Supported Platforms:
3974
- ``CPU``
4216
+ ``Ascend`` ``CPU``
3975
4217
 
3976
4218
  Examples:
3977
4219
  >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
@@ -3992,7 +4234,7 @@ class IsNan(Primitive):
3992
4234
  r"""
3993
4235
  Determines which elements are NaN for each position.
3994
4236
 
3995
- Refer to :func:`mindspore.ops.isnan` for more detail.
4237
+ Refer to :func:`mindspore.ops.isnan` for more details.
3996
4238
 
3997
4239
  Supported Platforms:
3998
4240
  ``Ascend`` ``GPU`` ``CPU``
@@ -4013,26 +4255,9 @@ class IsNan(Primitive):
4013
4255
 
4014
4256
  class IsInf(Primitive):
4015
4257
  r"""
4016
- Determines which elements are inf or -inf for each position
4017
-
4018
- .. math::
4019
-
4020
- out_i = \begin{cases}
4021
- & \text{ if } x_{i} = \text{Inf},\ \ True \\
4022
- & \text{ if } x_{i} \ne \text{Inf},\ \ False
4023
- \end{cases}
4024
-
4025
- where :math:`Inf` means not a number.
4026
-
4027
- Inputs:
4028
- - **x** (Tensor) - The input tensor.
4029
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4030
-
4031
- Outputs:
4032
- Tensor, has the same shape of input, and the dtype is bool.
4258
+ Determines which elements are inf or -inf for each position.
4033
4259
 
4034
- Raises:
4035
- TypeError: If `x` is not a Tensor.
4260
+ Refer to :func:`mindspore.ops.isinf` for more details.
4036
4261
 
4037
4262
  Supported Platforms:
4038
4263
  ``Ascend`` ``GPU`` ``CPU``
@@ -4055,7 +4280,7 @@ class IsFinite(Primitive):
4055
4280
  r"""
4056
4281
  Determines which elements are finite for each position.
4057
4282
 
4058
- Refer to :func:`mindspore.ops.isfinite` for more detail.
4283
+ Refer to :func:`mindspore.ops.isfinite` for more details.
4059
4284
 
4060
4285
  Supported Platforms:
4061
4286
  ``Ascend`` ``GPU`` ``CPU``
@@ -4079,11 +4304,11 @@ class FloatStatus(Primitive):
4079
4304
  Determines if the elements contain Not a Number(NaN), infinite or negative infinite. 0 for normal, 1 for overflow.
4080
4305
 
4081
4306
  Inputs:
4082
- - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
4083
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4307
+ - **x** (Tensor) - The input tensor. The data type must be float16, float32 or float64.
4308
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4084
4309
 
4085
4310
  Outputs:
4086
- Tensor, has the shape of `(1,)`, and the dtype is `mindspore.dtype.float32`.
4311
+ Tensor, has the shape of :math:`(1,)`, and the dtype is `mindspore.dtype.float32`.
4087
4312
 
4088
4313
  Raises:
4089
4314
  TypeError: If dtype of `x` is not in [float16, float32, float64].
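
A minimal doctest sketch of the overflow flag described above (usual imports assumed): the input contains a NaN and a -inf, so the status comes back as 1.

>>> float_status = ops.FloatStatus()
>>> x = Tensor(np.array([np.nan, 1.0, -np.inf]), mindspore.float32)
>>> print(float_status(x))
[1.]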
@@ -4105,17 +4330,17 @@ class FloatStatus(Primitive):
4105
4330
  self.init_prim_io_names(inputs=['x'], outputs=['output'])
4106
4331
 
4107
4332
 
4108
- class NPUAllocFloatStatus(PrimitiveWithInfer):
4333
+ class NPUAllocFloatStatus(Primitive):
4109
4334
  """
4110
4335
  Allocates a flag to store the overflow status.
4111
4336
 
4112
- The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
4337
+ The flag is a tensor whose shape is :math:`(8,)` and data type is `mindspore.dtype.float32`.
4113
4338
 
4114
4339
  Note:
4115
4340
  Please refer to the Examples of :class:`mindspore.ops.NPUGetFloatStatus`.
4116
4341
 
4117
4342
  Outputs:
4118
- Tensor, has the shape of `(8,)`.
4343
+ Tensor, has the shape of :math:`(8,)`.
4119
4344
 
4120
4345
  Supported Platforms:
4121
4346
  ``Ascend``
@@ -4130,24 +4355,22 @@ class NPUAllocFloatStatus(PrimitiveWithInfer):
4130
4355
  @prim_attr_register
4131
4356
  def __init__(self):
4132
4357
  """Initialize NPUAllocFloatStatus"""
4358
+ logger.warning("The 'NPUAllocFloatStatus' operator will be deprecated in the future. Please don't use it.")
4133
4359
 
4134
- def infer_shape(self):
4135
- return [8]
4136
4360
 
4137
- def infer_dtype(self):
4138
- return mstype.float32
4139
-
4140
-
4141
- class NPUGetFloatStatus(PrimitiveWithInfer):
4361
+ class NPUGetFloatStatus(Primitive):
4142
4362
  """
4143
- Updates the flag which is the output tensor of `NPUAllocFloatStatus` with the latest overflow status.
4363
+ `mindspore.ops.NPUGetFloatStatus` updates the flag which is
4364
+ the output tensor of :class:`mindspore.ops.NPUAllocFloatStatus` with the latest overflow status.
4144
4365
 
4145
- The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
4146
- If the sum of the flag equals to 0, there is no overflow happened. If the sum of the flag is bigger than 0, there
4147
- is overflow happened.
4148
- In addition, there are strict sequencing requirements for use, i.e., before using the NPUGetFloatStatus operator,
4149
- need to ensure that the NPUClearFlotStatus and your compute has been executed.
4150
- We use Depend on ensure the execution order.
4366
+
4367
+ Note:
4368
+ The flag is a tensor whose shape is :math:`(8,)` and data type is `mindspore.dtype.float32`.
4369
+ If the sum of the flag equals to 0, there is no overflow happened. If the sum of the
4370
+ flag is bigger than 0, there is overflow happened.
4371
+ In addition, there are strict sequencing requirements for use, i.e., before
4372
+ using the NPUGetFloatStatus operator, need to ensure that the NPUClearFlotStatus
4373
+ and your compute has been executed. We use :class:`mindspore.ops.Depend` to ensure the execution order.
4151
4374
 
4152
4375
  Inputs:
4153
4376
  - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
@@ -4202,19 +4425,10 @@ class NPUGetFloatStatus(PrimitiveWithInfer):
4202
4425
  @prim_attr_register
4203
4426
  def __init__(self):
4204
4427
  """Initialize NPUGetFloatStatus"""
4428
+ logger.warning("The 'NPUGetFloatStatus' operator will be deprecated in the future. Please don't use it.")
4205
4429
 
4206
- def infer_shape(self, x_shape):
4207
- cls_name = self.name
4208
- validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
4209
- validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
4210
- return [8]
4211
-
4212
- def infer_dtype(self, x_dtype):
4213
- validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
4214
- return mstype.float32
4215
4430
 
4216
-
4217
- class NPUClearFloatStatus(PrimitiveWithInfer):
4431
+ class NPUClearFloatStatus(Primitive):
4218
4432
  """
4219
4433
  Clears the flag which stores the overflow status.
4220
4434
 
@@ -4275,23 +4489,178 @@ class NPUClearFloatStatus(PrimitiveWithInfer):
4275
4489
  @prim_attr_register
4276
4490
  def __init__(self):
4277
4491
  """Initialize NPUClearFloatStatus"""
4492
+ logger.warning("The 'NPUClearFloatStatus' operator will be deprecated in the future. Please don't use it.")
4278
4493
 
4279
- def infer_shape(self, x_shape):
4280
- cls_name = self.name
4281
- validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
4282
- validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
4283
- return [8]
4284
4494
 
4285
- def infer_dtype(self, x_dtype):
4286
- validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
4287
- return mstype.float32
4495
+ class NPUGetFloatStatusV2(Primitive):
4496
+ """
4497
+ Get the flag for storage overflow status. This flag is located in a register at a
4498
+ fixed address on the `Ascend` device, and overflow information is automatically
4499
+ written to this register.
4500
+ The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
4501
+ If the value of flag is zero, no overflow has occurred, otherwise, overflow.
4502
+ When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
4503
+ reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
4504
+ status after the network execution is completed.
4505
+
4506
+ Note:
4507
+ - In order to avoid mis-optimization by the compiler, an additional input is added to
4509
+ this operator. The input is defined as a Tensor with shape :math:`(8,)` and data type
4510
+ `mindspore.dtype.int32`, and it is meaningless.
4510
+ - Since this op lacks contextual dependencies with parameters in the network,
4511
+ :class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
4512
+
4513
+ Inputs:
4514
+ Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
4515
+ data type is `mindspore.dtype.int32`, and has no actual meaning.
4516
+ Usually use the output of `NPUClearFloatStatusV2`.
4517
+
4518
+ Outputs:
4519
+ Tensor, shape and data type are the same as input. If all are zero, it means no overflow, otherwise, overflow.
4520
+
4521
+ Raises:
4522
+ TypeError: If `x` is not a Tensor.
4523
+ TypeError: If dtype of `x` is not int32.
4524
+ ValueError: If shape of `x` is not equal to :math:`(8,)`.
4525
+
4526
+ Supported Platforms:
4527
+ ``Ascend``
4528
+
4529
+ Examples:
4530
+ >>> import mindspore as ms
4531
+ >>> import numpy as np
4532
+ >>> from mindspore import ops, nn, Tensor
4533
+ >>> from mindspore.ops.operations.math_ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
4534
+ >>> class Net(nn.Cell):
4535
+ ... def __init__(self):
4536
+ ... super().__init__()
4537
+ ... self.clear_status = NPUClearFloatStatusV2()
4538
+ ... self.get_status = NPUGetFloatStatusV2()
4539
+ ... self.sub = ops.Sub()
4540
+ ... self.neg = ops.Neg()
4541
+ ... self.not_equal = ops.NotEqual()
4542
+ ... self.reduce_any = ops.ReduceAny(keep_dims=False)
4543
+ ... self.base = Tensor([0], dtype=ms.int32)
4544
+ ...
4545
+ ... def construct(self, x):
4546
+ ... init = Tensor([0]*8, dtype=ms.int32)
4547
+ ... clear_status = self.clear_status(init)
4548
+ ... x = ops.depend(x, clear_status)
4549
+ ... res = self.sub(x, self.neg(x))
4550
+ ... init = ops.depend(init, res)
4551
+ ... get_status = self.get_status(init)
4552
+ ... flag = self.not_equal(self.base, get_status)
4553
+ ... overflow = self.reduce_any(flag)
4554
+ ... return overflow
4555
+ ...
4556
+ >>> value = 65504
4557
+ >>> data = np.full((2, 3), value, dtype=np.float16)
4558
+ >>> x = Tensor(data, dtype=ms.float16)
4559
+ >>> net = Net()
4560
+ >>> res = net(x)
4561
+ >>> print(res)
4562
+ True
4563
+ >>> value = 10
4564
+ >>> data = np.full((2, 3), value, dtype=np.float16)
4565
+ >>> x = Tensor(data, dtype=ms.float16)
4566
+ >>> net = Net()
4567
+ >>> res = net(x)
4568
+ >>> print(res)
4569
+ False
4570
+ """
4571
+
4572
+ @prim_attr_register
4573
+ def __init__(self):
4574
+ """Initialize NPUGetFloatStatusV2"""
4575
+
4576
+
4577
+
4578
+ class NPUClearFloatStatusV2(Primitive):
4579
+ """
4580
+ Clear the flag for storage overflow status. This flag is located in a register at a
4581
+ fixed address on the `Ascend` device, and overflow information is automatically
4582
+ written to this register.
4583
+ The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
4584
+ If the value of flag is zero, no overflow has occurred, otherwise, overflow.
4585
+ When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
4586
+ reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
4587
+ status after the network execution is completed.
4588
+
4589
+ Note:
4590
+ - In order to avoid mis-optimization by the compiler, an additional input and output are added to
4591
+ this operator. The input and output are defined as Tensors with shape :math:`(8,)` and data type
4592
+ `mindspore.dtype.int32`, and they are meaningless.
4593
+ - Since this op lacks contextual dependencies with parameters in the network,
4594
+ :class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
4595
+
4596
+ Inputs:
4597
+ Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
4598
+ data type is `mindspore.dtype.int32`, and has no actual meaning.
4599
+
4600
+ Outputs:
4601
+ Tensor, shape and data type are the same as input, meaningless.
4602
+
4603
+ Raises:
4604
+ TypeError: If `x` is not a Tensor.
4605
+ TypeError: If dtype of `x` is not int32.
4606
+ ValueError: If shape of `x` is not equal to :math:`(8,)`.
4607
+
4608
+ Supported Platforms:
4609
+ ``Ascend``
4610
+
4611
+ Examples:
4612
+ >>> import mindspore as ms
4613
+ >>> import numpy as np
4614
+ >>> from mindspore import ops, nn, Tensor
4615
+ >>> from mindspore.ops.operations.math_ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
4616
+ >>> class Net(nn.Cell):
4617
+ ... def __init__(self):
4618
+ ... super().__init__()
4619
+ ... self.clear_status = NPUClearFloatStatusV2()
4620
+ ... self.get_status = NPUGetFloatStatusV2()
4621
+ ... self.sub = ops.Sub()
4622
+ ... self.neg = ops.Neg()
4623
+ ... self.not_equal = ops.NotEqual()
4624
+ ... self.reduce_any = ops.ReduceAny(keep_dims=False)
4625
+ ... self.base = Tensor([0], dtype=ms.int32)
4626
+ ...
4627
+ ... def construct(self, x):
4628
+ ... init = Tensor([0]*8, dtype=ms.int32)
4629
+ ... clear_status = self.clear_status(init)
4630
+ ... x = ops.depend(x, clear_status)
4631
+ ... res = self.sub(x, self.neg(x))
4632
+ ... init = ops.depend(init, res)
4633
+ ... get_status = self.get_status(init)
4634
+ ... flag = self.not_equal(self.base, get_status)
4635
+ ... overflow = self.reduce_any(flag)
4636
+ ... return overflow
4637
+ ...
4638
+ >>> value = 65504
4639
+ >>> data = np.full((2, 3), value, dtype=np.float16)
4640
+ >>> x = Tensor(data, dtype=ms.float16)
4641
+ >>> net = Net()
4642
+ >>> res = net(x)
4643
+ >>> print(res)
4644
+ True
4645
+ >>> value = 10
4646
+ >>> data = np.full((2, 3), value, dtype=np.float16)
4647
+ >>> x = Tensor(data, dtype=ms.float16)
4648
+ >>> net = Net()
4649
+ >>> res = net(x)
4650
+ >>> print(res)
4651
+ False
4652
+ """
4653
+
4654
+ @prim_attr_register
4655
+ def __init__(self):
4656
+ """Initialize NPUClearFloatStatusV2"""
4288
4657
 
4289
4658
 
4290
4659
  class Cos(Primitive):
4291
4660
  r"""
4292
4661
  Computes cosine of input element-wise.
4293
4662
 
4294
- Refer to :func:`mindspore.ops.cos` for more detail.
4663
+ Refer to :func:`mindspore.ops.cos` for more details.
4295
4664
 
4296
4665
  Supported Platforms:
4297
4666
  ``Ascend`` ``GPU`` ``CPU``
@@ -4313,7 +4682,7 @@ class ACos(Primitive):
4313
4682
  r"""
4314
4683
  Computes arccosine of input tensors element-wise.
4315
4684
 
4316
- Refer to :func:`mindspore.ops.acos` for more detail.
4685
+ Refer to :func:`mindspore.ops.acos` for more details.
4317
4686
 
4318
4687
  Supported Platforms:
4319
4688
  ``Ascend`` ``GPU`` ``CPU``
@@ -4336,7 +4705,7 @@ class Sin(Primitive):
4336
4705
  r"""
4337
4706
  Computes sine of the input element-wise.
4338
4707
 
4339
- Refer to :func:`mindspore.ops.sin` for more detail.
4708
+ Refer to :func:`mindspore.ops.sin` for more details.
4340
4709
 
4341
4710
  Supported Platforms:
4342
4711
  ``Ascend`` ``GPU`` ``CPU``
@@ -4358,7 +4727,7 @@ class Asin(Primitive):
4358
4727
  r"""
4359
4728
  Computes arcsine of input tensors element-wise.
4360
4729
 
4361
- Refer to :func:`mindspore.ops.asin` for more detail.
4730
+ Refer to :func:`mindspore.ops.asin` for more details.
4362
4731
 
4363
4732
  Supported Platforms:
4364
4733
  ``Ascend`` ``GPU`` ``CPU``
@@ -4379,11 +4748,12 @@ class Asin(Primitive):
4379
4748
 
4380
4749
  class NMSWithMask(PrimitiveWithInfer):
4381
4750
  r"""
4382
- When object detection problem is performed in the computer vision field, object detection algorithm generates
4751
+ Non-maximum Suppression. When the object detection problem is performed in the computer vision field,
4752
+ the object detection algorithm generates
4383
4753
  a plurality of bounding boxes. Use the box with the highest score, calculate the overlap between other boxes and
4384
4754
  the current box, and delete the box based on a certain threshold (IOU). On Ascend platform, the input box score is
4385
4755
  ignored, which only selects boxes based on the IOU between boxes, which means if you want to remove boxes that have
4386
- lower scores, you need to sort the input boxes by score in descending order in advance. The IOU is as follows,
4756
+ lower scores, you need to sort the input boxes by score in descending order in advance. The IOU is as follows:
4387
4757
 
4388
4758
  .. math::
4389
4759
  \text{IOU} = \frac{\text{Area of Overlap}}{\text{Area of Union}}
@@ -4460,7 +4830,7 @@ class Abs(Primitive):
4460
4830
  r"""
4461
4831
  Returns absolute value of a tensor element-wise.
4462
4832
 
4463
- Refer to :func:`mindspore.ops.abs` for more detail.
4833
+ Refer to :func:`mindspore.ops.abs` for more details.
4464
4834
 
4465
4835
  Supported Platforms:
4466
4836
  ``Ascend`` ``GPU`` ``CPU``
@@ -4490,7 +4860,7 @@ class Sign(Primitive):
4490
4860
 
4491
4861
  Inputs:
4492
4862
  - **x** (Tensor) - The input tensor.
4493
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4863
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4494
4864
 
4495
4865
  Outputs:
4496
4866
  Tensor, has the same shape and dtype as the `x`.
@@ -4499,7 +4869,7 @@ class Sign(Primitive):
4499
4869
  TypeError: If `x` is not a Tensor.
4500
4870
 
4501
4871
  Supported Platforms:
4502
- ``Ascend`` ``CPU`` ``GPU``
4872
+ ``Ascend`` ``GPU`` ``CPU``
4503
4873
 
4504
4874
  Examples:
4505
4875
  >>> x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
@@ -4518,17 +4888,17 @@ class Round(Primitive):
4518
4888
  r"""
4519
4889
  Returns half to even of a tensor element-wise.
4520
4890
 
4521
- Refer to :func:`mindspore.ops.round` for more detailed.
4891
+ Refer to :func:`mindspore.ops.round` for more details.
4522
4892
 
4523
4893
  Supported Platforms:
4524
4894
  ``Ascend`` ``GPU`` ``CPU``
4525
4895
 
4526
4896
  Examples:
4527
- >>> x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
4528
- >>> round = ops.Round()
4529
- >>> output = round(x)
4530
- >>> print(output)
4531
- [ 1. 2. 2. 2. -4.]
4897
+ >>> x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
4898
+ >>> round = ops.Round()
4899
+ >>> output = round(x)
4900
+ >>> print(output)
4901
+ [ 1. 2. 2. 2. -4.]
4532
4902
  """
4533
4903
 
4534
4904
  @prim_attr_register
@@ -4541,10 +4911,10 @@ class Tan(Primitive):
4541
4911
  r"""
4542
4912
  Computes tangent of `x` element-wise.
4543
4913
 
4544
- Refer to :func:`mindspore.ops.tan` for more detail.
4914
+ Refer to :func:`mindspore.ops.tan` for more details.
4545
4915
 
4546
4916
  Supported Platforms:
4547
- ``Ascend`` ``CPU`` ``GPU``
4917
+ ``Ascend`` ``GPU`` ``CPU``
4548
4918
 
4549
4919
  Examples:
4550
4920
  >>> tan = ops.Tan()
@@ -4564,7 +4934,7 @@ class Atan(Primitive):
4564
4934
  r"""
4565
4935
  Computes the trigonometric inverse tangent of the input element-wise.
4566
4936
 
4567
- Refer to :func:`mindspore.ops.atan` for more detail.
4937
+ Refer to :func:`mindspore.ops.atan` for more details.
4568
4938
 
4569
4939
  Supported Platforms:
4570
4940
  ``Ascend`` ``GPU`` ``CPU``
@@ -4588,9 +4958,9 @@ class Atanh(Primitive):
4588
4958
  Computes inverse hyperbolic tangent of the input element-wise.
4589
4959
 
4590
4960
  .. warning::
4591
- This is an experimental prototype that is subject to change and/or deletion.
4961
+ This is an experimental API that is subject to change or deletion.
4592
4962
 
4593
- Refer to :func:`mindspore.ops.atanh` for more detail.
4963
+ Refer to :func:`mindspore.ops.atanh` for more details.
4594
4964
 
4595
4965
  Supported Platforms:
4596
4966
  ``Ascend`` ``GPU`` ``CPU``
@@ -4613,10 +4983,10 @@ class Atan2(_MathBinaryOp):
4613
4983
  r"""
4614
4984
  Returns arctangent of x/y element-wise.
4615
4985
 
4616
- Refer to :func:`mindspore.ops.atan2` for more detail.
4986
+ Refer to :func:`mindspore.ops.atan2` for more details.
4617
4987
 
4618
4988
  Supported Platforms:
4619
- ``Ascend`` ``CPU`` ``GPU``
4989
+ ``Ascend`` ``GPU`` ``CPU``
4620
4990
 
4621
4991
  Examples:
4622
4992
  >>> x = Tensor(np.array([0, 1]), mindspore.float32)
@@ -4626,6 +4996,7 @@ class Atan2(_MathBinaryOp):
4626
4996
  >>> print(output)
4627
4997
  [0. 0.7853982]
4628
4998
  """
4999
+
4629
5000
  @prim_attr_register
4630
5001
  def __init__(self):
4631
5002
  """Initialize Atan2"""
@@ -4634,7 +5005,7 @@ class Atan2(_MathBinaryOp):
4634
5005
 
4635
5006
  class SquareSumAll(Primitive):
4636
5007
  r"""
4637
- Returns the square sum of a tensor element-wise
5008
+ Returns the square sum of a tensor element-wise.
4638
5009
 
4639
5010
  .. math::
4640
5011
 
@@ -4642,14 +5013,14 @@ class SquareSumAll(Primitive):
4642
5013
  \\out_{y} = {\textstyle \sum_{0}^{N}} (y_{i})^2
4643
5014
  \end{matrix}\right.
4644
5015
 
5016
+ Note:
5017
+ SquareSumAll only supports float16 and float32 data type.
5018
+
4645
5019
  Inputs:
4646
5020
  - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
4647
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5021
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4648
5022
  - **y** (Tensor) - The input tensor has the same type and shape as the `x`.
4649
5023
 
4650
- Note:
4651
- SquareSumAll only supports float16 and float32 data type.
4652
-
4653
5024
  Outputs:
4654
5025
  - **output_x** (Tensor) - The same type as the `x`.
4655
5026
  - **output_y** (Tensor) - The same type as the `x`.
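
A minimal doctest sketch of the two reductions above (usual imports assumed): each output is the scalar sum of squares of the corresponding input, here 0 + 1 + 4 + 9 = 14.

>>> x = Tensor(np.arange(4), mindspore.float32)
>>> y = Tensor(np.arange(4), mindspore.float32)
>>> square_sum_all = ops.SquareSumAll()
>>> print(square_sum_all(x, y))
(Tensor(shape=[], dtype=Float32, value= 14), Tensor(shape=[], dtype=Float32, value= 14))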
@@ -4685,7 +5056,7 @@ class BitwiseAnd(_BitwiseBinaryOp):
4685
5056
  r"""
4686
5057
  Returns bitwise `and` of two tensors element-wise.
4687
5058
 
4688
- Refer to :func:`mindspore.ops.bitwise_and` for more detail.
5059
+ Refer to :func:`mindspore.ops.bitwise_and` for more details.
4689
5060
 
4690
5061
  Supported Platforms:
4691
5062
  ``Ascend`` ``GPU`` ``CPU``
@@ -4704,7 +5075,7 @@ class BitwiseOr(_BitwiseBinaryOp):
4704
5075
  r"""
4705
5076
  Returns bitwise `or` of two tensors element-wise.
4706
5077
 
4707
- Refer to :func:`mindspore.ops.bitwise_or` for more detail.
5078
+ Refer to :func:`mindspore.ops.bitwise_or` for more details.
4708
5079
 
4709
5080
  Supported Platforms:
4710
5081
  ``Ascend`` ``GPU`` ``CPU``
@@ -4723,7 +5094,7 @@ class BitwiseXor(_BitwiseBinaryOp):
4723
5094
  r"""
4724
5095
  Returns bitwise `xor` of two tensors element-wise.
4725
5096
 
4726
- Refer to :func:`mindspore.ops.bitwise_xor` for more detail.
5097
+ Refer to :func:`mindspore.ops.bitwise_xor` for more details.
4727
5098
 
4728
5099
  Supported Platforms:
4729
5100
  ``Ascend`` ``GPU`` ``CPU``
@@ -4742,16 +5113,10 @@ class BesselI0(Primitive):
4742
5113
  """
4743
5114
  Computes BesselI0 of input element-wise.
4744
5115
 
4745
- Inputs:
4746
- - **x** (Tensor) - The shape of tensor is
4747
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4748
- Data type must be float16, float32 or float64.
4749
-
4750
- Outputs:
4751
- Tensor, has the same shape as `x`.
5116
+ .. warning::
5117
+ This is an experimental API that is subject to change or deletion.
4752
5118
 
4753
- Raises:
4754
- TypeError: If `x` is not a Tensor of float16, float32 or float64.
5119
+ Refer to :func:`mindspore.ops.bessel_i0` for more details.
4755
5120
 
4756
5121
  Supported Platforms:
4757
5122
  ``GPU`` ``CPU``
@@ -4761,28 +5126,22 @@ class BesselI0(Primitive):
4761
5126
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
4762
5127
  >>> output = bessel_i0(x)
4763
5128
  >>> print(output)
4764
- [1.014452 1.179784 1.0241697 1.0020261]
5129
+ [1.0144521 1.1797839 1.0241698 1.0020262]
4765
5130
  """
4766
5131
 
4767
5132
  @prim_attr_register
4768
5133
  def __init__(self):
4769
- """Initialize BesselI0"""
5134
+ self.init_prim_io_names(inputs=['x'], outputs='y')
4770
5135
 
4771
5136
 
4772
5137
  class BesselI1(Primitive):
4773
5138
  """
4774
5139
  Computes BesselI1 of input element-wise.
4775
5140
 
4776
- Inputs:
4777
- - **x** (Tensor) - The shape of tensor is
4778
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4779
- Data type must be float16 or float32.
4780
-
4781
- Outputs:
4782
- Tensor, has the same shape as `x`.
5141
+ .. warning::
5142
+ This is an experimental API that is subject to change or deletion.
4783
5143
 
4784
- Raises:
4785
- TypeError: If `x` is not a Tensor of float16, float32 or float64.
5144
+ Refer to :func:`mindspore.ops.bessel_i1` for more details.
4786
5145
 
4787
5146
  Supported Platforms:
4788
5147
  ``GPU`` ``CPU``
@@ -4812,8 +5171,7 @@ class BesselI0e(Primitive):
4812
5171
  where bessel_i0 is Bessel function of the first kind with 0 order.
4813
5172
 
4814
5173
  Inputs:
4815
- - **x** (Tensor) - The shape of tensor is
4816
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5174
+ - **x** (Tensor) - The input tensor.
4817
5175
  Data type must be float16, float32 or float64.
4818
5176
 
4819
5177
  Outputs:
@@ -4852,8 +5210,7 @@ class BesselI1e(Primitive):
4852
5210
  where bessel_i1 is Bessel function of the first kind with 1 order.
4853
5211
 
4854
5212
  Inputs:
4855
- - **x** (Tensor) - The shape of tensor is
4856
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5213
+ - **x** (Tensor) - The input tensor.
4857
5214
  Data type must be float16, float32 or float64.
4858
5215
 
4859
5216
  Outputs:
@@ -4884,9 +5241,11 @@ class BesselK0(Primitive):
4884
5241
  r"""
4885
5242
  Computes BesselK0 of input element-wise.
4886
5243
 
5244
+ .. warning::
5245
+ This is an experimental API that is subject to change or deletion.
5246
+
4887
5247
  Inputs:
4888
- - **x** (Tensor) - The shape of tensor is
4889
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5248
+ - **x** (Tensor) - The input tensor.
4890
5249
  Data type must be float16, float32, float64.
4891
5250
 
4892
5251
  Outputs:
@@ -4915,9 +5274,11 @@ class BesselK1(Primitive):
4915
5274
  r"""
4916
5275
  Computes BesselK1 of input element-wise.
4917
5276
 
5277
+ .. warning::
5278
+ This is an experimental API that is subject to change or deletion.
5279
+
4918
5280
  Inputs:
4919
- - **x** (Tensor) - The shape of tensor is
4920
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5281
+ - **x** (Tensor) - The input tensor.
4921
5282
  Data type must be float16, float32, float64.
4922
5283
 
4923
5284
  Outputs:
@@ -4946,9 +5307,11 @@ class BesselK0e(Primitive):
4946
5307
  """
4947
5308
  Computes BesselK0e of input element-wise.
4948
5309
 
5310
+ .. warning::
5311
+ This is an experimental API that is subject to change or deletion.
5312
+
4949
5313
  Inputs:
4950
- - **x** (Tensor) - The shape of tensor is
4951
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5314
+ - **x** (Tensor) - The input tensor.
4952
5315
  Data type must be float16, float32, float64.
4953
5316
 
4954
5317
  Outputs:
@@ -4977,9 +5340,11 @@ class BesselK1e(Primitive):
4977
5340
  """
4978
5341
  Computes BesselK1e of input element-wise.
4979
5342
 
5343
+ .. warning::
5344
+ This is an experimental API that is subject to change or deletion.
5345
+
4980
5346
  Inputs:
4981
- - **x** (Tensor) - The shape of tensor is
4982
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5347
+ - **x** (Tensor) - The input tensor.
4983
5348
  Data type must be float16, float32, float64.
4984
5349
 
4985
5350
  Outputs:
@@ -5008,9 +5373,11 @@ class BesselJ0(Primitive):
5008
5373
  """
5009
5374
  Computes BesselJ0 of input element-wise.
5010
5375
 
5376
+ .. warning::
5377
+ This is an experimental API that is subject to change or deletion.
5378
+
5011
5379
  Inputs:
5012
- - **x** (Tensor) - The shape of tensor is
5013
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5380
+ - **x** (Tensor) - The input tensor.
5014
5381
  Data type must be float16, float32 or float64.
5015
5382
 
5016
5383
  Outputs:
@@ -5040,9 +5407,11 @@ class BesselJ1(Primitive):
5040
5407
  """
5041
5408
  Computes BesselJ1 of input element-wise.
5042
5409
 
5410
+ .. warning::
5411
+ This is an experimental API that is subject to change or deletion.
5412
+
5043
5413
  Inputs:
5044
- - **x** (Tensor) - The shape of tensor is
5045
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5414
+ - **x** (Tensor) - The input tensor.
5046
5415
  Data type must be float16, float32 or float64.
5047
5416
 
5048
5417
  Outputs:
@@ -5072,9 +5441,11 @@ class BesselY0(Primitive):
5072
5441
  """
5073
5442
  Computes BesselY0 of input element-wise.
5074
5443
 
5444
+ .. warning::
5445
+ This is an experimental API that is subject to change or deletion.
5446
+
5075
5447
  Inputs:
5076
- - **x** (Tensor) - The shape of tensor is
5077
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5448
+ - **x** (Tensor) - The input tensor.
5078
5449
  Data type must be float16, float32 or float64.
5079
5450
 
5080
5451
  Outputs:
@@ -5104,9 +5475,11 @@ class BesselY1(Primitive):
5104
5475
  """
5105
5476
  Computes BesselY1 of input element-wise.
5106
5477
 
5478
+ .. warning::
5479
+ This is an experimental API that is subject to change or deletion.
5480
+
5107
5481
  Inputs:
5108
- - **x** (Tensor) - The shape of tensor is
5109
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5482
+ - **x** (Tensor) - The input tensor.
5110
5483
  Data type must be float16, float32 or float64.
5111
5484
 
5112
5485
  Outputs:
@@ -5234,7 +5607,7 @@ class LinSpace(Primitive):
5234
5607
  Returns a Tensor whose value is `num` evenly spaced in the interval `start` and `stop` (including `start` and
5235
5608
  `stop`), and the length of the output Tensor is `num`.
5236
5609
 
5237
- Refer to :func:`mindspore.ops.linspace` for more detail.
5610
+ Refer to :func:`mindspore.ops.linspace` for more details.
5238
5611
 
5239
5612
  Supported Platforms:
5240
5613
  ``Ascend`` ``GPU`` ``CPU``
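
A minimal doctest sketch (usual imports assumed): five evenly spaced points from 1 to 10, i.e. a step of 2.25.

>>> linspace = ops.LinSpace()
>>> start = Tensor(1, mindspore.float32)
>>> stop = Tensor(10, mindspore.float32)
>>> print(linspace(start, stop, 5))
[ 1.    3.25  5.5   7.75 10.  ]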
@@ -5280,7 +5653,7 @@ class MatrixInverse(Primitive):
5280
5653
  ValueError: If the dimension of `x` is less than 2.
5281
5654
 
5282
5655
  Supported Platforms:
5283
- ``GPU`` ``CPU``
5656
+ ``Ascend`` ``GPU`` ``CPU``
5284
5657
 
5285
5658
  Examples:
5286
5659
  >>> x = Tensor(np.array([[[-0.710504 , -1.1207525],
@@ -5303,14 +5676,58 @@ class MatrixInverse(Primitive):
5303
5676
  validator.check_value_type('adjoint', adjoint, [bool], self.name)
5304
5677
 
5305
5678
 
5679
+ class MatrixPower(Primitive):
5680
+ """
5681
+ Calculates the n-th power of a batch of square matrices.
5682
+ When n equals 0, it returns a group of identity matrices. If n is negative,
5683
+ it computes the inverse of each matrix (if possible) raised to the power of abs(n).
5684
+
5685
+ Args:
5686
+ n (int): The exponent, a required int.
5687
+
5688
+ Inputs:
5689
+ - **x** (Tensor) - A 3-D Tensor. Supported data types are float16 and float32.
5690
+ The shape is :math:`(b, m, m)`, representing b square matrices of shape :math:`(m, m)`.
5691
+
5692
+ Outputs:
5693
+ - **y** (Tensor) - A 3-D Tensor. Data type and shape are the same as `x`'s.
5694
+
5695
+ Raises:
5696
+ TypeError: If the data type of `n` is not int.
5697
+ TypeError: If the data type of `x` is neither float32 nor float16.
5698
+ TypeError: If x is not a Tensor.
5699
+ ValueError: If `x` is not a 3-D tensor.
5700
+ ValueError: If shape[1] and shape[2] of `x` are not the same.
5701
+ ValueError: If `n` is negative and the input `x` contains singular matrices.
5702
+
5703
+ Supported Platforms:
5704
+
5705
+
5706
+ Examples:
5707
+ >>> x = Tensor([[[0, 1], [-1, 0]], [[1, 0], [0, -1]]], dtype=ms.float32)
5708
+ >>> matrix_power = ops.MatrixPower(n=2)
5709
+ >>> y = matrix_power(x)
5710
+ >>> print(y)
5711
+ [[[-1. 0.]
5712
+ [-0. -1.]]
5713
+ [[ 1. 0.]
5714
+ [ 0. 1.]]]
5715
+ """
5716
+
5717
+ @prim_attr_register
5718
+ def __init__(self, n):
5719
+ super().__init__(name="MatrixPower")
5720
+ self.n = validator.check_value_type("n", n, [int], self.name)
5721
+
5722
+
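The example above only exercises n = 2. A minimal sketch of the documented n < 0 behavior (assuming the semantics described in this docstring; the expected value is worked out by hand from the definition, not taken from a run):

 >>> x = Tensor([[[2., 0.], [0., 4.]]], dtype=ms.float32)  # one invertible 2x2 matrix
 >>> y = ops.MatrixPower(n=-1)(x)
 >>> # expected: the batch of inverses, [[[0.5, 0.], [0., 0.25]]]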
5306
5723
  class MatrixDeterminant(Primitive):
5307
5724
  """
5308
- Computes the determinant of one or more square matrices.
5725
+ Calculates the value of the determinant for one or more square matrices.
5309
5726
 
5310
- Refer to :func:`mindspore.ops.matrix_determinant` for more detail.
5727
+ Refer to :func:`mindspore.ops.det` for more details.
5311
5728
 
5312
5729
  Supported Platforms:
5313
- ``GPU`` ``CPU``
5730
+
5314
5731
 
5315
5732
  Examples:
5316
5733
  >>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
@@ -5329,12 +5746,12 @@ class MatrixDeterminant(Primitive):
5329
5746
 
5330
5747
  class LogMatrixDeterminant(Primitive):
5331
5748
  """
5332
- Computes the sign and the log of the absolute value of the determinant of one or more square matrices.
5749
 + Calculates the sign and the logarithm of the absolute value of the determinant of one or more square matrices.
5333
5750
 
5334
- Refer to :func:`mindspore.ops.log_matrix_determinant` for more detail.
5751
+ Refer to :func:`mindspore.ops.slogdet` for more details.
5335
5752
 
5336
5753
  Supported Platforms:
5337
- ``GPU`` ``CPU``
5754
+
5338
5755
 
5339
5756
  Examples:
5340
5757
  >>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
@@ -5353,9 +5770,44 @@ class LogMatrixDeterminant(Primitive):
5353
5770
  self.init_prim_io_names(inputs=['x'], outputs=['sign', 'y'])
5354
5771
 
5355
5772
 
5773
+ class MatrixLogarithm(Primitive):
5774
+ """
5775
 + Returns the matrix logarithm of one or more square matrices.
5776
+
5777
+ Inputs:
5778
 + - **x** (Tensor) - The input tensor, whose shape is :math:`[..., M, M]`.
5779
 + Must be one of the following types: complex64, complex128. The rank must be from 2 to 7 (2D-7D).
5780
+
5781
+ Outputs:
5782
 + - **y** (Tensor) - Has the same shape and type as the input.
5783
+
5784
+ Raises:
5785
+ TypeError: If `x` is not a Tensor.
5786
+ TypeError: If dtype of `x` is not one of: complex64, complex128.
5787
 + ValueError: If the dimension of `x` is less than 2.
5788
 + ValueError: If the sizes of the last two dimensions are not equal.
5789
+
5790
+ Supported Platforms:
5791
+ ``Ascend`` ``CPU``
5792
+
5793
+ Examples:
5794
+ >>> x = Tensor([[1 + 2j, 2 + 1j], [4 + 1j, 5 + 2j]])
5795
+ >>> matrix_logarithm = ops.MatrixLogarithm()
5796
+ >>> y = matrix_logarithm(x)
5797
+ >>> print(y)
5798
+ [[0.69155775+1.71618359j 0.64665196-0.34928196j]
5799
+ [1.02426074-0.88736831j 1.44677531+0.6400109j ]]
5800
+ """
5801
+
5802
+ @prim_attr_register
5803
+ def __init__(self):
5804
+ """Initialize MatrixLogarithm"""
5805
+ self.init_prim_io_names(inputs=['x'], outputs=['y'])
5806
+
5807
+
5356
5808
  class IndexAdd(Primitive):
5357
5809
  """
5358
- Adds tensor `y` to specified axis and indices of tensor `x`. The axis should be in [0, len(x.dim) - 1],
5810
+ Adds tensor `y` to specified axis and indices of tensor `x`. The axis should be in [-len(x.dim), len(x.dim) - 1],
5359
5811
  and indices should be in [0, the size of `x` - 1] at the axis dimension.
5360
5812
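For intuition, a NumPy sketch of the arithmetic described above (illustration only; it is not the operator's API, which takes MindSpore tensors):

 >>> import numpy as np
 >>> x = np.array([[1., 2., 3.], [4., 5., 6.]])
 >>> y = np.ones((2, 2))
 >>> x[:, [0, 2]] += y  # IndexAdd with axis=1, indices=[0, 2]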
 
5361
5813
  Args:
@@ -5425,7 +5877,9 @@ class IndexAdd(Primitive):
5425
5877
 
5426
5878
  class Erfinv(Primitive):
5427
5879
  r"""
5428
- Computes the inverse error function of input. The inverse error function is defined in the range (-1, 1) as:
5880
+ Computes the inverse error function of input. The inverse error function is defined in the range (-1, 1).
5881
+
5882
+ The formula is defined as:
5429
5883
 
5430
5884
  .. math::
5431
5885
  erfinv(erf(x)) = x
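The identity can be sanity-checked numerically; a minimal sketch using SciPy (assuming SciPy is available, for illustration only):

 >>> from scipy.special import erf, erfinv
 >>> erfinv(erf(0.5))  # recovers 0.5 up to floating-point error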
@@ -5440,7 +5894,7 @@ class Erfinv(Primitive):
5440
5894
  TypeError: If dtype of `input_x` is not float16, float32 or float64.
5441
5895
 
5442
5896
  Supported Platforms:
5443
- ``Ascend`` ``GPU``
5897
+ ``Ascend`` ``GPU`` ``CPU``
5444
5898
 
5445
5899
  Examples:
5446
5900
  >>> x = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
@@ -5494,27 +5948,31 @@ class Conj(Primitive):
5494
5948
 
5495
5949
  class ComplexAbs(Primitive):
5496
5950
  r"""
5497
- Returns a Tensor that is the absolute value part of the input.
5951
+ Returns a Tensor that contains the magnitudes of the input.
5498
5952
 
5499
- The complex numbers in input must be of the form a + bj, where a is the real part and b is the imaginary part.
5953
+ The complex numbers in input must be of the form :math:`a + bj`,
5954
+ where :math:`a` is the real part and :math:`b` is the imaginary part.
5500
5955
 
5501
5956
  .. math::
5502
5957
 
5503
- y = \sqrt{a^2+b^2}.
5958
+ y = \sqrt{a^2+b^2}
5959
+
5960
+ .. warning::
5961
+ This is an experimental API that is subject to change or deletion.
5504
5962
 
5505
5963
  Inputs:
5506
- -**x** (Tensor) - A Tensor, types: complex64, complex128.
5964
+ - **x** (Tensor) - A Tensor, types: complex64, complex128.
5507
5965
 
5508
5966
  Outputs:
5509
- -**y** (Tensor) - Tensor, has the same shape as x. If the type of x is complex64, the type of y is float32.
5510
- If the type of x is complex128, the type of y is float64.
5967
+ Tensor, has the same shape as x. If the type of x is complex64, the type of output is float32.
5968
+ If the type of x is complex128, the type of output is float64.
5511
5969
 
5512
5970
  Raises:
5513
5971
  TypeError: If the input is not a Tensor.
5514
5972
  TypeError: If the input type is not complex64 or complex128.
5515
5973
 
5516
5974
  Supported Platforms:
5517
- ``Ascend`` ``CPU`` ``GPU``
5975
+ ``Ascend`` ``GPU`` ``CPU``
5518
5976
 
5519
5977
  Examples:
5520
5978
  >>> x = Tensor(np.asarray(np.complex(3+4j)), mindspore.complex64)
@@ -5545,7 +6003,7 @@ class Real(Primitive):
5545
6003
  TypeError: If the input is not a Tensor.
5546
6004
 
5547
6005
  Supported Platforms:
5548
- ``GPU`` ``CPU``
6006
+ ``Ascend`` ``GPU`` ``CPU``
5549
6007
 
5550
6008
  Examples:
5551
6009
  >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
@@ -5565,6 +6023,9 @@ class Complex(Primitive):
5565
6023
  """
5566
6024
  Returns a complex Tensor from the real part and the imag part.
5567
6025
 
6026
+ .. warning::
6027
+ This is an experimental API that is subject to change or deletion.
6028
+
5568
6029
  Inputs:
5569
6030
  - **real** (Tensor) - The real input tensor. types: float32, float64.
5570
6031
  - **imag** (Tensor) - The imag input tensor. types: float32, float64.
@@ -5577,7 +6038,7 @@ class Complex(Primitive):
5577
6038
  TypeError: If the dtypes of two inputs are not same.
5578
6039
 
5579
6040
  Supported Platforms:
5580
- ``Ascend`` ``CPU`` ``GPU``
6041
+ ``Ascend`` ``GPU`` ``CPU``
5581
6042
 
5582
6043
  Examples:
5583
6044
  >>> real = Tensor(np.array([1]), mindspore.float32)
@@ -5609,7 +6070,7 @@ class Imag(Primitive):
5609
6070
  TypeError: If the input is not a Tensor.
5610
6071
 
5611
6072
  Supported Platforms:
5612
- ``GPU`` ``CPU``
6073
+ ``Ascend`` ``GPU`` ``CPU``
5613
6074
 
5614
6075
  Examples:
5615
6076
  >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
@@ -5628,21 +6089,14 @@ class Imag(Primitive):
5628
6089
  class Angle(Primitive):
5629
6090
  """
5630
6091
  Returns the element-wise argument of a complex tensor.
5631
- The elements in input are considered to be complex numbers of the form a+bj, where a is the real part and b
5632
- is the imaginary part. The argument returned by this function is of the form atan2(b,a).
5633
-
5634
- Inputs:
5635
- - **input** (Tensor) - The input tensor. types: complex64, complex128.
5636
6092
 
5637
- Outputs:
5638
- Tensor, has the float32 or float64 type and the same shape as input.
6093
+ .. warning::
6094
+ This is an experimental API that is subject to change or deletion.
5639
6095
 
5640
- Raises:
5641
- TypeError: If `input` is not a Tensor.
5642
- TypeError: If the dtype of input is not one of: complex64, complex128.
6096
+ Refer to :func:`mindspore.ops.angle` for more details.
5643
6097
 
5644
6098
  Supported Platforms:
5645
- ``Ascend`` ``CPU``
6099
+ ``Ascend`` ``GPU`` ``CPU``
5646
6100
 
5647
6101
  Examples:
5648
6102
  >>> input = Tensor([-1.5 + 7.8j, 3 + 5.75j], mindspore.complex64)
@@ -5662,10 +6116,16 @@ class Trunc(Primitive):
5662
6116
  """
5663
6117
  Returns a new tensor with the truncated integer values of the elements of input.
5664
6118
 
5665
- Refer to :func:`mindspore.ops.trunc` for more detail.
6119
+ Refer to :func:`mindspore.ops.trunc` for more details.
5666
6120
 
5667
6121
  Supported Platforms:
5668
6122
  ``Ascend`` ``GPU`` ``CPU``
6123
+
6124
+ Examples:
6125
+ >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
6126
+ >>> output = ops.Trunc()(x)
6127
+ >>> print(output)
6128
+ [ 3. 0. -0. -3.]
5669
6129
  """
5670
6130
 
5671
6131
  @prim_attr_register
@@ -5679,24 +6139,20 @@ class TridiagonalMatMul(Primitive):
5679
6139
  Return the result of a multiplication of two matrices, where the left one is a Tridiagonal Matrix.
5680
6140
 
5681
6141
  Inputs:
5682
- - **superdiag** (Tensor) - The input tensor.
6142
+ - **superdiag** (Tensor) - Superdiagonals of Tridiagonal Matrices to the left of multiplication.
5683
6143
  Data types must be: float16, float32, double, complex64, complex128.
5684
- The shape is [..., 1, M].
5685
- Representing superdiagonals of Tridiagonal Matrices to the left of multiplication.
6144
+ The shape is :math:`(..., 1, M)`.
5686
6145
  Last element is ignored.
5687
- - **maindiag** (Tensor) - The input tensor.
6146
+ - **maindiag** (Tensor) - Maindiagonals of Tridiagonal Matrices to the left of multiplication.
5688
6147
  Data types must be: float16, float32, double, complex64, complex128.
5689
- The shape is [..., 1, M].
5690
- Representing maindiagonals of Tridiagonal Matrices to the left of multiplication.
5691
- - **subdiag** (Tensor) - The input tensor.
6148
+ The shape is :math:`(..., 1, M)`.
6149
+ - **subdiag** (Tensor) - Subdiagonals of Tridiagonal Matrices to the left of multiplication.
5692
6150
  Data types must be: float16, float32, double, complex64, complex128.
5693
- The shape is [..., 1, M].
5694
- Representing subdiagonals of Tridiagonal Matrices to the left of multiplication.
6151
+ The shape is :math:`(..., 1, M)`.
5695
6152
  First element is ignored.
5696
- - **rhs** (Tensor) - The input tensor.
6153
+ - **rhs** (Tensor) - MxN Matrices to the right of multiplication.
5697
6154
  Data types must be: float16, float32, double, complex64, complex128.
5698
- The shape is [..., M, N].
5699
- Representing MxN Matrices to the right of multiplication.
6155
 + The shape is :math:`(..., M, N)`.
5700
6156
 
5701
6157
  Outputs:
5702
6158
  Tensor, with the same shape and data type as the `rhs`.
@@ -5720,7 +6176,7 @@ class TridiagonalMatMul(Primitive):
5720
6176
  are not same.
5721
6177
 
5722
6178
  Supported Platforms:
5723
- ``Ascend`` ``CPU``
6179
+ ``CPU``
5724
6180
 
5725
6181
  Examples:
5726
6182
  >>> tridiagonalmatmul = ops.TridiagonalMatMul()
@@ -5746,36 +6202,11 @@ class TridiagonalMatMul(Primitive):
5746
6202
  class Igamma(Primitive):
5747
6203
  r"""
5748
6204
  Calculates lower regularized incomplete Gamma function.
5749
- The lower regularized incomplete Gamma function is defined as:
5750
-
5751
- .. math::
5752
- P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)
5753
-
5754
- where
5755
-
5756
- .. math::
5757
- gamma(a, x) = \int_0^x t^{a-1} \exp^{-t} dt
5758
-
5759
- is the lower incomplete Gamma function.
5760
-
5761
- Above :math:`Q(a, x)` is the upper regularized complete Gamma function.
5762
6205
 
5763
6206
  .. warning::
5764
- This is an experimental prototype that is subject to change and/or deletion.
5765
-
5766
- Inputs:
5767
- - **a** (Tensor) - The input tensor. With type of float32 or float64.
5768
- - **x** (Tensor) - The input tensor. With float32 or float64 type. `x` should have
5769
- the same dtype with `a`.
6207
+ This is an experimental API that is subject to change or deletion.
5770
6208
 
5771
- Outputs:
5772
- Tensor, has the same dtype as `a` and `x`.
5773
-
5774
- Raises:
5775
- TypeError: If a or x is not a Tensor.
5776
- TypeError: If dtype of input x and a is not float32 nor float64.
5777
- TypeError: If x has different dtype with a.
5778
- ValueError: If `a` could not be broadcast to a tensor with shape of `x`.
6209
+ Refer to :func:`mindspore.ops.igamma` for more details.
5779
6210
 
5780
6211
  Supported Platforms:
5781
6212
  ``Ascend`` ``GPU`` ``CPU``
@@ -5783,7 +6214,7 @@ class Igamma(Primitive):
5783
6214
  Examples:
5784
6215
  >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
5785
6216
  >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
5786
- >>> igamma = P.Igamma()
6217
+ >>> igamma = ops.Igamma()
5787
6218
  >>> output = igamma(a, x)
5788
6219
  >>> print (output)
5789
6220
  [0.593994 0.35276785 0.21486944 0.13337152]
@@ -5799,31 +6230,7 @@ class Igammac(Primitive):
5799
6230
  r"""
5800
6231
  Compute the upper regularized incomplete Gamma function Q(a, x).
5801
6232
 
5802
- The upper regularized incomplete Gamma function is defined as:
5803
- \(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\)
5804
- where
5805
- \(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\)
5806
-
5807
- is the upper incomplete Gama function.
5808
-
5809
- Note, above P(a, x) (Igamma) is the lower regularized complete Gamma function.
5810
-
5811
- .. warning::
5812
- This is an experimental prototype that is subject to change and/or deletion.
5813
-
5814
- Inputs:
5815
- - **a** (Tensor) - The input tensor of igammac. With float32 or float64 data type.
5816
- - **x** (Tensor) - The input tensor of igammac. With float32 or float64 type. `x` should have
5817
- the same type with `a`.
5818
-
5819
- Outputs:
5820
- A Tensor, has the same dtype as `a` and `x`.
5821
-
5822
- Raises:
5823
- TypeError: If dtype of input x and a is not float32 nor float64.
5824
- TypeError: If a or x is not a Tensor.
5825
- TypeError: If x has different dtype with a.
5826
- ValueError: If `a` could not be broadcast to a tensor with shape of `x`.
6233
+ Refer to :func:`mindspore.ops.igammac` for more details.
5827
6234
 
5828
6235
  Supported Platforms:
5829
6236
  ``Ascend`` ``GPU`` ``CPU``
@@ -5831,7 +6238,7 @@ class Igammac(Primitive):
5831
6238
  Examples:
5832
6239
  >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
5833
6240
  >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
5834
- >>> igammac = P.Igammac()
6241
+ >>> igammac = ops.Igammac()
5835
6242
  >>> output = igammac(a, x)
5836
6243
  >>> print (output)
5837
6244
  [0.40600586 0.6472318 0.7851304 0.8666283 ]
@@ -5845,9 +6252,10 @@ class Igammac(Primitive):
5845
6252
 
5846
6253
  class IsClose(Primitive):
5847
6254
  r"""
5848
- Returns a boolean Tensor where two tensors are element-wise equal within a tolerance.
6255
+ Returns a tensor of Boolean values indicating whether two input tensors
6256
+ are element-wise equal within a given tolerance.
5849
6257
 
5850
- Refer to :func:`mindspore.ops.isclose` for more detail.
6258
+ Refer to :func:`mindspore.ops.isclose` for more details.
5851
6259
 
5852
6260
  Supported Platforms:
5853
6261
  ``Ascend`` ``GPU`` ``CPU``
@@ -5881,28 +6289,10 @@ class MatrixExp(Primitive):
5881
6289
  r"""
5882
6290
  Computes the matrix exponential of a square matrix. Supports batched inputs.
5883
6291
 
5884
- Refer to :func:`mindspore.ops.matrix_exp` for more detail.
5885
-
5886
- .. math::
5887
-
5888
- matrix\_exp(x) = \sum_{k=0}^{\infty} \frac{1}{k !} x^{k} \in \mathbb{K}^{n \times n}
5889
-
5890
- Inputs:
5891
- - **x** (Tensor) - The shape of tensor is :math:`(*, n, n)` where * is zero or more batch dimensions.
5892
- Must be one of the following types: float64, float32, float16, complex64, complex128.
5893
-
5894
- Outputs:
5895
- Tensor, has the same shape and dtype as the `x`.
5896
-
5897
- Raises:
5898
- TypeError: If `x` is not a Tensor.
5899
- TypeError: If the dtype of `x` is not one of the following dtype:
5900
- float16, float32, float64, complex64, complex128.
5901
- ValueError: If the rank of `x` is less than 2.
5902
- ValueError: If the last two dimensions of `x` are not equal.
6292
+ Refer to :func:`mindspore.ops.matrix_exp` for more details.
5903
6293
 
5904
6294
  Supported Platforms:
5905
- ``Ascend`` ``CPU``
6295
+
5906
6296
 
5907
6297
  Examples:
5908
6298
  >>> matrix_exp = ops.MatrixExp()
@@ -5923,24 +6313,30 @@ class MatrixSolve(Primitive):
5923
6313
  Solves systems of linear equations.
5924
6314
 
5925
6315
  Args:
5926
- adjoint(bool): Indicating whether to solve with matrix or its (block-wise) adjoint. Default: False.
6316
 + adjoint (bool, optional): Indicates whether the adjoint (conjugate transpose) of the
6317
 + matrix is used during the computation. Default: False, the matrix itself is used.
5927
6318
 
5928
6319
  Inputs:
5929
- - **matrix** (Tensor) - The shape of tensor is :math:`[..., M, M]`.
5930
- - **rhs** (Tensor) - The shape of tensor is :math:`[..., M, K]`. 'rhs' must have the same type as `matrix`.
6320
+ - **matrix** (Tensor) - A tensor of shape :math:`(..., M, M)`,
6321
 + which is the coefficient matrix of a system of linear equations.
6322
+ - **rhs** (Tensor) - A tensor of shape :math:`(..., M, K)`,
6323
 + which holds the right-hand-side values of the system of linear equations.
6324
+ `rhs` must have the same type as `matrix`.
5931
6325
 
5932
6326
  Outputs:
5933
- A Tensor. Has the same type and shape as 'rhs'.
6327
+ Tensor, a matrix composed of solutions to a system of linear equations,
6328
+ which has the same type and shape as `rhs`.
5934
6329
 
5935
6330
  Raises:
5936
- TypeError: If adjoint is not the type of bool.
5937
- TypeError: If the type of matrix is not one of the following dtype:
5938
- mstype.float16, mstype.float32, mstype.float64, mstype.complex64, mstype.complex128.
6331
+ TypeError: If `adjoint` is not the type of bool.
6332
+ TypeError: If the type of `matrix` is not one of the following dtype:
6333
+ mstype.float16, mstype.float32, mstype.float64, mstype.complex64,
6334
+ mstype.complex128.
5939
6335
  TypeError: If the type of `matrix` is not the same as that of `rhs`.
5940
6336
  ValueError: If the rank of `matrix` is less than 2.
5941
- ValueError: If the dimension of `matrix` is not the same as `rhs`.
6337
+ ValueError: If the dimension of `matrix` is not the same as `rhs` .
5942
6338
  ValueError: If the inner-most 2 dimensions of `matrix` are not the same.
5943
- ValueError: If the inner-most 2 dimension of `rhs` does not match `matrix`.
6339
 + ValueError: If the inner-most 2 dimensions of `rhs` do not match `matrix` .
5944
6340
 
5945
6341
  Supported Platforms:
5946
6342
  ``Ascend`` ``CPU``
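A NumPy sketch of the assumed `adjoint` semantics (illustration only; `adjoint=True` is taken here to mean solving with the conjugate transpose of `matrix`):

 >>> import numpy as np
 >>> A = np.array([[2., 0.], [1., 1.]])
 >>> b = np.array([[2.], [3.]])
 >>> np.linalg.solve(A, b)           # adjoint=False: solve A x = b
 >>> np.linalg.solve(A.conj().T, b)  # adjoint=True: solve A^H x = b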
@@ -5961,27 +6357,130 @@ class MatrixSolve(Primitive):
5961
6357
  self.adjoint = validator.check_value_type("adjoint", adjoint, [bool], self.name)
5962
6358
 
5963
6359
 
5964
- class LuSolve(Primitive):
6360
+ class MatrixSolveLs(Primitive):
6361
+ r"""
6362
+ Solves one or more linear least-squares problems.
6363
+
6364
+ If `fast` is `True`,then the solution is computed by solving the normal equations using Cholesky decomposition.
6365
+ If `fast` is `False` an algorithm based on the numerically robust complete orthogonal decomposition is used. This
6366
+ path is typically 6-7 times slower than the fast path. If `fast` is `False` then `l2_regularizer` is ignored.
6367
+
6368
+ Args:
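In effect, the fast path computes the standard regularized normal-equations solution (stated here for orientation only, not as a description of the kernel internals):

 .. math::
     output = (matrix^H \cdot matrix + l2\_regularizer \cdot I)^{-1} \cdot matrix^H \cdot rhs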
6369
+ fast (bool): An optional bool. Defaults to True.
6370
+
6371
+ Inputs:
6372
+ - **matrix** (Tensor) - A Tensor. Must be one of the following data types: float64, float32, complex64,
6373
+ complex128. Shape is :math:`(*, M, N)`.
6374
+ - **rhs** (Tensor) - A Tensor. Must have the same data type as matrix. Shape is :math:`(*, M, K)`.
6375
+ `matrix` and `rhs` should have the same dimensions except the last one.
6376
+ - **l2_regularizer** (Tensor) - A Tensor of type float64. Scalar tensor.
6377
+
6378
+ Outputs:
6379
+ Tensor of shape :math:`(*, N, K)` with the same data type as `matrix`.
6380
+
6381
+ Raises:
6382
+ TypeError: If `matrix`, `rhs` or `l2_regularizer` is not tensor.
6383
+ TypeError: If either of `matrix` and `rhs` is not float32, float64, complex64 or complex128.
6384
+ TypeError: If `l2_regularizer` is not float64.
6385
+ TypeError: If `fast` is not bool.
6386
+ ValueError: If dimensions of `matrix` or `rhs` is less than 2.
6387
+ ValueError: If shape of `matrix` dose not match the shape of `rhs`.
6388
+
6389
+ Supported Platforms:
6390
+ ``CPU``
6391
+
6392
+ Examples:
6393
+ >>> matrix_solve_ls = ops.MatrixSolveLs(fast=True)
6394
+ >>> matrix = Tensor([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]], mstype.float32)
6395
+ >>> rhs = Tensor(np.array([[4], [2], [4], [2]]), mstype.float32)
6396
+ >>> l2 = Tensor(0.0, mstype.float64)
6397
+ >>> output = matrix_solve_ls(matrix, rhs, l2)
6398
+ >>> print(output)
6399
+ [[ 1.3333334]
6400
+ [-0.6666667]
6401
+ [ 2.6666665]
6402
+ [-1.3333333]]
6403
+ """
6404
+
6405
+ @prim_attr_register
6406
+ def __init__(self, fast=True):
6407
+ """Initialize MatrixSolveLs"""
6408
+ validator.check_value_type('fast', fast, [bool], self.name)
6409
+
6410
+
6411
+ class Lu(Primitive):
6412
+ """
6413
+ Computes the LU decomposition of one or more square matrices.
6414
+
6415
+ Args:
6416
 + output_idx_type (:class:`mindspore.dtype`): An optional data type, which can be `mindspore.dtype.int32` or `mindspore.dtype.int64`.
6417
+ Default: `mindspore.dtype.int32`.
6418
+
6419
+ Inputs:
6420
+ - **input** (Tensor) - A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
6421
+ matrices of size `[M, M]`, with data type float32, float64, complex64, complex128.
6422
+
6423
+ Outputs:
6424
+ - **lu** (Tensor) - A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the lower
6425
+ triangular factor `L` with unit diagonal. Upper triangular part denotes the upper triangular factor `U`.
6426
+ - **p** (Tensor) - Permutation of the rows encoded as a list of indices in `0..M-1`, shape is `[..., M]`.
6427
+
6428
+ Raises:
6429
+ TypeError: If the dtype of `input` is not one of the following dtype:
6430
+ float32, float64, complex64, complex128.
6431
+ TypeError: If `output_idx_type` is neither int32 nor int64.
6432
+ ValueError: If `input` rank is less than 2.
6433
 + ValueError: If input.shape[-1] is not equal to input.shape[-2].
6434
+
6435
+ Supported Platforms:
6436
+ ``GPU``
6437
+
6438
+ Examples:
6439
+ >>> input = Tensor(np.array([[2.5,3.1,3.5], [4.7,1.9,0.2], [1.1,3.6,2.0]]), mindspore.float32)
6440
+ >>> lu, p = ops.Lu(output_idx_type=mindspore.int32)(input)
6441
+ >>> print(lu)
6442
+ [[4.7 1.9 0.2 ]
6443
+ [0.23404257 3.155319 1.9531915 ]
6444
+ [0.5319149 0.6621713 2.1002696 ]]
6445
+ >>> print(p)
6446
+ [1 2 0]
5965
6447
  """
5966
- Return the solution of the linear equation Ax = b.
6448
+
6449
+ @prim_attr_register
6450
+ def __init__(self, output_idx_type):
6451
+ super().__init__(name="Lu")
6452
+ self.init_prim_io_names(inputs=['input'], outputs=['lu', 'p'])
6453
+ validator.check_type_name("output_idx_type", output_idx_type, [mstype.int32, mstype.int64], self.name)
6454
+ self.add_prim_attr('output_idx_type', output_idx_type)
6455
+
6456
+
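Following the example above, the packed `lu` output can be split back into explicit factors with NumPy (a sketch for illustration; the unit diagonal of L is implicit in the packed form):

 >>> import numpy as np
 >>> lu_np = lu.asnumpy()
 >>> L = np.tril(lu_np, -1) + np.eye(3, dtype=lu_np.dtype)  # unit lower-triangular factor
 >>> U = np.triu(lu_np)                                     # upper-triangular factor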
6457
+ class LuSolve(Primitive):
6458
+ r"""
6459
+ Computes the solution y to the system of linear equations :math:`Ay = b` ,
6460
+ given LU decomposition A and column vector b.
6461
+
6462
+ LU decomposition of a matrix can be generated from :func:`mindspore.scipy.linalg.lu` .
5967
6463
 
5968
6464
  Note:
5969
6465
  The batch dimensions of lu_pivots must match the batch dimensions of lu_data, the size of the dimension and the
5970
- number of each dimension must be the same. For example, lu_data is (3, 3, 2, 2) lu_pivots is (3, 3, 2),
5971
- lu_data's batch dimensions is (3, 3), lu_pivots's batch dimensions is (3, 3).
6466
+ number of each dimension must be the same. For example, lu_data is :math:`(3, 3, 2, 2)` lu_pivots is
6467
+ :math:`(3, 3, 2)`,
6468
+ lu_data's batch dimensions is :math:`(3, 3)`, lu_pivots's batch dimensions is :math:`(3, 3)`.
5972
6469
 
5973
6470
  The batch dimensions of lu_data must match the batch dimensions of x, the batch dimensions may have
5974
6471
  different sizes, from right to left, the corresponding dimensions must be equal. For example, lu_data
5975
- is (3, 3, 2, 2) x is (2, 3, 3, 2, 1), lu_data's batch dimensions is (3, 3), x's batch dimensions is (2, 3, 3).
6472
+ is :math:`(3, 3, 2, 2)` x is :math:`(2, 3, 3, 2, 1)`, lu_data's batch dimensions is
6473
+ :math:`(3, 3)`, x's batch dimensions is :math:`(2, 3, 3)`.
5976
6474
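A shape-only sketch of the batch matching described in the Note (no computation implied):

 >>> # lu_data: (3, 3, 2, 2)   lu_pivots: (3, 3, 2)   x: (2, 3, 3, 2, 1)
 >>> # lu_data's batch dims (3, 3) match x's batch dims (2, 3, 3) from the right,
 >>> # so the output keeps x's shape: (2, 3, 3, 2, 1)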
 
5977
6475
  Inputs:
5978
- - **x** (Tensor) - The input is a tensor of size (*, m, k), where * is batch dimensions, with data type
5979
- float32, float16.
5980
- - **lu_data** (Tensor) - The input is a tensor of size (*, m, m), where * is batch dimensions, that can
5981
- be decomposed into an upper
5982
- triangular matrix U and a lower triangular matrix L, with data type float32, float16.
5983
- - **lu_pivots** (Tensor) - The input is a tensor of size (*, m), where * is batch dimensions, that can
5984
- be converted to a permutation matrix P, with data type int32.
6476
+ - **x** (Tensor) - Column vector `b` in the above equation. It has shape :math:`(*, m, k)`,
6477
+ where :math:`*` is batch dimensions, with data type float32, float16.
6478
 + - **lu_data** (Tensor) - LU decomposition. It has shape :math:`(*, m, m)`, where :math:`*` is batch
6479
+ dimensions, that can be decomposed into an upper triangular matrix U and a lower triangular
6480
+ matrix L, with data type float32, float16.
6481
+ - **lu_pivots** (Tensor) - Permutation matrix P of LU decomposition. It has
6482
+ shape :math:`(*, m)`, where :math:`*` is batch dimensions, that can be converted
6483
+ to a permutation matrix P, with data type int32.
5985
6484
 
5986
6485
  Outputs:
5987
6486
  Tensor, the same data type as the x and lu_data.
@@ -5995,7 +6494,7 @@ class LuSolve(Primitive):
5995
6494
  ValueError: If `x` dimension less than 2, `lu_data` dimension less than 2 or `lu_pivots` dimension less than 1.
5996
6495
 
5997
6496
  Supported Platforms:
5998
- ``Ascend`` ``CPU``
6497
+ ``Ascend`` ``GPU`` ``CPU``
5999
6498
 
6000
6499
  Examples:
6001
6500
  >>> x = Tensor(np.array([[1], [3], [3]]), mindspore.float32)
@@ -6016,41 +6515,18 @@ class LuSolve(Primitive):
6016
6515
 
6017
6516
  class LuUnpack(Primitive):
6018
6517
  """
6019
- Unpack the LU_data and LU_pivots from a LU factorization of a tensor.
6518
+ Converts `LU_data` and `LU_pivots` back into P, L and U matrices, where
6519
+ P is a permutation matrix, L is a lower triangular matrix, and U is an
6520
+ upper triangular matrix. Typically, `LU_data` and `LU_pivots` are generated
6521
+ from the LU decomposition of a matrix.
6020
6522
 
6021
- Args:
6022
- unpack_data (bool): A flag indicating if the LU_data should be unpacked. If False, then the returned L and U
6023
- are None. Default: True.
6024
- unpack_pivots (bool): A flag indicating if the LU_pivots should be unpacked into a permutation matrix P. If
6025
- False, then the returned P is None. Default: True.
6026
-
6027
- Inputs:
6028
- - **LU_data** (Tensor) - The packed LU factorization data. A tensor of size [*, M, N], where * is batch
6029
- dimensions, with data type int8, uint8, int16, int32, int64, float16, float32, float64. The dims of LU_data
6030
- must be equal to or greater than 2.
6031
- - **LU_pivots** (Tensor) - The packed LU factorization pivots. A tensor of size [*, min(M, N)], where * is
6032
- batch dimensions, with data type int8, uint8, int16, int32, int64.
6033
-
6034
- Outputs:
6035
- - **pivots** (Tensor) - The permutation matrix of LU factorization. The shape is `[*, M, M]`, the dtype is
6036
- same as `LU_data`.
6037
- - **L** (Tensor) - The L matrix of LU factorization. The dtype is the same as `LU_data`.
6038
- - **U** (Tensor) - The U matrix of LU factorization. The dtype is the same as `LU_data`.
6523
+ .. warning::
6524
+ This is an experimental API that is subject to change or deletion.
6039
6525
 
6040
- Raises:
6041
- TypeError: If the dtype of `LU_data` is not one of the following: int8, uint8, int16, int32,
6042
- int64, float16, float32, float64.
6043
- TypeError: If the dtype of `LU_pivots` is not one of the following: int8, uint8, int16, int32, int64.
6044
- ValueError: If the dimension of `LU_data` is less than 2.
6045
- ValueError: If the dimension of `LU_pivots` is less than 1.
6046
- ValueError: If the size of the last dimension of LU_pivots is not equal to the minimum of the sizes of the last
6047
- two dimensions of LU_data.
6048
- ValueError: If the batch dimensions of LU_data's does not match LU_pivots's batch dimensions.
6049
- ValueError: On the CPU platform, if the value of `LU_pivots` are out of range[1, LU_data.shape[-2]).
6050
- RuntimeError: On the Ascend platform, if the value of `LU_pivots` are out of range[1, LU_data.shape[-2]).
6526
+ Refer to :func:`mindspore.ops.lu_unpack` for more details.
6051
6527
 
6052
6528
  Supported Platforms:
6053
- ``Ascend`` ``CPU``
6529
+ ``GPU`` ``CPU``
6054
6530
 
6055
6531
  Examples:
6056
6532
  >>> LU_data = Tensor(np.array([[[-0.3806, -0.4872, 0.5536],
@@ -6062,10 +6538,31 @@ class LuUnpack(Primitive):
6062
6538
  >>> LU_pivots = Tensor(np.array([[1, 3, 3],
6063
6539
  ... [2, 3, 3]]), mstype.int32)
6064
6540
  >>> lu_unpack = ops.LuUnpack()
6065
- >>> pivots, L, U = lu_unpack(LU_data, LU_pivots, unpack_data, unpack_pivots)
6541
+ >>> pivots, L, U = lu_unpack(LU_data, LU_pivots)
6066
6542
  >>> print(pivots)
6543
+ [[[1. 0. 0.]
6544
+ [0. 0. 1.]
6545
+ [0. 1. 0.]]
6546
+ <BLANKLINE>
6547
+ [[0. 0. 1.]
6548
+ [1. 0. 0.]
6549
+ [0. 1. 0.]]]
6067
6550
  >>> print(L)
6551
+ [[[ 1. 0. 0. ]
6552
+ [-0.1287 1. 0. ]
6553
+ [ 0.2583 0.5239 1. ]]
6554
+ <BLANKLINE>
6555
+ [[ 1. 0. 0. ]
6556
+ [-0.6401 1. 0. ]
6557
+ [ 0.1015 -0.5363 1. ]]]
6068
6558
  >>> print(U)
6559
+ [[[-0.3806 -0.4872 0.5536]
6560
+ [ 0. 0.6508 -0.2396]
6561
+ [ 0. 0. 0.6902]]
6562
+ <BLANKLINE>
6563
+ [[ 0.6706 -1.1782 0.4574]
6564
+ [ 0. -0.4779 0.6701]
6565
+ [ 0. 0. 0.6165]]]
6069
6566
  """
6070
6567
 
6071
6568
  @prim_attr_register
@@ -6075,39 +6572,113 @@ class LuUnpack(Primitive):
6075
6572
  validator.check_value_type("unpack_pivots", unpack_pivots, [bool], self.name)
6076
6573
 
6077
6574
 
6078
- class CholeskyInverse(Primitive):
6575
+ class Lgamma(Primitive):
6576
+ r"""
6577
+ Computes the natural logarithm of the absolute value of the gamma function on input.
6578
+
6579
+ Refer to :func:`mindspore.ops.lgamma` for more details.
6580
+
6581
+ Supported Platforms:
6582
+ ``GPU`` ``CPU``
6583
+
6584
+ Examples:
6585
+ >>> x = Tensor(np.array([0.5, 3.2, 8.5]), mindspore.float32)
6586
+ >>> lgamma = ops.Lgamma()
6587
+ >>> output = lgamma(x)
6588
+ >>> print(output)
6589
+ [0.5723649 0.8854049 9.549267 ]
6079
6590
  """
6080
- Returns the inverse of the positive definite matrix using cholesky matrix factorization.
6081
6591
 
6082
- If upper is False, u is a lower triangular such that the output tensor is
6592
+ @prim_attr_register
6593
+ def __init__(self):
6594
+ """Initialize Lgamma"""
6595
+ self.init_prim_io_names(inputs=['x'], outputs=['y'])
6083
6596
 
6084
- .. math::
6085
- inv = (uu^{T})^{{-1}}
6086
6597
 
6087
- If upper is True, u is an upper triangular such that the output tensor is
6598
+ class Digamma(Primitive):
6599
+ r"""
6600
+ Computes the grad of the lgamma function on input.
6088
6601
 
6089
6602
  .. math::
6090
- inv = (u^{T}u)^{{-1}}
6091
-
6092
- Note:
6093
- The input must be either an upper triangular matrix or a lower triangular matrix.
6603
 + P(x) = \frac{d}{dx}\ln\left(\Gamma(x)\right)
6094
6604
 
6095
- Args:
6096
- upper(bool): Whether to return a lower or upper triangular matrix. Default: False.
6605
+ .. warning::
6606
+ This is an experimental API that is subject to change or deletion.
6097
6607
 
6098
6608
  Inputs:
6099
- - **x** (Tensor) - The input tensor. types: float32, float64.
6609
+ - **x** (Tensor) - The input tensor. With type of float16 or float32 or float64.
6100
6610
 
6101
6611
  Outputs:
6102
- Tensor, has the same shape and dtype as x.
6612
+ Tensor, has the same dtype as `x`.
6103
6613
 
6104
6614
  Raises:
6105
- TypeError: If `x` is not a Tensor.
6106
- TypeError: If dtype of `x` is not one of: float32, float64.
6107
- ValueError: If the dimension of `x` is not equal to 2.
6615
+ TypeError: If x is not a Tensor.
6616
 + TypeError: If dtype of input `x` is not float16, float32 or float64.
6108
6617
 
6109
6618
  Supported Platforms:
6110
- ``CPU``
6619
+ ``GPU`` ``CPU``
6620
+
6621
+ Examples:
6622
+ >>> x = Tensor(np.array([1.5, 0.5, 9]).astype(np.float16))
6623
+ >>> digamma = ops.Digamma()
6624
+ >>> output = digamma(x)
6625
+ >>> print(output)
6626
+ [ 0.0365 -1.964 2.14 ]
6627
+ """
6628
+
6629
+ @prim_attr_register
6630
+ def __init__(self):
6631
+ """Initialize Digamma"""
6632
+ self.init_prim_io_names(inputs=['input'], outputs=['output'])
6633
+
6634
+
6635
+ class Polygamma(Primitive):
6636
+ r"""
6637
 + Computes the polygamma function, the :math:`a`-th derivative of the digamma function, on `x`.
6638
+
6639
+ .. warning::
6640
+ This is an experimental API that is subject to change or deletion.
6641
+
6642
+ Refer to :func:`mindspore.ops.polygamma` for more details.
6643
+
6644
+ Supported Platforms:
6645
+ ``GPU`` ``CPU``
6646
+
6647
+ Examples:
6648
+ >>> x = Tensor(np.array([1.0, -0.5]), mindspore.float32)
6649
+ >>> a = Tensor(np.array(1), mindspore.int64)
6650
+ >>> polygamma = ops.Polygamma()
6651
+ >>> output = polygamma(a, x)
6652
+ >>> print(output)
6653
+ [1.644934 8.934802]
6654
+ >>> a = Tensor(np.array(2), mindspore.int64)
6655
+ >>> output = polygamma(a, x)
6656
+ >>> print(output)
6657
+ [-2.404114 -0.8287967]
6658
+ >>> a = Tensor(np.array(3), mindspore.int64)
6659
+ >>> output = polygamma(a, x)
6660
+ >>> print(output)
6661
+ [ 6.4939404 193.40909 ]
6662
+ >>> a = Tensor(np.array(4), mindspore.int64)
6663
+ >>> output = polygamma(a, x)
6664
+ >>> print(output)
6665
+ [-24.886265 -3.4742498]
6666
+ """
6667
+
6668
+ @prim_attr_register
6669
+ def __init__(self):
6670
+ """Initialize Polygamma"""
6671
+ self.init_prim_io_names(inputs=['a', 'x'], outputs=['y'])
6672
+
6673
+
6674
+ class CholeskyInverse(Primitive):
6675
+ """
6676
 + Returns the inverse of a positive definite matrix, computed from its Cholesky factor.
6677
+
6678
+ Refer to :func:`mindspore.ops.cholesky_inverse` for more details.
6679
+
6680
+ Supported Platforms:
6681
+ ``Ascend`` ``CPU``
6111
6682
 
6112
6683
  Examples:
6113
6684
  >>> x = Tensor(np.array([[2,0,0], [4,1,0], [-1,1,2]]), mindspore.float32)
@@ -6129,30 +6700,25 @@ class CholeskyInverse(Primitive):
6129
6700
  class Cross(Primitive):
6130
6701
  """
6131
6702
  Returns the cross product of vectors in dimension `dim` of x1 and x2.
6132
- x1 and x2 must have the same shape and the same type, and the size of their `dim` dimension should be 3.
6133
- If `dim` is not given, it defaults to the first dimension found with the size 3.
6703
+
6704
+ .. warning::
6705
+ This is an experimental API that is subject to change or deletion.
6706
+
6707
+ Refer to :func:`mindspore.ops.cross` for more details.
6134
6708
 
6135
6709
  Args:
6136
- dim (int): The default value is -65530.
6710
 + dim (int): The dimension along which to compute the cross product. Default: -65530, meaning the first dimension found with size 3 is used.
6137
6711
 
6138
6712
  Inputs:
6139
- - **x1** (Tensor) - x1 is a tensor.
6140
- x1 and x2 must have the same shape and the same type, and the size of their `dim` dimension should be 3.
6141
- - **x2** (Tensor) - x2 is a tensor.
6713
+ - **x1** (Tensor) - Input Tensor.
6714
+ - **x2** (Tensor) - Another input Tensor, must have the same shape and
6715
+ the same type as `x1`, and the size of their `dim` dimension should be 3.
6142
6716
 
6143
6717
  Outputs:
6144
- Tensor, has the same shape and type as input.
6145
-
6146
- Raises:
6147
- TypeError: If `x1` is not a Tensor.
6148
- TypeError: If `x2` is not a Tensor.
6149
- TypeError: If the type of `x1` is not the same as that of `x2`.
6150
- ValueError: If `x1` and `x2` not have the same size, and the size of their `dim` dimension not be 3.
6151
- ValueError: If `x1` and `x2` not have the same shape.
6152
- ValueError: If `dim` is out of range, `dim` should be [-len(x1.shape), len(x1.shape)-1].
6718
 + Tensor, has the same shape and type as the inputs.
6153
6719
 
6154
6720
  Supported Platforms:
6155
- ``CPU``
6721
+ ``Ascend`` ``CPU``
6156
6722
 
6157
6723
  Examples:
6158
6724
  >>> import mindspore
@@ -6178,28 +6744,31 @@ class RaggedRange(Primitive):
6178
6744
  """
6179
6745
  Returns a `RaggedTensor` containing the specified sequences of numbers.
6180
6746
 
6181
- Args:
6747
+ Args:
6182
6748
  Tsplits (mindspore.dtype): A mindspore.dtype from: mindspore.int32, mindspore.int64.
6183
6749
 
6184
- Inputs:
6750
+ Inputs:
6185
6751
  - **starts** (Tensor) - The starts of each range, whose type is int32, int64, float32 or float64,
6186
6752
  and shape is 0D or 1D.
6187
6753
  - **limits** (Tensor) - The limits of each range, whose type and shape should be same as input `starts`.
6188
6754
  - **deltas** (Tensor) - The deltas of each range, whose type and shape should be same as input `starts`,
6189
6755
  and each element in the tensor should not be equal to 0.
6190
- Outputs:
6756
+
6757
+ Outputs:
6191
6758
  - **rt_nested_splits** (Tensor) - The nested splits of the return `RaggedTensor`,
6192
6759
  and type of the tensor is `Tsplits`,
6193
6760
  shape of the tensor is equal to shape of input `starts` plus 1.
6194
6761
  - **rt_dense_values** (Tensor) - The dense values of the return `RaggedTensor`,
6195
6762
  and type of the tensor should be same as input `starts`.
6196
6763
  Let the size of input `starts`, input `limits` and input `deltas` be i,
6197
- if type of the input `starts`, input `limits` and input `deltas`
6198
- are int32 or int64, shape of the output `rt_dense_values` is equal to
6199
- sum(abs(limits[i] - starts[i]) + abs(deltas[i]) - 1) / abs(deltas[i])),
6200
- if type of the input `starts`, input `limits` and input `deltas`
6201
- are float32 or float64, shape of the output `rt_dense_values` is equal to
6202
- sum(ceil(abs((limits[i] - starts[i]) / deltas[i]))).
6764
+
6765
+ - if type of the input `starts`, input `limits` and input `deltas`
6766
+ are int32 or int64, shape of the output `rt_dense_values` is equal to
6767
 + :math:`sum((abs(limits[i] - starts[i]) + abs(deltas[i]) - 1) / abs(deltas[i]))`.
6768
+ - if type of the input `starts`, input `limits` and input `deltas`
6769
+ are float32 or float64, shape of the output `rt_dense_values` is equal to
6770
+ :math:`sum(ceil(abs((limits[i] - starts[i]) / deltas[i])))`.
6771
+
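As a worked check of the integer formula (values chosen for illustration): with starts=[2, 5], limits=[8, 5] and deltas=[2, 1], the ranges are [2, 4, 6] and [], so `rt_dense_values` has (|8-2| + |2| - 1)/|2| + (|5-5| + |1| - 1)/|1| = 3 + 0 = 3 elements and `rt_nested_splits` is [0, 3, 3].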
6203
6772
  Raises:
6204
6773
  TypeError: If any input is not Tensor.
6205
6774
  TypeError: If the type of `starts` is not one of the following dtype: int32, int64, float32, float64.
@@ -6210,7 +6779,7 @@ class RaggedRange(Primitive):
6210
6779
  ValueError: If the shape of `starts`, `limits` and `deltas` are not same.
6211
6780
 
6212
6781
  Supported Platforms:
6213
- ``Ascend`` ``CPU``
6782
+ ``Ascend`` ``GPU`` ``CPU``
6214
6783
 
6215
6784
  Examples:
6216
6785
  >>> raggedrange = ops.RaggedRange(Tsplits=mstype.int64)
@@ -6239,27 +6808,40 @@ class Trace(Primitive):
6239
6808
  Returns a new tensor that is the sum of the input trace.
6240
6809
 
6241
6810
  Note:
6242
- Input must be matrix, and complex number is nor supported at present.
6811
+ Input must be matrix, and complex number is not supported at present.
6812
+
6813
+ .. warning::
6814
+ This is an experimental API that is subject to change or deletion.
6243
6815
 
6244
6816
  Inputs:
6245
- - **x**(Tensor) - A matrix to be calculated. The matrix must be two dimensional.
6817
+ - **x** (Tensor) - A matrix to be calculated. The matrix must be two dimensional.
6246
6818
 
6247
- Output:
6248
- Tensor, with the same data type as input 'x', and size equals to 1.
6819
+ Outputs:
6820
+ Tensor, 0D Tensor with 1 element, it has the same data type as input `x`.
6249
6821
 
6250
6822
  Raises:
6251
6823
  TypeError: If `x` is not a Tensor.
6252
6824
  ValueError: If the dimension of `x` is not equal to 2.
6253
6825
 
6254
6826
  Supported Platforms:
6255
- ``Ascend`` ``CPU``
6827
+ ``Ascend`` ``GPU`` ``CPU``
6256
6828
 
6257
6829
  Examples:
6258
6830
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
6259
6831
  >>> trace = ops.Trace()
6260
6832
  >>> output = trace(x)
6261
6833
  >>> print(output)
6262
- 15.
6834
+ 15.0
6835
+ >>> x = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
6836
+ >>> trace = ops.Trace()
6837
+ >>> output = trace(x)
6838
+ >>> print(output)
6839
+ 18.0
6840
+ >>> x = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
6841
+ >>> trace = ops.Trace()
6842
+ >>> output = trace(x)
6843
+ >>> print(output)
6844
+ 24.0
6263
6845
  """
6264
6846
 
6265
6847
  @prim_attr_register
@@ -6269,32 +6851,36 @@ class Trace(Primitive):
6269
6851
 
6270
6852
  class Median(Primitive):
6271
6853
  """
6272
- Computes the median of elements of input tensor in the `axis` dimension. If `global_median` is True, computes the
6273
- median of all elements of tensor.
6854
+ Computes the median and its corresponding indices of input tensor in the `axis` dimension.
6855
+ If `global_median` is True, computes the median of all elements of tensor.
6274
6856
 
6275
6857
  .. warning::
6276
6858
  When attr `global_median` is True, the value of the second output tensor `indices` is meaningless.
6277
6859
 
6278
6860
  Args:
6279
- global_median (bool): Whether the output tensor is the median of all input tensor elements or not.
6280
- axis (int): The dimension need to reduce. Default: 0.
6281
- keep_dims (bool): Whether the output tensor need to retain `axis` dimension or not. Default: False.
6861
+ global_median (bool, optional): Whether the output tensor is the median of all
6862
 + input tensor elements or not. Default: False.
6863
 + axis (int, optional): The dimension along which to compute the median. Default: 0.
6864
+ keep_dims (bool, optional): Whether the output tensor need to retain `axis` dimension or not. Default: False.
6282
6865
 
6283
6866
  Inputs:
6284
- - **x** (Tensor) - A Tensor, whose dtype is int16, int32, int64, float32 or float64.
6867
 + - **x** (Tensor) - A Tensor to calculate the median of. Supported dtypes: int16, int32, int64, float32 or float64.
6285
6868
 
6286
6869
  Outputs:
6287
- - **y** (Tensor) - A Tensor, Has the same dtype as the `x`. If `global_median` is true, the `y` has only one
6288
- element. If `keep_dims` is true, the `y` has the same shape as the `x` except the shape of `y` in dimension
6289
- `axis` is size 1. Otherwise, the `y` lacks `axis` dimension than input.
6290
- - **indices** (Tensor) - A Tensor, Has the same shape as the `y`, but dtype is int64.
6870
+ - **y** (Tensor) - Median, has the same dtype as the `x`.
6871
+
6872
+ - If `global_median` is True, the `y` has only one element.
6873
+ - If `keep_dims` is True, the `y` has the same shape as the `x` except the size
6874
+ of `y` in dimension `axis` is 1.
6875
 + - Otherwise, `y` lacks the `axis` dimension compared to the input.
6876
+
6877
 + - **indices** (Tensor) - Indices, has the same shape as `y`, with dtype int64.
6291
6878
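A shape-only sketch of these rules (illustration): for `x` of shape (3, 4) and axis=1,

 >>> # keep_dims=False     -> y: (3,)    indices: (3,)
 >>> # keep_dims=True      -> y: (3, 1)  indices: (3, 1)
 >>> # global_median=True  -> y: scalar  (indices is meaningless)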
 
6292
6879
  Raises:
6293
- TypeError: If dtype of `x` is not one of the following: int16, int32, int64, float32, double.
6880
+ TypeError: If dtype of `x` is not one of the following: int16, int32, int64, float32, float64.
6294
6881
  TypeError: If input `x` is not a Tensor.
6295
- TypeError: If `global_median` is not a bool.
6296
- TypeError: If `axis` is not a int.
6297
- TypeError: If `keep_dims` is not a bool.
6882
 + TypeError: If `global_median` or `keep_dims` is assigned a non-boolean value.
6883
 + TypeError: If `axis` is not an int.
6298
6884
  ValueError: If `axis` is not in range of [-x.dim, x.dim-1].
6299
6885
 
6300
6886
  Supported Platforms:
@@ -6302,20 +6888,18 @@ class Median(Primitive):
6302
6888
 
6303
6889
  Examples:
6304
6890
  >>> # case 1 : common median compute
6305
- >>> from mindspore import Tensor
6306
- >>> from mindspore.ops.operations.math_ops import Median
6891
+ >>> from mindspore import Tensor, ops
6307
6892
  >>> import numpy as np
6308
6893
  >>> x = Tensor(np.array([[5, 1, 2],[3, 5, 7], [1, 6, 4]]).astype(np.int64))
6309
- >>> median = Median(global_median=False, axis=0, keep_dims=False)
6894
+ >>> median = ops.Median(global_median=False, axis=0, keep_dims=False)
6310
6895
  >>> y = median(x)
6311
6896
  >>> print(y)
6312
6897
  (Tensor(shape=[3], dtype=Int64, value= [3, 5, 4]), Tensor(shape=[3], dtype=Int64, value= [1, 1, 2]))
6313
6898
  >>> # case 2 : global median compute
6314
- >>> from mindspore import Tensor
6315
- >>> from mindspore.ops.operations.math_ops import Median
6899
+ >>> from mindspore import Tensor, ops
6316
6900
  >>> import numpy as np
6317
6901
  >>> x = Tensor(np.array([[1, 7, 6],[5, 1, 3],[9, 17, 1]]).astype(np.int32))
6318
- >>> median = Median(global_median=True)
6902
+ >>> median = ops.Median(global_median=True)
6319
6903
  >>> y = median(x)
6320
6904
  >>> print(y)
6321
6905
  (Tensor(shape=[], dtype=Int32, value= 5), Tensor(shape=[], dtype=Int64, value= 0))
@@ -6335,7 +6919,7 @@ class SparseSegmentMean(Primitive):
6335
6919
  """
6336
6920
  Computes the mean along sparse segments of a Tensor.
6337
6921
 
6338
- Refer to :func:`mindspore.ops.sparse_segment_mean` for more detail.
6922
+ Refer to :func:`mindspore.ops.sparse_segment_mean` for more details.
6339
6923
 
6340
6924
  Supported Platforms:
6341
6925
  ``GPU`` ``CPU``
@@ -6361,19 +6945,18 @@ class SparseSegmentMean(Primitive):
6361
6945
 
6362
6946
 
6363
6947
  class Zeta(Primitive):
6364
- """
6365
- Compute the Hurwitz zeta function ζ(x,q).
6948
+ r"""
6949
 + Computes the Hurwitz zeta function ζ(x, q) of the input Tensor.
6366
6950
 
6367
6951
  .. warning::
6368
- This is an experimental prototype that is subject to change and/or deletion.
6952
+ This is an experimental API that is subject to change or deletion.
6369
6953
 
6370
6954
  .. math::
6371
-
6372
- \\zeta \\left ( x,q \\right )= \\textstyle \\sum_{n=0} ^ {\\infty} \\left ( q+n\\right )^{-x}
6955
+ \zeta \left ( x,q \right )= \textstyle \sum_{n=0} ^ {\infty} \left ( q+n\right )^{-x}
6373
6956
 
6374
6957
  Inputs:
6375
6958
  - **x** (Tensor) - A Tensor, types: float32, float64.
6376
- - **q** (Tensor) - A Tensor, must have the same shape and type as x.
6959
+ - **q** (Tensor) - A Tensor, must have the same shape and type as `x`.
6377
6960
 
6378
6961
  Outputs:
6379
6962
  Tensor, has the same dtype and shape as the x.
@@ -6385,9 +6968,9 @@ class Zeta(Primitive):
6385
6968
  ValueError: If shape of `x` is not same as the `q`.
6386
6969
 
6387
6970
  Supported Platforms:
6388
- ``Ascend`` ``CPU`` ``GPU``
6971
+ ``Ascend`` ``GPU`` ``CPU``
6389
6972
 
6390
- Example:
6973
+ Examples:
6391
6974
  >>> x = Tensor(np.array([10.]), mindspore.float32)
6392
6975
  >>> q = Tensor(np.array([1.]), mindspore.float32)
6393
6976
  >>> zeta = ops.Zeta()
@@ -6405,25 +6988,28 @@ class Bernoulli(Primitive):
6405
6988
  """
6406
6989
  Randomly sets the elements of the output to 0 or 1 with the probability of P, which follows the Bernoulli distribution.
6407
6990
 
6408
- Refer to :func:`mindspore.ops.bernoulli` for more detail.
6991
+ .. warning::
6992
+ This is an experimental API that is subject to change or deletion.
6993
+
6994
+ Refer to :func:`mindspore.ops.bernoulli` for more details.
6409
6995
 
6410
6996
  Supported Platforms:
6411
- ``GPU``
6997
+ ``GPU`` ``CPU``
6412
6998
 
6413
6999
  Examples:
6414
- >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int8)
6415
- >>> bernoulli = Bernoulli()
6416
- >>> output = bernoulli(input_x, 1.0)
7000
+ >>> input_x = Tensor([0.1, 0.2, 0.3], mindspore.float32)
7001
+ >>> bernoulli = ops.Bernoulli()
7002
+ >>> output = bernoulli(input_x, Tensor([1.0]))
6417
7003
  >>> print(output)
6418
- [1 1 1]
6419
- >>> input_p = Tensor(np.array([0.0, 1.0, 1.0]), mindspore.float32)
7004
+ [1. 1. 1.]
7005
+ >>> input_p = Tensor([0.0, 1.0, 1.0], mindspore.float32)
6420
7006
  >>> output = bernoulli(input_x, input_p)
6421
7007
  >>> print(output)
6422
- [0 1 1]
7008
+ [0. 1. 1.]
6423
7009
  """
6424
7010
 
6425
7011
  @prim_attr_register
6426
- def __init__(self, seed=-1):
7012
+ def __init__(self, seed=-1, offset=0):
6427
7013
  """Initialize Bernoulli"""
6428
7014
  self.init_prim_io_names(inputs=['x', 'p'], outputs=['y'])
6429
7015
  validator.check_value_type("seed", seed, [int], self.name)
@@ -6431,6 +7017,57 @@ class Bernoulli(Primitive):
6431
7017
  raise ValueError(f"Seed must be -1 or a non-negative integer, but got {seed}.")
6432
7018
 
6433
7019
 
7020
+ class TridiagonalSolve(Primitive):
7021
+ """
7022
 + Returns the solutions of tridiagonal systems of equations.
7023
 +
7024
 + Solves tridiagonal systems of equations of the form :math:`AX = B`,
7025
 + where only the main diagonal, superdiagonal and subdiagonal have values.
7026
 + The types of `diagonals` and `rhs` should be the same.
7027
 + The penultimate dimension of `diagonals` must be 3.
7028
+
7029
+ Args:
7030
 + partial_pivoting (bool): Whether to perform partial pivoting. Default: True.
7031
+
7032
+ Inputs:
7033
 + - **diagonals** (Tensor) - The input tensor A of the equation AX = B, with data type of float32,
7034
+ float64, complex64, complex128.
7035
+ The penultimate dimension of diagonals must be 3.
7036
+ Diagonals and rhs must have the same rank and the same type.
7037
 + - **rhs** (Tensor) - The input tensor B of the equation AX = B, with data type of float32,
7038
+ float64, complex64, complex128.
7039
 + The penultimate dimension of rhs should be equal to the last dimension of diagonals.
7040
+ Diagonals and rhs must have the same rank and the same type.
7041
+
7042
+ Outputs:
7043
 + Tensor, has the same type and shape as the input `rhs`.
7044
+
7045
+ Raises:
7046
 + TypeError: If the dtype of `diagonals` or `rhs` is not float32, float64, complex64 or complex128.
7047
 + TypeError: If the arg `partial_pivoting` is not bool.
7048
 + ValueError: If the penultimate dimension of `diagonals` is not 3.
7049
 + ValueError: If the last dimension of `diagonals` is not equal to the penultimate dimension of `rhs`.
7050
 + ValueError: If `diagonals` and `rhs` have different ranks.
7051
+
7052
+ Supported Platforms:
7053
 + ``CPU``
7054
 +
 + Examples:
7055
+ >>> diagonals = Tensor(np.array([[1.0,2.0,3.0],[2.0,3.0,4.0],[3.0,4.0,5.0]]).astype(np.float32))
7056
+ >>> rhs = Tensor(np.array([[1.0],[2.0],[3.0]]).astype(np.float32))
7057
 + >>> y = ops.TridiagonalSolve()(diagonals, rhs)
7058
 + >>> print(y)
7059
+ [[ 0. ]
7060
+ [ 1. ]
7061
+ [-0.5]]
7062
+ """
7063
+
7064
+ @prim_attr_register
7065
+ def __init__(self, partial_pivoting=True):
7066
+ self.init_prim_io_names(inputs=['diagonals', 'rhs'], outputs=['y'])
7067
+ self.partial_pivoting = validator.check_value_type(
7068
+ "partial_pivoting", partial_pivoting, [bool], self.name)
7069
+
7070
+
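A NumPy sketch of the assumed compact layout, with the rows of `diagonals` read as superdiagonal, main diagonal and subdiagonal (the ignored end elements follow the usual compact-tridiagonal convention and are an assumption here); it reproduces the example output:

 >>> import numpy as np
 >>> d = np.array([[1., 2., 3.], [2., 3., 4.], [3., 4., 5.]])
 >>> A = np.diag(d[1]) + np.diag(d[0][:-1], 1) + np.diag(d[2][1:], -1)
 >>> rhs = np.array([[1.], [2.], [3.]])
 >>> np.linalg.solve(A, rhs)  # [[0.], [1.], [-0.5]]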
6434
7071
  class Renorm(Primitive):
6435
7072
  """
6436
7073
  Renormalizes the sub-tensors along dimension `dim`, and each sub-tensor's p-norm should not exceed the
@@ -6438,10 +7075,10 @@ class Renorm(Primitive):
6438
7075
  `maxnorm`. Otherwise, each element in such a sub-tensor is divided by the p-norm of the
6439
7076
  sub-tensor and then multiplied by `maxnorm`.
6440
7077
 
6441
- Refer to :func::`mindspore.ops.renorm` for more detail.
7078
+ Refer to :func:`mindspore.ops.renorm` for more details.
6442
7079
 
6443
7080
  Supported Platforms:
6444
- ``Ascend`` ``CPU`` ``GPU``
7081
+ ``Ascend`` ``GPU`` ``CPU``
6445
7082
 
6446
7083
  Examples:
6447
7084
  >>> x = Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), mindspore.float32)
@@ -6466,39 +7103,16 @@ class Renorm(Primitive):
6466
7103
 
6467
7104
  class Cholesky(Primitive):
6468
7105
  """
6469
- Computes the Cholesky decomposition of a symmetric positive-definite matrix `A`
6470
- or for batches of symmetric positive-definite matrices.
6471
-
6472
- If `upper` is `True`, the returned matrix `U` is upper-triangular, and the decomposition has the form:
6473
-
6474
- .. math::
6475
- A = U^TU
6476
-
6477
- If `upper` is `False`, the returned matrix `L` is lower-triangular, and the decomposition has the form:
6478
-
6479
- .. math::
6480
- A = LL^T
6481
-
6482
- Args:
6483
- upper (bool): Flag that indicates whether to return a upper or lower triangular matrix.
6484
- Default: False.
6485
-
6486
- Inputs:
6487
- - **input_x** (Tensor) - Tensor of shape :math:`(*, N, N)`, where :math:`*` is zero or more batch dimensions
6488
- consisting of symmetric positive-definite matrices, with float32 or float64 data type.
7106
+ Performs the Cholesky decomposition on a single or a batch of
7107
+ symmetric positive-definite matrices.
6489
7108
 
6490
- Outputs:
6491
- Tensor, has the same shape and data type as `input_x`.
7109
+ .. warning::
7110
+ This is an experimental API that is subject to change or deletion.
6492
7111
 
6493
- Raises:
6494
- TypeError: If `upper` is not a bool.
6495
- TypeError: If dtype of `input_x` is not one of: float64, float32.
6496
- TypeError: If `input_x` is not a Tensor.
6497
- ValueError: If `input_x` is not batch square.
6498
- ValueError: If `input_x` is not symmetric positive definite.
7112
+ Refer to :func:`mindspore.ops.cholesky` for more details.
6499
7113
 
6500
7114
  Supported Platforms:
6501
- ``Ascend`` ``CPU``
7115
+ ``GPU`` ``CPU``
6502
7116
 
6503
7117
  Examples:
6504
7118
  >>> input_x = Tensor(np.array([[1.0, 1.0], [1.0, 2.0]]), mindspore.float32)
@@ -6518,28 +7132,16 @@ class Cholesky(Primitive):
6518
7132
 
6519
7133
  class STFT(Primitive):
6520
7134
  """
6521
- STFTs can be used as a way of quantifying the change of a nonstationary signal’s
6522
- frequency and phase content over time.
7135
+ Applies Short-time Fourier transform (STFT) on input signal.
6523
7136
 
6524
- Args:
6525
- n_fft (int): The size of Fourier transform.
6526
- hop_length (int): The distance between neighboring sliding window
6527
- frames.
6528
- win_length (int): the size of window frame and STFT filter.
6529
- normalized (bool): controls whether to return the normalized STFT results
6530
- onesided (bool): controls whether to return half of results to
6531
- avoid redundancy for real inputs.
6532
- return_complex (bool, optional): whether to return a complex tensor, or
6533
- a real tensor with an extra last dimension for the real and
6534
- imaginary components.
7137
+ STFT segments the signal into narrow time intervals and takes the Fourier transform
7138
+ of each segment to quantify the change of a nonstationary signal’s frequency
7139
+ and phase content over time.
6535
7140
 
6536
- input:
6537
- - **x** (Tensor) - Time sequence of stft, must be either a 1-D time tensor or a 2-D tensor.
6538
- - **window** (Tensor) - the optional window function.
7141
+ Refer to :func:`mindspore.ops.stft` for more details.
6539
7142
 
6540
-
6541
- output:
6542
- - **y** (Tensor) - A tensor containing the STFT result with shape described above.
7143
+ Supported Platforms:
7144
+ ``Ascend`` ``CPU``
6543
7145
 
6544
7146
  Examples:
6545
7147
  >>> import mindspore as ms
@@ -6567,27 +7169,29 @@ class STFT(Primitive):
6567
7169
 
6568
7170
  class CholeskySolve(Primitive):
6569
7171
  """
6570
- Given its Cholesky factor `u`, solves a linear system of equations with a positive definite matrix.
7172
+ Computes the solution of a set of linear equations with a positive definite matrix,
7173
+ according to its Cholesky decomposition factor `u` , and outputs the result as `c`.
6571
7174
 
6572
- If `upper` is `True`, `u` is upper triangular and `c` is returned such that:
7175
+ If `upper` is set to `True`, `u` is upper triangular and `c` is returned such that:
6573
7176
 
6574
7177
  .. math::
6575
7178
  c = (u^{T}u)^{{-1}}b
6576
7179
 
6577
- If `upper` is `False`, `u` is lower triangular and `c` is returned such that:
7180
+ If `upper` is set to `False`, `u` is lower triangular and `c` is returned such that:
6578
7181
 
6579
7182
  .. math::
6580
7183
  c = (uu^{T})^{{-1}}b
6581
7184
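A NumPy check of the `upper=False` formula (illustration only; `np.linalg.cholesky` returns the lower-triangular factor):

 >>> import numpy as np
 >>> A = np.array([[4., 2.], [2., 3.]])
 >>> b = np.array([[2.], [5.]])
 >>> u = np.linalg.cholesky(A)       # lower-triangular u with A = u @ u.T
 >>> c = np.linalg.inv(u @ u.T) @ b  # same as np.linalg.solve(A, b)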
 
6582
7185
  Args:
6583
- upper (bool): Flag which indicates whether to consider the Cholesky factor
6584
- as a lower or upper triangular matrix. Default: False.
7186
+ upper (bool, optional): A flag indicates whether to treat the Cholesky factor
7187
+ as an upper or a lower triangular matrix. Default: False.
6585
7188
 
6586
7189
  Inputs:
6587
7190
  - **x1** (Tensor) - Tensor of shape :math:`(*, N, M)`, indicating 2D or 3D matrices,
6588
7191
  with float32 or float64 data type.
6589
7192
  - **x2** (Tensor) - Tensor of shape :math:`(*, N, N)`, indicating 2D or 3D square matrices composed of
6590
7193
  upper or lower triangular Cholesky factor, with float32 or float64 data type.
7194
+ x1 and x2 must have the same type.
6591
7195
 
6592
7196
  Outputs:
6593
7197
  Tensor, has the same shape and data type as `x1`.
@@ -6603,7 +7207,7 @@ class CholeskySolve(Primitive):
6603
7207
  ValueError: If `x2` is not 2D or 3D square matrices.
6604
7208
 
6605
7209
  Supported Platforms:
6606
- ``Ascend`` ``CPU``
7210
+ ``Ascend`` ``GPU`` ``CPU``
6607
7211
 
6608
7212
  Examples:
6609
7213
  >>> x1 = Tensor(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), mindspore.float32)
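(Editor's note: the two formulas above are easy to verify numerically. A minimal sketch for the `upper=False` case, assuming NumPy only:)

    import numpy as np

    a = np.array([[4.0, 2.0, 1.0], [2.0, 3.0, 0.5], [1.0, 0.5, 2.0]])  # positive definite
    b = np.array([[1.0], [2.0], [3.0]])
    u = np.linalg.cholesky(a)                        # lower-triangular factor
    # upper=False: c = (u u^T)^{-1} b, computed as two triangular solves.
    c = np.linalg.solve(u.T, np.linalg.solve(u, b))
    assert np.allclose((u @ u.T) @ c, b)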
@@ -6634,7 +7238,7 @@ class FFTWithSize(Primitive):
  \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
  e^{-j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
 
- where :math:`d` = :attr:`signal_ndim` is number of dimensions for the
+ where :math:`d` = `signal_ndim` is number of dimensions for the
  signal, and :math:`N_i` is the size of signal dimension :math:`i`.
 
  For ifft, it computes the following expression:
@@ -6644,29 +7248,44 @@ class FFTWithSize(Primitive):
  \frac{1}{\prod_{i=1}^d N_i} \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
  e^{\ j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
 
- where :math:`d` = :attr:`signal_ndim` is number of dimensions for the
+ where :math:`d` = `signal_ndim` is number of dimensions for the
  signal, and :math:`N_i` is the size of signal dimension :math:`i`.
 
  Note:
- FFT/IFFT requires complex64 or complex128 inputs, return complex64 or complex128 outputs.
- RFFT requires float32 or float64 inputs, return complex64 or complex128 outputs.
- IRFFT requires complex64 or complex128 inputs, return float32 or float64 outputs.
+ - FFT/IFFT requires complex64 or complex128 inputs, return complex64 or complex128 outputs.
+ - RFFT requires float32 or float64 inputs, return complex64 or complex128 outputs.
+ - IRFFT requires complex64 or complex128 inputs, return float32 or float64 outputs.
 
  Args:
- signal_ndim (int): The number of dimensions in each signal, this controls how many dimensions of the fourier
- transform are realized, can only be 1, 2 or 3.
- inverse (bool): Whether it is the inverse transformation, used to select FFT or IFFT and RFFT or IRFFT.
- inverse=False means FFT or RFFT, inverse=True means IFFT or IRFFT.
- real (bool): Whether it is the real transformation, used to select FFT/IFFT or RFFT/IRFFT.
- real=False means FFT or IFFT, real=True means RFFT or IRFFT.
- norm (str): The default normalization ("backward") has the direct (forward) transforms unscaled
- and the inverse (backward) transforms scaled by 1/n.
- "ortho" has both direct and inverse transforms are scaled by 1/sqrt(n).
- "forward" has the direct transforms scaled by 1/n and the inverse transforms unscaled.
- n is the input x's element numbers.
- onesided (bool): Controls whether the input is halved to avoid redundancy. Default: True.
- signal_sizes (list): Size of the original signal (the signal before rfft, no batch dimension),
- only in irfft mode and set onesided=true requires the parameter. Default: [].
+ signal_ndim (int): The number of dimensions in each signal, this controls how many dimensions
+ of the Fourier transform are realized, can only be 1, 2 or 3.
+ inverse (bool): Whether it is the inverse transformation.
+ real (bool): Whether it is the real transformation.
+
+ - "inverse:False real:False" corresponds to FFT.
+ - "inverse:True real:False" corresponds to IFFT.
+ - "inverse:False real:True" corresponds to RFFT.
+ - "inverse:True real:True" corresponds to IRFFT.
+
+ norm (str, optional): The normalization, optional values: ["backward", "forward", "ortho"].
+ Default value: "backward".
+
+ - "backward" has the direct transforms unscaled and the inverse transforms scaled by :math:`1/n`,
+ where n is the input x's element numbers.
+ - "ortho" has both direct and inverse transforms scaled by :math:`1/\sqrt n`.
+ - "forward" has the direct transforms scaled by :math:`1/n` and the inverse transforms unscaled.
+
+ onesided (bool, optional): Controls whether the input is halved to avoid redundancy. Default: True.
+ signal_sizes (tuple, optional): Size of the original signal (the signal before rfft, no batch dimension).
+ This parameter is required only in IRFFT mode with `onesided` set to True, and the following
+ conditions must be satisfied. Default: ().
+
+ - The length of `signal_sizes` is equal to the signal_ndim of the IRFFT:
+ :math:`len(signal_sizes)=signal_ndim`.
+ - The last dimension of `signal_sizes` divided by 2 plus 1 is equal to
+ the last dimension of the IRFFT input: :math:`signal_size[-1]/2+1=x.shape[-1]`.
+ - `signal_sizes` has exactly the same dimensions as the input shape
+ except for the last dimension: :math:`signal_sizes[:-1]=x.shape[:-1]`.
 
  Inputs:
  - **x** (Tensor) - The dimension of the input tensor must be greater than or equal to signal_ndim.
@@ -6683,36 +7302,36 @@ class FFTWithSize(Primitive):
  ValueError: If norm is none of "backward", "forward" or "ortho".
 
  Supported Platforms:
- ``GPU``
+ ``GPU`` ``CPU``
 
  Examples:
- >>> # case FFT: signal_ndim: 1, inverse: False, real: False.
- >>> fft_in = Tensor(np.array([2, 1, 2]), mindspore.complex64)
- >>> fft_net = math_ops.FFTWithSize(signal_ndim=1, inverse=False, real=False)
- >>> fft_output = fft_net(fft_in)
- >>> print(fft_output)
- [5.0000005 +2.9802322e-08j 0.50000036+8.6602569e-01j
- 0.49999955-8.6602527e-01j]
- >>> # case IFFT: signal_ndim: 1, inverse: True, real: False.
- >>> ifft_in = fft_output
- >>> ifft_net = math_ops.FFTWithSize(signal_ndim=1, inverse=True, real=False)
- >>> ifft_output = ifft_net(ifft_in)
- >>> print(ifft_output)
- [2. +1.291434e-07j 1.0000004+7.947286e-08j 2.0000005-7.947286e-08j]
- >>> # case RFFT2D: signal_ndim: 2, inverse: False, real: True.
- >>> rfft_in = Tensor(np.array([[2, 1, 2], [3, 1, 6]]), mindspore.float32)
- >>> rfft_net = math_ops.FFTWithSize(signal_ndim=2, inverse=False, real=True)
- >>> rfft_output = rfft_net(rfft_in)
- >>> print(rfft_output)
- [[ 1.5000001e+01+2.0954278e-07j 1.1920929e-06+5.1961541e+00j]
- [-5.0000005e+00-5.9604645e-08j 9.9999934e-01-3.4641027e+00j]]
- >>> # case IRFFT2D: signal_ndim: 2, inverse: True, real: True.
- >>> irfft_in = rfft_output
- >>> irfft_net = math_ops.FFTWithSize(signal_ndim=2, inverse=True, real=True, signal_sizes=rfft_in.shape)
- >>> irfft_output = irfft_net(irfft_in)
- >>> print(irfft_output)
- [[2.0000002 0.99999976 2.0000005 ]
- [3.0000007 0.999999 6.000002 ]]
+ >>> # case FFT: signal_ndim: 1, inverse: False, real: False.
+ >>> fft_in = Tensor(np.array([2, 1, 2]), mindspore.complex64)
+ >>> fft_net = ops.FFTWithSize(signal_ndim=1, inverse=False, real=False)
+ >>> fft_output = fft_net(fft_in)
+ >>> print(fft_output)
+ [5. +0.j 0.5 +0.86602545j 0.50000006-0.8660255j ]
+ >>> # case IFFT: signal_ndim: 1, inverse: True, real: False.
+ >>> ifft_in = fft_output
+ >>> ifft_net = ops.FFTWithSize(signal_ndim=1, inverse=True, real=False)
+ >>> ifft_output = ifft_net(ifft_in)
+ >>> print(ifft_output)
+ [2. -1.9868216e-08j 0.99999994+0.0000000e+00j
+ 1.9999999 +7.9472862e-08j]
+ >>> # case RFFT2D: signal_ndim: 2, inverse: False, real: True.
+ >>> rfft_in = Tensor(np.array([[2, 1, 2], [3, 1, 6]]), mindspore.float32)
+ >>> rfft_net = ops.FFTWithSize(signal_ndim=2, inverse=False, real=True)
+ >>> rfft_output = rfft_net(rfft_in)
+ >>> print(rfft_output)
+ [[ 1.5000000e+01+1.1920929e-07j -2.3841858e-07+5.1961522e+00j]
+ [-5.0000000e+00-2.9802322e-08j 9.9999988e-01-3.4641016e+00j]]
+ >>> # case IRFFT2D: signal_ndim: 2, inverse: True, real: True.
+ >>> irfft_in = rfft_output
+ >>> irfft_net = ops.FFTWithSize(signal_ndim=2, inverse=True, real=True, signal_sizes=rfft_in.shape)
+ >>> irfft_output = irfft_net(irfft_in)
+ >>> print(irfft_output)
+ [[2. 1. 2. ]
+ [3. 0.99999994 5.9999995 ]]
  """
 
  @prim_attr_register
@@ -6726,14 +7345,47 @@ class FFTWithSize(Primitive):
  validator.check_value_type('signal_sizes', signal_sizes, [tuple, list], self.name)
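(Editor's note: the three `norm` modes follow the same convention as NumPy's `norm` argument; a sketch of the scaling relationships, assuming NumPy >= 1.20:)

    import numpy as np

    x = np.array([2, 1, 2], dtype=np.complex64)
    n = x.size
    # "backward" (the default): forward transform unscaled, inverse scaled by 1/n.
    assert np.allclose(np.fft.fft(x, norm="backward"), np.fft.fft(x))
    # "ortho": both directions scaled by 1/sqrt(n).
    assert np.allclose(np.fft.fft(x, norm="ortho"), np.fft.fft(x) / np.sqrt(n))
    # "forward": forward transform scaled by 1/n, inverse unscaled.
    assert np.allclose(np.fft.fft(x, norm="forward"), np.fft.fft(x) / n)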
 
 
+ class Polar(Primitive):
+ r"""
+ Converts polar coordinates to Cartesian coordinates.
+
+ Refer to :func:`mindspore.ops.polar` for more details.
+
+ Supported Platforms:
+ ``GPU`` ``CPU``
+
+ Examples:
+ >>> polar = ops.Polar()
+ >>> x1 = Tensor(np.array([1, 2]), mindspore.float64)
+ >>> x2 = Tensor(np.array([3, 4]), mindspore.float64)
+ >>> output = polar(x1, x2)
+ >>> print(output)
+ [-0.9899925 +0.14112001j -1.30728724-1.51360499j]
+ """
+
+ @prim_attr_register
+ def __init__(self):
+ """Initialize Polar"""
+ self.init_prim_io_names(inputs=['abs', 'angle'], outputs=['y'])
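(Editor's note: the conversion is simply abs * e^(1j * angle); a NumPy sketch reproducing the example output above:)

    import numpy as np

    abs_part = np.array([1.0, 2.0])
    angle = np.array([3.0, 4.0])
    # Polar-to-Cartesian: matches [-0.9899925+0.14112001j, -1.30728724-1.51360499j] above.
    output = abs_part * np.exp(1j * angle)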
+
+
  class NextAfter(Primitive):
  """
- Returns the next representable value after the given first number in the direction of given second number.
+ Returns the next representable floating-point value after `x1` towards `x2` element-wise.
+
+ Say there are two float32 numbers :math:`a` and :math:`b`, and let :math:`eps` be the smallest
+ representable increment of the float32 datatype. If :math:`a < b`, the next representable
+ value of :math:`a` towards :math:`b` is :math:`a+eps`; if :math:`a > b`, it is :math:`a-eps`.
 
  .. math::
 
  out_{i} = nextafter({x1_{i}, x2_{i}})
 
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
  Inputs:
  - **x1** (Tensor) - The shape of tensor is
  :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
@@ -6744,16 +7396,16 @@ class NextAfter(Primitive):
  Must be one of the following types: float32, float64.
 
  Outputs:
- Tensor, has the same shape and data type as `x`.
+ Tensor, has the same shape and data type as `x1`.
 
  Raises:
  TypeError: If neither `x1` nor `x2` is a Tensor.
- TypeError: If the dtype of input is not one of: float32, float64.
- TypeError: If the dtypes of two inputs are not same.
+ TypeError: If the dtype of `x1` and `x2` is not one of: float32, float64.
+ TypeError: If the dtypes of `x1` and `x2` are not the same.
 
  Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
  >>> nextafter = ops.NextAfter()
@@ -6770,13 +7422,113 @@ class NextAfter(Primitive):
  self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
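(Editor's note: NumPy exposes the same IEEE-754 operation as `np.nextafter`, which can be used to sanity-check the `eps` description above:)

    import numpy as np

    x1, x2 = np.float32(0.0), np.float32(0.1)
    # Stepping from 0.0 towards 0.1 moves up by the smallest representable float32 increment.
    print(np.nextafter(x1, x2))   # 1e-45 (the smallest positive float32 subnormal)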
 
 
+ class TrilIndices(Primitive):
+ r"""
+ Calculates the indices of the lower triangular elements in a `row` * `col` matrix
+ and returns them as a 2-by-N Tensor.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Refer to :func:`mindspore.ops.tril_indices` for more details.
+
+ Args:
+ row (int): number of rows in the 2-D matrix.
+ col (int): number of columns in the 2-D matrix.
+ offset (int, optional): diagonal offset from the main diagonal. Default: 0.
+ dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor.
+ An optional data type of `mstype.int32` and `mstype.int64`. Default: `mstype.int32`.
+
+ Outputs:
+ - **y** (Tensor) - indices of the elements in lower triangular part of matrix. The type specified by `dtype`.
+ The shape of output is :math:`(2, tril\_size)`, where :math:`tril\_size` is the number of elements in the
+ lower triangular matrix.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> net = ops.TrilIndices(4, 3, -1, mstype.int64)
+ >>> output = net()
+ >>> print(output)
+ [[1 2 2 3 3 3]
+ [0 0 1 0 1 2]]
+ >>> print(output.dtype)
+ Int64
+ """
+
+ @prim_attr_register
+ def __init__(self, row, col, offset=0, dtype=mstype.int32):
+ """Initialize TrilIndices"""
+ self.init_prim_io_names(inputs=[], outputs=['y'])
+ validator.check_int(row, 0, validator.GE, "row", self.name)
+ validator.check_int(col, 0, validator.GE, "col", self.name)
+ validator.check_value_type("offset", offset, [int], self.name)
+ valid_values = (mstype.int32, mstype.int64)
+ validator.check_type_name("dtype", dtype, valid_values, self.name)
+
+
+ class MatrixTriangularSolve(Primitive):
+ r"""
+ Returns a new tensor with the solution of a linear equation system with an
+ upper or lower triangular matrix.
+
+ Note:
+ Only GPU platforms now support the broadcast mechanism.
+
+ Args:
+ lower (bool, optional): If True, the innermost matrices in `matrix`
+ are lower triangular. Default: True.
+ adjoint (bool, optional): Indicates whether the adjoint of the
+ matrix is used during the computation. Default: False, meaning the matrix
+ itself rather than its adjoint is used.
+
+ Inputs:
+ - **matrix** (Tensor) - Tensor of shape :math:`(*, M, M)`,
+ with float32, float64, complex64 and complex128 data type.
+ - **rhs** (Tensor) - Tensor of shape :math:`(*, M, N)`,
+ with float32, float64, complex64 and complex128 data type.
+
+ Outputs:
+ Tensor, has the shape of :math:`(*, M, N)` and the same data type as `matrix`.
+
+ Raises:
+ TypeError: If `matrix` or `rhs` is not a Tensor.
+ TypeError: If `lower` or `adjoint` is not bool.
+ ValueError: For GPU platform, if the batch sizes of `matrix` and `rhs` do not satisfy broadcasting rules.
+ For other platforms, if the batch sizes of `matrix` and `rhs` are not equal.
+ ValueError: If the inner-most 2 dimensions of `matrix` are not equal.
+ ValueError: If the second-last dimensions of `matrix` and `rhs` are not equal.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> matrix_triangular_solve = ops.MatrixTriangularSolve(lower=True, adjoint=False)
+ >>> matrix = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
+ >>> rhs = np.array([[1, 0],[2, 2],[1, 5],[0, 3]])
+ >>> output = matrix_triangular_solve(Tensor(matrix, mindspore.float32), Tensor(rhs, mindspore.float32))
+ >>> print(output)
+ [[ 0.33333334 0. ]
+ [ 1.3333333 2. ]
+ [ 0.6666666 5. ]
+ [-2.3333333 -4. ]]
+ """
+
+ @prim_attr_register
+ def __init__(self, lower=True, adjoint=False):
+ """Initialize MatrixTriangularSolve"""
+ validator.check_value_type('adjoint', adjoint, [bool], self.name)
+ validator.check_value_type('lower', lower, [bool], self.name)
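(Editor's note: the example above can be reproduced with a plain triangular solve; a NumPy sketch of the `lower=True, adjoint=False` semantics, not the MindSpore kernel:)

    import numpy as np

    matrix = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]], dtype=np.float32)
    rhs = np.array([[1, 0], [2, 2], [1, 5], [0, 3]], dtype=np.float32)
    # Solve matrix @ output = rhs using only the lower triangle of matrix.
    output = np.linalg.solve(np.tril(matrix), rhs)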
7522
+
7523
+
6773
7524
  class CompareAndBitpack(Primitive):
  """
  Compare values of `x` to `threshold` and pack resulting bits into a `uint8`.
 
  Each comparison returns a boolean true (if x_value > threshold) or false otherwise.
 
- Given an `x` shaped `[s0, s1, ..., s_n]`, the output is a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
+ Given an `x` shaped :math:`(s_0, s_1, ..., s_n)`, the output is a `uint8`
+ Tensor shaped :math:`(s_0, s_1, ..., s_n / 8)`.
 
  Inputs:
  - **x** (Tensor) - Input tensor. Values to compare against `threshold` and bitpack. The data type must be
@@ -6812,40 +7564,56 @@ class CompareAndBitpack(Primitive):
  """Initialize CompareAndBitPack"""
 
 
- class Orgqr(Primitive):
- r"""
- Computes the first :math:`N` columns of a product of Householder matrices. Take the case of input without batch
- as an example. The input x is a matrix of size :math:`(M, N)` after householder transformation. When the diagonal
- of x is set to 1, every colunm of lower triangular in x is denoted as :math:`w_j` for :math:`j` for
- :math:`j=1, \ldots, M`, this function returns the first :math:`N` columns of the matrix
+ class NanToNum(Primitive):
+ """
+ Replaces `NaN`, positive infinity and negative infinity values in the input Tensor with the values
+ specified by `nan`, `posinf` and `neginf` respectively.
 
- .. math::
- H_{1} H_{2} \ldots H_{k} \quad \text { with } \quad H_{j}=\mathrm{I}_{M}-\tau_{j} w_{j} w_{j}^{\mathrm{H}}
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
 
- where :math:`\mathrm{I}_{M}` is the :math:`M`-dimensional identity matrix. And when :math:`w` is complex,
- :math:`w^{\mathrm{H}}` is the conjugate transpose, otherwise the transpose.
- The output matrix is the same size as the input matrix :math:`x`.
+ Refer to :func:`mindspore.ops.nan_to_num` for more details.
 
- Inputs:
- - **x** (Tensor) - Tensor of shape :math:`(*, M, N)`, indicating 2D or 3D matrices,
- with float32, float64, complex64 and complex128 data type.
- - **tau** (Tensor) - Tensor of shape :math:`(*, K)`, where `K` is less than or equal to `N`, indicating the
- reflecting coefficient in Householder transformation, which have the same type as x.
+ Supported Platforms:
+ ``Ascend`` ``CPU``
 
- Outputs:
- Tensor, has the same shape and data type as `x`.
+ Examples:
+ >>> nan_to_num = ops.NanToNum()
+ >>> x = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 3.14]), mindspore.float32)
+ >>> output = nan_to_num(x)
+ >>> print(output)
+ [ 0.0000000e+00 3.4028235e+38 -3.4028235e+38 3.1400001e+00]
+ """
+
+ @prim_attr_register
+ def __init__(self, nan=0.0, posinf=None, neginf=None):
+ """Initialize NanToNum"""
+ if nan is not None:
+ validator.check_value_type("nan", nan, [float], self.name)
+ else:
+ self.add_prim_attr("nan_none", True)
+ if posinf is not None:
+ validator.check_value_type("posinf", posinf, [float], self.name)
+ else:
+ self.add_prim_attr("posinf_none", True)
+ if neginf is not None:
+ validator.check_value_type("neginf", neginf, [float], self.name)
+ else:
+ self.add_prim_attr("neginf_none", True)
 
- Raises:
- TypeError: If `x` or `tau` are not Tensors.
- TypeError: If dtype of `x` and `tau` is not one of: float64, float32, complex64, complex128.
- ValueError: If `x` and `tau` have different batch size.
- ValueError: If x.shape[-2] < x.shape[-1].
- ValueError: If x.shape[-1] < tau.shape[-1].
- ValueError: If rank(x) - rank(tau) != 1.
- ValueError: If rank(x) != 2 or 3.
+
+ class Orgqr(Primitive):
+ r"""
+ Calculates the explicit representation of the orthogonal matrix :math:`Q`
+ returned by :class:`mindspore.ops.Geqrf`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Refer to :func:`mindspore.ops.orgqr` for more details.
 
  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
  >>> x = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62.]]), mindspore.float32)
@@ -6862,3 +7630,414 @@ class Orgqr(Primitive):
  def __init__(self):
  """Initialize Orgqr"""
  self.init_prim_io_names(inputs=['x', 'tau'], outputs=['y'])
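(Editor's note: the deleted formula above, H_j = I - tau_j * w_j * w_j^H, is straightforward to reproduce. A NumPy sketch that forms Q explicitly from Householder reflectors, under the stated convention that the diagonal of `x` is treated as 1 and `w_j` is the j-th lower-triangular column; `orgqr_sketch` is a hypothetical helper, not the MindSpore API:)

    import numpy as np

    def orgqr_sketch(x, tau):
        m, n = x.shape
        q = np.eye(m, dtype=x.dtype)
        for j, t in enumerate(tau):
            w = np.tril(x, -1)[:, j].copy()  # strictly lower part of column j ...
            w[j] = 1.0                       # ... with an implicit unit diagonal
            q = q @ (np.eye(m, dtype=x.dtype) - t * np.outer(w, w.conj()))
        return q[:, :n]                      # first N columns of H_1 H_2 ... H_k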
+
+
+ class TriuIndices(Primitive):
+ r"""
+ Calculates the indices of the upper triangular elements in a `row` * `col` matrix
+ and returns them as a 2-by-N Tensor.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Refer to :func:`mindspore.ops.triu_indices` for more details.
+
+ Args:
+ row (int): number of rows in the 2-D matrix.
+ col (int): number of columns in the 2-D matrix.
+ offset (int, optional): diagonal offset from the main diagonal. Default: 0.
+ dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor.
+ An optional data type of `mstype.int32` and `mstype.int64`. Default: `mstype.int32`.
+
+ Outputs:
+ - **y** (Tensor) - indices of the elements in upper triangular part of matrix. The type specified by `dtype`.
+ The shape of output is :math:`(2, triu\_size)`, where :math:`triu\_size` is the number of elements in the
+ upper triangular matrix.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> net = ops.TriuIndices(5, 4, 2, mstype.int64)
+ >>> output = net()
+ >>> print(output)
+ [[0 0 1]
+ [2 3 3]]
+ >>> print(output.dtype)
+ Int64
+ """
+
+ @prim_attr_register
+ def __init__(self, row, col, offset=0, dtype=mstype.int32):
+ """Initialize TriuIndices"""
+ self.init_prim_io_names(inputs=[], outputs=['y'])
+ validator.check_int(row, 0, validator.GE, "row", self.name)
+ validator.check_int(col, 0, validator.GE, "col", self.name)
+ validator.check_value_type("offset", offset, [int], self.name)
+ valid_values = (mstype.int32, mstype.int64)
+ validator.check_type_name("dtype", dtype, valid_values, self.name)
+
+
+ class Fmin(Primitive):
+ """
+ Computes the minimum of input tensors element-wise.
+
+ Refer to :func:`mindspore.ops.fmin` for more details.
+
+ Supported Platforms:
+
+
+ Examples:
+ >>> x1 = Tensor(np.array([1.0, 5.0, 3.0]), mstype.float32)
+ >>> x2 = Tensor(np.array([4.0, 2.0, 6.0]), mstype.float32)
+ >>> fmin = ops.Fmin()
+ >>> output = fmin(x1, x2)
+ >>> print(output)
+ [1. 2. 3.]
+ """
+
+ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
+
+ @prim_attr_register
+ def __init__(self):
+ """Initialize Fmin"""
+ self.add_prim_attr('ignore_nan', True)
+ self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
+
+
+ class Fmax(Primitive):
+ """
+ Computes the maximum of input tensors element-wise.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Refer to :func:`mindspore.ops.fmax` for more details.
+
+ Supported Platforms:
+ ``CPU``
+
+ Examples:
+ >>> x1 = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
+ >>> x2 = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
+ >>> fmax = ops.Fmax()
+ >>> output = fmax(x1, x2)
+ >>> print(output)
+ [4. 5. 6.]
+ """
+
+ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
+
+ @prim_attr_register
+ def __init__(self):
+ """Initialize Fmax"""
+ self.add_prim_attr('ignore_nan', True)
+ self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
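(Editor's note: what distinguishes `Fmin`/`Fmax` from `Minimum`/`Maximum` is the `ignore_nan` behavior visible in the initializers above; NumPy's `np.fmin`/`np.fmax` treat NaN the same way:)

    import numpy as np

    x1 = np.array([1.0, np.nan, 3.0])
    x2 = np.array([4.0, 2.0, np.nan])
    print(np.fmin(x1, x2))   # [1. 2. 3.]  (NaN ignored when the other operand is valid)
    print(np.fmax(x1, x2))   # [4. 2. 3.]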
+
+
+ class Eig(Primitive):
+ """
+ Computes the eigenvalues and eigenvectors of a square matrix (or a batch of square matrices).
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ compute_v (bool, optional): If `True`, compute both eigenvalues and eigenvectors;
+ if `False`, just eigenvalues will be computed. Default: False.
+
+ Inputs:
+ - **x** (Tensor) - Square matrices of shape :math:`(*, N, N)`,
+ with float32, float64, complex64 or complex128 data type.
+
+ Outputs:
+ - **eigen_values** (Tensor) - Shape :math:`(*, N)`. Each innermost vector represents eigenvalues of
+ the corresponding matrix. The eigenvalues are not necessarily ordered.
+ - **eigen_vectors** (Tensor) - If `compute_v` is `False`, it’s an empty tensor. Otherwise, this tensor
+ has shape :math:`(*, N, N)`, whose columns represent normalized (unit length) eigenvectors of corresponding
+ eigenvalues.
+
+ Raises:
+ TypeError: If `compute_v` is not a bool.
+ TypeError: If dtype of `x` is not one of: float64, float32, complex64 or complex128.
+ TypeError: If `x` is not a Tensor.
+ ValueError: If `x` is not a square matrix (or a batch of square matrices).
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> input_x = Tensor(np.array([[1.0, 0.0], [0.0, 2.0]]), mindspore.float32)
+ >>> eig = ops.Eig(compute_v=True)
+ >>> u, v = eig(input_x)
+ >>> print(u)
+ [1.+0.j 2.+0.j]
+ >>> print(v)
+ [[1.+0.j 0.+0.j]
+ [0.+0.j 1.+0.j]]
+ """
+
+ @prim_attr_register
+ def __init__(self, compute_v=False):
+ """Initialize Eig"""
+ self.init_prim_io_names(inputs=['x'], outputs=['eigen_values', 'eigen_vectors'])
+ validator.check_value_type('compute_v', compute_v, [bool], self.name)
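(Editor's note: the semantics match `np.linalg.eig` applied matrix-by-matrix; a cross-check of the example above:)

    import numpy as np

    input_x = np.array([[1.0, 0.0], [0.0, 2.0]], dtype=np.float32)
    u, v = np.linalg.eig(input_x)                    # eigenvalues [1., 2.], eigenvectors = identity
    assert np.allclose(input_x @ v, v @ np.diag(u))  # columns of v are eigenvectors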
+
+
+ class SelfAdjointEig(Primitive):
+ r"""
+ Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in input
+ such that input[..., :, :] = v[..., :, :] * diag(e[..., :]).
+ The eigenvalues are sorted in non-decreasing order.
+
+ Args:
+ compute_v (bool): If `True` then eigenvectors will be computed and returned in v;
+ if `False`, only the eigenvalues will be computed. Default: True.
+
+ Inputs:
+ - **x** (Tensor) - Must be one of the following types:
+ float64, float32, complex64, complex128. Tensor input of shape :math:`[..., N, N]`.
+
+ Outputs:
+ - **eigen_value** (Tensor) - Has the same type as input, the shape is :math:`[..., N]`.
+ - **eigen_vector** (Tensor) - If `compute_v` is `False`, it’s an empty tensor.
+ Otherwise, it has the same type as input and shape :math:`[..., N, N]`.
+
+ Raises:
+ TypeError: If `compute_v` is not a bool.
+ TypeError: If dtype of `x` is not one of: float64, float32, complex64 or complex128.
+ TypeError: If `x` is not a Tensor.
+ ValueError: If `x` is not a square matrix (or a batch of square matrices).
+
+ Supported Platforms:
+ ``CPU``
+
+ Examples:
+ >>> from mindspore.ops.operations.math_ops import SelfAdjointEig
+ >>> input_x = Tensor(np.array([[1.0, 0.0], [0.0, 2.0]]).astype(np.float32))
+ >>> self_adjoint_eig = SelfAdjointEig()
+ >>> eigen_value, eigen_vector = self_adjoint_eig(input_x)
+ >>> print(eigen_value)
+ [1. 2.]
+ >>> print(eigen_vector)
+ [[1. 0.]
+ [0. 1.]]
+ """
+
+ @prim_attr_register
+ def __init__(self, compute_v=True):
+ """Initialize SelfAdjointEig."""
+ self.init_prim_io_names(inputs=['x'], outputs=['eigen_value', 'eigen_vector'])
+ validator.check_value_type("compute_v", compute_v, [bool], self.name)
+
+
+ class Qr(Primitive):
+ """
+ Returns the QR decomposition of one or more matrices. If `full_matrices` is True, compute full-sized `q` and `r`;
+ if False (the default), compute only the leading P columns of `q`, where P is the minimum of the 2 innermost
+ dimensions of `x`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ full_matrices (bool, optional): Whether to compute the full-sized QR decomposition. Default: False.
+
+ Inputs:
+ - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions.
+ types: float16, float32, float64, complex64, complex128.
+ Define the shape of `x` as :math:`(..., m, n)`, with p as the minimum of m and n.
+
+ Outputs:
+ - **q** (Tensor) - The orthonormal matrices of x.
+ If `full_matrices` is True, the shape is :math:`(m, m)`, else the shape is :math:`(m, p)`.
+ The dtype of `q` is the same as `x`.
+ - **r** (Tensor) - The upper triangular matrices of x.
+ If `full_matrices` is True, the shape is :math:`(m, n)`, else the shape is :math:`(p, n)`.
+ The dtype of `r` is the same as `x`.
+
+ Raises:
+ TypeError: If `x` is not a Tensor.
+ TypeError: If `full_matrices` is not a bool.
+ ValueError: If the dimension of `x` is less than 2.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> qr_op = ops.Qr(full_matrices=False)
+ >>> x = Tensor([[20., -31, 7], [4, 270, -90], [-8, 17, -32]], mstype.float32)
+ >>> q, r = qr_op(x)
+ >>> print(q)
+ [[-0.912871 0.16366126 0.37400758]
+ [-0.18257418 -0.9830709 -0.01544376]
+ [ 0.36514837 -0.08238228 0.92729706]]
+ >>> print(r)
+ [[ -21.908903 -14.788506 -1.6431675]
+ [ 0. -271.9031 92.25824 ]
+ [ 0. 0. -25.665514 ]]
+ """
+
+ @prim_attr_register
+ def __init__(self, full_matrices=False):
+ """Initialize Qr"""
+ validator.check_value_type('full_matrices', full_matrices, [bool], self.name)
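(Editor's note: the `full_matrices` flag mirrors NumPy's `mode='complete'` versus the default `mode='reduced'`; a sketch of the shape contract:)

    import numpy as np

    x = np.random.rand(5, 3)                           # m = 5, n = 3, so p = 3
    q, r = np.linalg.qr(x, mode="reduced")             # like full_matrices=False: q is (5, 3), r is (3, 3)
    q_full, r_full = np.linalg.qr(x, mode="complete")  # like full_matrices=True: q is (5, 5), r is (5, 3)
    assert np.allclose(q @ r, x) and np.allclose(q_full @ r_full, x)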
+
+
+ class Cauchy(Primitive):
+ r"""
+ Create a tensor of shape `size` with random numbers drawn from the Cauchy distribution.
+ It is defined as follows:
+
+ .. math::
+ f(x)= \frac{1}{\pi} \frac{\sigma}{(x-median)^2 +\sigma^2}
+
+ Args:
+ size (list[int]): The size of tensor.
+ sigma (float, optional): the scale parameter which specifies the half-width
+ at half-maximum. Default: 1.0.
+ median (float, optional): the location parameter, specifying the location
+ of the peak of the distribution. Default: 0.0.
+
+ Outputs:
+ Tensor with Cauchy distribution data. Tensor shape is size, and data type is float32.
+
+ Raises:
+ TypeError: If `sigma` is not a float.
+ TypeError: If `median` is not a float.
+ TypeError: If `size` is not a list.
+ ValueError: If `size` list is empty.
+ ValueError: If data of `size` is not a positive integer.
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> size = [1]
+ >>> net = ops.Cauchy(size)
+ >>> y = net()
+ >>> print(y)
+ [0.03128606]
+ """
+
+ @prim_attr_register
+ def __init__(self, size, median=0.0, sigma=1.0):
+ validator.check_value_type('median', median, [float], self.name)
+ validator.check_value_type('sigma', sigma, [float], self.name)
+ validator.check_value_type('size', size, (list), self.name)
+ for index, size_ in enumerate(size):
+ validator.check_positive_int(size_, 'size[%d]' % index, self.name)
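(Editor's note: sampling from the density above is a shift-and-scale of the standard Cauchy distribution; a NumPy sketch, assuming the corrected parameter roles of `median` as location and `sigma` as scale:)

    import numpy as np

    size, median, sigma = [1000], 0.0, 1.0
    # Standard Cauchy, scaled by sigma (half-width at half-maximum) and shifted by median.
    samples = (median + sigma * np.random.standard_cauchy(size)).astype(np.float32)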
7928
+
7929
+
7930
+ class Ormqr(Primitive):
+ r"""
+ Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
+ Multiplies an (m, n) matrix C (given by other) with a matrix Q, where Q is represented using Householder
+ reflectors (x, tau), which is the output of geqrf().
+
+ Args:
+ left (bool, optional): controls the order of multiplication. If true, compute op(Q)*C.
+ If false, compute C*op(Q). Default: True.
+ transpose (bool, optional): controls whether the matrix Q is conjugate transposed or not. Default: False.
+
+ Inputs:
+ - **x** (Tensor) - Tensor of shape: (*, mn, k) where mn equals to m or n depending on the value of `left`,
+ and `*` is zero or more batch dimensions.
+ - **tau** (Tensor) - Tensor of shape (*, min(mn, k)) where `*` is zero or more batch dimensions,
+ and its type is the same as `x`.
+ - **other** (Tensor) - Tensor of shape (*, m, n) where `*` is zero or more batch dimensions,
+ and its type is the same as `x`.
+
+ Outputs:
+ - **y** (Tensor) - the output Tensor, has the same shape and data type as `other`.
+
+ Raises:
+ TypeError: If `x` or `tau` or `other` is not Tensor.
+ TypeError: If dtype of `x` or `tau` or `other` is not one of: float64, float32, complex64, complex128.
+ ValueError: If `x` or `other` is less than 2D.
+ ValueError: If rank(x) - rank(tau) != 1.
+ ValueError: If tau.shape[:-2] != x.shape[:-2]
+ ValueError: If other.shape[:-2] != x.shape[:-2]
+ ValueError: If left == true, other.shape[-2] < tau.shape[-1].
+ ValueError: If left == true, other.shape[-2] != x.shape[-2].
+ ValueError: If left == false, other.shape[-1] < tau.shape[-1].
+ ValueError: If left == false, other.shape[-1] != x.shape[-2].
+
+ Supported Platforms:
+ ``GPU``
+
+ Examples:
+ >>> x = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62]]), mindspore.float32)
+ >>> tau = Tensor(np.array([1.55, 1.94, 3.0]), mindspore.float32)
+ >>> other = Tensor(np.array([[-114.6, 10.9, 1.1],
+ [-0.304, 38.07, 69.38],
+ [-0.45, -0.17, 62]]), mindspore.float32)
+ >>> net = ops.Ormqr()
+ >>> y = net(x, tau, other)
+ >>> print(y)
+ [[ 63.82713 -13.823125 -116.28614 ]
+ [ -53.659264 -28.157839 -70.42702 ]
+ [ -79.54292 24.00183 -41.34253 ]]
+ """
+
+ @prim_attr_register
+ def __init__(self, left=True, transpose=False):
+ """Initialize Ormqr"""
+ self.init_prim_io_names(inputs=['x', 'tau', 'other'], outputs=['y'])
+ self.left = validator.check_value_type('left', left, [bool], self.name)
+ self.transpose = validator.check_value_type('transpose', transpose, [bool], self.name)
+ self.add_prim_attr('left', self.left)
+ self.add_prim_attr('transpose', self.transpose)
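(Editor's note: op(Q) can be applied without materializing Q, but the simplest reference builds Q from the Householder reflectors, as in the Orgqr sketch above, and multiplies. `ormqr_sketch` is a hypothetical helper, not the MindSpore API:)

    import numpy as np

    def ormqr_sketch(x, tau, other, left=True, transpose=False):
        # Build Q explicitly from (x, tau), then apply it on the requested side.
        m = x.shape[0]
        q = np.eye(m, dtype=x.dtype)
        for j, t in enumerate(tau):
            w = np.tril(x, -1)[:, j].copy()
            w[j] = 1.0
            q = q @ (np.eye(m, dtype=x.dtype) - t * np.outer(w, w.conj()))
        op_q = q.conj().T if transpose else q
        return op_q @ other if left else other @ op_q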
+
+
+ class Roll(Primitive):
+ """
+ Rolls the elements of a tensor along an axis.
+
+ Refer to :func:`mindspore.ops.roll` for more details.
+
+ Args:
+ shift (Union[list(int), tuple(int), int]): Specifies the number of places by which elements are shifted
+ positively (towards larger indices) along the specified dimension. Negative shifts will roll the elements
+ in the opposite direction.
+ axis (Union[list(int), tuple(int), int]): Specifies the dimension indexes of shape to be rolled.
+
+ Inputs:
+ - **input_x** (Tensor) - Input tensor.
+
+ Outputs:
+ Tensor, has the same shape and type as `input_x`.
+
+ Supported Platforms:
+ ``GPU``
+
+ Examples:
+ >>> input_x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
+ >>> op = ops.Roll(shift=2, axis=0)
+ >>> output = op(input_x)
+ >>> print(output)
+ [3. 4. 0. 1. 2.]
+ >>> input_x = Tensor(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]).astype(np.float32))
+ >>> op = ops.Roll(shift=-1, axis=0)
+ >>> output = op(input_x)
+ >>> print(output)
+ [[5. 6. 7. 8. 9.]
+ [0. 1. 2. 3. 4.]]
+ """
+
+ @prim_attr_register
+ def __init__(self, shift, axis):
+ """Initialize Roll"""
+ if context.get_context("device_target") == "GPU":
+ validator.check_value_type("shift", shift, [int, tuple, list], self.name)
+ if not isinstance(shift, (list, tuple)):
+ self.add_prim_attr('shift', [shift])
+ validator.check_value_type("axis", axis, [int, tuple, list], self.name)
+ if not isinstance(axis, (list, tuple)):
+ self.add_prim_attr('axis', [axis])
+ else:
+ if isinstance(shift, (tuple, list)) and isinstance(axis, (tuple, list)):
+ validator.check_equal_int(len(shift), 1, "shift size", self.name)
+ validator.check_equal_int(len(axis), 1, "axis size", self.name)
+ validator.check_equal_int(axis[0], 0, "axis", self.name)
+ elif isinstance(shift, int) and isinstance(axis, int):
+ validator.check_is_int(axis, "axis", self.name)
+ self.init_prim_io_names(inputs=['input_x'], outputs=['output'])