mindspore 2.0.0rc1__cp38-none-any.whl → 2.2.0__cp38-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (870)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +2 -2
  3. mindspore/__init__.py +5 -2
  4. mindspore/_akg/akg/build_module.py +5 -6
  5. mindspore/_akg/akg/composite/build_module.py +49 -16
  6. mindspore/_akg/akg/composite/split_stitch.py +10 -11
  7. mindspore/_akg/akg/config/repository.json +195 -0
  8. mindspore/_akg/akg/global_configs.py +5 -1
  9. mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
  10. mindspore/_akg/akg/tvm/api.py +4 -3
  11. mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
  12. mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
  13. mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
  14. mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
  15. mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
  16. mindspore/_akg/akg/tvm/build_module.py +16 -1
  17. mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
  18. mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
  19. mindspore/_akg/akg/tvm/ir_builder.py +1 -1
  20. mindspore/_akg/akg/tvm/module.py +1 -2
  21. mindspore/_akg/akg/tvm/stmt.py +2 -2
  22. mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
  23. mindspore/_akg/akg/utils/kernel_exec.py +58 -260
  24. mindspore/_akg/akg/utils/op_dsl.py +17 -1
  25. mindspore/_akg/akg/utils/result_analysis.py +4 -24
  26. mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
  27. mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
  28. mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
  29. mindspore/_c_mindrecord.cpython-38-aarch64-linux-gnu.so +0 -0
  30. mindspore/_check_jit_forbidden_api.py +5 -1
  31. mindspore/_checkparam.py +79 -62
  32. mindspore/_extends/graph_kernel/__init__.py +0 -1
  33. mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
  34. mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
  35. mindspore/_extends/graph_kernel/splitter.py +1 -9
  36. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
  37. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
  38. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
  39. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
  40. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
  41. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
  42. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
  43. mindspore/_extends/parse/__init__.py +19 -17
  44. mindspore/_extends/parse/namespace.py +7 -36
  45. mindspore/_extends/parse/parser.py +375 -189
  46. mindspore/_extends/parse/resources.py +36 -41
  47. mindspore/_extends/parse/standard_method.py +350 -245
  48. mindspore/_extends/parse/trope.py +2 -12
  49. mindspore/_extends/remote/kernel_build_server.py +24 -7
  50. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  51. mindspore/_install_custom.py +43 -0
  52. mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
  53. mindspore/amp.py +85 -19
  54. mindspore/bin/cache_admin +0 -0
  55. mindspore/bin/cache_server +0 -0
  56. mindspore/boost/base.py +2 -2
  57. mindspore/boost/boost.py +27 -32
  58. mindspore/boost/boost_cell_wrapper.py +37 -13
  59. mindspore/boost/grad_accumulation.py +1 -1
  60. mindspore/boost/grad_freeze.py +34 -6
  61. mindspore/boost/group_loss_scale_manager.py +15 -14
  62. mindspore/boost/less_batch_normalization.py +28 -3
  63. mindspore/common/__init__.py +15 -11
  64. mindspore/common/_auto_dynamic.py +68 -0
  65. mindspore/common/_jit_fallback_utils.py +111 -0
  66. mindspore/common/_register_for_adapter.py +17 -5
  67. mindspore/common/_register_for_tensor.py +2 -2
  68. mindspore/common/_stub_tensor.py +18 -15
  69. mindspore/common/_utils.py +31 -7
  70. mindspore/common/api.py +269 -101
  71. mindspore/common/auto_dynamic_shape.py +498 -0
  72. mindspore/common/dtype.py +61 -21
  73. mindspore/common/dump.py +9 -7
  74. mindspore/common/initializer.py +106 -76
  75. mindspore/common/jit_config.py +35 -14
  76. mindspore/common/lazy_inline.py +187 -0
  77. mindspore/common/mindir_util.py +101 -0
  78. mindspore/common/mutable.py +10 -13
  79. mindspore/common/parameter.py +246 -55
  80. mindspore/common/seed.py +13 -7
  81. mindspore/common/sparse_tensor.py +29 -33
  82. mindspore/common/tensor.py +907 -251
  83. mindspore/communication/__init__.py +7 -4
  84. mindspore/communication/_comm_helper.py +84 -4
  85. mindspore/communication/management.py +160 -88
  86. mindspore/config/op_info.config +99 -75
  87. mindspore/config/super_bar_config.json +36 -4
  88. mindspore/context.py +526 -219
  89. mindspore/dataset/__init__.py +9 -46
  90. mindspore/dataset/audio/__init__.py +4 -19
  91. mindspore/dataset/audio/transforms.py +545 -233
  92. mindspore/dataset/audio/utils.py +21 -18
  93. mindspore/dataset/callback/ds_callback.py +42 -13
  94. mindspore/dataset/core/config.py +158 -100
  95. mindspore/dataset/core/validator_helpers.py +1 -63
  96. mindspore/dataset/debug/debug_hook.py +45 -13
  97. mindspore/dataset/debug/pre_defined_hook.py +5 -5
  98. mindspore/dataset/engine/__init__.py +0 -5
  99. mindspore/dataset/engine/cache_client.py +38 -15
  100. mindspore/dataset/engine/datasets.py +615 -278
  101. mindspore/dataset/engine/datasets_audio.py +154 -283
  102. mindspore/dataset/engine/datasets_standard_format.py +104 -116
  103. mindspore/dataset/engine/datasets_text.py +443 -326
  104. mindspore/dataset/engine/datasets_user_defined.py +251 -164
  105. mindspore/dataset/engine/datasets_vision.py +839 -1443
  106. mindspore/dataset/engine/iterators.py +11 -4
  107. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
  108. mindspore/dataset/engine/obs/util.py +3 -0
  109. mindspore/dataset/engine/offload.py +6 -6
  110. mindspore/dataset/engine/queue.py +15 -14
  111. mindspore/dataset/engine/samplers.py +39 -23
  112. mindspore/dataset/engine/serializer_deserializer.py +22 -6
  113. mindspore/dataset/engine/validators.py +21 -331
  114. mindspore/dataset/text/__init__.py +5 -33
  115. mindspore/dataset/text/transforms.py +334 -165
  116. mindspore/dataset/text/utils.py +215 -145
  117. mindspore/dataset/transforms/__init__.py +1 -1
  118. mindspore/dataset/transforms/c_transforms.py +3 -2
  119. mindspore/dataset/transforms/py_transforms_util.py +40 -12
  120. mindspore/dataset/transforms/transforms.py +174 -71
  121. mindspore/dataset/utils/browse_dataset.py +25 -17
  122. mindspore/dataset/utils/line_reader.py +24 -21
  123. mindspore/dataset/vision/__init__.py +5 -26
  124. mindspore/dataset/vision/c_transforms.py +177 -165
  125. mindspore/dataset/vision/py_transforms.py +114 -119
  126. mindspore/dataset/vision/py_transforms_util.py +54 -51
  127. mindspore/dataset/vision/transforms.py +1127 -381
  128. mindspore/dataset/vision/utils.py +54 -38
  129. mindspore/dataset/vision/validators.py +12 -2
  130. mindspore/experimental/map_parameter.py +38 -4
  131. mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
  132. mindspore/experimental/optim/adam.py +192 -0
  133. mindspore/experimental/optim/adamw.py +181 -0
  134. mindspore/experimental/optim/lr_scheduler.py +1427 -0
  135. mindspore/experimental/optim/optimizer.py +252 -0
  136. mindspore/experimental/optim/sgd.py +147 -0
  137. mindspore/gen_ops.py +273 -0
  138. mindspore/include/OWNERS +1 -2
  139. mindspore/include/api/context.h +21 -1
  140. mindspore/include/api/data_type.h +2 -1
  141. mindspore/include/api/graph.h +0 -15
  142. mindspore/include/api/kernel.h +2 -0
  143. mindspore/include/api/kernel_api.h +37 -12
  144. mindspore/include/api/model.h +29 -42
  145. mindspore/include/api/model_group.h +14 -3
  146. mindspore/include/api/model_parallel_runner.h +18 -2
  147. mindspore/include/api/serialization.h +26 -0
  148. mindspore/include/api/status.h +1 -0
  149. mindspore/include/api/types.h +38 -4
  150. mindspore/include/c_api/ms/abstract.h +67 -0
  151. mindspore/include/c_api/ms/attribute.h +197 -0
  152. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  153. mindspore/include/c_api/ms/base/macros.h +32 -0
  154. mindspore/include/c_api/ms/base/status.h +33 -0
  155. mindspore/include/c_api/ms/base/types.h +282 -0
  156. mindspore/include/c_api/ms/context.h +102 -0
  157. mindspore/include/c_api/ms/graph.h +160 -0
  158. mindspore/include/c_api/ms/node.h +606 -0
  159. mindspore/include/c_api/ms/tensor.h +161 -0
  160. mindspore/include/c_api/ms/value.h +84 -0
  161. mindspore/include/c_api/status_c.h +3 -0
  162. mindspore/include/dataset/constants.h +6 -12
  163. mindspore/include/dataset/execute.h +23 -13
  164. mindspore/include/dataset/text.h +26 -26
  165. mindspore/include/dataset/transforms.h +25 -31
  166. mindspore/include/dataset/vision.h +60 -60
  167. mindspore/include/dataset/vision_ascend.h +5 -6
  168. mindspore/include/dataset/vision_lite.h +17 -17
  169. mindspore/include/mindapi/base/format.h +0 -1
  170. mindspore/include/mindapi/base/type_id.h +2 -1
  171. mindspore/include/mindapi/base/types.h +5 -1
  172. mindspore/lib/libdnnl.so.2 +0 -0
  173. mindspore/lib/libjemalloc.so.2 +0 -0
  174. mindspore/lib/libmindspore.so +0 -0
  175. mindspore/lib/libmindspore_backend.so +0 -0
  176. mindspore/lib/libmindspore_common.so +0 -0
  177. mindspore/lib/libmindspore_core.so +0 -0
  178. mindspore/lib/libmindspore_glog.so.0 +0 -0
  179. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  180. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  181. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  182. mindspore/lib/libmindspore_shared_lib.so +0 -0
  183. mindspore/lib/libmpi_adapter.so +0 -0
  184. mindspore/lib/libnnacl.so +0 -0
  185. mindspore/lib/libopencv_core.so.4.5 +0 -0
  186. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  187. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  188. mindspore/lib/libps_cache.so +0 -0
  189. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  190. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  191. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
  192. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  193. mindspore/lib/plugin/ascend/libakg.so +0 -0
  194. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  195. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  196. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  197. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  198. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  199. mindspore/lib/plugin/cpu/libakg.so +0 -0
  200. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  201. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  202. mindspore/log.py +9 -6
  203. mindspore/mindrecord/filereader.py +33 -4
  204. mindspore/mindrecord/filewriter.py +70 -35
  205. mindspore/mindrecord/mindpage.py +40 -34
  206. mindspore/mindrecord/shardreader.py +1 -1
  207. mindspore/mindrecord/shardsegment.py +1 -1
  208. mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
  209. mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
  210. mindspore/mindrecord/tools/csv_to_mr.py +29 -13
  211. mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
  212. mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
  213. mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
  214. mindspore/nn/cell.py +463 -169
  215. mindspore/nn/dynamic_lr.py +47 -43
  216. mindspore/nn/layer/activation.py +225 -82
  217. mindspore/nn/layer/basic.py +121 -79
  218. mindspore/nn/layer/channel_shuffle.py +21 -21
  219. mindspore/nn/layer/combined.py +33 -26
  220. mindspore/nn/layer/container.py +277 -22
  221. mindspore/nn/layer/conv.py +441 -304
  222. mindspore/nn/layer/dense.py +19 -13
  223. mindspore/nn/layer/embedding.py +62 -49
  224. mindspore/nn/layer/flash_attention.py +264 -0
  225. mindspore/nn/layer/image.py +50 -39
  226. mindspore/nn/layer/math.py +62 -51
  227. mindspore/nn/layer/normalization.py +219 -167
  228. mindspore/nn/layer/padding.py +58 -70
  229. mindspore/nn/layer/pooling.py +334 -287
  230. mindspore/nn/layer/rnn_cells.py +53 -38
  231. mindspore/nn/layer/rnns.py +59 -56
  232. mindspore/nn/layer/thor_layer.py +52 -44
  233. mindspore/nn/layer/timedistributed.py +6 -4
  234. mindspore/nn/layer/transformer.py +284 -164
  235. mindspore/nn/learning_rate_schedule.py +34 -25
  236. mindspore/nn/loss/__init__.py +3 -2
  237. mindspore/nn/loss/loss.py +554 -311
  238. mindspore/nn/optim/ada_grad.py +12 -9
  239. mindspore/nn/optim/adadelta.py +14 -11
  240. mindspore/nn/optim/adafactor.py +19 -16
  241. mindspore/nn/optim/adam.py +62 -47
  242. mindspore/nn/optim/adamax.py +13 -10
  243. mindspore/nn/optim/adasum.py +12 -8
  244. mindspore/nn/optim/asgd.py +10 -9
  245. mindspore/nn/optim/ftrl.py +20 -17
  246. mindspore/nn/optim/lamb.py +16 -12
  247. mindspore/nn/optim/lars.py +8 -6
  248. mindspore/nn/optim/lazyadam.py +25 -20
  249. mindspore/nn/optim/momentum.py +10 -7
  250. mindspore/nn/optim/optimizer.py +61 -9
  251. mindspore/nn/optim/proximal_ada_grad.py +14 -13
  252. mindspore/nn/optim/rmsprop.py +17 -13
  253. mindspore/nn/optim/rprop.py +30 -17
  254. mindspore/nn/optim/sgd.py +40 -23
  255. mindspore/nn/optim/thor.py +24 -26
  256. mindspore/nn/probability/bijector/bijector.py +11 -11
  257. mindspore/nn/probability/bijector/exp.py +1 -1
  258. mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
  259. mindspore/nn/probability/bijector/invert.py +1 -1
  260. mindspore/nn/probability/bijector/power_transform.py +29 -29
  261. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  262. mindspore/nn/probability/bijector/softplus.py +5 -5
  263. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
  264. mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
  265. mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
  266. mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
  267. mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
  268. mindspore/nn/probability/distribution/_utils/utils.py +1 -1
  269. mindspore/nn/probability/distribution/bernoulli.py +9 -9
  270. mindspore/nn/probability/distribution/beta.py +8 -8
  271. mindspore/nn/probability/distribution/categorical.py +23 -15
  272. mindspore/nn/probability/distribution/cauchy.py +5 -6
  273. mindspore/nn/probability/distribution/distribution.py +3 -3
  274. mindspore/nn/probability/distribution/exponential.py +4 -4
  275. mindspore/nn/probability/distribution/gamma.py +10 -10
  276. mindspore/nn/probability/distribution/geometric.py +8 -8
  277. mindspore/nn/probability/distribution/gumbel.py +8 -9
  278. mindspore/nn/probability/distribution/half_normal.py +5 -5
  279. mindspore/nn/probability/distribution/laplace.py +5 -5
  280. mindspore/nn/probability/distribution/log_normal.py +12 -11
  281. mindspore/nn/probability/distribution/logistic.py +8 -8
  282. mindspore/nn/probability/distribution/normal.py +6 -5
  283. mindspore/nn/probability/distribution/poisson.py +10 -11
  284. mindspore/nn/probability/distribution/student_t.py +8 -9
  285. mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
  286. mindspore/nn/probability/distribution/uniform.py +11 -11
  287. mindspore/nn/reinforcement/tensor_array.py +2 -2
  288. mindspore/nn/sparse/sparse.py +9 -9
  289. mindspore/nn/wrap/cell_wrapper.py +188 -63
  290. mindspore/nn/wrap/grad_reducer.py +21 -12
  291. mindspore/nn/wrap/loss_scale.py +136 -49
  292. mindspore/numpy/__init__.py +4 -4
  293. mindspore/numpy/array_creations.py +55 -56
  294. mindspore/numpy/array_ops.py +134 -35
  295. mindspore/numpy/logic_ops.py +66 -20
  296. mindspore/numpy/math_ops.py +142 -139
  297. mindspore/numpy/utils_const.py +2 -2
  298. mindspore/offline_debug/convert_async.py +2 -2
  299. mindspore/ops/_grad_experimental/__init__.py +7 -5
  300. mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
  301. mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
  302. mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
  303. mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
  304. mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
  305. mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
  306. mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
  307. mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
  308. mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
  309. mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
  310. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
  311. mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
  312. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
  313. mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
  314. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
  315. mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
  316. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
  317. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
  318. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
  319. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
  320. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
  321. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
  322. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
  323. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
  324. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  325. mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
  326. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  327. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
  328. mindspore/ops/_op_impl/aicpu/cast.py +52 -0
  329. mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
  330. mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
  331. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  332. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
  333. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  334. mindspore/ops/_op_impl/aicpu/eye.py +4 -4
  335. mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
  336. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
  337. mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
  338. mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
  339. mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
  340. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
  341. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  342. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
  343. mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
  344. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
  345. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  346. mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
  347. mindspore/ops/_op_impl/aicpu/median.py +1 -0
  348. mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
  349. mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
  350. mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
  351. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
  352. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
  353. mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
  354. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
  355. mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
  356. mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
  357. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
  358. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
  359. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
  360. mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
  361. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  362. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  363. mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
  364. mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
  365. mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
  366. mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
  367. mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
  368. mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
  369. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
  370. mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
  371. mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
  372. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
  373. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
  374. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
  375. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
  376. mindspore/ops/_op_impl/tbe/__init__.py +6 -4
  377. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  378. mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
  379. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
  380. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
  381. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
  382. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
  383. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
  384. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  385. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
  386. mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
  387. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
  388. mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
  389. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
  390. mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
  391. mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
  392. mindspore/ops/_op_impl/tbe/im2col.py +4 -4
  393. mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
  394. mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
  395. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
  396. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
  397. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  398. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
  399. mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
  400. mindspore/ops/_primitive_cache.py +1 -1
  401. mindspore/ops/_tracefunc.py +241 -0
  402. mindspore/ops/_utils/utils.py +10 -2
  403. mindspore/ops/_vmap/vmap_array_ops.py +5 -3
  404. mindspore/ops/_vmap/vmap_base.py +5 -4
  405. mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
  406. mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
  407. mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
  408. mindspore/ops/_vmap/vmap_math_ops.py +5 -2
  409. mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
  410. mindspore/ops/arg_dtype_cast.py +54 -0
  411. mindspore/ops/composite/__init__.py +7 -5
  412. mindspore/ops/composite/base.py +78 -34
  413. mindspore/ops/composite/math_ops.py +5 -695
  414. mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
  415. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
  416. mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
  417. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  418. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  419. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
  420. mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
  421. mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
  422. mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
  423. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
  424. mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
  425. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
  426. mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
  427. mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
  428. mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
  429. mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
  430. mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
  431. mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
  432. mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
  433. mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
  434. mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
  435. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
  436. mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
  437. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
  438. mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
  439. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
  440. mindspore/ops/deprecated.py +304 -0
  441. mindspore/ops/function/__init__.py +41 -4
  442. mindspore/ops/function/array_func.py +1108 -467
  443. mindspore/ops/function/clip_func.py +94 -27
  444. mindspore/ops/function/debug_func.py +3 -1
  445. mindspore/ops/function/grad/grad_func.py +82 -73
  446. mindspore/ops/function/image_func.py +28 -12
  447. mindspore/ops/function/linalg_func.py +135 -39
  448. mindspore/ops/function/math_func.py +3779 -894
  449. mindspore/ops/function/nn_func.py +1584 -657
  450. mindspore/ops/function/parameter_func.py +13 -3
  451. mindspore/ops/function/random_func.py +247 -153
  452. mindspore/ops/function/sparse_func.py +14 -11
  453. mindspore/ops/function/sparse_unary_func.py +173 -47
  454. mindspore/ops/function/spectral_func.py +8 -4
  455. mindspore/ops/function/vmap_func.py +8 -7
  456. mindspore/ops/functional.py +47 -16
  457. mindspore/ops/op_info_register.py +346 -86
  458. mindspore/ops/operations/__init__.py +38 -22
  459. mindspore/ops/operations/_grad_ops.py +145 -149
  460. mindspore/ops/operations/_inner_ops.py +298 -56
  461. mindspore/ops/operations/_ms_kernel.py +3 -3
  462. mindspore/ops/operations/_quant_ops.py +24 -28
  463. mindspore/ops/operations/_rl_inner_ops.py +9 -7
  464. mindspore/ops/operations/_scalar_ops.py +115 -0
  465. mindspore/ops/operations/_sequence_ops.py +148 -10
  466. mindspore/ops/operations/_tensor_array.py +1 -1
  467. mindspore/ops/operations/_thor_ops.py +2 -2
  468. mindspore/ops/operations/array_ops.py +1239 -561
  469. mindspore/ops/operations/comm_ops.py +166 -90
  470. mindspore/ops/operations/control_ops.py +3 -3
  471. mindspore/ops/operations/custom_ops.py +124 -102
  472. mindspore/ops/operations/debug_ops.py +24 -11
  473. mindspore/ops/operations/image_ops.py +86 -71
  474. mindspore/ops/operations/inner_ops.py +18 -13
  475. mindspore/ops/operations/linalg_ops.py +30 -11
  476. mindspore/ops/operations/math_ops.py +1730 -435
  477. mindspore/ops/operations/nn_ops.py +1953 -943
  478. mindspore/ops/operations/other_ops.py +65 -43
  479. mindspore/ops/operations/random_ops.py +258 -98
  480. mindspore/ops/operations/rl_ops.py +4 -36
  481. mindspore/ops/operations/sparse_ops.py +38 -33
  482. mindspore/ops/operations/spectral_ops.py +8 -4
  483. mindspore/ops/primitive.py +66 -44
  484. mindspore/ops/signature.py +5 -5
  485. mindspore/parallel/_auto_parallel_context.py +80 -19
  486. mindspore/parallel/_cost_model_context.py +42 -0
  487. mindspore/parallel/_offload_context.py +162 -72
  488. mindspore/parallel/_parallel_serialization.py +2 -2
  489. mindspore/parallel/_ps_context.py +16 -4
  490. mindspore/parallel/_recovery_context.py +2 -1
  491. mindspore/parallel/_tensor.py +15 -13
  492. mindspore/parallel/_transformer/layers.py +8 -6
  493. mindspore/parallel/_transformer/loss.py +1 -0
  494. mindspore/parallel/_transformer/moe.py +7 -7
  495. mindspore/parallel/_transformer/op_parallel_config.py +12 -1
  496. mindspore/parallel/_transformer/transformer.py +34 -14
  497. mindspore/parallel/_utils.py +36 -14
  498. mindspore/parallel/algo_parameter_config.py +114 -20
  499. mindspore/parallel/checkpoint_transform.py +16 -18
  500. mindspore/parallel/shard.py +16 -13
  501. mindspore/profiler/__init__.py +1 -1
  502. mindspore/profiler/common/struct_type.py +3 -3
  503. mindspore/profiler/common/util.py +3 -2
  504. mindspore/profiler/envprofiling.py +11 -4
  505. mindspore/profiler/parser/aicpu_data_parser.py +5 -3
  506. mindspore/profiler/parser/ascend_flops_generator.py +94 -0
  507. mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
  508. mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
  509. mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
  510. mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
  511. mindspore/profiler/parser/ascend_op_generator.py +276 -0
  512. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  513. mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
  514. mindspore/profiler/parser/base_timeline_generator.py +11 -7
  515. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
  516. mindspore/profiler/parser/flops_parser.py +15 -11
  517. mindspore/profiler/parser/framework_parser.py +92 -73
  518. mindspore/profiler/parser/hccl_parser.py +16 -12
  519. mindspore/profiler/parser/integrator.py +22 -11
  520. mindspore/profiler/parser/memory_usage_parser.py +36 -11
  521. mindspore/profiler/parser/minddata_analyzer.py +12 -14
  522. mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
  523. mindspore/profiler/parser/msadvisor_parser.py +8 -4
  524. mindspore/profiler/parser/op_intermediate_parser.py +5 -2
  525. mindspore/profiler/parser/optime_parser.py +1 -1
  526. mindspore/profiler/parser/profiler_info.py +4 -5
  527. mindspore/profiler/parser/step_trace_parser.py +11 -14
  528. mindspore/profiler/profiling.py +678 -377
  529. mindspore/rewrite/api/node.py +211 -54
  530. mindspore/rewrite/api/node_type.py +5 -0
  531. mindspore/rewrite/api/pattern_engine.py +22 -23
  532. mindspore/rewrite/api/scoped_value.py +20 -17
  533. mindspore/rewrite/api/symbol_tree.py +252 -106
  534. mindspore/rewrite/api/tree_node_helper.py +3 -0
  535. mindspore/rewrite/ast_helpers/__init__.py +2 -1
  536. mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
  537. mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
  538. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
  539. mindspore/rewrite/common/rewrite_elog.py +5 -1
  540. mindspore/rewrite/namer.py +51 -51
  541. mindspore/rewrite/namespace.py +14 -5
  542. mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
  543. mindspore/rewrite/node/call_function.py +79 -0
  544. mindspore/rewrite/node/cell_container.py +135 -0
  545. mindspore/rewrite/node/control_flow.py +88 -0
  546. mindspore/rewrite/{node.py → node/node.py} +313 -247
  547. mindspore/rewrite/node/node_manager.py +254 -0
  548. mindspore/rewrite/node/node_topological_manager.py +243 -0
  549. mindspore/rewrite/parsers/arguments_parser.py +22 -21
  550. mindspore/rewrite/parsers/assign_parser.py +225 -239
  551. mindspore/rewrite/parsers/attribute_parser.py +9 -7
  552. mindspore/rewrite/parsers/class_def_parser.py +179 -218
  553. mindspore/rewrite/parsers/constant_parser.py +9 -6
  554. mindspore/rewrite/parsers/container_parser.py +9 -7
  555. mindspore/rewrite/parsers/for_parser.py +36 -15
  556. mindspore/rewrite/parsers/function_def_parser.py +23 -20
  557. mindspore/rewrite/parsers/if_parser.py +28 -24
  558. mindspore/rewrite/parsers/module_parser.py +202 -25
  559. mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
  560. mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
  561. mindspore/rewrite/parsers/return_parser.py +6 -6
  562. mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
  563. mindspore/rewrite/sparsify/sparsify.py +4 -1
  564. mindspore/rewrite/sparsify/utils.py +11 -5
  565. mindspore/rewrite/symbol_tree.py +577 -732
  566. mindspore/rewrite/symbol_tree_builder.py +9 -175
  567. mindspore/rewrite/symbol_tree_dumper.py +2 -2
  568. mindspore/run_check/_check_version.py +46 -39
  569. mindspore/run_check/run_check.py +3 -2
  570. mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
  571. mindspore/safeguard/rewrite_obfuscation.py +517 -0
  572. mindspore/scipy/__init__.py +1 -1
  573. mindspore/scipy/linalg.py +67 -61
  574. mindspore/scipy/ops.py +5 -41
  575. mindspore/scipy/ops_grad.py +3 -2
  576. mindspore/scipy/ops_wrapper.py +5 -5
  577. mindspore/scipy/optimize/line_search.py +8 -8
  578. mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
  579. mindspore/scipy/optimize/minimize.py +16 -12
  580. mindspore/scipy/utils.py +1 -52
  581. mindspore/scipy/utils_const.py +4 -4
  582. mindspore/train/__init__.py +4 -4
  583. mindspore/train/_utils.py +13 -5
  584. mindspore/train/amp.py +410 -148
  585. mindspore/train/anf_ir_pb2.py +16 -4
  586. mindspore/train/callback/_backup_and_restore.py +8 -11
  587. mindspore/train/callback/_callback.py +80 -3
  588. mindspore/train/callback/_checkpoint.py +82 -51
  589. mindspore/train/callback/_early_stop.py +12 -15
  590. mindspore/train/callback/_history.py +1 -1
  591. mindspore/train/callback/_lambda_callback.py +13 -13
  592. mindspore/train/callback/_landscape.py +21 -17
  593. mindspore/train/callback/_loss_monitor.py +9 -10
  594. mindspore/train/callback/_on_request_exit.py +16 -33
  595. mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
  596. mindspore/train/callback/_summary_collector.py +44 -30
  597. mindspore/train/callback/_time_monitor.py +62 -12
  598. mindspore/train/data_sink.py +10 -16
  599. mindspore/train/dataset_helper.py +154 -86
  600. mindspore/train/loss_scale_manager.py +14 -9
  601. mindspore/train/metrics/__init__.py +10 -2
  602. mindspore/train/metrics/accuracy.py +1 -1
  603. mindspore/train/metrics/auc.py +1 -1
  604. mindspore/train/metrics/bleu_score.py +2 -2
  605. mindspore/train/metrics/confusion_matrix.py +14 -14
  606. mindspore/train/metrics/cosine_similarity.py +3 -3
  607. mindspore/train/metrics/dice.py +1 -1
  608. mindspore/train/metrics/fbeta.py +1 -1
  609. mindspore/train/metrics/hausdorff_distance.py +8 -6
  610. mindspore/train/metrics/mean_surface_distance.py +5 -4
  611. mindspore/train/metrics/metric.py +49 -17
  612. mindspore/train/metrics/occlusion_sensitivity.py +4 -4
  613. mindspore/train/metrics/perplexity.py +1 -1
  614. mindspore/train/metrics/precision.py +2 -2
  615. mindspore/train/metrics/recall.py +2 -3
  616. mindspore/train/metrics/roc.py +7 -7
  617. mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
  618. mindspore/train/metrics/topk.py +7 -4
  619. mindspore/train/mind_ir_pb2.py +193 -48
  620. mindspore/train/model.py +377 -133
  621. mindspore/train/serialization.py +697 -245
  622. mindspore/train/summary/_summary_adapter.py +5 -2
  623. mindspore/train/summary/_writer_pool.py +4 -3
  624. mindspore/train/summary/summary_record.py +25 -23
  625. mindspore/train/train_thor/convert_utils.py +39 -23
  626. mindspore/train/train_thor/dataset_helper.py +4 -3
  627. mindspore/train/train_thor/model_thor.py +8 -8
  628. mindspore/version.py +1 -1
  629. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
  630. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +633 -804
  631. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
  632. mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
  633. mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
  634. mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
  635. mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
  636. mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
  637. mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
  638. mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
  639. mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
  640. mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
  641. mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
  642. mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
  643. mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
  644. mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
  645. mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
  646. mindspore/_akg/akg/tvm/rpc/base.py +0 -182
  647. mindspore/_akg/akg/tvm/rpc/client.py +0 -436
  648. mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
  649. mindspore/_akg/akg/tvm/rpc/server.py +0 -413
  650. mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
  651. mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
  652. mindspore/_extends/graph_kernel/expander.py +0 -80
  653. mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
  654. mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
  655. mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
  656. mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
  657. mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
  658. mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
  659. mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
  660. mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
  661. mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
  662. mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
  663. mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
  664. mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
  665. mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
  666. mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
  667. mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
  668. mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
  669. mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
  670. mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
  671. mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
  672. mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
  673. mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
  674. mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
  675. mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
  676. mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
  677. mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
  678. mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
  679. mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
  680. mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
  681. mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
  682. mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
  683. mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
  684. mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
  685. mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
  686. mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
  687. mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
  688. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
  689. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
  690. mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
  691. mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
  692. mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
  693. mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
  694. mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
  695. mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
  696. mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
  697. mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
  698. mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
  699. mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
  700. mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
  701. mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
  702. mindspore/_extends/parse/jit_fallback_modules.py +0 -51
  703. mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
  704. mindspore/dataset/engine/graphdata.py +0 -1586
  705. mindspore/include/api/net.h +0 -142
  706. mindspore/ops/_grad/grad_array_ops.py +0 -1347
  707. mindspore/ops/_grad/grad_clip_ops.py +0 -84
  708. mindspore/ops/_grad/grad_debug_ops.py +0 -68
  709. mindspore/ops/_grad/grad_inner_ops.py +0 -235
  710. mindspore/ops/_grad/grad_math_ops.py +0 -1684
  711. mindspore/ops/_grad/grad_nn_ops.py +0 -1529
  712. mindspore/ops/_grad/grad_other_ops.py +0 -89
  713. mindspore/ops/_grad/grad_sequence_ops.py +0 -296
  714. mindspore/ops/_grad/grad_sparse.py +0 -323
  715. mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
  716. mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
  717. mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
  718. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  719. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  720. mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
  721. mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
  722. mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
  723. mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
  724. mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
  725. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
  726. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
  727. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  728. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
  729. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  730. mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
  731. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  732. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
  733. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
  734. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
  735. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  736. mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
  737. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
  738. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
  739. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
  740. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
  741. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
  742. mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
  743. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
  744. mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
  745. mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
  746. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
  747. mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
  748. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  749. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  750. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
  751. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
  752. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
  753. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  754. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  755. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  756. mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
  757. mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
  758. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  759. mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
  760. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
  761. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
  762. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
  763. mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
  764. mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
  765. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
  766. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  767. mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
  768. mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
  769. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
  770. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
  771. mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
  772. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  773. mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
  774. mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
  775. mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
  776. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
  777. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
  778. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
  779. mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
  780. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  781. mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
  782. mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
  783. mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
  784. mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
  785. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
  786. mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
  787. mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
  788. mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
  789. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
  790. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
  791. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
  792. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
  793. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  794. mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
  795. mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
  796. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
  797. mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
  798. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  799. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  800. mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
  801. mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
  802. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
  803. mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
  804. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  805. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  806. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  807. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
  808. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
  809. mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
  810. mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
  811. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
  812. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  813. mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
  814. mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
  815. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
  816. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
  817. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
  818. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
  819. mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
  820. mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
  821. mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
  822. mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
  823. mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
  824. mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
  825. mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
  826. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
  827. mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
  828. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
  829. mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
  830. mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
  831. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
  832. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  833. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
  834. mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
  835. mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
  836. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
  837. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  838. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
  839. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
  840. mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
  841. mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
  842. mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
  843. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  844. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  845. mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
  846. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
  847. mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
  848. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
  849. mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
  850. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  851. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
  852. mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
  853. mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
  854. mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
  855. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  856. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  857. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
  858. mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
  859. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
  860. mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
  861. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
  862. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
  863. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
  864. mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
  865. mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
  866. mindspore/rewrite/node_visitor.py +0 -44
  867. mindspore/rewrite/topological_manager.py +0 -203
  868. mindspore/scipy/sparse/linalg.py +0 -192
  869. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
  870. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
@@ -25,11 +25,9 @@ from mindspore.ops import signature as sig
 from mindspore import _checkparam as validator
 from mindspore.common import dtype as mstype
 from mindspore.common.tensor import Tensor
-from mindspore.common._decorator import deprecated
 from mindspore.ops._utils import get_broadcast_shape
 from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
 from mindspore._c_expression import Tensor as Tensor_
-from mindspore.common._utils import is_shape_unknown


 def _infer_shape_reduce(x, axis, keep_dims, prim_name):
@@ -91,15 +89,18 @@ class _MathBinaryOp(_BinaryOp):
     def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
         """Staticmethod of infer dtype for _MathBinaryOp."""
         args_type = {"x": x_dtype, "y": y_dtype}
-        complex_types = [mstype.tensor_type(mstype.complex64), mstype.tensor_type(mstype.complex128)]
+        complex_types = [mstype.TensorType(mstype.complex64), mstype.TensorType(mstype.complex128)]
         if x_dtype in complex_types or y_dtype in complex_types:
+            if (not isinstance(x_dtype, type(mstype.tensor_type))) or \
+                    (not isinstance(y_dtype, type(mstype.tensor_type))):
+                raise TypeError('Only Tensor type support Complex')
             type_infer_dict = {
-                (mstype.complex64, mstype.complex64): mstype.tensor_type(mstype.complex64),
-                (mstype.complex64, mstype.float32): mstype.tensor_type(mstype.complex64),
-                (mstype.float32, mstype.complex64): mstype.tensor_type(mstype.complex64),
-                (mstype.complex128, mstype.complex128): mstype.tensor_type(mstype.complex128),
-                (mstype.complex128, mstype.float64): mstype.tensor_type(mstype.complex128),
-                (mstype.float64, mstype.complex128): mstype.tensor_type(mstype.complex128),
+                (mstype.complex64, mstype.complex64): mstype.TensorType(mstype.complex64),
+                (mstype.complex64, mstype.float32): mstype.TensorType(mstype.complex64),
+                (mstype.float32, mstype.complex64): mstype.TensorType(mstype.complex64),
+                (mstype.complex128, mstype.complex128): mstype.TensorType(mstype.complex128),
+                (mstype.complex128, mstype.float64): mstype.TensorType(mstype.complex128),
+                (mstype.float64, mstype.complex128): mstype.TensorType(mstype.complex128),
             }
             if (x_dtype.element_type(), y_dtype.element_type()) not in type_infer_dict.keys():
                 raise TypeError('Complex math binary op expecting Tensor [Complex64, Complex64],'
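The hunk above renames the `tensor_type` constructor to `TensorType` and adds an explicit guard so that only Tensor operands may carry complex dtypes. As a hedged illustration of the promotion rule the `type_infer_dict` table encodes (plain Python, not MindSpore internals; the dict below merely mirrors the table keys and values shown in the diff):

# Sketch of the complex/float dtype promotion encoded by the hunk above.
# Names here are illustrative assumptions, not MindSpore-internal APIs.
COMPLEX_PROMOTION = {
    ("complex64", "complex64"): "complex64",
    ("complex64", "float32"): "complex64",
    ("float32", "complex64"): "complex64",
    ("complex128", "complex128"): "complex128",
    ("complex128", "float64"): "complex128",
    ("float64", "complex128"): "complex128",
}

def infer_complex_result(x_dtype: str, y_dtype: str) -> str:
    """Return the promoted dtype, or raise like do_infer_dtype does."""
    try:
        return COMPLEX_PROMOTION[(x_dtype, y_dtype)]
    except KeyError:
        raise TypeError("unsupported complex/float dtype combination") from None

print(infer_complex_result("complex64", "float32"))   # complex64
print(infer_complex_result("float64", "complex128"))  # complex128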
@@ -152,10 +153,20 @@ class Ger(Primitive):

     Refer to :func:`mindspore.ops.ger` for more details.

+    Inputs:
+        - **x1** - (Tensor) - 1-D input Tensor.
+        - **x2** - (Tensor) - 1-D input Tensor, has the same dtype as `x1`.
+
+    Outputs:
+        Tensor, output matrix with the same dtype as inputs.With `x1` shape :math:`(m,)` and
+        `x2` shape of :math:`(n,)`,the `output` has shape :math:`(m, n)`.
+
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
         >>> x1 = Tensor([1., 2., 3., 4.], mindspore.float32)
         >>> x2 = Tensor([1., 2., 3.], mindspore.float32)
         >>> ger = ops.Ger()
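The example in this hunk is truncated before the actual call; a short runnable sketch, under the assumption (per :func:`mindspore.ops.ger`) that `Ger` computes the outer product:

import mindspore
from mindspore import Tensor, ops

x1 = Tensor([1., 2., 3., 4.], mindspore.float32)  # shape (4,)
x2 = Tensor([1., 2., 3.], mindspore.float32)      # shape (3,)
output = ops.Ger()(x1, x2)                        # shape (4, 3): output[i][j] = x1[i] * x2[j]
print(output)
# Expected (outer product):
# [[ 1.  2.  3.]
#  [ 2.  4.  6.]
#  [ 3.  6.  9.]
#  [ 4.  8. 12.]]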
@@ -179,10 +190,35 @@ class Add(_MathBinaryOp):

     Refer to :func:`mindspore.ops.add` for more details.

+    Note:
+        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
+          they must be able to broadcast to a common shape.
+        - The two inputs can not be bool type at the same time,
+          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
+        - The two inputs comply with the implicit type conversion rules to make the data types
+          consistent.
+        - When input is Tensor, it's dimension should be greater than or equal to 1.
+
+    Inputs:
+        - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
+          a bool or a tensor whose data type is
+          `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+          `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
+        - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
+          the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
+          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
+
+    Outputs:
+        Tensor, the shape is the same as the one of the input `x` , `y` after broadcasting,
+        and the data type is the one with higher precision or higher digits among the two inputs.
+
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
         >>> # case 1: x and y are both Tensor.
         >>> add = ops.Add()
         >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
@@ -201,6 +237,20 @@ class Add(_MathBinaryOp):
         >>> # and the output is the data format of higher precision float32.
         >>> print(output.dtype)
         Float32
+        >>> # case 3: one of x and y is a bool scalar
+        >>> add = ops.Add()
+        >>> x = True
+        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+        >>> output = add(x, y)
+        >>> print(output)
+        [5. 6. 7.]
+        >>> # case 4: one of x and y is a bool Tensor
+        >>> add = ops.Add()
+        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
+        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+        >>> output = add(x, y)
+        >>> print(output)
+        [5. 5. 7.]
     """

     @staticmethod
@@ -243,8 +293,8 @@ class Add(_MathBinaryOp):
243
293
 
244
294
  class Addcdiv(Primitive):
245
295
  r"""
246
- Performs the element-wise division of tensor `x1` by tensor `x2`,
247
- multiply the result by the scalar `value` and add it to `input_data`.
296
+ Adds the element-wise division of `x1` by `x2`, multiplied by `value` to `input_data`.
297
+ It computes the following operation:
248
298
 
249
299
  .. math::
250
300
  y[i] = input\_data[i] + value[i] * (x1[i] / x2[i])
@@ -269,6 +319,9 @@ class Addcdiv(Primitive):
269
319
  ``Ascend`` ``GPU`` ``CPU``
270
320
 
271
321
  Examples:
322
+ >>> import mindspore
323
+ >>> import numpy as np
324
+ >>> from mindspore import Tensor, ops
272
325
  >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
273
326
  >>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
274
327
  >>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
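Following the formula above, Addcdiv computes `input_data + value * (x1 / x2)` element-wise. A NumPy sketch of the truncated example, assuming `value = 1.0` (the actual `value` tensor falls outside the diff context):

>>> import numpy as np
>>> input_data = np.array([1., 1., 1., 1.])
>>> x1 = np.array([1., 2., 3., 4.])
>>> x2 = np.array([4., 3., 2., 1.])
>>> value = 1.0   # assumed; not shown in the truncated example
>>> input_data + value * (x1 / x2)   # ≈ [1.25, 1.6667, 2.5, 5.0]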
@@ -287,8 +340,8 @@ class Addcdiv(Primitive):
 
  class Addcmul(Primitive):
  r"""
- Performs the element-wise product of tensor `x1` and tensor `x2`,
- multiply the result by the scalar `value` and add it to `input_data`.
+ Adds the element-wise product of `x1` and `x2`, multiplied by `value`, to `input_data`.
+ It computes the following operation:
 
  .. math::
  output[i] = input\_data[i] + value[i] * (x1[i] * x2[i])
@@ -313,6 +366,9 @@ class Addcmul(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
  >>> x1 = Tensor(np.array([[1], [2], [3]]), mindspore.float32)
  >>> x2 = Tensor(np.array([[1, 2, 3]]), mindspore.float32)
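Addcmul is the multiplicative analogue, and the example's inputs also demonstrate broadcasting: `x1` of shape (3, 1) times `x2` of shape (1, 3) broadcasts to (3, 3). A NumPy sketch under the same `value = 1.0` assumption:

>>> import numpy as np
>>> input_data = np.array([1., 1., 1.])
>>> x1 = np.array([[1.], [2.], [3.]])    # shape (3, 1)
>>> x2 = np.array([[1., 2., 3.]])        # shape (1, 3)
>>> input_data + 1.0 * (x1 * x2)         # broadcasts to shape (3, 3)
array([[ 2.,  3.,  4.],
       [ 3.,  5.,  7.],
       [ 4.,  7., 10.]])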
@@ -384,38 +440,28 @@ class AddV2(Primitive):
  self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
 
 
- class TensorAdd(_MathBinaryOp):
- """
- Same as operator Add. TensorAdd will be deprecated in the future.
- Please use Add instead.
- """
-
- @deprecated("1.1", "Add", True)
- @prim_attr_register
- def __init__(self):
- """Initialize TensorAdd."""
- _MathBinaryOp.__init__(self)
-
- def infer_value(self, x, y):
- if x is not None and y is not None:
- x = x.asnumpy()
- y = y.asnumpy()
- out = x + y
- out = np.array(out, x.dtype)
- return Tensor(out)
- return None
-
-
  class AssignAdd(Primitive):
  """
  Updates a `Parameter` by adding a value to it.
 
  Refer to :func:`mindspore.ops.assign_add` for more details.
 
+ Inputs:
+ - **variable** (Parameter) - The `Parameter`.
+ :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+ - **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
+ It must have the same shape as `variable` if it is a Tensor.
+
+ Outputs:
+ Tensor, has the same data type and shape as original `variable`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, nn
  >>> class Net(nn.Cell):
  ... def __init__(self):
  ... super(Net, self).__init__()
@@ -450,10 +496,22 @@ class AssignSub(Primitive):
 
  Refer to :func:`mindspore.ops.assign_sub` for more details.
 
+ Inputs:
+ - **variable** (Parameter) - The `Parameter`.
+ :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+ - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
+ It must have the same shape as `variable` if it is a Tensor.
+
+ Outputs:
+ Tensor, has the same data type and shape as original `variable`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, nn
  >>> class Net(nn.Cell):
  ... def __init__(self):
  ... super(Net, self).__init__()
@@ -488,8 +546,8 @@ class _Reduce(PrimitiveWithCheck):
  Definition of base class of reduction class operators.
 
  Args:
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default: False.
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
  """
 
  __mindspore_signature__ = (
@@ -542,8 +600,8 @@ class EuclideanNorm(Primitive):
  The specified `axes` are removed by default.
 
  Args:
- keep_dims (bool, optional): whether to retain the reduced dimensions. If true, retains them with length 1.
- If false, these dimensions are removed. Default: False.
+ keep_dims (bool, optional): whether to retain the reduced dimensions. If ``True`` , retains them with length 1.
+ If ``False`` , these dimensions are removed. Default: ``False`` .
 
  Inputs:
  - **x** (Tensor) - The input Tensor to reduce.
@@ -584,23 +642,22 @@ class ReduceMean(_Reduce):
  controlling `keep_dims`.
 
  Args:
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default: False.
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
 
  Inputs:
- - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
- Only constant value is allowed. Must be in the range [-r, r).
+ - **x** (Tensor[Number]) - The input tensor.
+ - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: ``()`` , reduce all
+ dimensions. Only constant value is allowed. Must be in the range [-r, r).
 
  Outputs:
  Tensor, has the same dtype as the `x`.
 
- - If `axis` is (), and `keep_dims` is False,
+ - If `axis` is (), and `keep_dims` is ``False`` ,
  the output is a 0-D tensor representing the mean of all elements in the input tensor.
- - If `axis` is int, set as 1, and `keep_dims` is False,
+ - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
  the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- - If `axis` is tuple(int) or list(int), set as (1, 2), and `keep_dims` is False,
+ - If `axis` is tuple(int) or list(int), set as (1, 2), and `keep_dims` is ``False`` ,
  the shape of output is :math:`(x_0, x_3, ..., x_R)`.
 
  Raises:
@@ -613,6 +670,9 @@ class ReduceMean(_Reduce):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  >>> op = ops.ReduceMean(keep_dims=True)
  >>> output = op(x, 1)
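The `keep_dims` flag only affects the result's shape: reducing axis 1 of a :math:`(3, 4, 5, 6)` input keeps a length-1 slot when it is ``True`` and drops the axis when it is ``False``. A NumPy sketch of the shapes involved:

>>> import numpy as np
>>> x = np.random.randn(3, 4, 5, 6).astype(np.float32)
>>> x.mean(axis=1, keepdims=True).shape
(3, 1, 5, 6)
>>> x.mean(axis=1, keepdims=False).shape
(3, 5, 6)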
@@ -668,21 +728,21 @@ class CumulativeLogsumexp(Primitive):
  log(exp(a) + exp(b) + exp(c))].
 
  Args:
- exclusive (bool, optional): If true, the last element will be skipped during the calculation and thus an
+ exclusive (bool, optional): If ``True`` , the last element will be skipped during the calculation and thus an
  exclusive cumulative log-sum-exp will be performed. For example, this operation
  will output [-inf, a, log(exp(a) + exp(b))] with tensor [a, b, c] as the input.
  Note that the minimal value -inf, for performance reasons, is representable by the
- floating point type. Default: False.
- reverse (bool, optional): If true, the function accumulation values will be calculated after the elements of
- `x` on `axis` are flipped, and the calculation result will be flipped afterwards. For
- example, this operation will output [log(exp(c) + exp(b) + exp(a)), log(exp(c) +
- exp(b)), c] with tensor [a, b, c] as the input. Default: False.
+ floating point type. Default: ``False`` .
+ reverse (bool, optional): If ``True`` , the function accumulation values will be calculated after the elements
+ of `x` on `axis` are flipped, and the calculation result will be flipped afterwards.
+ For example, this operation will output [log(exp(c) + exp(b) + exp(a)), log(exp(c) +
+ exp(b)), c] with tensor [a, b, c] as the input. Default: ``False`` .
 
  Inputs:
  - **x** (Tensor) - The input tensor. Must be one of the following types: float16, float32, float64. The
  dimension of `x` must be greater than 0.
  - **axis** (Tensor) - A 0-D tensor describing the dimension to compute the cumulative log-sum-exp. Must be one of
- the following types: int64, int32, int16. Must be in the range [-rank(x), rank(x)). Default: 0.
+ the following types: int64, int32, int16. Must be in the range [-rank(x), rank(x)). Default: ``0`` .
 
  Outputs:
  Tensor, has the same dtype and shape as the `x`.
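NumPy's `logaddexp` ufunc computes the same running log-sum-exp, which makes the inclusive semantics easy to check (an illustrative sketch):

>>> import numpy as np
>>> a = np.array([1.0, 2.0, 3.0])
>>> np.logaddexp.accumulate(a)   # [a, log(e**a + e**b), log(e**a + e**b + e**c)]
array([1.        , 2.31326169, 3.40760596])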
@@ -736,29 +796,29 @@ class ReduceSum(PrimitiveWithCheck):
  controlling `keep_dims`.
 
  Args:
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default: False.
- skip_mode (bool): If true and axis is empty tuple or empty list, the ReduceSum operation isn't performed,
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
+ skip_mode (bool): If ``True`` and axis is empty tuple or empty list, the ReduceSum operation isn't performed,
  skip it.
- If true and axis is other values, the ReduceSum calculation is performed normally.
- If false, do reduce. Default: False.
+ If ``True`` and axis is other values, the ReduceSum calculation is performed normally.
+ If ``False`` , do reduce. Default: ``False`` .
 
  Inputs:
- - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions
- when skip_mode is false. Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).
+ - **x** (Tensor[Number]) - The input tensor.
+ - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: ``()`` , reduce all
+ dimensions when skip_mode is ``False`` . Only constant value is allowed. Must be in the range [-rank(`x`),
+ rank(`x`)).
 
  Outputs:
  Tensor, has the same dtype as the `x`.
 
- - If axis is (), keep_dims is False, and skip_mode is False,
+ - If axis is (), keep_dims is ``False`` , and skip_mode is ``False`` ,
  the output is a 0-D tensor representing the sum of all elements in the input tensor.
- - If axis is (), and skip_mode is True,
+ - If axis is (), and skip_mode is ``True`` ,
  the ReduceSum operation is not performed, output tensor is equal to the input tensor.
- - If axis is int, set as 2, and keep_dims is False,
+ - If axis is int, set as 2, and keep_dims is ``False`` ,
  the shape of output is :math:`(x_1, x_3, ..., x_R)`.
- - If axis is tuple(int) or list(int), set as (2, 3), and keep_dims is False,
+ - If axis is tuple(int) or list(int), set as (2, 3), and keep_dims is ``False`` ,
  the shape of output is :math:`(x_1, x_4, ..., x_R)`.
 
  Raises:
@@ -771,6 +831,9 @@ class ReduceSum(PrimitiveWithCheck):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  >>> op = ops.ReduceSum(keep_dims=True)
  >>> output = op(x, 1)
@@ -857,23 +920,22 @@ class ReduceAll(_Reduce):
  controlling `keep_dims`.
 
  Args:
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default : False.
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
 
  Inputs:
  - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
- Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
+ - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: ``()`` , reduce all
+ dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
 
  Outputs:
  Tensor, the dtype is bool.
 
- - If axis is (), and keep_dims is False,
+ - If axis is (), and keep_dims is ``False`` ,
  the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
- - If axis is int, set as 2, and keep_dims is False,
+ - If axis is int, set as 2, and keep_dims is ``False`` ,
  the shape of output is :math:`(x_1, x_3, ..., x_R)`.
- - If axis is tuple(int), set as (2, 3), and keep_dims is False,
+ - If axis is tuple(int), set as (2, 3), and keep_dims is ``False`` ,
  the shape of output is :math:`(x_1, x_4, ..., x_R)`.
 
  Raises:
@@ -885,6 +947,8 @@ class ReduceAll(_Reduce):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[True, False], [True, True]]))
  >>> op = ops.ReduceAll(keep_dims=True)
  >>> # case 1: Reduces a dimension by the "logicalAND" of all elements in the dimension.
@@ -902,6 +966,12 @@ class ReduceAll(_Reduce):
  >>> print(output)
  [[False]
  [ True]]
+ >>> # case 4: input is a scalar.
+ >>> x = Tensor(True)
+ >>> op = ops.ReduceAll()
+ >>> output = op(x)
+ >>> print(output)
+ True
  """
 
 
@@ -912,14 +982,13 @@ class ReduceAny(_Reduce):
  controlling `keep_dims`.
 
  Args:
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default : False.
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
 
  Inputs:
  - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
- Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
+ - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: ``()`` , reduce all
+ dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
 
  Outputs:
  Tensor, the dtype is bool.
@@ -940,6 +1009,8 @@ class ReduceAny(_Reduce):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[True, False], [True, True]]))
  >>> op = ops.ReduceAny(keep_dims=True)
  >>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
@@ -957,6 +1028,12 @@ class ReduceAny(_Reduce):
  >>> print(output)
  [[True]
  [ True]]
+ >>> # case 4: input is a scalar.
+ >>> x = Tensor(True)
+ >>> op = ops.ReduceAny()
+ >>> output = op(x)
+ >>> print(output)
+ True
  """
 
 
@@ -967,14 +1044,13 @@ class ReduceMax(_Reduce):
  controlling `keep_dims`.
 
  Args:
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default : False.
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
 
  Inputs:
- - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
- Only constant value is allowed. Must be in the range [-r, r).
+ - **x** (Tensor[Number]) - The input tensor.
+ - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: ``()`` , reduce all
+ dimensions. Only constant value is allowed. Must be in the range [-r, r).
 
  Outputs:
  Tensor, has the same dtype as the `x`.
@@ -996,6 +1072,9 @@ class ReduceMax(_Reduce):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  >>> op = ops.ReduceMax(keep_dims=True)
  >>> output = op(x, 1)
@@ -1051,23 +1130,22 @@ class ReduceMin(_Reduce):
  controlling `keep_dims`.
 
  Args:
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default : False.
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
 
  Inputs:
- - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
- Only constant value is allowed. Must be in the range [-r, r).
+ - **x** (Tensor[Number]) - The input tensor.
+ - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: ``()`` , reduce all
+ dimensions. Only constant value is allowed. Must be in the range [-r, r).
 
  Outputs:
  Tensor, has the same dtype as the `x`.
 
- - If `axis` is (), and `keep_dims` is False,
+ - If `axis` is (), and `keep_dims` is ``False`` ,
  the output is a 0-D tensor representing the minimum of all elements in the input tensor.
- - If `axis` is int, set as 1, and `keep_dims` is False,
+ - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
  the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is False,
+ - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
  the shape of output is :math:`(x_0, x_3, ..., x_R)`.
 
  Raises:
@@ -1080,6 +1158,9 @@ class ReduceMin(_Reduce):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  >>> op = ops.ReduceMin(keep_dims=True)
  >>> output = op(x, 1)
@@ -1174,23 +1255,22 @@ class ReduceProd(_Reduce):
  controlling `keep_dims`.
 
  Args:
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
- If false, don't keep these dimensions. Default: False.
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
 
  Inputs:
- - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
- Only constant value is allowed. Must be in the range [-r, r).
+ - **x** (Tensor[Number]) - The input tensor.
+ - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: ``()`` , reduce all
+ dimensions. Only constant value is allowed. Must be in the range [-r, r).
 
  Outputs:
  Tensor, has the same dtype as the `x`.
 
- - If `axis` is (), and `keep_dims` is False,
+ - If `axis` is (), and `keep_dims` is ``False`` ,
  the output is a 0-D tensor representing the product of all elements in the input tensor.
- - If `axis` is int, set as 1, and `keep_dims` is False,
+ - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
  the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is False,
+ - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
  the shape of output is :math:`(x_0, x_3, ..., x_R)`.
 
  Raises:
@@ -1203,6 +1283,9 @@ class ReduceProd(_Reduce):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  >>> op = ops.ReduceProd(keep_dims=True)
  >>> output = op(x, 1)
@@ -1259,12 +1342,12 @@ class CumProd(Primitive):
  y_i = x_1 * x_2 * x_3 * ... * x_i
 
  Args:
- exclusive (bool): If true, perform exclusive cumulative product. Default: False.
- reverse (bool): If true, reverse the result along axis. Default: False
+ exclusive (bool): If ``True`` , perform exclusive cumulative product. Default: ``False`` .
+ reverse (bool): If ``True`` , reverse the result along axis. Default: ``False`` .
 
  Inputs:
- - **x** (Tensor[Number]) - The input tensor.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
+ - **x** (Tensor[Number]) - The input Tensor with shape
+ :math:`(N, *)` where :math:`*` means any number of additional dimensions.
  - **axis** (int) - The dimensions to compute the cumulative product.
  Only constant value is allowed.
 
@@ -1280,6 +1363,8 @@ class CumProd(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> a, b, c, = 1, 2, 3
  >>> x = Tensor(np.array([a, b, c]).astype(np.float32))
  >>> op0 = ops.CumProd()
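With `exclusive` and `reverse`, CumProd shifts or flips the running product. A NumPy sketch of the three variants for `[1., 2., 3.]` (illustrative only):

>>> import numpy as np
>>> x = np.array([1., 2., 3.])
>>> np.cumprod(x)                               # inclusive: [1., 2., 6.]
>>> np.concatenate(([1.], np.cumprod(x)[:-1]))  # exclusive: [1., 1., 2.]
>>> np.cumprod(x[::-1])[::-1]                   # reverse:   [6., 6., 3.]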
@@ -1345,6 +1430,8 @@ class Lcm(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x1 = Tensor(np.array([7, 8, 9]))
  >>> x2 = Tensor(np.array([14, 6, 12]))
  >>> lcm_ = ops.Lcm()
@@ -1366,6 +1453,19 @@ class Cdist(Primitive):
 
  Refer to :func:`mindspore.ops.cdist` for more details.
 
+ Args:
+ p (float, optional): P value for the p-norm distance to calculate between each vector pair, P ∈ [0,∞].
+ Default: ``2.0`` .
+
+ Inputs:
+ - **input_x** (Tensor) - Input tensor of shape :math:`(B, P, M)`.
+ When :math:`B` is equal to 0, it means this dimension can be ignored,
+ i.e. the shape of the tensor is :math:`(P, M)`.
+ - **input_y** (Tensor) - Input tensor of shape :math:`(B, R, M)` with the same dtype as `input_x`.
+
+ Outputs:
+ Tensor, has the same dtype as `input_x`, whose shape is :math:`(B, P, R)`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
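Cdist pairs every row of `input_x` with every row of `input_y` along the last axis, so :math:`(B, P, M)` and :math:`(B, R, M)` inputs produce a :math:`(B, P, R)` distance matrix. A NumPy sketch of the p=2 case via broadcasting (illustrative only):

>>> import numpy as np
>>> x = np.random.randn(2, 5, 3).astype(np.float32)   # (B, P, M)
>>> y = np.random.randn(2, 4, 3).astype(np.float32)   # (B, R, M)
>>> d = np.linalg.norm(x[:, :, None, :] - y[:, None, :, :], axis=-1)
>>> d.shape
(2, 5, 4)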
 
@@ -1392,20 +1492,20 @@ class Cdist(Primitive):
 
 
  class LpNorm(Primitive):
- """
+ r"""
  Returns the matrix norm or vector norm of a given tensor.
 
  .. math::
- output = sum(abs(input)**p)**(1/p)
+ output = \left(\sum_{i} |input_{i}|^{p}\right)^{1/p}
 
  Args:
  axis(int,list,tuple): Specifies which dimension or dimensions of input to calculate the norm across.
- p(int, optional): The order of norm. Default: 2.
- keep_dims(bool, optional): Whether the output tensors have dim retained or not. Default: False.
- epsilon(float, optional): A value added to the denominator for numerical stability. Default: 1e-12.
+ p(int, optional): The order of norm. Default: ``2`` .
+ keep_dims(bool, optional): Whether the output tensors have dim retained or not. Default: ``False`` .
+ epsilon(float, optional): A value added to the denominator for numerical stability. Default: ``1e-12`` .
 
  Inputs:
- - **input** (Tensor) - Input tensor.
+ - **input** (Tensor) - Input tensor of type float16, float32.
 
  Outputs:
  Tensor, has the same dtype as `input`, its shape depends on `axis`. For example, if the shape of input
@@ -1426,6 +1526,8 @@ class LpNorm(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor(np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32))
  >>> op = ops.LpNorm(axis=[0, 1], p=2, keep_dims=False)
  >>> output = op(input_x)
@@ -1451,7 +1553,7 @@ class LpNorm(Primitive):
  self.init_prim_io_names(inputs=['input'], outputs=['output'])
 
 
- class MatMul(PrimitiveWithCheck):
+ class MatMul(Primitive):
  r"""
  Multiplies matrix `a` and matrix `b`.
 
@@ -1463,22 +1565,24 @@ class MatMul(PrimitiveWithCheck):
 
  Note:
  If :math:`N * M` cannot be divided by 16, the performance will be poor in the Ascend environment.
+ The dtypes of the inputs must be the same.
 
  Args:
- transpose_a (bool): If true, `a` is transposed before multiplication. Default: False.
- transpose_b (bool): If true, `b` is transposed before multiplication. Default: False.
+ transpose_a (bool): If ``True`` , `a` is transposed before multiplication. Default: ``False`` .
+ transpose_b (bool): If ``True`` , `b` is transposed before multiplication. Default: ``False`` .
 
  Inputs:
  - **a** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
- `transpose_a` is True, its shape must be :math:`(C, N)` after transpose.
+ `transpose_a` is ``True`` , its shape must be :math:`(C, N)` after transpose.
  - **b** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
- `transpose_b` is True, its shape must be :math:`(M, C)` after transpose.
+ `transpose_b` is ``True`` , its shape must be :math:`(M, C)` after transpose.
 
  Outputs:
  Tensor, the shape of the output tensor is :math:`(N, M)`.
 
  Raises:
  TypeError: If `transpose_a` or `transpose_b` is not a bool.
+ TypeError: If the dtype of `a` and the dtype of `b` are not the same.
  ValueError: If the column of matrix dimensions of `a` is not equal to
  the row of matrix dimensions of `b`.
  ValueError: If length of shape of `a` or `b` is not equal to 2.
@@ -1487,6 +1591,9 @@ class MatMul(PrimitiveWithCheck):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> a = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
  >>> b = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
  >>> matmul = ops.MatMul()
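For the all-ones example above, every output entry is the sum of three 1×1 products; a NumPy check of the value (illustrative only):

>>> import numpy as np
>>> np.ones((1, 3)) @ np.ones((3, 4))
array([[3., 3., 3., 3.]])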
@@ -1505,40 +1612,6 @@ class MatMul(PrimitiveWithCheck):
  self.add_prim_attr('transpose_x1', self.transpose_a)
  self.add_prim_attr('transpose_x2', self.transpose_b)
 
- def check_shape_size(self, x1, x2):
- if len(x1) != 2 or len(x2) != 2:
- raise ValueError(f"For '{self.name}', inputs 'x', 'y' should have the same dimension size and "
- f"be equal to 2, but got the size of 'x': ({len(x1)}) and the size of 'y': ({len(x2)}).")
-
- def check_shape(self, x1, x2):
- is_dyn_shape = is_shape_unknown(x1) or is_shape_unknown(x2)
- if not is_dyn_shape:
- self.check_shape_size(x1, x2)
- cls_name = self.name
-
- # set attribute
- self.add_prim_attr('transpose_x1', self.transpose_a)
- self.add_prim_attr('transpose_x2', self.transpose_b)
-
- if is_dyn_shape:
- return
-
- # validate whether last two dims satisfying matrix multiply
- x1_last = x1[-2:]
- x2_last = x2[-2:]
- x1_col = x1_last[not self.transpose_a]
- x2_row = x2_last[self.transpose_b]
- if np.all(np.array(x1) != -1) and np.all(np.array(x2) != -1):
- if x1_col != x2_row:
- raise ValueError(f"For '{cls_name}', the input dimensions must be equal, but got 'x1_col': {x1_col} "
- f"and 'x2_row': {x2_row}. And 'x' shape {x1}(transpose_a={self.transpose_a}), "
- f"'y' shape {x2}(transpose_b={self.transpose_b}).")
-
- def check_dtype(self, x1, x2):
- args = {"x1": x1, "x2": x2}
- validator.check_tensors_dtypes_same_and_valid(args, mstype.float_type + mstype.int_type
- + (mstype.complex64, mstype.complex128), self.name)
-
 
  class BatchMatMul(Primitive):
  r"""
@@ -1548,20 +1621,20 @@ class BatchMatMul(Primitive):
 
  \text{output}[..., :, :] = \text{matrix}(x[..., :, :]) * \text{matrix}(y[..., :, :])
 
- The first input tensor must be not less than `3` and the second input must be not less than `2`.
+ The ranks of the two input tensors must be the same and not less than `2`.
 
  Args:
- transpose_a (bool): If true, the last two dimensions of `x` is transposed before multiplication.
- Default: False.
- transpose_b (bool): If true, the last two dimensions of `y` is transposed before multiplication.
- Default: False.
+ transpose_a (bool): If ``True`` , the last two dimensions of `x` are transposed before multiplication.
+ Default: ``False`` .
+ transpose_b (bool): If ``True`` , the last two dimensions of `y` are transposed before multiplication.
+ Default: ``False`` .
 
  Inputs:
  - **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
  where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
- size of the last two dimensions. If `transpose_a` is True, its shape must be :math:`(*B, C, N)`.
+ size of the last two dimensions. If `transpose_a` is ``True`` , its shape must be :math:`(*B, C, N)`.
  - **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
- `transpose_b` is True, its shape must be :math:`(*B, M, C)`.
+ `transpose_b` is ``True`` , its shape must be :math:`(*B, M, C)`.
 
  Outputs:
  Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
@@ -1569,12 +1642,15 @@ class BatchMatMul(Primitive):
  Raises:
  TypeError: If `transpose_a` or `transpose_b` is not a bool.
  ValueError: If length of shape of `x` is not equal to length of shape of `y` or
- length of shape of `x` is less than 3.
+ length of shape of inputs is less than 2.
 
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
  >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  >>> batmatmul = ops.BatchMatMul()
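Batch matrix multiplication maps :math:`(*B, N, C)` × :math:`(*B, C, M)` to :math:`(*B, N, M)`; NumPy's `matmul` broadcasts the same way over the batch dimensions (a sketch):

>>> import numpy as np
>>> np.matmul(np.ones((2, 4, 1, 3)), np.ones((2, 4, 3, 4))).shape
(2, 4, 1, 4)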
@@ -1669,11 +1745,12 @@ class CumSum(Primitive):
 
  Args:
  exclusive (bool): By default, this op performs an inclusive cumsum, which means that the first
- element of the input is identical to the first element of the output. Default: False.
- reverse (bool): If true, perform inverse cumulative sum. Default: False.
+ element of the input is identical to the first element of the output. Default: ``False`` .
+ reverse (bool): If ``True`` , perform inverse cumulative sum. Default: ``False`` .
 
  Inputs:
- - **input** (Tensor) - The input tensor to accumulate.
+ - **input** (Tensor) - The input Tensor with shape
+ :math:`(N, *)` where :math:`*` means any number of additional dimensions.
  - **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
  Must be in the range [-rank(input), rank(input)).
 
@@ -1688,6 +1765,8 @@ class CumSum(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
  >>> cumsum = ops.CumSum()
  >>> # case 1: along the axis 0
@@ -1738,10 +1817,20 @@ class AddN(Primitive):
 
  Refer to :func:`mindspore.ops.addn` for more details.
 
+ Inputs:
+ - **x** (Union(tuple[Tensor], list[Tensor])) - A tuple or list of Tensors, whose data type is
+ boolean or numeric.
+
+ Outputs:
+ Tensor, has the same shape and dtype as each Tensor of `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, nn, ops
  >>> class NetAddN(nn.Cell):
  ... def __init__(self):
  ... super(NetAddN, self).__init__()
@@ -1779,10 +1868,21 @@ class AccumulateNV2(Primitive):
 
  Refer to :func:`mindspore.ops.accumulate_n` for more details.
 
+ Inputs:
+ - **x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
+ is made up of multiple tensors whose dtype is number, to be added together.
+ Each element of the tuple or list should have the same shape.
+
+ Outputs:
+ Tensor, has the same shape and dtype as each entry of the `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, nn
  >>> class NetAccumulateNV2(nn.Cell):
  ... def __init__(self):
  ... super(NetAccumulateNV2, self).__init__()
@@ -1821,10 +1921,19 @@ class Neg(Primitive):
 
  Refer to :func:`mindspore.ops.neg` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The input tensor whose dtype is Number.
+
+ Outputs:
+ Tensor, has the same shape and dtype as input.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> neg = ops.Neg()
  >>> x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
  >>> output = neg(x)
@@ -1847,6 +1956,17 @@ class InplaceUpdateV2(Primitive):
 
  Refer to :func:`mindspore.ops.inplace_update` for more details.
 
+ Inputs:
+ - **x** (Tensor) - A tensor to be updated in place. It can be one of the following data types:
+ float32, float16 and int32.
+ - **indices** (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
+ to update with `v`. It is an int or tuple, whose value is in [0, the first dimension size of `x`).
+ - **v** (Tensor) - A tensor with the same type as `x` and the same dimension size as `x` except
+ the first dimension, which must be the same as the size of `indices`.
+
+ Outputs:
+ Tensor, with the same type and shape as the input `x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
@@ -1876,33 +1996,25 @@ class InplaceUpdateV2(Primitive):
  return output
 
 
- class InplaceUpdate(Primitive):
- r"""
- The InplaceUpdate interface is deprecated. Please use the :class:`mindspore.ops.InplaceUpdateV2` instead.
-
- Supported Platforms:
- Deprecated
- """
-
- @deprecated("2.0", "ops.InplaceUpdateV2", False)
- @prim_attr_register
- def __init__(self, indices):
- """Initialize InplaceUpdate"""
- self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
- self.indices = indices
- validator.check_value_type("indices", indices, [int, tuple], self.name)
- if isinstance(indices, int):
- self.indices = (indices,)
- for item in self.indices:
- validator.check_value_type("item of indices", item, [int], self.name)
-
-
  class InplaceAdd(Primitive):
  """
  Adds `v` into specified rows of `x`. Computes `y` = `x`; y[i,] += `v`.
 
  Refer to :func:`mindspore.ops.inplace_add` for more details.
 
+ Args:
+ indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
+ to add with `v`. It is an integer or a tuple, whose value is in [0, the first dimension size of `x`).
+
+ Inputs:
+ - **x** (Tensor) - The tensor to be added. It has shape :math:`(N,*)` where :math:`*` means
+ any number of additional dimensions.
+ - **input_v** (Tensor) - The value tensor to add to `x`. It has the same dimension sizes as `x` except
+ the first dimension, whose size must be the same as `indices`. It has the same data type as `x`.
+
+ Outputs:
+ Tensor, has the same shape and dtype as `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
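Row-wise in-place addition is easiest to picture with NumPy fancy indexing: the rows of `x` named by `indices` receive the rows of `input_v` (an illustrative sketch):

>>> import numpy as np
>>> x = np.array([[1., 2.], [3., 4.], [5., 6.]])
>>> v = np.array([[0.5, 1.0], [1.0, 1.5]])
>>> x[[0, 1], :] += v     # rows 0 and 1 selected by `indices`
>>> x
array([[1.5, 3. ],
       [4. , 5.5],
       [5. , 6. ]])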
 
@@ -1942,10 +2054,28 @@ class InplaceIndexAdd(Primitive):
 
  Refer to :func:`mindspore.ops.inplace_index_add` for more details.
 
+ Args:
+ axis (int): The dimension along which to index. It should be in range :math:`[0, len(var.dim))`.
+
+ Inputs:
+ - **var** (Parameter) - The input Parameter to add to, with data type uint8, int8, int16, int32,
+ float16, float32, float64.
+ - **indices** (Tensor) - The indices along `axis` to perform the addition. A 1D Tensor
+ of shape :math:`(updates.shape[axis],)`, every value of it
+ should be in range :math:`[0, var.shape[axis])` with data type int32.
+ - **updates** (Tensor) - The input Tensor with the value to add. Must have the same data type as `var`.
+ The shape must be the same as `var` except the `axis`-th dimension.
+
+ Outputs:
+ Tensor, updated result, has the same shape and dtype as `var`.
+
  Supported Platforms:
  ``Ascend`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> var = Parameter(Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32))
  >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
  >>> updates = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
@@ -1977,6 +2107,19 @@ class InplaceSub(Primitive):
 
  Refer to :func:`mindspore.ops.inplace_sub` for more details.
 
+ Args:
+ indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
+ to subtract by `v`. It is an integer or a tuple, whose value is in [0, the first dimension size of `x`).
+
+ Inputs:
+ - **x** (Tensor) - The tensor to be subtracted. It has shape :math:`(N,*)` where :math:`*` means
+ any number of additional dimensions.
+ - **input_v** (Tensor) - The value tensor to subtract from `x`. It has the same dimension sizes as `x` except
+ the first dimension, whose size must be the same as `indices`. It has the same data type as `x`.
+
+ Outputs:
+ Tensor, has the same shape and dtype as `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
@@ -2014,10 +2157,34 @@ class Sub(_MathBinaryOp):
 
  Refer to :func:`mindspore.ops.sub` for more details.
 
+ Note:
+ - One of the two inputs must be a Tensor; when the two inputs have different shapes,
+ they must be able to broadcast to a common shape.
+ - The two inputs cannot both be bool type,
+ [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
+ - The two inputs comply with the implicit type conversion rules to make the data types
+ consistent.
+
+ Inputs:
+ - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
+ a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
+ - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
+ the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
+ When the first input is a Scalar, the second input must be a Tensor whose data type is number or bool.
+
+ Outputs:
+ Tensor, the shape is the same as the two inputs after broadcasting,
+ and the data type is the one with higher precision or higher digits among the two inputs.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  >>> y = Tensor(np.array([4, 5, 6]), mindspore.int32)
  >>> sub = ops.Sub()
@@ -2042,10 +2209,34 @@ class Mul(_MathBinaryOp):
 
  Refer to :func:`mindspore.ops.mul` for more details.
 
+ Note:
+ - One of the two inputs must be a Tensor; when the two inputs have different shapes,
+ they must be able to broadcast to a common shape.
+ - The two inputs cannot both be bool type,
+ [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
+ - The two inputs comply with the implicit type conversion rules to make the data types
+ consistent.
+
+ Inputs:
+ - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
+ a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
+ - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
+ the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
+ When the first input is a Scalar, the second input must be a Tensor whose data type is number or bool.
+
+ Outputs:
+ Tensor, the shape is the same as the one after broadcasting,
+ and the data type is the one with higher precision or higher digits among the two inputs.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  >>> mul = ops.Mul()
@@ -2134,6 +2325,9 @@ class SquaredDifference(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  >>> y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
  >>> squared_difference = ops.SquaredDifference()
@@ -2158,10 +2352,10 @@ class Square(Primitive):
  out_{i} = (x_{i})^2
 
  Inputs:
- - **x** (Tensor) - The input tensor with a dtype of Number, its rank must be in [0, 7] inclusive.
+ - **x** (Tensor) - The input tensor.
 
  Outputs:
- Tensor, has the same shape and dtype as the `x`.
+ Tensor, has the same shape and dtype as `x`.
 
  Raises:
  TypeError: If `x` is not a Tensor.
@@ -2170,6 +2364,9 @@ class Square(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  >>> square = ops.Square()
  >>> output = square(x)
@@ -2187,24 +2384,21 @@ class Rsqrt(Primitive):
  r"""
  Computes reciprocal of square root of input tensor element-wise.
 
- .. math::
-
- out_{i} = \frac{1}{\sqrt{x_{i}}}
+ Refer to :func:`mindspore.ops.rsqrt` for more details.
 
  Inputs:
- - **x** (Tensor) - The input of Rsqrt. Its rank must be in [0, 7] inclusive and
- each element must be a non-negative number.
+ - **x** (Tensor) - The input Tensor, each element must be non-negative;
+ if an element is negative, the calculation result is nan.
 
  Outputs:
  Tensor, has the same type and shape as `x`.
 
- Raises:
- TypeError: If `x` is not a Tensor.
-
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
  >>> rsqrt = ops.Rsqrt()
  >>> output = rsqrt(input_tensor)
@@ -2231,8 +2425,7 @@ class Sqrt(Primitive):
  out_{i} = \sqrt{x_{i}}
 
  Inputs:
- - **x** (Tensor) - The input tensor with a dtype of Number, the shape is :math:`(N, *)`
- where :math:`*` means, any number of additional dimensions.
+ - **x** (Tensor) - The input tensor.
 
  Outputs:
  Tensor, has the same shape and data type as the `x`.
@@ -2244,6 +2437,9 @@ class Sqrt(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
  >>> sqrt = ops.Sqrt()
  >>> output = sqrt(x)
@@ -2267,7 +2463,6 @@ class Reciprocal(PrimitiveWithCheck):
 
  Inputs:
  - **x** (Tensor) - The input tensor.
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
 
  Outputs:
  Tensor, has the same shape as the `x`.
@@ -2279,6 +2474,9 @@ class Reciprocal(PrimitiveWithCheck):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  >>> reciprocal = ops.Reciprocal()
  >>> output = reciprocal(x)
@@ -2310,10 +2508,26 @@ class Pow(Primitive):
 
  Refer to :func:`mindspore.ops.pow` for more details.
 
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
+ Inputs:
+ - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
+ a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
+ - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
+ the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
+ When the first input is a Scalar, the second input must be a Tensor whose data type is number or bool\_.
+
+ Outputs:
+ Tensor, the shape is the same as the one after broadcasting,
+ and the data type is the one with higher precision or higher digits among the two inputs.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  >>> y = 3.0
  >>> pow = ops.Pow()
@@ -2352,10 +2566,19 @@ class Exp(Primitive):
 
  Refer to :func:`mindspore.ops.exp` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The input tensor.
+
+ Outputs:
+ Tensor, has the same shape and dtype as the `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0.0, 1.0, 3.0]), mindspore.float32)
  >>> exp = ops.Exp()
  >>> output = exp(x)
@@ -2382,10 +2605,10 @@ class Logit(Primitive):
  Refer to :func:`mindspore.ops.logit` for more details.
 
  Args:
- eps (float, optional): The epsilon. The input clamp bound is defined as [eps, 1-eps]. Default: -1.0.
+ eps (float, optional): The epsilon. The input clamp bound is defined as [eps, 1-eps]. Default: ``-1.0`` .
 
  Inputs:
- - **x** (Tensor) - The input tensor.
+ - **x** (Tensor) - The input tensor of type float16, float32 or float64.
 
  Outputs:
  Tensor, with the same shape and dtype as the `x`.
@@ -2394,6 +2617,8 @@ class Logit(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0.1, 0.2, 0.3]).astype(np.float32))
  >>> op = ops.Logit(eps=1e-5)
  >>> output = op(x)
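Logit is the inverse of the sigmoid: after clamping the input to [eps, 1-eps] it computes :math:`\ln(x / (1 - x))` element-wise. A NumPy sketch of the example above (illustrative only):

>>> import numpy as np
>>> x = np.clip(np.array([0.1, 0.2, 0.3], np.float32), 1e-5, 1 - 1e-5)
>>> np.log(x / (1 - x))   # ≈ [-2.1972, -1.3863, -0.8473]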
@@ -2416,20 +2641,21 @@ class ReduceStd(Primitive):
 
  Args:
  axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
- Default: (), reduce all dimensions. Only constant value is allowed.
+ Default: ``()`` , reduce all dimensions. Only constant value is allowed.
  Let `r` be rank of `input_x`, it should be in the range :math:`[-r,r)`.
- unbiased (bool, optional): Whether to use Bessels correction.
- If True, will use the Bessel correction unbiased estimation.
- If False, will through the biased estimation to calculate the standard deviation.
- Default: True.
+ unbiased (bool, optional): Whether to use Bessel's correction.
+ If ``True`` , will use the Bessel correction unbiased estimation.
+ If ``False`` , will use the biased estimation to calculate the standard deviation.
+ Default: ``True`` .
  keep_dims (bool, optional): Whether the output Tensor has dim retained or not.
- If True, keep these reduced dimensions specified by `axis` and the length is 1.
- If False, don't keep these dimensions.
- Default: Fasle.
+ If ``True`` , keep these reduced dimensions specified by `axis` and the length is 1.
+ If ``False`` , don't keep these dimensions.
+ Default: ``False`` .
 
  Inputs:
- - **input_x** (Tensor[Number]) - The input Tensor, it has dtype Number with shape
+ - **input_x** (Tensor[Number]) - The input Tensor with shape
  :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+ Supported dtypes: float16, float32.
 
  Outputs:
  Tuple(output_std, output_mean) containing the standard deviation and mean.
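Bessel's correction divides the squared deviations by :math:`n-1` instead of :math:`n`; NumPy exposes the same switch as `ddof`, which makes `unbiased` easy to check (an illustrative sketch):

>>> import numpy as np
>>> x = np.array([1., 2., 3., 4.])
>>> np.std(x, ddof=1), np.std(x, ddof=0)   # unbiased vs. biased: ≈ (1.2910, 1.1180)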
@@ -2473,10 +2699,6 @@ class Einsum(Primitive):
2473
2699
  dimensions specified notation based on the Einstein summation convention(Einsum).
2474
2700
  You can use this operator to perform diagonal/reducesum/transpose/matmul/mul/inner product operations, etc.
2475
2701
 
2476
- The inputs must be a tuple of tensors.
2477
- When the inputs are only one tensor, you can input (tensor, )
2478
- dtypes of them should be float16/float32/float64.
2479
-
2480
2702
  Args:
2481
2703
  equation (str): An attribute, represent the operation you want to do.
2482
2704
  the value can contain only letters([a-z][A-Z]), commas(,), ellipsis(...),
@@ -2487,7 +2709,10 @@ class Einsum(Primitive):
2487
2709
  and the right of it indicates the desired output dimension.
2488
2710
 
2489
2711
  Inputs:
2490
- - **x** (Tuple) - input tensor used for calculation. the data type of the tensor must be the same.
2712
+ - **x** () - Input tensor used for calculation.
2713
+ The inputs must be a tuple/list of Tensors.
2714
+ When the inputs are only one tensor, you can input (tensor, ).
2715
+ Dtypes of them should be float16/float32/float64 and dtype of the tensor(s) must be the same.
2491
2716
 
2492
2717
  Outputs:
2493
2718
  Tensor, the shape of it can be obtained from the equation,
@@ -2495,11 +2720,15 @@ class Einsum(Primitive):
2495
2720
 
2496
2721
  Raises:
2497
2722
  TypeError: If equation itself is invalid, or the equation does not match the input tensor.
2723
+ TypeError: If dtypes of the input Tensors are not the same, or the dtype is not float16, float32 or float64.
2498
2724
 
2499
2725
  Supported Platforms:
2500
2726
  ``GPU``
2501
2727
 
2502
2728
  Examples:
2729
+ >>> import mindspore
2730
+ >>> import numpy as np
2731
+ >>> from mindspore import Tensor, ops
2503
2732
  >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
2504
2733
  >>> equation = "i->"
2505
2734
  >>> einsum = ops.Einsum(equation)
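Because all operands are packed into a single tuple (see the Inputs entry above), a matrix multiplication reads as in this sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.ones((2, 3)), mindspore.float32)
>>> b = Tensor(np.ones((3, 4)), mindspore.float32)
>>> out = ops.Einsum("ij,jk->ik")((a, b))  # note: both operands in one tuple argument
>>> print(out.shape)  # expect (2, 4)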
@@ -2565,7 +2794,6 @@ class Einsum(Primitive):
2565
2794
  seg_equation = equation.split("->")
2566
2795
  if len(seg_equation) > 2:
2567
2796
  raise TypeError("the equation can contain only one arrow !")
2568
- self.add_prim_attr('equation', equation)
2569
2797
  self.init_prim_io_names(inputs=['inputs'], outputs=['output'])
2570
2798
 
2571
2799
 
@@ -2580,9 +2808,9 @@ class Diagonal(Primitive):
2580
2808
  offset (int): The offset of main diagonal, which controls which diagonal to consider. If :math:`offset=0`,
2581
2809
  return the main diagonal elements with respect to dim1 and dim2. If :math:`offset>0`, return the
2582
2810
  diagonal elements that are `offset` units upward from the main diagonal. If :math:`offset<0`, return the
2583
- diagonal elements that are `offset` units downward from the main diagonal. Default: 0.
2584
- dim1 (int): The first dimension with respect to which to take diagonal. Default: 0.
2585
- dim2 (int): The second dimension with respect to which to take diagonal. Default: 1.
2811
+ diagonal elements that are `offset` units downward from the main diagonal. Default: ``0`` .
2812
+ dim1 (int): The first dimension with respect to which to take diagonal. Default: ``0`` .
2813
+ dim2 (int): The second dimension with respect to which to take diagonal. Default: ``1`` .
2586
2814
 
2587
2815
  Inputs:
2588
2816
  - **x** (Tensor) - The input to take diagonal, with float32 or double data type.
@@ -2637,10 +2865,19 @@ class Expm1(Primitive):
2637
2865
 
2638
2866
  Refer to :func:`mindspore.ops.expm1` for more details.
2639
2867
 
2868
+ Inputs:
2869
+ - **x** (Tensor) - The input tensor.
2870
+
2871
+ Outputs:
2872
+ Tensor, has the same shape and dtype as `x`.
2873
+
2640
2874
  Supported Platforms:
2641
2875
  ``Ascend`` ``GPU`` ``CPU``
2642
2876
 
2643
2877
  Examples:
2878
+ >>> import mindspore
2879
+ >>> import numpy as np
2880
+ >>> from mindspore import Tensor, ops
2644
2881
  >>> x = Tensor(np.array([0.0, 2.0, 3.0, 5.0]), mindspore.float32)
2645
2882
  >>> expm1 = ops.Expm1()
2646
2883
  >>> output = expm1(x)
@@ -2664,9 +2901,9 @@ class Histogram(Primitive):
2664
2901
  Elements lower than min and higher than max are ignored.
2665
2902
 
2666
2903
  Args:
2667
- bins (int, optional): Number of histogram bins, optional. Default 100. If specified, must be positive.
2668
- min (float, optional): An optional float of the lower end of the range (inclusive). Default value is 0.0.
2669
- max (float, optional): An optional float of the upper end of the range (inclusive). Default value is 0.0.
2904
+ bins (int, optional): Number of histogram bins, optional. Default: ``100`` . If specified, must be positive.
2905
+ min (float, optional): An optional float of the lower end of the range (inclusive). Default value is ``0.0`` .
2906
+ max (float, optional): An optional float of the upper end of the range (inclusive). Default value is ``0.0`` .
2670
2907
 
2671
2908
  Inputs:
2672
2909
  - **x** (Tensor) - the input tensor, type support list: [float16, float32, int32].
@@ -2711,7 +2948,7 @@ class HistogramFixedWidth(PrimitiveWithInfer):
2711
2948
 
2712
2949
  Args:
2713
2950
  nbins (int): The number of histogram bins, the type is a positive integer.
2714
- dtype (str, optional): An optional attribute. The dtype must be str. Default: "int32".
2951
+ dtype (str, optional): An optional attribute. The dtype must be str. Default: ``'int32'`` .
2715
2952
 
2716
2953
  Inputs:
2717
2954
  - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
@@ -2730,6 +2967,8 @@ class HistogramFixedWidth(PrimitiveWithInfer):
2730
2967
  ``Ascend`` ``GPU``
2731
2968
 
2732
2969
  Examples:
2970
+ >>> import mindspore
2971
+ >>> from mindspore import Tensor, ops
2733
2972
  >>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
2734
2973
  >>> range_op = Tensor([0.0, 5.0], mindspore.float16)
2735
2974
  >>> hist = ops.HistogramFixedWidth(5)
@@ -2755,10 +2994,19 @@ class Log(Primitive):
2755
2994
 
2756
2995
  Refer to :func:`mindspore.ops.log` for more details.
2757
2996
 
2997
+ Inputs:
2998
+ - **x** (Tensor) - Input Tensor of any dimension. The value must be greater than 0.
2999
+
3000
+ Outputs:
3001
+ Tensor, has the same shape and dtype as the `x`.
3002
+
2758
3003
  Supported Platforms:
2759
3004
  ``Ascend`` ``GPU`` ``CPU``
2760
3005
 
2761
3006
  Examples:
3007
+ >>> import mindspore
3008
+ >>> import numpy as np
3009
+ >>> from mindspore import Tensor, ops
2762
3010
  >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
2763
3011
  >>> log = ops.Log()
2764
3012
  >>> output = log(x)
@@ -2782,10 +3030,19 @@ class Log1p(Primitive):
2782
3030
 
2783
3031
  Refer to :func:`mindspore.ops.log1p` for more details.
2784
3032
 
3033
+ Inputs:
3034
+ - **x** (Tensor) - The input tensor. The value must be greater than -1.
3035
+
3036
+ Outputs:
3037
+ Tensor, has the same shape as the `x`.
3038
+
2785
3039
  Supported Platforms:
2786
3040
  ``Ascend`` ``GPU`` ``CPU``
2787
3041
 
2788
3042
  Examples:
3043
+ >>> import mindspore
3044
+ >>> import numpy as np
3045
+ >>> from mindspore import Tensor, ops
2789
3046
  >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
2790
3047
  >>> log1p = ops.Log1p()
2791
3048
  >>> output = log1p(x)
@@ -2824,12 +3081,21 @@ class Hypot(Primitive):
2824
3081
  ``Ascend`` ``GPU`` ``CPU``
2825
3082
 
2826
3083
  Examples:
3084
+ >>> import mindspore
3085
+ >>> import numpy as np
3086
+ >>> from mindspore import Tensor, ops
2827
3087
  >>> x1 = Tensor(np.array([3., 5., 7.]))
2828
3088
  >>> x2 = Tensor(np.array([4., 12., 24.]))
2829
3089
  >>> hypot_ = ops.Hypot()
2830
3090
  >>> y = hypot_(x1, x2)
2831
3091
  >>> print(y)
2832
3092
  [ 5. 13. 25.]
3093
+ >>> x1 = Tensor(2.1, mindspore.float32)
3094
+ >>> x2 = Tensor(2.1, mindspore.float32)
3095
+ >>> hypot_ = ops.Hypot()
3096
+ >>> y = hypot_(x1, x2)
3097
+ >>> print(y)
3098
+ 2.9698484
2833
3099
  """
2834
3100
 
2835
3101
  __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@@ -2870,6 +3136,8 @@ class Heaviside(Primitive):
2870
3136
  ``Ascend`` ``GPU`` ``CPU``
2871
3137
 
2872
3138
  Examples:
3139
+ >>> import numpy as np
3140
+ >>> from mindspore import Tensor, ops
2873
3141
  >>> x = Tensor(np.array([-1.5, 0., 2.]))
2874
3142
  >>> values = Tensor(np.array([0.5]))
2875
3143
  >>> heaviside = ops.Heaviside()
@@ -2889,10 +3157,22 @@ class Erf(Primitive):
2889
3157
 
2890
3158
  Refer to :func:`mindspore.ops.erf` for more details.
2891
3159
 
3160
+ Inputs:
3161
+ - **x** (Tensor) - Input Tensor of Gaussian error function. Supported dtypes:
3162
+
3163
+ - Ascend: float16, float32.
3164
+ - GPU/CPU: float16, float32, float64.
3165
+
3166
+ Outputs:
3167
+ Tensor, has the same shape and dtype as the `x`.
3168
+
2892
3169
  Supported Platforms:
2893
3170
  ``Ascend`` ``GPU`` ``CPU``
2894
3171
 
2895
3172
  Examples:
3173
+ >>> import mindspore
3174
+ >>> import numpy as np
3175
+ >>> from mindspore import Tensor, ops
2896
3176
  >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
2897
3177
  >>> erf = ops.Erf()
2898
3178
  >>> output = erf(x)
@@ -2912,10 +3192,22 @@ class Erfc(Primitive):
2912
3192
 
2913
3193
  Refer to :func:`mindspore.ops.erfc` for more details.
2914
3194
 
3195
+ Inputs:
3196
+ - **x** (Tensor) - The input tensor. Supported dtypes:
3197
+
3198
+ - Ascend: float16, float32.
3199
+ - GPU/CPU: float16, float32, float64.
3200
+
3201
+ Outputs:
3202
+ Tensor, has the same shape and dtype as `x`.
3203
+
2915
3204
  Supported Platforms:
2916
3205
  ``Ascend`` ``GPU`` ``CPU``
2917
3206
 
2918
3207
  Examples:
3208
+ >>> import mindspore
3209
+ >>> import numpy as np
3210
+ >>> from mindspore import Tensor, ops
2919
3211
  >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
2920
3212
  >>> erfc = ops.Erfc()
2921
3213
  >>> output = erfc(x)
@@ -2935,10 +3227,23 @@ class Minimum(_MathBinaryOp):
2935
3227
 
2936
3228
  Refer to :func:`mindspore.ops.minimum` for more details.
2937
3229
 
3230
+ Inputs:
3231
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3232
+ a bool or a tensor whose data type is number or bool.
3233
+ - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3234
+ a bool when the first input is a tensor or a tensor whose data type is number or bool.
3235
+
3236
+ Outputs:
3237
+ Tensor, the shape is the same as the one after broadcasting,
3238
+ and the data type is the one with higher precision or higher digits among the two inputs.
3239
+
2938
3240
  Supported Platforms:
2939
3241
  ``Ascend`` ``GPU`` ``CPU``
2940
3242
 
2941
3243
  Examples:
3244
+ >>> import mindspore
3245
+ >>> import numpy as np
3246
+ >>> from mindspore import Tensor, ops
2942
3247
  >>> # case 1 : same data type
2943
3248
  >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
2944
3249
  >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
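The dtype-promotion rule stated in Outputs can be checked with mixed inputs; a sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.array([1, 5, 3]), mindspore.int32)
>>> b = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> out = ops.Minimum()(a, b)
>>> print(out.dtype)  # expect Float32, the higher-precision of the two input dtypes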
@@ -2970,10 +3275,23 @@ class Maximum(_MathBinaryOp):
2970
3275
 
2971
3276
  Refer to :func:`mindspore.ops.maximum` for more details.
2972
3277
 
3278
+ Inputs:
3279
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3280
+ a bool or a tensor whose data type is number or bool.
3281
+ - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3282
+ a bool when the first input is a tensor or a tensor whose data type is number or bool.
3283
+
3284
+ Outputs:
3285
+ Tensor, the shape is the same as the one after broadcasting,
3286
+ and the data type is the one with higher precision or higher digits among the two inputs.
3287
+
2973
3288
  Supported Platforms:
2974
3289
  ``Ascend`` ``GPU`` ``CPU``
2975
3290
 
2976
3291
  Examples:
3292
+ >>> import mindspore
3293
+ >>> import numpy as np
3294
+ >>> from mindspore import Tensor, ops
2977
3295
  >>> # case 1 : same data type
2978
3296
  >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
2979
3297
  >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
@@ -3010,6 +3328,9 @@ class RealDiv(_MathBinaryOp):
3010
3328
  ``Ascend`` ``GPU`` ``CPU``
3011
3329
 
3012
3330
  Examples:
3331
+ >>> import mindspore
3332
+ >>> import numpy as np
3333
+ >>> from mindspore import Tensor, ops
3013
3334
  >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
3014
3335
  >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
3015
3336
  >>> realdiv = ops.RealDiv()
@@ -3032,38 +3353,36 @@ class Div(_MathBinaryOp):
3032
3353
  r"""
3033
3354
  Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
3034
3355
 
3035
- .. math::
3036
-
3037
- out_{i} = \frac{x_i}{y_i}
3356
+ Refer to :func:`mindspore.ops.div` for more details.
3038
3357
 
3039
3358
  Note:
3040
- - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3041
- - The inputs must be two tensors or one tensor and one scalar.
3042
- - When the inputs are two tensors,
3043
- dtypes of them cannot be bool at the same time, and the shapes of them can be broadcast.
3044
- - When the inputs are one tensor and one scalar, the scalar could only be a constant.
3359
+ - One of the two inputs must be a Tensor, when the two inputs have different shapes,
3360
+ they must be able to broadcast to a common shape.
3361
+ - The two inputs cannot be bool type at the same time;
3362
+ [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
3363
+ - The two inputs comply with the implicit type conversion rules to make the data types
3364
+ consistent.
3045
3365
 
3046
3366
  Inputs:
3047
3367
  - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
3048
3368
  a bool or a tensor whose data type is
3049
- `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ or
3050
- `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_.
3369
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
3370
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
3051
3371
  - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
3052
- the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
3053
- When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
3372
+ the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
3373
+ When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
3054
3374
 
3055
3375
  Outputs:
3056
3376
  Tensor, the shape is the same as the one of the input `x` , `y` after broadcasting,
3057
3377
  and the data type is the one with higher precision or higher digits among the two inputs.
3058
3378
 
3059
- Raises:
3060
- TypeError: If neither `x` nor `y` is a Tensor.
3061
- TypeError: If data types of `x` and `y` are both Tensor with bool\_.
3062
-
3063
3379
  Supported Platforms:
3064
3380
  ``Ascend`` ``GPU`` ``CPU``
3065
3381
 
3066
3382
  Examples:
3383
+ >>> import mindspore
3384
+ >>> import numpy as np
3385
+ >>> from mindspore import Tensor, ops
3067
3386
  >>> # case 1 :has same data type and shape of the two inputs
3068
3387
  >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
3069
3388
  >>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
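The tensor-and-scalar case from the Note, sketched for completeness (the scalar divisor is broadcast against `x`):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> print(ops.Div()(x, 2.0))  # expect [-2.   2.5  3. ]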
@@ -3138,14 +3457,12 @@ class DivNoNan(Primitive):
3138
3457
  Inputs:
3139
3458
  - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
3140
3459
  a bool or a tensor whose data type is
3141
- `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ or
3142
- `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`
3143
- _.
3460
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
3461
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
3144
3462
  - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
3145
3463
  a bool when the first input is a bool or a tensor whose data type is number or bool\_.
3146
3464
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
3147
3465
 
3148
-
3149
3466
  Outputs:
3150
3467
  Tensor, the shape is the same as the one after broadcasting,
3151
3468
  and the data type is the one with higher precision or higher digits among the two inputs.
@@ -3157,6 +3474,9 @@ class DivNoNan(Primitive):
3157
3474
  ``Ascend`` ``GPU`` ``CPU``
3158
3475
 
3159
3476
  Examples:
3477
+ >>> import mindspore
3478
+ >>> import numpy as np
3479
+ >>> from mindspore import Tensor, ops
3160
3480
  >>> x1 = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
3161
3481
  >>> x2 = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
3162
3482
  >>> div_no_nan = ops.DivNoNan()
@@ -3209,6 +3529,9 @@ class MulNoNan(_MathBinaryOp):
3209
3529
  ``Ascend`` ``GPU`` ``CPU``
3210
3530
 
3211
3531
  Examples:
3532
+ >>> import mindspore
3533
+ >>> import numpy as np
3534
+ >>> from mindspore import Tensor, ops
3212
3535
  >>> # case 1 : same data type and shape of two inputs, there are some 0 in y.
3213
3536
  >>> x = Tensor(np.array([[-1.0, 6.0, np.inf], [np.nan, -7.0, 4.0]]), mindspore.float32)
3214
3537
  >>> y = Tensor(np.array([[-1.0, 4.0, 0], [0, -3.0, 1.0]]), mindspore.float32)
@@ -3247,16 +3570,35 @@ class FloorDiv(Primitive):
3247
3570
 
3248
3571
  Refer to :func:`mindspore.ops.floor_div` for more details.
3249
3572
 
3573
+ Inputs:
3574
+ - **x** (Union[Tensor, Number, bool]) - The first input is a Number or
3575
+ a bool or a tensor whose data type is Number or bool.
3576
+ - **y** (Union[Tensor, Number, bool]) - The second input is a Number or
3577
+ a bool when the first input is a tensor or a tensor whose data type is Number or bool.
3578
+
3579
+ Outputs:
3580
+ Tensor, the shape is the same as the one after broadcasting,
3581
+ and the data type is the one with higher precision or higher digits among the two inputs.
3582
+
3250
3583
  Supported Platforms:
3251
3584
  ``Ascend`` ``GPU`` ``CPU``
3252
3585
 
3253
3586
  Examples:
3587
+ >>> import mindspore
3588
+ >>> import numpy as np
3589
+ >>> from mindspore import Tensor, ops
3254
3590
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
3255
3591
  >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
3256
3592
  >>> floor_div = ops.FloorDiv()
3257
3593
  >>> output = floor_div(x, y)
3258
3594
  >>> print(output)
3259
3595
  [ 0 1 -1]
3596
+ >>> x = Tensor(2.0, mindspore.float32)
3597
+ >>> y = Tensor(2.0, mindspore.float32)
3598
+ >>> floor_div = ops.FloorDiv()
3599
+ >>> output = floor_div(x, y)
3600
+ >>> print(output)
3601
+ 1.0
3260
3602
  """
3261
3603
  __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
3262
3604
 
@@ -3298,6 +3640,9 @@ class TruncateDiv(Primitive):
3298
3640
  ``Ascend`` ``GPU`` ``CPU``
3299
3641
 
3300
3642
  Examples:
3643
+ >>> import mindspore
3644
+ >>> import numpy as np
3645
+ >>> from mindspore import Tensor, ops
3301
3646
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
3302
3647
  >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
3303
3648
  >>> truncate_div = ops.TruncateDiv()
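The difference from FloorDiv only appears for negative quotients: TruncateDiv rounds toward zero while FloorDiv rounds toward negative infinity. A sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> print(ops.TruncateDiv()(x, y))  # expect [0 1 0]
>>> print(ops.FloorDiv()(x, y))     # expect [ 0  1 -1]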
@@ -3330,7 +3675,7 @@ class TruncateMod(Primitive):
3330
3675
  - When the elements of input exceed 2048, the accuracy of operator cannot guarantee the requirement of
3331
3676
  double thousandths in the mini form.
3332
3677
  - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
3333
- - If shape is expressed as :math:`(D1, D2, ..., Dn)`, then :math:`D1*D2... *DN<=1000000,n<=8`.
3678
+ - If shape is expressed as :math:`(D1, D2, ..., Dn)`, then :math:`D1*D2... *DN<=1000000,n<=8`.
3334
3679
 
3335
3680
  Inputs:
3336
3681
  - **x** (Union[Tensor, numbers.Number, bool]) - The first input is a number, or a bool,
@@ -3351,6 +3696,9 @@ class TruncateMod(Primitive):
3351
3696
  ``Ascend`` ``GPU`` ``CPU``
3352
3697
 
3353
3698
  Examples:
3699
+ >>> import mindspore
3700
+ >>> import numpy as np
3701
+ >>> from mindspore import Tensor, ops
3354
3702
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
3355
3703
  >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
3356
3704
  >>> truncate_mod = ops.TruncateMod()
@@ -3384,7 +3732,7 @@ class Mod(_MathBinaryOp):
3384
3732
  - When the elements of input exceed 2048, the accuracy of operator cannot guarantee the requirement of
3385
3733
  double thousandths in the mini form.
3386
3734
  - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
3387
- - If shape is expressed as :math:`(D1,D2... ,Dn)`, then :math:`D1*D2... *DN<=1000000,n<=8`.
3735
+ - If shape is expressed as :math:`(D1, D2, ..., Dn)`, then :math:`D1*D2... *DN<=1000000,n<=8`.
3388
3736
 
3389
3737
  Inputs:
3390
3738
  - **x** (Union[Tensor, numbers.Number, bool]) - The first input is a number, a bool
@@ -3407,6 +3755,9 @@ class Mod(_MathBinaryOp):
3407
3755
  ``Ascend`` ``GPU`` ``CPU``
3408
3756
 
3409
3757
  Examples:
3758
+ >>> import mindspore
3759
+ >>> import numpy as np
3760
+ >>> from mindspore import Tensor, ops
3410
3761
  >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
3411
3762
  >>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
3412
3763
  >>> mod = ops.Mod()
@@ -3429,10 +3780,22 @@ class Floor(Primitive):
3429
3780
 
3430
3781
  Refer to :func:`mindspore.ops.floor` for more details.
3431
3782
 
3783
+ Inputs:
3784
+ - **x** (Tensor) - The input tensor. Supported dtypes:
3785
+
3786
+ - Ascend: float16, float32.
3787
+ - GPU/CPU: float16, float32, float64.
3788
+
3789
+ Outputs:
3790
+ Tensor, has the same shape as `x`.
3791
+
3432
3792
  Supported Platforms:
3433
3793
  ``Ascend`` ``GPU`` ``CPU``
3434
3794
 
3435
3795
  Examples:
3796
+ >>> import mindspore
3797
+ >>> import numpy as np
3798
+ >>> from mindspore import Tensor, ops
3436
3799
  >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
3437
3800
  >>> floor = ops.Floor()
3438
3801
  >>> output = floor(x)
@@ -3452,10 +3815,23 @@ class FloorMod(Primitive):
3452
3815
 
3453
3816
  Refer to :func:`mindspore.ops.floor_mod` for more details.
3454
3817
 
3818
+ Inputs:
3819
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3820
+ a bool or a tensor whose data type is number or bool.
3821
+ - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3822
+ a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
3823
+
3824
+ Outputs:
3825
+ Tensor, the shape is the same as the one after broadcasting,
3826
+ and the data type is the one with higher precision of the two inputs.
3827
+
3455
3828
  Supported Platforms:
3456
3829
  ``Ascend`` ``GPU`` ``CPU``
3457
3830
 
3458
3831
  Examples:
3832
+ >>> import mindspore
3833
+ >>> import numpy as np
3834
+ >>> from mindspore import Tensor, ops
3459
3835
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
3460
3836
  >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
3461
3837
  >>> floor_mod = ops.FloorMod()
@@ -3478,15 +3854,28 @@ class Ceil(PrimitiveWithInfer):
3478
3854
 
3479
3855
  Refer to :func:`mindspore.ops.ceil` for more details.
3480
3856
 
3857
+ Inputs:
3858
+ - **x** (Tensor) - The input tensor with a dtype of float16 or float32.
3859
+
3860
+ Outputs:
3861
+ Tensor, has the same shape as `x`.
3862
+
3481
3863
  Supported Platforms:
3482
3864
  ``Ascend`` ``GPU`` ``CPU``
3483
3865
 
3484
3866
  Examples:
3867
+ >>> import mindspore
3868
+ >>> import numpy as np
3869
+ >>> from mindspore import Tensor, ops
3485
3870
  >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
3486
3871
  >>> ceil_op = ops.Ceil()
3487
3872
  >>> output = ceil_op(x)
3488
3873
  >>> print(output)
3489
3874
  [ 2. 3. -1.]
3875
+ >>> x = Tensor(2.1, mindspore.float32)
3876
+ >>> output = ceil_op(x)
3877
+ >>> print(output)
3878
+ 3.0
3490
3879
  """
3491
3880
 
3492
3881
  @prim_attr_register
@@ -3528,6 +3917,9 @@ class Xdivy(Primitive):
3528
3917
  ``Ascend`` ``GPU`` ``CPU``
3529
3918
 
3530
3919
  Examples:
3920
+ >>> import mindspore
3921
+ >>> import numpy as np
3922
+ >>> from mindspore import Tensor, ops
3531
3923
  >>> x = Tensor(np.array([2, 4, -1]), mindspore.float32)
3532
3924
  >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
3533
3925
  >>> xdivy = ops.Xdivy()
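The point of Xdivy over a plain division is its behavior at zero numerators; assuming the TensorFlow-style convention that a zero numerator yields zero even for a zero denominator, a sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 4.0]), mindspore.float32)
>>> y = Tensor(np.array([0.0, 2.0]), mindspore.float32)
>>> print(ops.Xdivy()(x, y))  # expect [0. 2.] rather than [nan 2.]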
@@ -3593,10 +3985,26 @@ class Xlogy(Primitive):
3593
3985
 
3594
3986
  Refer to :func:`mindspore.ops.xlogy` for more details.
3595
3987
 
3988
+ Inputs:
3989
+ - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
3990
+ a bool or a tensor whose data type is
3991
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
3992
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
3993
+ - **y** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
3994
+ a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
3995
+ When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
3996
+
3997
+ Outputs:
3998
+ Tensor, the shape is the same as the one after broadcasting,
3999
+ and the data type is the one with higher precision or higher digits among the two inputs.
4000
+
3596
4001
  Supported Platforms:
3597
4002
  ``Ascend`` ``GPU`` ``CPU``
3598
4003
 
3599
4004
  Examples:
4005
+ >>> import mindspore
4006
+ >>> import numpy as np
4007
+ >>> from mindspore import Tensor, ops
3600
4008
  >>> x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
3601
4009
  >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
3602
4010
  >>> xlogy = ops.Xlogy()
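Analogously, Xlogy computes x * log(y) but returns 0 wherever `x` is 0, the convention used in entropy calculations; a sketch under that assumption:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 2.0]), mindspore.float32)
>>> y = Tensor(np.array([0.0, 4.0]), mindspore.float32)
>>> print(ops.Xlogy()(x, y))  # expect [0. 2.7725887]; 0 * log(0) is taken as 0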
@@ -3618,6 +4026,12 @@ class Acosh(Primitive):
3618
4026
 
3619
4027
  Refer to :func:`mindspore.ops.acosh` for more details.
3620
4028
 
4029
+ Inputs:
4030
+ - **x** (Tensor) - The input Tensor. Input value must be in range [1, inf].
4031
+
4032
+ Outputs:
4033
+ Tensor, has the same shape and type as `x`.
4034
+
3621
4035
  Supported Platforms:
3622
4036
  ``Ascend`` ``GPU`` ``CPU``
3623
4037
 
@@ -3630,6 +4044,10 @@ class Acosh(Primitive):
3630
4044
  >>> output = acosh(x)
3631
4045
  >>> print(output)
3632
4046
  [0. 0.9624237 1.7627472 5.298292 ]
4047
+ >>> x = Tensor(2.6)
4048
+ >>> output = acosh(x)
4049
+ >>> print(output)
4050
+ 1.609438
3633
4051
  """
3634
4052
 
3635
4053
  @prim_attr_register
@@ -3644,15 +4062,28 @@ class Cosh(Primitive):
3644
4062
 
3645
4063
  Refer to :func:`mindspore.ops.cosh` for more details.
3646
4064
 
4065
+ Inputs:
4066
+ - **x** (Tensor) - The input Tensor.
4067
+
4068
+ Outputs:
4069
+ Tensor, has the same shape and dtype as `x`.
4070
+
3647
4071
  Supported Platforms:
3648
4072
  ``Ascend`` ``GPU`` ``CPU``
3649
4073
 
3650
4074
  Examples:
4075
+ >>> import mindspore
4076
+ >>> import numpy as np
4077
+ >>> from mindspore import Tensor, ops
3651
4078
  >>> cosh = ops.Cosh()
3652
4079
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
3653
4080
  >>> output = cosh(x)
3654
4081
  >>> print(output)
3655
4082
  [1.0289385 1.364684 1.048436 1.0040528]
4083
+ >>> x = Tensor(2.1, mindspore.float32)
4084
+ >>> output = cosh(x)
4085
+ >>> print(output)
4086
+ 4.144313
3656
4087
  """
3657
4088
 
3658
4089
  @prim_attr_register
@@ -3666,10 +4097,19 @@ class Asinh(Primitive):
3666
4097
 
3667
4098
  Refer to :func:`mindspore.ops.asinh` for more details.
3668
4099
 
4100
+ Inputs:
4101
+ - **x** (Tensor) - The input Tensor, its rank should be less than 8.
4102
+
4103
+ Outputs:
4104
+ Tensor, has the same shape and type as `x`.
4105
+
3669
4106
  Supported Platforms:
3670
4107
  ``Ascend`` ``GPU`` ``CPU``
3671
4108
 
3672
4109
  Examples:
4110
+ >>> import mindspore
4111
+ >>> import numpy as np
4112
+ >>> from mindspore import Tensor, ops
3673
4113
  >>> asinh = ops.Asinh()
3674
4114
  >>> x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
3675
4115
  >>> output = asinh(x)
@@ -3722,6 +4162,9 @@ class Sinh(Primitive):
3722
4162
  ``Ascend`` ``GPU`` ``CPU``
3723
4163
 
3724
4164
  Examples:
4165
+ >>> import mindspore
4166
+ >>> import numpy as np
4167
+ >>> from mindspore import Tensor, ops
3725
4168
  >>> sinh = ops.Sinh()
3726
4169
  >>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
3727
4170
  >>> output = sinh(x)
@@ -3744,7 +4187,7 @@ class _LogicBinaryOp(_BinaryOp):
3744
4187
  """Staticmethod of infer dtype for _LogicBinaryOp."""
3745
4188
  args_dtype = {"x": x_dtype, "y": y_dtype}
3746
4189
  validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name)
3747
- return mstype.tensor_type(mstype.bool_)
4190
+ return mstype.TensorType(mstype.bool_)
3748
4191
 
3749
4192
  def infer_dtype(self, x_dtype, y_dtype):
3750
4193
  return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
@@ -3757,6 +4200,21 @@ class Quantile(Primitive):
3757
4200
 
3758
4201
  Refer to :func:`mindspore.ops.quantile` and :func:`mindspore.ops.nanquantile` for more details.
3759
4202
 
4203
+ Args:
4204
+ dim (int, optional): The dimension to reduce. By default, `dim` is ``None`` , resulting in the
4205
+ input tensor being flattened before computation. Default: ``None`` .
4206
+ keep_dims (bool, optional): Whether the output tensor has dim retained or not. Default: ``False`` .
4207
+ ignore_nan (bool, optional): Whether to ignore NaN values in the input. Default: ``False`` .
4208
+
4209
+ Inputs:
4210
+ - **input** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
4211
+ Supported dtypes: float32, float64.
4212
+ - **q** (Union[float, Tensor]) - A scalar or 1D tensor of quantile values in the range [0, 1].
4213
+ Supported dtypes: float32, float64.
4214
+
4215
+ Outputs:
4216
+ Tensor, has the same dtype as the `input`.
4217
+
3760
4218
  Supported Platforms:
3761
4219
 
3762
4220
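A minimal sketch of the documented semantics via the functional interface referenced above (the functional name and argument order are assumed from that reference):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 1.0, 2.0, 3.0]), mindspore.float32)
>>> print(ops.quantile(x, 0.5))  # expect 1.5, the median of the flattened input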
 
@@ -3792,10 +4250,23 @@ class Equal(Primitive):
3792
4250
 
3793
4251
  Refer to :func:`mindspore.ops.equal` for more details.
3794
4252
 
4253
+ Inputs:
4254
+ - **x** (Union[Tensor, Number]) - The first input is a Number or
4255
+ a tensor whose data type is Number.
4256
+ - **y** (Union[Tensor, Number]) - The second input is a Number
4257
+ when the first input is a tensor or a tensor whose data type is Number.
4258
+ The data type is the same as the first input.
4259
+
4260
+ Outputs:
4261
+ Tensor, it has the same shape as the `x` and `y` after broadcasting, and the data type is bool.
4262
+
3795
4263
  Supported Platforms:
3796
4264
  ``Ascend`` ``GPU`` ``CPU``
3797
4265
 
3798
4266
  Examples:
4267
+ >>> import mindspore
4268
+ >>> import numpy as np
4269
+ >>> from mindspore import Tensor, ops
3799
4270
  >>> # case 1: The shape of two inputs are different
3800
4271
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
3801
4272
  >>> equal = ops.Equal()
@@ -3821,7 +4292,7 @@ class Equal(Primitive):
3821
4292
 
3822
4293
  class ApproximateEqual(_LogicBinaryOp):
3823
4294
  r"""
3824
- Returns True if abs(x-y) is smaller than tolerance element-wise, otherwise False.
4295
+ Returns ``True`` if abs(x-y) is smaller than tolerance element-wise, otherwise ``False`` .
3825
4296
 
3826
4297
  .. math::
3827
4298
 
@@ -3837,7 +4308,7 @@ class ApproximateEqual(_LogicBinaryOp):
3837
4308
  the relatively highest precision data type.
3838
4309
 
3839
4310
  Args:
3840
- tolerance (float): The maximum deviation that two elements can be considered equal. Default: 1e-05.
4311
+ tolerance (float): The maximum deviation that two elements can be considered equal. Default: ``1e-05`` .
3841
4312
 
3842
4313
  Inputs:
3843
4314
  - **x** (Tensor) - A tensor. Must be one of the following types: float32, float16.
@@ -3849,13 +4320,16 @@ class ApproximateEqual(_LogicBinaryOp):
3849
4320
 
3850
4321
  Raises:
3851
4322
  TypeError: If `tolerance` is not a float.
3852
- RuntimeError: If the data type of `x`, `y` conversion of Parameter is given
4323
+ TypeError: If the data type of `x`, `y` conversion of Parameter is given
3853
4324
  but data type conversion of Parameter is not supported.
3854
4325
 
3855
4326
  Supported Platforms:
3856
4327
  ``Ascend`` ``GPU`` ``CPU``
3857
4328
 
3858
4329
  Examples:
4330
+ >>> import mindspore
4331
+ >>> import numpy as np
4332
+ >>> from mindspore import Tensor, ops
3859
4333
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
3860
4334
  >>> y = Tensor(np.array([2, 3, 6]), mindspore.float32)
3861
4335
  >>> approximate_equal = ops.ApproximateEqual(2.)
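With tolerance 2.0, the element-wise deviations above are |1-2| = 1, |2-3| = 1 and |3-6| = 3, so only the last pair exceeds the tolerance; completing the call as a sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> y = Tensor(np.array([2, 3, 6]), mindspore.float32)
>>> print(ops.ApproximateEqual(2.)(x, y))  # expect [ True  True False]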
@@ -3894,6 +4368,9 @@ class EqualCount(PrimitiveWithInfer):
3894
4368
  ``Ascend`` ``GPU`` ``CPU``
3895
4369
 
3896
4370
  Examples:
4371
+ >>> import mindspore
4372
+ >>> import numpy as np
4373
+ >>> from mindspore import Tensor, ops
3897
4374
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3898
4375
  >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
3899
4376
  >>> equal_count = ops.EqualCount()
@@ -3914,12 +4391,24 @@ class NotEqual(Primitive):
3914
4391
 
3915
4392
  Refer to :func:`mindspore.ops.ne` for more details.
3916
4393
 
4394
+ Inputs:
4395
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number or
4396
+ a bool or a tensor whose data type is number or bool.
4397
+ - **y** (Union[Tensor, Number, bool]) - The second input is a number or
4398
+ a bool when the first input is a tensor or a tensor whose data type is number or bool.
4399
+
4400
+ Outputs:
4401
+ Tensor, it has the same shape as the `x` and `y` after broadcasting, and the data type is bool.
4402
+
3917
4403
  Supported Platforms:
3918
4404
  ``Ascend`` ``GPU`` ``CPU``
3919
4405
 
3920
4406
  Examples:
3921
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
3922
- >>> not_equal = ops.NotEqual()
4407
+ >>> import mindspore
4408
+ >>> import numpy as np
4409
+ >>> from mindspore import Tensor, ops
4410
+ >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
4411
+ >>> not_equal = ops.NotEqual()
3923
4412
  >>> output = not_equal(x, 2.0)
3924
4413
  >>> print(output)
3925
4414
  [ True False True]
@@ -3945,10 +4434,25 @@ class Greater(PrimitiveWithCheck):
3945
4434
 
3946
4435
  Refer to :func:`mindspore.ops.gt` for more details.
3947
4436
 
4437
+ Inputs:
4438
+ - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
4439
+ a bool or a tensor whose data type is
4440
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
4441
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
4442
+ - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
4443
+ the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
4444
+ When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
4445
+
4446
+ Outputs:
4447
+ Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4448
+
3948
4449
  Supported Platforms:
3949
4450
  ``Ascend`` ``GPU`` ``CPU``
3950
4451
 
3951
4452
  Examples:
4453
+ >>> import mindspore
4454
+ >>> import numpy as np
4455
+ >>> from mindspore import Tensor, ops
3952
4456
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3953
4457
  >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3954
4458
  >>> greater = ops.Greater()
@@ -3976,14 +4480,27 @@ class Greater(PrimitiveWithCheck):
3976
4480
 
3977
4481
  class GreaterEqual(PrimitiveWithCheck):
3978
4482
  r"""
3979
- Computes the boolean value of :math:`x >= y` element-wise.
4483
+ Given two Tensors, compares them element-wise to check if each element in the first
4484
+ Tensor is greater than or equal to the corresponding element in the second Tensor.
3980
4485
 
3981
4486
  Refer to :func:`mindspore.ops.ge` for more details.
3982
4487
 
4488
+ Inputs:
4489
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number or
4490
+ a bool or a tensor whose data type is number or bool.
4491
+ - **y** (Union[Tensor, Number, bool]) - The second input is a number or
4492
+ a bool when the first input is a tensor or a tensor whose data type is number or bool.
4493
+
4494
+ Outputs:
4495
+ Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4496
+
3983
4497
  Supported Platforms:
3984
4498
  ``Ascend`` ``GPU`` ``CPU``
3985
4499
 
3986
4500
  Examples:
4501
+ >>> import mindspore
4502
+ >>> import numpy as np
4503
+ >>> from mindspore import Tensor, ops
3987
4504
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3988
4505
  >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3989
4506
  >>> greater_equal = ops.GreaterEqual()
@@ -4013,7 +4530,7 @@ class Lerp(Primitive):
4013
4530
  Refer to :func:`mindspore.ops.lerp` for more details.
4014
4531
 
4015
4532
  Inputs:
4016
- - **start** (Tensor) - The tensor with the starting points. Data type must be float16 or float32.
4533
+ - **start** (Tensor) - The tensor with the starting points. Data type must be float16, float32 or float64.
4017
4534
  - **end** (Tensor) - The tensor with the ending points. Data type must be the same as `start`.
4018
4535
  - **weight** (Union[float, Tensor]) - The weight for the interpolation formula. Must be a float
4019
4536
  or a scalar tensor with float16 or float32 data type.
@@ -4025,6 +4542,9 @@ class Lerp(Primitive):
4025
4542
  ``Ascend`` ``GPU`` ``CPU``
4026
4543
 
4027
4544
  Examples:
4545
+ >>> import mindspore
4546
+ >>> import numpy as np
4547
+ >>> from mindspore import Tensor, ops
4028
4548
  >>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
4029
4549
  >>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
4030
4550
  >>> lerp = ops.Lerp()
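Lerp evaluates start + weight * (end - start), so a weight of 0.5 lands on the midpoints; a sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
>>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
>>> print(ops.Lerp()(start, end, 0.5))  # expect [5.5 6.  6.5 7. ]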
@@ -4063,6 +4583,8 @@ class Gcd(Primitive):
4063
4583
  ``Ascend`` ``GPU`` ``CPU``
4064
4584
 
4065
4585
  Examples:
4586
+ >>> import numpy as np
4587
+ >>> from mindspore import Tensor, ops
4066
4588
  >>> x1 = Tensor(np.array([7, 8, 9]))
4067
4589
  >>> x2 = Tensor(np.array([14, 6, 12]))
4068
4590
  >>> gcd_ = ops.Gcd()
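Gcd is element-wise: gcd(7, 14) = 7, gcd(8, 6) = 2, gcd(9, 12) = 3. Completing the example as a sketch:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x1 = Tensor(np.array([7, 8, 9]))
>>> x2 = Tensor(np.array([14, 6, 12]))
>>> print(ops.Gcd()(x1, x2))  # expect [7 2 3]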
@@ -4084,10 +4606,22 @@ class Less(PrimitiveWithCheck):
4084
4606
 
4085
4607
  Refer to :func:`mindspore.ops.less` for more details.
4086
4608
 
4609
+ Inputs:
4610
+ - **x** (Union[Tensor, Number, bool]) - The first input is a number or
4611
+ a bool or a tensor whose data type is number or bool.
4612
+ - **y** (Union[Tensor, Number, bool]) - The second input is a number or
4613
+ a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
4614
+
4615
+ Outputs:
4616
+ Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4617
+
4087
4618
  Supported Platforms:
4088
4619
  ``Ascend`` ``GPU`` ``CPU``
4089
4620
 
4090
4621
  Examples:
4622
+ >>> import mindspore
4623
+ >>> import numpy as np
4624
+ >>> from mindspore import Tensor, ops
4091
4625
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
4092
4626
  >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
4093
4627
  >>> less = ops.Less()
@@ -4116,10 +4650,25 @@ class LessEqual(PrimitiveWithCheck):
4116
4650
 
4117
4651
  Refer to :func:`mindspore.ops.le` for more details.
4118
4652
 
4653
+ Inputs:
4654
+ - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
4655
+ a bool or a tensor whose data type is
4656
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
4657
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
4658
+ - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
4659
+ the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
4660
+ When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
4661
+
4662
+ Outputs:
4663
+ Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4664
+
4119
4665
  Supported Platforms:
4120
4666
  ``Ascend`` ``GPU`` ``CPU``
4121
4667
 
4122
4668
  Examples:
4669
+ >>> import mindspore
4670
+ >>> import numpy as np
4671
+ >>> from mindspore import Tensor, ops
4123
4672
  >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
4124
4673
  >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
4125
4674
  >>> less_equal = ops.LessEqual()
@@ -4148,10 +4697,19 @@ class LogicalNot(Primitive):
4148
4697
 
4149
4698
  Refer to :func:`mindspore.ops.logical_not` for more details.
4150
4699
 
4700
+ Inputs:
4701
+ - **x** (Tensor) - The input tensor, the dtype must be bool.
4702
+
4703
+ Outputs:
4704
+ Tensor, the shape is the same as the `x`, and the dtype is bool.
4705
+
4151
4706
  Supported Platforms:
4152
4707
  ``Ascend`` ``GPU`` ``CPU``
4153
4708
 
4154
4709
  Examples:
4710
+ >>> import mindspore
4711
+ >>> import numpy as np
4712
+ >>> from mindspore import Tensor, ops
4155
4713
  >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
4156
4714
  >>> logical_not = ops.LogicalNot()
4157
4715
  >>> output = logical_not(x)
@@ -4171,16 +4729,43 @@ class LogicalAnd(_LogicBinaryOp):
4171
4729
 
4172
4730
  Refer to :func:`mindspore.ops.logical_and` for more details.
4173
4731
 
4732
+ Inputs:
4733
+ - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
4734
+ converted to bool.
4735
+ - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
4736
+ a tensor whose data type can be implicitly converted to bool.
4737
+
4738
+ Outputs:
4739
+ Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
4740
+
4174
4741
  Supported Platforms:
4175
4742
  ``Ascend`` ``GPU`` ``CPU``
4176
4743
 
4177
4744
  Examples:
4745
+ >>> import mindspore
4746
+ >>> import numpy as np
4747
+ >>> from mindspore import Tensor, ops
4178
4748
  >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
4179
4749
  >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
4180
4750
  >>> logical_and = ops.LogicalAnd()
4181
4751
  >>> output = logical_and(x, y)
4182
4752
  >>> print(output)
4183
4753
  [ True False False]
4754
+ >>> x = Tensor(1, mindspore.bool_)
4755
+ >>> y = Tensor(0, mindspore.bool_)
4756
+ >>> output = ops.LogicalAnd()(x, y)
4757
+ >>> print(output)
4758
+ False
4759
+ >>> x = True
4760
+ >>> y = Tensor(0, mindspore.bool_)
4761
+ >>> output = ops.LogicalAnd()(x, y)
4762
+ >>> print(output)
4763
+ False
4764
+ >>> x = True
4765
+ >>> y = Tensor(np.array([True, False]), mindspore.bool_)
4766
+ >>> output = ops.LogicalAnd()(x, y)
4767
+ >>> print(output)
4768
+ [True False]
4184
4769
  """
4185
4770
 
4186
4771
 
@@ -4190,16 +4775,43 @@ class LogicalOr(_LogicBinaryOp):
4190
4775
 
4191
4776
  Refer to :func:`mindspore.ops.logical_or` for more details.
4192
4777
 
4778
+ Inputs:
4779
+ - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
4780
+ converted to bool.
4781
+ - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
4782
+ a tensor whose data type can be implicitly converted to bool.
4783
+
4784
+ Outputs:
4785
+ Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
4786
+
4193
4787
  Supported Platforms:
4194
4788
  ``Ascend`` ``GPU`` ``CPU``
4195
4789
 
4196
4790
  Examples:
4791
+ >>> import mindspore
4792
+ >>> import numpy as np
4793
+ >>> from mindspore import Tensor, ops
4197
4794
  >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
4198
4795
  >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
4199
4796
  >>> logical_or = ops.LogicalOr()
4200
4797
  >>> output = logical_or(x, y)
4201
4798
  >>> print(output)
4202
4799
  [ True True True]
4800
+ >>> x = Tensor(1, mindspore.bool_)
4801
+ >>> y = Tensor(0, mindspore.bool_)
4802
+ >>> output = ops.LogicalOr()(x, y)
4803
+ >>> print(output)
4804
+ True
4805
+ >>> x = True
4806
+ >>> y = Tensor(0, mindspore.bool_)
4807
+ >>> output = ops.LogicalOr()(x, y)
4808
+ >>> print(output)
4809
+ True
4810
+ >>> x = True
4811
+ >>> y = Tensor(np.array([True, False]), mindspore.bool_)
4812
+ >>> output = ops.LogicalOr()(x, y)
4813
+ >>> print(output)
4814
+ [True True]
4203
4815
  """
4204
4816
 
4205
4817
 
@@ -4212,16 +4824,43 @@ class LogicalXor(Primitive):
4212
4824
 
4213
4825
  Refer to :func:`mindspore.ops.logical_xor` for more details.
4214
4826
 
4827
+ Inputs:
4828
+ - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
4829
+ converted to bool.
4830
+ - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
4831
+ a tensor whose data type can be implicitly converted to bool.
4832
+
4833
+ Outputs:
4834
+ Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
4835
+
4215
4836
  Supported Platforms:
4216
4837
  ``Ascend`` ``CPU``
4217
4838
 
4218
4839
  Examples:
4840
+ >>> import mindspore
4841
+ >>> import numpy as np
4842
+ >>> from mindspore import Tensor, ops
4219
4843
  >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
4220
4844
  >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
4221
4845
  >>> logical_xor = ops.LogicalXor()
4222
4846
  >>> output = logical_xor(x, y)
4223
4847
  >>> print(output)
4224
4848
  [ False True True]
4849
+ >>> x = Tensor(1, mindspore.bool_)
4850
+ >>> y = Tensor(0, mindspore.bool_)
4851
+ >>> output = ops.LogicalXor()(x, y)
4852
+ >>> print(output)
4853
+ True
4854
+ >>> x = True
4855
+ >>> y = Tensor(0, mindspore.bool_)
4856
+ >>> output = ops.LogicalXor()(x, y)
4857
+ >>> print(output)
4858
+ True
4859
+ >>> x = True
4860
+ >>> y = Tensor(np.array([True, False]), mindspore.bool_)
4861
+ >>> output = ops.LogicalXor()(x, y)
4862
+ >>> print(output)
4863
+ [False True]
4225
4864
  """
4226
4865
 
4227
4866
  @prim_attr_register
@@ -4236,15 +4875,28 @@ class IsNan(Primitive):
4236
4875
 
4237
4876
  Refer to :func:`mindspore.ops.isnan` for more details.
4238
4877
 
4878
+ Inputs:
4879
+ - **x** (Tensor) - The input tensor.
4880
+
4881
+ Outputs:
4882
+ Tensor, has the same shape as the input, and the dtype is bool.
4883
+
4239
4884
  Supported Platforms:
4240
4885
  ``Ascend`` ``GPU`` ``CPU``
4241
4886
 
4242
4887
  Examples:
4888
+ >>> import mindspore
4889
+ >>> import numpy as np
4890
+ >>> from mindspore import Tensor, ops
4243
4891
  >>> is_nan = ops.IsNan()
4244
4892
  >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
4245
4893
  >>> output = is_nan(x)
4246
4894
  >>> print(output)
4247
4895
  [ True False False]
4896
+ >>> x = Tensor(2.1, mindspore.float64)
4897
+ >>> output = is_nan(x)
4898
+ >>> print(output)
4899
+ False
4248
4900
  """
4249
4901
 
4250
4902
  @prim_attr_register
@@ -4259,15 +4911,28 @@ class IsInf(Primitive):
4259
4911
 
4260
4912
  Refer to :func:`mindspore.ops.isinf` for more details.
4261
4913
 
4914
+ Inputs:
4915
+ - **x** (Tensor) - The input tensor.
4916
+
4917
+ Outputs:
4918
+ Tensor, has the same shape as the input, and the dtype is bool.
4919
+
4262
4920
  Supported Platforms:
4263
4921
  ``Ascend`` ``GPU`` ``CPU``
4264
4922
 
4265
4923
  Examples:
4924
+ >>> import mindspore
4925
+ >>> import numpy as np
4926
+ >>> from mindspore import Tensor, ops
4266
4927
  >>> is_inf = ops.IsInf()
4267
4928
  >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
4268
4929
  >>> output = is_inf(x)
4269
4930
  >>> print(output)
4270
4931
  [False False True]
4932
+ >>> x = Tensor(2.1, mindspore.float64)
4933
+ >>> output = is_inf(x)
4934
+ >>> print(output)
4935
+ False
4271
4936
  """
4272
4937
 
4273
4938
  @prim_attr_register
@@ -4286,6 +4951,9 @@ class IsFinite(Primitive):
4286
4951
  ``Ascend`` ``GPU`` ``CPU``
4287
4952
 
4288
4953
  Examples:
4954
+ >>> import mindspore
4955
+ >>> import numpy as np
4956
+ >>> from mindspore import Tensor, ops
4289
4957
  >>> is_finite = ops.IsFinite()
4290
4958
  >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
4291
4959
  >>> output = is_finite(x)
@@ -4305,7 +4973,7 @@ class FloatStatus(Primitive):
4305
4973
 
4306
4974
  Inputs:
4307
4975
  - **x** (Tensor) - The input tensor. The data type must be float16, float32 or float64.
4308
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
4976
+ :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4309
4977
 
4310
4978
  Outputs:
4311
4979
  Tensor, has the shape of :math:`(1,)`, and the dtype is `mindspore.dtype.float32`.
@@ -4317,6 +4985,9 @@ class FloatStatus(Primitive):
4317
4985
  ``GPU``
4318
4986
 
4319
4987
  Examples:
4988
+ >>> import mindspore
4989
+ >>> import numpy as np
4990
+ >>> from mindspore import Tensor, ops
4320
4991
  >>> float_status = ops.FloatStatus()
4321
4992
  >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
4322
4993
  >>> result = float_status(x)
@@ -4346,6 +5017,7 @@ class NPUAllocFloatStatus(Primitive):
4346
5017
  ``Ascend``
4347
5018
 
4348
5019
  Examples:
5020
+ >>> from mindspore import ops
4349
5021
  >>> alloc_status = ops.NPUAllocFloatStatus()
4350
5022
  >>> output = alloc_status()
4351
5023
  >>> print(output)
@@ -4355,7 +5027,8 @@ class NPUAllocFloatStatus(Primitive):
4355
5027
  @prim_attr_register
4356
5028
  def __init__(self):
4357
5029
  """Initialize NPUAllocFloatStatus"""
4358
- logger.warning("The 'NPUAllocFloatStatus' operator will be deprecated in the future. Please don't use it.")
5030
+ logger.warning("The 'NPUAllocFloatStatus' operator will be deprecated in the future, "
5031
+ "please use 'nn.TrainOneStepWithLossScaleCell' or 'amp.all_finite'.")
4359
5032
 
4360
5033
 
4361
5034
  class NPUGetFloatStatus(Primitive):
@@ -4391,7 +5064,7 @@ class NPUGetFloatStatus(Primitive):
4391
5064
  >>> import numpy as np
4392
5065
  >>> import mindspore.nn as nn
4393
5066
  >>> from mindspore import ops
4394
- >>> from mindspore.common import dtype as mstype
5067
+ >>> from mindspore import dtype as mstype
4395
5068
  >>> from mindspore.common.tensor import Tensor
4396
5069
  >>> class Net(nn.Cell):
4397
5070
  ... def __init__(self):
@@ -4425,7 +5098,8 @@ class NPUGetFloatStatus(Primitive):
4425
5098
  @prim_attr_register
4426
5099
  def __init__(self):
4427
5100
  """Initialize NPUGetFloatStatus"""
4428
- logger.warning("The 'NPUGetFloatStatus' operator will be deprecated in the future. Please don't use it.")
5101
+ logger.warning("The 'NPUGetFloatStatus' operator will be deprecated in the future, "
5102
+ "please use 'nn.TrainOneStepWithLossScaleCell' or 'amp.all_finite'.")
4429
5103
 
4430
5104
 
4431
5105
  class NPUClearFloatStatus(Primitive):
@@ -4455,7 +5129,7 @@ class NPUClearFloatStatus(Primitive):
4455
5129
  >>> import numpy as np
4456
5130
  >>> import mindspore.nn as nn
4457
5131
  >>> from mindspore import ops
4458
- >>> from mindspore.common import dtype as mstype
5132
+ >>> from mindspore import dtype as mstype
4459
5133
  >>> from mindspore.common.tensor import Tensor
4460
5134
  >>> class Net(nn.Cell):
4461
5135
  ... def __init__(self):
@@ -4489,7 +5163,8 @@ class NPUClearFloatStatus(Primitive):
4489
5163
  @prim_attr_register
4490
5164
  def __init__(self):
4491
5165
  """Initialize NPUClearFloatStatus"""
4492
- logger.warning("The 'NPUClearFloatStatus' operator will be deprecated in the future. Please don't use it.")
5166
+ logger.warning("The 'NPUClearFloatStatus' operator will be deprecated in the future,"
5167
+ "please use 'nn.TrainOneStepWithLossScaleCell' or 'amp.all_finite'.")
4493
5168
 
4494
5169
 
4495
5170
  class NPUGetFloatStatusV2(Primitive):
@@ -4538,9 +5213,10 @@ class NPUGetFloatStatusV2(Primitive):
4538
5213
  ... self.get_status = NPUGetFloatStatusV2()
4539
5214
  ... self.sub = ops.Sub()
4540
5215
  ... self.neg = ops.Neg()
4541
- ... self.not_equal = ops.NotEqual()
4542
- ... self.reduce_any = ops.ReduceAny(keep_dims=False)
5216
+ ... self.equal = ops.Equal()
5217
+ ... self.reduce_all = ops.ReduceAll(keep_dims=False)
4543
5218
  ... self.base = Tensor([0], dtype=ms.int32)
5219
+ ... self.logic_not = ops.LogicalNot()
4544
5220
  ...
4545
5221
  ... def construct(self, x):
4546
5222
  ... init = Tensor([0]*8, dtype=ms.int32)
@@ -4549,8 +5225,9 @@ class NPUGetFloatStatusV2(Primitive):
4549
5225
  ... res = self.sub(x, self.neg(x))
4550
5226
  ... init = ops.depend(init, res)
4551
5227
  ... get_status = self.get_status(init)
4552
- ... flag = self.not_equal(self.base, get_status)
4553
- ... overflow = self.reduce_any(flag)
5228
+ ... flag = self.equal(self.base, get_status)
5229
+ ... overall_finite = self.reduce_all(flag)
5230
+ ... overflow = self.logic_not(overall_finite)
4554
5231
  ... return overflow
4555
5232
  ...
4556
5233
  >>> value = 65504
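The revised example inverts the check: instead of flagging overflow when any status word is nonzero, it asserts that all words equal zero (everything finite) and negates the result. The two forms are logically equivalent, as this numpy sketch shows:

>>> import numpy as np
>>> status = np.array([0, 0, 7, 0], dtype=np.int32)
>>> bool(np.any(status != 0))        # old pattern: any nonzero word means overflow
True
>>> not bool(np.all(status == 0))    # new pattern: not all-zero means overflow
True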
@@ -4620,9 +5297,10 @@ class NPUClearFloatStatusV2(Primitive):
4620
5297
  ... self.get_status = NPUGetFloatStatusV2()
4621
5298
  ... self.sub = ops.Sub()
4622
5299
  ... self.neg = ops.Neg()
4623
- ... self.not_equal = ops.NotEqual()
4624
- ... self.reduce_any = ops.ReduceAny(keep_dims=False)
5300
+ ... self.equal = ops.Equal()
5301
+ ... self.reduce_all = ops.ReduceAll(keep_dims=False)
4625
5302
  ... self.base = Tensor([0], dtype=ms.int32)
5303
+ ... self.logic_not = ops.LogicalNot()
4626
5304
  ...
4627
5305
  ... def construct(self, x):
4628
5306
  ... init = Tensor([0]*8, dtype=ms.int32)
@@ -4631,8 +5309,9 @@ class NPUClearFloatStatusV2(Primitive):
4631
5309
  ... res = self.sub(x, self.neg(x))
4632
5310
  ... init = ops.depend(init, res)
4633
5311
  ... get_status = self.get_status(init)
4634
- ... flag = self.not_equal(self.base, get_status)
4635
- ... overflow = self.reduce_any(flag)
5312
+ ... flag = self.equal(self.base, get_status)
5313
+ ... overall_finite = self.reduce_all(flag)
5314
+ ... overflow = self.logic_not(overall_finite)
4636
5315
  ... return overflow
4637
5316
  ...
4638
5317
  >>> value = 65504
@@ -4662,10 +5341,19 @@ class Cos(Primitive):
4662
5341
 
4663
5342
  Refer to :func:`mindspore.ops.cos` for more details.
4664
5343
 
5344
+ Inputs:
5345
+ - **x** (Tensor) - The input Tensor.
5346
+
5347
+ Outputs:
5348
+ Tensor, has the same shape as `x`.
5349
+
4665
5350
  Supported Platforms:
4666
5351
  ``Ascend`` ``GPU`` ``CPU``
4667
5352
 
4668
5353
  Examples:
5354
+ >>> import mindspore
5355
+ >>> import numpy as np
5356
+ >>> from mindspore import Tensor, ops
4669
5357
  >>> cos = ops.Cos()
4670
5358
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
4671
5359
  >>> output = cos(x)
@@ -4684,10 +5372,20 @@ class ACos(Primitive):
4684
5372
 
4685
5373
  Refer to :func:`mindspore.ops.acos` for more details.
4686
5374
 
5375
+ Inputs:
5376
+ - **x** (Tensor) - The shape of tensor is
5377
+ :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5378
+
5379
+ Outputs:
5380
+ Tensor, has the same shape and dtype as `x`.
5381
+
4687
5382
  Supported Platforms:
4688
5383
  ``Ascend`` ``GPU`` ``CPU``
4689
5384
 
4690
5385
  Examples:
5386
+ >>> import mindspore
5387
+ >>> import numpy as np
5388
+ >>> from mindspore import Tensor, ops
4691
5389
  >>> acos = ops.ACos()
4692
5390
  >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
4693
5391
  >>> output = acos(x)
@@ -4711,6 +5409,9 @@ class Sin(Primitive):
4711
5409
  ``Ascend`` ``GPU`` ``CPU``
4712
5410
 
4713
5411
  Examples:
5412
+ >>> import mindspore
5413
+ >>> import numpy as np
5414
+ >>> from mindspore import Tensor, ops
4714
5415
  >>> sin = ops.Sin()
4715
5416
  >>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
4716
5417
  >>> output = sin(x)
@@ -4725,14 +5426,27 @@ class Sin(Primitive):
4725
5426
 
4726
5427
  class Asin(Primitive):
4727
5428
  r"""
4728
- Computes arcsine of input tensors element-wise.
5429
+ Computes arcsine of input tensor element-wise.
4729
5430
 
4730
5431
  Refer to :func:`mindspore.ops.asin` for more details.
4731
5432
 
5433
+ Note:
5434
+ Complex64 and complex128 are not supported on Ascend currently.
5435
+
5436
+ Inputs:
5437
+ - **x** (Tensor) - The shape of tensor is
5438
+ :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
5439
+
5440
+ Outputs:
5441
+ Tensor, has the same shape and dtype as `x`.
5442
+
4732
5443
  Supported Platforms:
4733
5444
  ``Ascend`` ``GPU`` ``CPU``
4734
5445
 
4735
5446
  Examples:
5447
+ >>> import mindspore
5448
+ >>> import numpy as np
5449
+ >>> from mindspore import Tensor, ops
4736
5450
  >>> asin = ops.Asin()
4737
5451
  >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
4738
5452
  >>> output = asin(x)
@@ -4763,7 +5477,7 @@ class NMSWithMask(PrimitiveWithInfer):
 
  Args:
  iou_threshold (float): Specifies the threshold of overlap boxes with respect to
- IOU. Default: 0.5.
+ IOU. Default: ``0.5`` .
 
  Inputs:
  - **bboxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Input bounding boxes.
@@ -4793,6 +5507,9 @@ class NMSWithMask(PrimitiveWithInfer):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bbox = np.array([[100.0, 100.0, 50.0, 68.0, 0.63], [150.0, 75.0, 165.0, 115.0, 0.55],
  ... [12.0, 190.0, 288.0, 200.0, 0.9], [28.0, 130.0, 106.0, 172.0, 0.3]])
  >>> bbox[:, 2] += bbox[:, 0]
@@ -4810,7 +5527,6 @@ class NMSWithMask(PrimitiveWithInfer):
  """Initialize NMSWithMask"""
  validator.check_value_type("iou_threshold", iou_threshold, [float], self.name)
  self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask'])
- self.is_ge = context.get_context("enable_ge")
 
  def infer_shape(self, bboxes_shape):
  cls_name = self.name
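
A note on the NMSWithMask hunks above: for review purposes, the greedy IoU-suppression rule this operator documents can be cross-checked against a plain NumPy sketch. This is an illustrative reference only, not the operator's kernel; the helper name nms_mask is hypothetical, and it assumes rows of the form (x0, y0, x1, y1, score).

    import numpy as np

    def nms_mask(bboxes, iou_threshold=0.5):
        # Visit boxes in descending score order; each row is (x0, y0, x1, y1, score).
        order = np.argsort(-bboxes[:, 4])
        keep = np.ones(len(bboxes), dtype=bool)
        areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
        for i_pos, i in enumerate(order):
            if not keep[i]:
                continue
            for j in order[i_pos + 1:]:
                # Intersection rectangle of box i and box j.
                w = min(bboxes[i, 2], bboxes[j, 2]) - max(bboxes[i, 0], bboxes[j, 0])
                h = min(bboxes[i, 3], bboxes[j, 3]) - max(bboxes[i, 1], bboxes[j, 1])
                inter = max(w, 0.0) * max(h, 0.0)
                iou = inter / (areas[i] + areas[j] - inter)
                if iou > iou_threshold:
                    keep[j] = False  # Suppress the lower-scored overlapping box.
        return order, keep
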
@@ -4832,15 +5548,28 @@ class Abs(Primitive):
 
  Refer to :func:`mindspore.ops.abs` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The input tensor.
+
+ Outputs:
+ Tensor, has the same shape as the `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
  >>> abs = ops.Abs()
  >>> output = abs(x)
  >>> print(output)
  [1. 1. 0.]
+ >>> x = Tensor(3.6)
+ >>> output = abs(x)
+ >>> print(output)
+ 3.6
  """
 
  @prim_attr_register
@@ -4859,8 +5588,7 @@ class Sign(Primitive):
  1, &if\ x > 0\end{cases}
 
  Inputs:
- - **x** (Tensor) - The input tensor.
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+ - **x** (Tensor) - The input tensor of any dimension.
 
  Outputs:
  Tensor, has the same shape and dtype as the `x`.
@@ -4872,6 +5600,9 @@ class Sign(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
  >>> sign = ops.Sign()
  >>> output = sign(x)
@@ -4888,12 +5619,21 @@ class Round(Primitive):
  r"""
  Returns half to even of a tensor element-wise.
 
- Refer to :func:`mindspore.ops.round` for more detailsed.
+ Refer to :func:`mindspore.ops.round` for more details.
+
+ Inputs:
+ - **x** (Tensor) - The input tensor.
+
+ Outputs:
+ Tensor, has the same shape and type as the `x`.
 
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
  >>> round = ops.Round()
  >>> output = round(x)
@@ -4913,10 +5653,19 @@ class Tan(Primitive):
 
  Refer to :func:`mindspore.ops.tan` for more details.
 
+ Inputs:
+ - **x** (Tensor) - Input tensor of any dimension.
+
+ Outputs:
+ Tensor, has the same shape as `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> tan = ops.Tan()
  >>> x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
  >>> output = tan(x)
@@ -4936,10 +5685,22 @@ class Atan(Primitive):
 
  Refer to :func:`mindspore.ops.atan` for more details.
 
+ Inputs:
+ - **x** (Tensor): The input Tensor. Supported dtypes:
+
+ - Ascend: float16, float32.
+ - GPU/CPU: float16, float32, float64.
+
+ Outputs:
+ A Tensor, has the same type as the input.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
  >>> atan = ops.Atan()
  >>> output = atan(x)
@@ -4957,15 +5718,21 @@ class Atanh(Primitive):
  r"""
  Computes inverse hyperbolic tangent of the input element-wise.
 
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Refer to :func:`mindspore.ops.atanh` for more details.
 
+ Inputs:
+ - **x** (Tensor): The input Tensor.
+
+ Outputs:
+ A Tensor, has the same type as the input.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0, -0.5]), mindspore.float32)
  >>> atanh = ops.Atanh()
  >>> output = atanh(x)
@@ -4985,10 +5752,22 @@ class Atan2(_MathBinaryOp):
 
  Refer to :func:`mindspore.ops.atan2` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The input tensor with shape
+ :math:`(N,*)` where :math:`*` means any number of additional dimensions.
+ - **y** (Tensor) - The input tensor. It has the same shape as `x`, or
+ its shape can be broadcast with `x`.
+
+ Outputs:
+ Tensor, the shape is the same as the one after broadcasting, and the data type is same as `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0, 1]), mindspore.float32)
  >>> y = Tensor(np.array([1, 1]), mindspore.float32)
  >>> atan2 = ops.Atan2()
@@ -5008,7 +5787,6 @@ class SquareSumAll(Primitive):
  Returns the square sum of a tensor element-wise.
 
  .. math::
-
  \left\{\begin{matrix}out_{x} = {\textstyle \sum_{0}^{N}} (x_{i})^2
  \\out_{y} = {\textstyle \sum_{0}^{N}} (y_{i})^2
  \end{matrix}\right.
@@ -5058,10 +5836,21 @@ class BitwiseAnd(_BitwiseBinaryOp):
 
  Refer to :func:`mindspore.ops.bitwise_and` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The first input tensor with shape
+ :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+ - **y** (Tensor) - The second input tensor with same type as the `x`.
+
+ Outputs:
+ Tensor, has the same type as the `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  >>> bitwise_and = ops.BitwiseAnd()
@@ -5077,10 +5866,21 @@ class BitwiseOr(_BitwiseBinaryOp):
 
  Refer to :func:`mindspore.ops.bitwise_or` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The first input tensor with shape
+ :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+ - **y** (Tensor) - The second input tensor with same type as the `x`.
+
+ Outputs:
+ Tensor, has the same type as the `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  >>> bitwise_or = ops.BitwiseOr()
@@ -5096,10 +5896,21 @@ class BitwiseXor(_BitwiseBinaryOp):
 
  Refer to :func:`mindspore.ops.bitwise_xor` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The first input tensor with shape
+ :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+ - **y** (Tensor) - The second input tensor with same type as the `x`.
+
+ Outputs:
+ Tensor, has the same type as the `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  >>> bitwise_xor = ops.BitwiseXor()
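
The three bitwise hunks above share one semantics, including two's-complement behavior for negative operands. A quick NumPy cross-check on the same int16 sample inputs used in all three Examples blocks (illustrative only, not part of the package):

    import numpy as np

    x = np.array([0, 0, 1, -1, 1, 1, 1], dtype=np.int16)
    y = np.array([0, 1, 1, -1, -1, 2, 3], dtype=np.int16)
    # Negative operands follow two's-complement bit patterns, e.g. 1 ^ -1 == -2.
    print(np.bitwise_and(x, y))  # [ 0  0  1 -1  1  0  1]
    print(np.bitwise_or(x, y))   # [ 0  1  1 -1 -1  3  3]
    print(np.bitwise_xor(x, y))  # [ 0  1  0  0 -2  3  2]
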
@@ -5110,18 +5921,38 @@ class BitwiseXor(_BitwiseBinaryOp):
 
 
  class BesselI0(Primitive):
- """
- Computes BesselI0 of input element-wise.
+ r"""
+ Computes modified Bessel function of the first kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ I_{0}(x)=J_{0}(\mathrm{i} x)=\sum_{m=0}^{\infty}
+ \frac{x^{2 m}}{2^{2 m} (m !)^{2}}
+ \end{array}
+
+ where :math:`J_{0}` is Bessel function of the first kind, order 0.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
 
  Refer to :func:`mindspore.ops.bessel_i0` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The input tensor.
+ Data type must be float16, float32 or float64.
+
+ Outputs:
+ Tensor, has the same shape as `x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_i0 = ops.BesselI0()
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  >>> output = bessel_i0(x)
@@ -5135,18 +5966,38 @@ class BesselI0(Primitive):
 
 
  class BesselI1(Primitive):
- """
- Computes BesselI1 of input element-wise.
+ r"""
+ Computes modified Bessel function of the first kind, order 1 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ I_{1}(x)=\mathrm{i}^{-1} J_{1}(\mathrm{i} x)=\sum_{m=0}^
+ {\infty} \frac{x^{2m+1}}{2^{2m+1} m ! (m+1) !}
+ \end{array}
+
+ where :math:`J_{1}` is Bessel function of the first kind, order 1.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
 
  Refer to :func:`mindspore.ops.bessel_i1` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The input tensor.
+ Data type must be float16, float32 or float64.
+
+ Outputs:
+ Tensor, has the same shape as `x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_i1 = ops.BesselI1()
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  >>> output = bessel_i1(x)
@@ -5161,14 +6012,17 @@ class BesselI1(Primitive):
 
  class BesselI0e(Primitive):
  r"""
- Computes BesselI0e of input element-wise.
+ Computes exponential scaled modified Bessel function of the first kind, order 0 element-wise.
 
  The formula is defined as:
 
  .. math::
- BesselI0e(x) = \exp(|x|) * bessel\_i0(x)
+ \begin{array}{ll} \\
+ \text I_{0}e(x)=e^{(-|x|)} * I_{0}(x)=e^{(-|x|)} * \sum_{m=0}^
+ {\infty} \frac{x^{2 m}}{2^{2 m} (m !)^{2}}
+ \end{array}
 
- where bessel_i0 is Bessel function of the first kind with 0 order.
+ where :math:`I_{0}` is modified Bessel function of the first kind, order 0.
 
  Inputs:
  - **x** (Tensor) - The input tensor.
@@ -5185,6 +6039,9 @@ class BesselI0e(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_i0e = ops.BesselI0e()
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  >>> output = bessel_i0e(x)
@@ -5200,14 +6057,17 @@ class BesselI0e(Primitive):
 
  class BesselI1e(Primitive):
  r"""
- Computes BesselI1e of input element-wise.
+ Computes exponential scaled modified Bessel function of the first kind, order 1 element-wise.
 
  The formula is defined as:
 
  .. math::
- BesselI1e(x) = \exp(|x|) * bessel\_i1(x)
+ \begin{array}{ll} \\
+ \text I_{1}e(x)=e^{(-|x|)} * I_{1}(x)=e^{(-|x|)} * \sum_{m=0}^
+ {\infty} \frac{x^{2m+1}}{2^{2m+1} m ! (m+1) !}
+ \end{array}
 
- where bessel_i1 is Bessel function of the first kind with 1 order.
+ where :math:`I_{1}` is modified Bessel function of the first kind, order 1.
 
  Inputs:
  - **x** (Tensor) - The input tensor.
@@ -5224,6 +6084,9 @@ class BesselI1e(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_i1e = ops.BesselI1e()
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  >>> output = bessel_i1e(x)
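
The corrected formulas in the two hunks above (the old docstrings had the sign of the exponent wrong) can be sanity-checked with SciPy, assuming SciPy is available; it is not a dependency of this package, so this is purely an illustrative sketch:

    import numpy as np
    from scipy import special

    x = np.array([0.24, 0.83, 0.31, 0.09])
    # Exponential scaling damps the result for large |x|:
    # i0e(x) == exp(-|x|) * i0(x), and likewise for order 1.
    assert np.allclose(special.i0e(x), np.exp(-np.abs(x)) * special.i0(x))
    assert np.allclose(special.i1e(x), np.exp(-np.abs(x)) * special.i1(x))
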
@@ -5239,7 +6102,17 @@ class BesselI1e(Primitive):
 
  class BesselK0(Primitive):
  r"""
- Computes BesselK0 of input element-wise.
+ Computes modified Bessel function of the second kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ K_{0}(x)= \lim_{\nu \to 0} \left(\frac{\pi}{2}\right) \frac
+ {I_{-\nu}(x)-I_{\nu}(x)}{\sin (\nu \pi)} = \int_{0}^{\infty} e^{-x \cosh t} d t
+ \end{array}
+
+ where :math:`I_{0}` is modified Bessel function of the first kind, order 0.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
@@ -5258,6 +6131,9 @@ class BesselK0(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_k0 = ops.BesselK0()
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  >>> output = bessel_k0(x)
@@ -5272,7 +6148,17 @@ class BesselK0(Primitive):
 
  class BesselK1(Primitive):
  r"""
- Computes BesselK1 of input element-wise.
+ Computes modified Bessel function of the second kind, order 1 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ K_{1}(x)=\lim_{\nu \to 1} \left(\frac{\pi}{2}\right) \frac{I_{-\nu}(x)-
+ I_{\nu}(x)}{\sin (\nu \pi)} = \int_{0}^{\infty} e^{-x \cosh t} \cosh (t) d t
+ \end{array}
+
+ where :math:`I_{1}` is modified Bessel function of the first kind, order 1.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
@@ -5291,6 +6177,9 @@ class BesselK1(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_k1 = ops.BesselK1()
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  >>> output = bessel_k1(x)
@@ -5304,8 +6193,18 @@ class BesselK1(Primitive):
 
 
  class BesselK0e(Primitive):
- """
- Computes BesselK0e of input element-wise.
+ r"""
+ Computes exponential scaled modified Bessel function of the second kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ K_{0}e(x)= e^{(-|x|)} * K_{0}(x) = e^{(-|x|)} * \int_{0}^
+ {\infty} e^{-x \cosh t} d t
+ \end{array}
+
+ where :math:`K_{0}` is modified Bessel function of the second kind, order 0.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
@@ -5324,6 +6223,9 @@ class BesselK0e(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_k0e = ops.BesselK0e()
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  >>> output = bessel_k0e(x)
@@ -5337,8 +6239,18 @@ class BesselK0e(Primitive):
 
 
  class BesselK1e(Primitive):
- """
- Computes BesselK1e of input element-wise.
+ r"""
+ Computes exponential scaled modified Bessel function of the second kind, order 1 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ K_{1}e(x)= e^{(-|x|)} * K_{1}(x) = e^{(-|x|)} * \int_{0}
+ ^{\infty} e^{-x \cosh t} \cosh (t) d t
+ \end{array}
+
+ where :math:`K_{1}` is modified Bessel function of the second kind, order 1.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
@@ -5357,6 +6269,9 @@ class BesselK1e(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_k1e = ops.BesselK1e()
  >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  >>> output = bessel_k1e(x)
@@ -5370,8 +6285,16 @@ class BesselK1e(Primitive):
 
 
  class BesselJ0(Primitive):
- """
- Computes BesselJ0 of input element-wise.
+ r"""
+ Computes Bessel function of the first kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ J_{0}(x) = \frac{1}{\pi} \int_{0}^{\pi} \cos (x \sin \theta) d \theta
+ =\sum_{m=0}^{\infty} \frac{(-1)^{m} x^{2 m}}{2^{2 m} (m !)^2}
+ \end{array}
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
@@ -5390,6 +6313,9 @@ class BesselJ0(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_j0 = ops.BesselJ0()
  >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
  >>> output = bessel_j0(x)
@@ -5404,8 +6330,16 @@ class BesselJ0(Primitive):
 
 
  class BesselJ1(Primitive):
- """
- Computes BesselJ1 of input element-wise.
+ r"""
+ Computes Bessel function of the first kind, order 1 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ J_{1}(x) = \frac{1}{\pi} \int_{0}^{\pi} \cos (x \sin \theta- \theta) d \theta
+ =\sum_{m=0}^{\infty} \frac{(-1)^{m} x^{2 m+1}}{2^{2 m+1} m !(m+1) !}
+ \end{array}
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
@@ -5424,11 +6358,14 @@ class BesselJ1(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_j1 = ops.BesselJ1()
  >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
  >>> output = bessel_j1(x)
  >>> print(output)
- [0.24226846, 0.44005059, 0.57672481, -0.06604333]
+ [0.24226846 0.44005059 0.57672481 -0.06604333]
  """
 
  @prim_attr_register
@@ -5438,8 +6375,17 @@ class BesselJ1(Primitive):
 
 
  class BesselY0(Primitive):
- """
- Computes BesselY0 of input element-wise.
+ r"""
+ Computes Bessel function of the second kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ Y_{0}(x)=\lim_{n \to 0} \frac{J_{n}(x) \cos n \pi-J_{-n}(x)}{\sin n \pi}
+ \end{array}
+
+ where :math:`J_{0}` is Bessel function of the first kind, order 0.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
@@ -5458,6 +6404,9 @@ class BesselY0(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_y0 = ops.BesselY0()
  >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
  >>> output = bessel_y0(x)
@@ -5472,8 +6421,17 @@ class BesselY0(Primitive):
 
 
  class BesselY1(Primitive):
- """
- Computes BesselY1 of input element-wise.
+ r"""
+ Computes Bessel function of the second kind, order 1 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ Y_{1}(x)=\lim_{n \to 1} \frac{J_{n}(x) \cos n \pi-J_{-n}(x)}{\sin n \pi}
+ \end{array}
+
+ where :math:`J_{1}` is Bessel function of the first kind, order 1.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
@@ -5492,6 +6450,9 @@ class BesselY1(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> bessel_y1 = ops.BesselY1()
  >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
  >>> output = bessel_y1(x)
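
The whole Bessel family documented above maps one-to-one onto SciPy's special functions, which makes the new doctest outputs easy to verify. An illustrative sketch, again assuming SciPy is available (it is not a dependency of this package):

    import numpy as np
    from scipy import special

    x = np.array([0.5, 1.0, 2.0, 4.0])
    print(special.j0(x))  # first kind, order 0
    # The next line reproduces the corrected BesselJ1 doctest output:
    print(special.j1(x))  # [ 0.24226846  0.44005059  0.57672481 -0.06604333]
    print(special.y0(x))  # second kind, order 0
    print(special.y1(x))  # second kind, order 1
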
@@ -5509,12 +6470,21 @@ class Inv(Primitive):
  r"""
  Computes Reciprocal of input tensor element-wise.
 
- Refer to :func:`mindspore.ops.inv` for more details.
+ Refer to :func:`mindspore.ops.inv` for more details.
+
+ Inputs:
+ - **x** (Tensor) - Input tensor, it must be one of the following types: float16, float32 or int32.
+
+ Outputs:
+ Tensor, has the same shape and data type as `x`.
 
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> inv = ops.Inv()
  >>> x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
  >>> output = inv(x)
@@ -5537,6 +6507,9 @@ class Invert(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> invert = ops.Invert()
  >>> x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
  >>> output = invert(x)
@@ -5550,13 +6523,15 @@ class Invert(Primitive):
  self.init_prim_io_names(inputs=['x'], outputs=['y'])
 
 
- class Eps(PrimitiveWithInfer):
+ class Eps(Primitive):
  """
  Create a Tensor with the same data type and shape as input, and the element value is the minimum value that the
- corresponding data type can be expressed.
+ corresponding data type can express.
+
+ Refer to :func:`mindspore.ops.eps` for more details.
 
  Inputs:
- - **x** (Tensor) - Tensor of any dimension used to obtain the minimum value that its data type can be expressed.
+ - **x** (Tensor) - Tensor of any dimension used to obtain the minimum value that its data type can express.
  The data type must be float16, float32 or float64.
 
  Outputs:
@@ -5564,42 +6539,24 @@ class Eps(PrimitiveWithInfer):
 
  Raises:
  TypeError: If `x` is not a Tensor.
- TypeError: If data type of `x` is neither float16 nor float32.
+ TypeError: If data type of `x` is neither float16, float32, nor float64.
 
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor([4, 1, 2, 3], mindspore.float32)
  >>> output = ops.Eps()(x)
  >>> print(output)
- [1.5258789e-05 1.5258789e-05 1.5258789e-05 1.5258789e-05]
+ [1.1920929e-07 1.1920929e-07 1.1920929e-07 1.1920929e-07]
  """
 
  @prim_attr_register
  def __init__(self):
  """Initialize Eps"""
- self.init_prim_io_names(inputs=['input_x'], outputs=['y'])
-
- def __infer__(self, input_x):
- valid_dtypes = [mstype.float16, mstype.float32, mstype.float64]
- validator.check_tensor_dtype_valid('input_x', input_x['dtype'], valid_dtypes, self.name)
-
- x_nptype = mstype.dtype_to_nptype(input_x['dtype'].element_type())
- if x_nptype == np.float16:
- min_val = 2 ** (-14)
- elif x_nptype == np.float32:
- min_val = 2 ** (-16)
- else:
- min_val = 2 ** (-52)
-
- res = np.full(input_x['shape'], min_val, x_nptype)
- out = {
- 'value': Tensor(res),
- 'shape': input_x['shape'],
- 'dtype': input_x['dtype'],
- }
- return out
+ self.init_prim_io_names(inputs=['x'], outputs=['y'])
 
 
  class LinSpace(Primitive):
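
A note on the Eps hunk above: the deleted Python-side __infer__ folded hard-coded constants (2**-14, 2**-16, 2**-52), and the doctest output changes from 1.5258789e-05 (which is 2**-16) to 1.1920929e-07, which is the float32 machine epsilon (2**-23). A NumPy sketch of the comparison, illustrative only:

    import numpy as np

    # Old hard-coded float32 constant removed in this diff:
    print(2 ** -16)                   # 1.52587890625e-05
    # Machine epsilon per dtype, matching the new float32 doctest output:
    print(np.finfo(np.float32).eps)   # 1.1920929e-07  (2 ** -23)
    print(np.finfo(np.float16).eps)   # 0.000977       (2 ** -10)
    print(np.finfo(np.float64).eps)   # 2.220446049250313e-16 (2 ** -52)
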
@@ -5609,10 +6566,21 @@ class LinSpace(Primitive):
 
  Refer to :func:`mindspore.ops.linspace` for more details.
 
+ Inputs:
+ - **start** (Tensor) - Start value of interval, 0-D Tensor with dtype float32 or float64.
+ - **stop** (Tensor) - Last value of interval, 0-D Tensor with dtype float32 or float64.
+ - **num** (int) - Number of ticks in the interval, inclusive of `start` and `stop`.
+ Supported dtypes: int32, int64.
+
+ Outputs:
+ Tensor, has the same dtype as `start`, with shape :math:`(num,)`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> linspace = ops.LinSpace()
  >>> start = Tensor(1, mindspore.float32)
  >>> stop = Tensor(10, mindspore.float32)
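  
The interval semantics documented above match NumPy's linspace; a one-line illustrative cross-check for the doctest values (10 ticks from 1 to 10, endpoints included):

    import numpy as np

    print(np.linspace(1.0, 10.0, num=10, dtype=np.float32))
    # [ 1.  2.  3.  4.  5.  6.  7.  8.  9. 10.]
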
@@ -5634,10 +6602,14 @@ class MatrixInverse(Primitive):
  result may be returned.
 
  Note:
- The parameter 'adjoint' is only supporting False right now, because complex number is not supported at present.
+ The parameter 'adjoint' only supports ``False`` right now, because complex numbers are not supported at
+ present.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
 
  Args:
- adjoint (bool) : An optional bool. Default: False.
+ adjoint (bool) : An optional bool. Default: ``False`` .
 
  Inputs:
  - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two
@@ -5656,6 +6628,9 @@ class MatrixInverse(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[[-0.710504 , -1.1207525],
  ... [-1.7651395 , -1.7576632]],
  ... [[ 0.52412605, 1.9070215],
@@ -5682,28 +6657,32 @@ class MatrixPower(Primitive):
  When n equals 0, it returns a group of identity matrices. If n is negative,
  it computes the inverse of each matrix (if possible) raised to the power of abs(n).
 
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
  Args:
  n (int) : The exponent, a required int.
 
  Inputs:
- - **x** (Tensor) - A 3-D Tensor. Supported data types are float16 and float32.
- The shape is :math:`(b, m, m)`, represents b m-D square matrices.
+ - **x** (Tensor) - A 3-D Tensor. The shape is :math:`(b, m, m)`, represents b m-D square matrices.
 
  Outputs:
  - **y** (Tensor) - A 3-D Tensor. Data type and shape are the same as `x`'s.
 
  Raises:
  TypeError: If the data type of `n` is not int.
- TypeError: If the data type of `x` is neither float32 nor float16.
  TypeError: If x is not a Tensor.
  ValueError: If `x` is not a 3-D tensor.
  ValueError: If shape[1] and shape[2] of `x` are not the same.
  ValueError: If n is negative but got input x has singular matrices.
+ ValueError: If `n` < 0 and the input is of int type.
 
  Supported Platforms:
-
+ ``Ascend`` ``CPU``
 
  Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor([[[0, 1], [-1, 0]], [[1, 0], [0, -1]]], dtype=ms.float32)
  >>> matrix_power = ops.MatrixPower(n=2)
  >>> y = matrix_power(x)
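
For review, the exponentiation rule described above is the standard one; an illustrative NumPy sketch on the doctest's batch of 2x2 matrices (a negative exponent would invert each matrix first):

    import numpy as np

    x = np.array([[[0, 1], [-1, 0]],
                  [[1, 0], [0, -1]]], dtype=np.float32)
    # Square each batch matrix independently.
    y = np.stack([np.linalg.matrix_power(m, 2) for m in x])
    print(y)  # [[[-1.  0.] [ 0. -1.]]  [[ 1.  0.] [ 0.  1.]]]
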
@@ -5726,8 +6705,15 @@ class MatrixDeterminant(Primitive):
 
  Refer to :func:`mindspore.ops.det` for more details.
 
- Supported Platforms:
+ Inputs:
+ - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two
+ dimensions must be the same size.
 
+ Outputs:
+ Tensor, the shape is `x_shape[:-2]`, the dtype is same as `x`.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
  >>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
@@ -5750,6 +6736,15 @@ class LogMatrixDeterminant(Primitive):
 
  Refer to :func:`mindspore.ops.slogdet` for more details.
 
+ Inputs:
+ - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two
+ dimensions must be the same size. Supported dtypes: float32, float64, complex64 and complex128.
+
+ Outputs:
+ - **sign** (Tensor) - The signs of the log determinants. The shape is `x_shape[:-2]`, the dtype is same as `x`.
+ - **y** (Tensor) - The absolute values of the log determinants. The shape is `x_shape[:-2]`, the dtype is same
+ as `x`.
+
  Supported Platforms:
 
 
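The sign/log-magnitude split documented above is the same contract as NumPy's slogdet; an illustrative sketch using the batch from the MatrixDeterminant doctest:

    import numpy as np

    x = np.array([[[-4.5, -1.5], [7.0, 6.0]],
                  [[2.5, 0.5], [3.0, 9.0]]])
    sign, logabsdet = np.linalg.slogdet(x)
    # det = sign * exp(logabsdet), computed stably for near-singular matrices.
    print(sign)                      # [-1.  1.]
    print(sign * np.exp(logabsdet))  # [-16.5  21. ]
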
@@ -5813,9 +6808,11 @@ class IndexAdd(Primitive):
  Args:
  axis (int): The dimension along which to index.
  use_lock (bool): Whether to enable a lock to protect the updating process of variable tensors.
- If true, when updating the value of `x`, this process will be protected by a lock by using atomic operation.
- If false, the result may be unpredictable. Default: True.
- check_index_bound (bool): If true, check index boundary. If false, don't check index boundary. Default: True.
+ If ``True`` , when updating the value of `x`, this process will be protected by a lock by using atomic
+ operation.
+ If ``False`` , the result may be unpredictable. Default: ``True`` .
+ check_index_bound (bool): If ``True`` , check index boundary. If ``False`` , don't check index boundary.
+ Default: ``True`` .
 
  Inputs:
  - **x** (Parameter) - The input Parameter to add to.
@@ -5841,6 +6838,9 @@ class IndexAdd(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, nn, ops, Parameter
  >>> class Net(nn.Cell):
  ... def __init__(self):
  ... super(Net, self).__init__()
@@ -5879,24 +6879,24 @@ class Erfinv(Primitive):
  r"""
  Computes the inverse error function of input. The inverse error function is defined in the range (-1, 1).
 
- The formula is defined as:
-
- .. math::
- erfinv(erf(x)) = x
+ Refer to :func:`mindspore.ops.erfinv` for more details.
 
  Inputs:
- - **input_x** (Tensor) - The input tensor to compute to, with data type float32, float16 or float64.
+ - **input_x** (Tensor) - The input tensor. Supported dtypes:
+
+ - Ascend: float16, float32.
+ - GPU/CPU: float16, float32, float64.
 
  Outputs:
  Tensor, has the same shape and dtype as `input_x`.
 
- Raises:
- TypeError: If dtype of `input_x` is not float16, float32 or float64.
-
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
  >>> erfinv = ops.Erfinv()
  >>> output = erfinv(x)
@@ -5933,6 +6933,9 @@ class Conj(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
  >>> conj = ops.Conj()
  >>> output = conj(x)
@@ -5975,6 +6978,9 @@ class ComplexAbs(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.asarray(np.complex(3+4j)), mindspore.complex64)
  >>> complex_abs = ops.ComplexAbs()
  >>> output = complex_abs(x)
@@ -5994,7 +7000,7 @@ class Real(Primitive):
  If input is real, it is returned unchanged.
 
  Inputs:
- - **input** (Tensor) - The input tensor to compute to.
+ - **input** (Tensor) - The input tensor to compute with.
 
  Outputs:
  Tensor, the shape is the same as the input.
@@ -6006,6 +7012,9 @@ class Real(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
  >>> real = ops.Real()
  >>> output = real(x)
@@ -6041,6 +7050,9 @@ class Complex(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> real = Tensor(np.array([1]), mindspore.float32)
  >>> imag = Tensor(np.array([2]), mindspore.float32)
  >>> complex = ops.Complex()
@@ -6061,7 +7073,7 @@ class Imag(Primitive):
  If input is real, it is returned zeros.
 
  Inputs:
- - **input** (Tensor) - The input tensor to compute to.
+ - **input** (Tensor) - The input tensor.
 
  Outputs:
  Tensor, the shape is the same as the input.
@@ -6073,6 +7085,9 @@ class Imag(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
  >>> imag = ops.Imag()
  >>> output = imag(x)
@@ -6095,10 +7110,18 @@ class Angle(Primitive):
 
  Refer to :func:`mindspore.ops.angle` for more details.
 
+ Inputs:
+ - **input** (Tensor) - The input tensor. Supported types: complex64, complex128.
+
+ Outputs:
+ Tensor, has the float32 or float64 type and the same shape as input.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> input = Tensor([-1.5 + 7.8j, 3 + 5.75j], mindspore.complex64)
  >>> angle = ops.Angle()
  >>> output = angle(input)
@@ -6118,10 +7141,19 @@ class Trunc(Primitive):
 
  Refer to :func:`mindspore.ops.trunc` for more details.
 
+ Inputs:
+ - **input_x** (Tensor) - Input tensor of any dimension.
+
+ Outputs:
+ Tensor, the same shape and data type as `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
  >>> output = ops.Trunc()(x)
  >>> print(output)
@@ -6208,16 +7240,31 @@ class Igamma(Primitive):
 
  Refer to :func:`mindspore.ops.igamma` for more details.
 
+ Inputs:
+ - **a** (Tensor) - The input tensor.
+ - **x** (Tensor) - The input tensor. It should have the same dtype as `a`.
+
+ Outputs:
+ Tensor, has the same dtype as `a` and `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
  >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
  >>> igamma = ops.Igamma()
  >>> output = igamma(a, x)
  >>> print (output)
  [0.593994 0.35276785 0.21486944 0.13337152]
+ >>> a = Tensor(2.1, mindspore.float32)
+ >>> x = Tensor(2.1, mindspore.float32)
+ >>> igamma = ops.Igamma()
+ >>> output = igamma(a, x)
+ >>> print (output)
+ 0.5917439
@@ -6232,16 +7279,31 @@ class Igammac(Primitive):
 
  Refer to :func:`mindspore.ops.igammac` for more details.
 
+ Inputs:
+ - **a** (Tensor) - The input tensor.
+ - **x** (Tensor) - The input tensor. It should have the same dtype as `a`.
+
+ Outputs:
+ Tensor, has the same dtype as `a` and `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
  >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
  >>> igammac = ops.Igammac()
  >>> output = igammac(a, x)
  >>> print (output)
  [0.40600586 0.6472318 0.7851304 0.8666283 ]
+ >>> a = Tensor(2.1, mindspore.float32)
+ >>> x = Tensor(2.1, mindspore.float32)
+ >>> igammac = ops.Igammac()
+ >>> output = igammac(a, x)
+ >>> print (output)
+ 0.40825662
  """
 
  @prim_attr_register
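
Igamma and Igammac above are the regularized lower and upper incomplete gamma functions, which always sum to 1; the doctest values can be reproduced with SciPy (illustrative sketch, assuming SciPy is available):

    import numpy as np
    from scipy import special

    a = np.array([2.0, 4.0, 6.0, 8.0], dtype=np.float32)
    x = np.array([2.0, 3.0, 4.0, 5.0], dtype=np.float32)
    print(special.gammainc(a, x))   # lower: ~[0.594 0.353 0.215 0.133]
    print(special.gammaincc(a, x))  # upper: ~[0.406 0.647 0.785 0.867]
    assert np.allclose(special.gammainc(a, x) + special.gammaincc(a, x), 1.0)
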
@@ -6257,6 +7319,18 @@ class IsClose(Primitive):
 
  Refer to :func:`mindspore.ops.isclose` for more details.
 
+ Args:
+ rtol(float, optional): Relative tolerance. Default: ``1e-05`` .
+ atol(float, optional): Absolute tolerance. Default: ``1e-08`` .
+ equal_nan(bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``True`` .
+
+ Inputs:
+ - **input** (Tensor) - First tensor to compare; its data type must be one of float32, float16 or int32.
+ - **other** (Tensor) - Second tensor to compare; its data type must be one of float32, float16 or int32.
+
+ Outputs:
+ Tensor, with the same shape as `input` and `other` after broadcasting, its dtype is bool.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
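The tolerance rule the Args above parameterize is the usual elementwise test |input - other| <= atol + rtol * |other|, the same contract as NumPy's isclose; a short illustrative sketch:

    import numpy as np

    # True: 1e-6 difference is inside atol + rtol * |b| ~= 1.01e-5.
    print(np.isclose(1.0, 1.000001, rtol=1e-05, atol=1e-08, equal_nan=True))
    # False: 0.1 difference is far outside the tolerance.
    print(np.isclose(1.0, 1.1, rtol=1e-05, atol=1e-08, equal_nan=True))
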
@@ -6291,6 +7365,13 @@ class MatrixExp(Primitive):
 
  Refer to :func:`mindspore.ops.matrix_exp` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The shape of tensor is :math:`(*, n, n)` where :math:`*` is zero or more batch dimensions.
+ Supported dtypes: float64, float32, float16, complex64, complex128.
+
+ Outputs:
+ Tensor, has the same shape and dtype as the `x`.
+
  Supported Platforms:
 
 
@@ -6314,7 +7395,7 @@ class MatrixSolve(Primitive):
 
  Args:
  adjoint (bool, optional): Indicates whether the adjoint of the
- matrix is used during the computation. Default: False, use its transpose instead.
+ matrix is used during the computation. Default: ``False`` , use its transpose instead.
 
  Inputs:
  - **matrix** (Tensor) - A tensor of shape :math:`(..., M, M)`,
@@ -6342,13 +7423,17 @@ class MatrixSolve(Primitive):
  ``Ascend`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> matrix = Tensor(np.array([[1.0 , 4.0],
  ... [2.0 , 7.0]]), mindspore.float32)
  >>> rhs = Tensor(np.array([[1.0] , [3.0]]), mindspore.float32)
  >>> matrix_solve = ops.MatrixSolve(adjoint = False)
  >>> output = matrix_solve(matrix, rhs)
  >>> print(output)
- [[5.0], [-1.0]]
+ [[5.0]
+ [-1.0]]
  """
 
  @prim_attr_register
@@ -6366,7 +7451,7 @@ class MatrixSolveLs(Primitive):
  path is typically 6-7 times slower than the fast path. If `fast` is `False` then `l2_regularizer` is ignored.
 
  Args:
- fast (bool): An optional bool. Defaults to True.
+ fast (bool): An optional bool. Default: ``True`` .
 
  Inputs:
  - **matrix** (Tensor) - A Tensor. Must be one of the following data types: float64, float32, complex64,
@@ -6414,7 +7499,7 @@ class Lu(Primitive):
 
  Args:
  output_idx_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.
- Default: `mindspore.dtype.int32`.
+ Default: ``mindspore.dtype.int32`` .
 
  Inputs:
  - **input** (Tensor) - A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
@@ -6525,10 +7610,32 @@ class LuUnpack(Primitive):
 
  Refer to :func:`mindspore.ops.lu_unpack` for more details.
 
+ Args:
+ unpack_data (bool, optional): A flag indicating if the LU_data should be unpacked.
+ If ``False`` , then the returned L and U are None. Default: ``True`` .
+ unpack_pivots (bool, optional): A flag indicating if the LU_pivots should be unpacked
+ into a permutation matrix P. If ``False`` , then the returned P is None. Default: ``True`` .
+
+ Inputs:
+ - **LU_data** (Tensor) - The packed LU factorization data. The shape of a tensor is :math:`(*, M, N)`,
+ where :math:`*` is batch dimensions, with data type int8, uint8, int16, int32, int64, float16,
+ float32, float64. The dims of LU_data must be equal to or greater than 2.
+ - **LU_pivots** (Tensor) - The packed LU factorization pivots. The shape of a tensor is :math:`(*, min(M, N))`,
+ where :math:`*` is batch dimensions, with data type int8, uint8, int16, int32, int64.
+
+ Outputs:
+ - **pivots** (Tensor) - The permutation matrix of LU factorization. The shape is :math:`(*, M, M)`,
+ the dtype is same as `LU_data`.
+ - **L** (Tensor) - The L matrix of LU factorization. The dtype is the same as `LU_data`.
+ - **U** (Tensor) - The U matrix of LU factorization. The dtype is the same as `LU_data`.
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> from mindspore import dtype as mstype
  >>> LU_data = Tensor(np.array([[[-0.3806, -0.4872, 0.5536],
  ... [-0.1287, 0.6508, -0.2396],
  ... [ 0.2583, 0.5239, 0.6902]],
@@ -6578,6 +7685,12 @@ class Lgamma(Primitive):
 
  Refer to :func:`mindspore.ops.lgamma` for more details.
 
+ Inputs:
+ - **x** (Tensor) - The input tensor. The dtype can be float16, float32 or float64.
+
+ Outputs:
+ Tensor, has the same dtype as `x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
@@ -6587,6 +7700,10 @@ class Lgamma(Primitive):
  >>> output = lgamma(x)
  >>> print(output)
  [0.5723649 0.8854049 9.549267 ]
+ >>> x = Tensor(2.1, mindspore.float32)
+ >>> output = lgamma(x)
+ >>> print(output)
+ 0.045437694
  """
 
  @prim_attr_register
@@ -6619,6 +7736,8 @@ class Digamma(Primitive):
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.5, 0.5, 9]).astype(np.float16))
  >>> digamma = ops.Digamma()
  >>> output = digamma(x)
@@ -6641,10 +7760,22 @@ class Polygamma(Primitive):
 
  Refer to :func:`mindspore.ops.polygamma` for more details.
 
+ Inputs:
+ - **a** (Tensor) - The order of the polygamma function, it has shape :math:`()`,
+ supported types: int32, int64.
+ - **x** (Tensor) - The tensor to compute the :math:`a`-th derivative of the polygamma function with,
+ supported types: float16, float32, float64.
+
+ Outputs:
+ Tensor, has the same dtype as `x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([1.0, -0.5]), mindspore.float32)
  >>> a = Tensor(np.array(1), mindspore.int64)
  >>> polygamma = ops.Polygamma()
@@ -6677,6 +7808,15 @@ class CholeskyInverse(Primitive):
 
  Refer to :func:`mindspore.ops.cholesky_inverse` for more details.
 
+ Args:
+ upper(bool, optional): Whether to return a lower or upper triangular matrix. Default: ``False`` .
+
+ Inputs:
+ - **x** (Tensor) - The input tensor whose rank is 2. Supported dtypes: float32, float64.
+
+ Outputs:
+ Tensor, has the same shape and dtype as `x`.
+
  Supported Platforms:
  ``Ascend`` ``CPU``
 
@@ -6707,7 +7847,7 @@ class Cross(Primitive):
  Refer to :func:`mindspore.ops.cross` for more details.
 
  Args:
- dim (int): Spefcified dim along which to cumpute cross product with. Default: -65530.
+ dim (int): Specified dim along which to compute the cross product. Default: ``-65530`` .
 
  Inputs:
  - **x1** (Tensor) - Input Tensor.
@@ -6724,7 +7864,7 @@ class Cross(Primitive):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor
- >>> from mindspore.common import dtype as mstype
+ >>> from mindspore import dtype as mstype
  >>> import mindspore.ops as ops
  >>> cross = ops.Cross(dim = 0)
  >>> x1 = Tensor([1, 2, 3], mstype.int8)
@@ -6805,7 +7945,7 @@ class RaggedRange(Primitive):
 
  class Trace(Primitive):
  """
- Returns a new tensor that is the sum of the input trace.
+ Computes the sum of the diagonal elements in a 2-D matrix.
 
  Note:
  Input must be matrix, and complex number is not supported at present.
@@ -6827,6 +7967,9 @@ class Trace(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
  >>> trace = ops.Trace()
  >>> output = trace(x)
@@ -6855,31 +7998,35 @@ class Median(Primitive):
  If `global_median` is True, computes the median of all elements of tensor.
 
  .. warning::
- When attr `global_median` is True, the value of the second output tensor `indices` is meaningless.
+ - `indices` does not necessarily contain the first occurrence of each median value found in the `input`,
+ unless it is unique. The specific implementation of this API is device-specific.
+ The results may be different on CPU and GPU.
+ - When attr `global_median` is ``True`` , the value of the second output tensor `indices` is meaningless.
 
  Args:
  global_median (bool, optional): Whether the output tensor is the median of all
- input tensor elements or not. Default: Fasle.
- axis (int, optional): The specified dimension to compute median. Default: 0.
- keep_dims (bool, optional): Whether the output tensor need to retain `axis` dimension or not. Default: False.
+ input tensor elements or not. Default: ``False`` .
+ axis (int, optional): The specified dimension to compute median. Default: ``0`` .
+ keep_dims (bool, optional): Whether the output tensor need to retain `axis` dimension or not.
+ Default: ``False`` .
+ ignore_nan (bool, optional): Whether to ignore the NaN values in input Tensor. Default: ``False`` .
 
  Inputs:
- - **x** (Tensor) - A Tensor to calculate median with. Supported dtype:int16, int32, int64, float32 or float64.
+ - **x** (Tensor) - A Tensor to calculate median with.
 
  Outputs:
  - **y** (Tensor) - Median, has the same dtype as the `x`.
 
- - If `global_median` is True, the `y` has only one element.
- - If `keep_dims` is True, the `y` has the same shape as the `x` except the size
+ - If `global_median` is ``True`` , the `y` has only one element.
+ - If `keep_dims` is ``True`` , the `y` has the same shape as the `x` except the size
  of `y` in dimension `axis` is 1.
  - Otherwise, the `y` lacks `axis` dimension than input.
 
  - **indices** (Tensor) - Indices, Has the same shape as the `y`, with dtype int64.
 
  Raises:
- TypeError: If dtype of `x` is not one of the following: int16, int32, int64, float32, float64.
  TypeError: If input `x` is not a Tensor.
- TypeError: If `global_median` or `keep_dims` is assigned a nonboolean value.
+ TypeError: If `global_median` , `keep_dims` or `ignore_nan` is assigned a nonboolean value.
  TypeError: If `axis` is not int.
  ValueError: If `axis` is not in range of [-x.dim, x.dim-1].
 
@@ -6906,13 +8053,15 @@ class Median(Primitive):
  """
 
  @prim_attr_register
- def __init__(self, global_median=False, axis=0, keep_dims=False):
+ def __init__(self, global_median=False, axis=0, keep_dims=False, ignore_nan=False):
+ self.add_prim_attr("cust_aicpu", self.name)
  validator.check_value_type("global_median", global_median, [bool], self.name)
  self.global_median = global_median
  if global_median is False:
  validator.check_value_type("axis", axis, [int], self.name)
  validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
  self.init_prim_io_names(inputs=['x'], outputs=['y', 'indices'])
+ validator.check_value_type("ignore_nan", ignore_nan, [bool], self.name)
 
 
  class SparseSegmentMean(Primitive):
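
A note on the Median warning added above: the operator returns one element plus its index per slice, which is why ties make `indices` device-dependent. NumPy's median, by contrast, averages the two middle values of an even-length slice and returns no index at all, so it is only a partial cross-check (illustrative sketch with made-up values):

    import numpy as np

    x = np.array([[0.57, 0.11, 0.21, 0.38],
                  [0.50, 0.35, 0.91, 0.58]], dtype=np.float32)
    # np.median averages the two middle elements of each even-length row;
    # the operator instead selects a single element and reports its index.
    print(np.median(x, axis=1))
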
@@ -6948,12 +8097,12 @@ class Zeta(Primitive):
  r"""
  Compute the Hurwitz zeta function ζ(x,q) of input Tensor.
 
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  .. math::
  \zeta \left ( x,q \right )= \textstyle \sum_{n=0} ^ {\infty} \left ( q+n\right )^{-x}
 
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
  Inputs:
  - **x** (Tensor) - A Tensor, types: float32, float64.
  - **q** (Tensor) - A Tensor, must have the same shape and type as `x`.
@@ -6971,6 +8120,9 @@ class Zeta(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([10.]), mindspore.float32)
  >>> q = Tensor(np.array([1.]), mindspore.float32)
  >>> zeta = ops.Zeta()
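
The Hurwitz zeta formula above reduces to the Riemann zeta function when q = 1, which matches the doctest inputs; an illustrative SciPy cross-check (assuming SciPy is available):

    from scipy import special

    # Hurwitz zeta with q = 1 is the Riemann zeta function.
    print(special.zeta(10.0, 1.0))  # 1.0009945751278182
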
@@ -6993,10 +8145,27 @@ class Bernoulli(Primitive):
 
  Refer to :func:`mindspore.ops.bernoulli` for more details.
 
+ Args:
+ seed (int, optional): The seed value for random generating. The value of `seed` must be -1 or a
+ positive integer, and -1 means using the current timestamp. Default: ``-1`` .
+ offset (int, optional): Used to change the starting position during the generation of
+ random number sequence. Default: ``0`` .
+
+ Inputs:
+ - **x** (Tensor) - Input Tensor.
+ - **p** (Union[Tensor, float], optional) - Success probability, representing the probability of
+ setting 1 for the corresponding position of the current Tensor. It has the same shape as `x`,
+ the value of `p` must be in the range `[0, 1]`. Default: ``0.5`` .
+
+ Outputs:
+ - **y** (Tensor) - Tensor with the same shape and type as `x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor([0.1, 0.2, 0.3], mindspore.float32)
  >>> bernoulli = ops.Bernoulli()
  >>> output = bernoulli(input_x, Tensor([1.0]))
@@ -7027,7 +8196,7 @@ class TridiagonalSolve(Primitive):
  The penultimate dimension of diagonals must be 3.
 
  Args:
- partial_pivoting (bool): decide if use the method of partial_pivoting. Default: True.
+ partial_pivoting (bool): Decides whether to use the method of partial pivoting. Default: ``True`` .
 
  Inputs:
  - **diagonals** [Tensor] - The input tensor A of the equation AX = B, with data type of float32,
@@ -7077,10 +8246,24 @@ class Renorm(Primitive):
 
  Refer to :func:`mindspore.ops.renorm` for more details.
 
+ Args:
+ p (int): Power of norm calculation.
+ dim (int): The dimension along which the slice-tensors are taken.
+ maxnorm (float32): Max norm.
+
+ Inputs:
+ - **x** (Tensor) - A Tensor, types: float32 or float16.
+
+ Outputs:
+ Tensor, has the same dtype and shape as input.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), mindspore.float32)
  >>> y = ops.Renorm(p=1, dim=0, maxnorm=5.)(x)
  >>> print(y)
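
The renormalization rule documented above rescales every slice along `dim` whose p-norm exceeds `maxnorm` so that its norm becomes exactly `maxnorm`. An illustrative NumPy sketch of that rule on the doctest input (the helper name renorm is hypothetical):

    import numpy as np

    def renorm(x, p, dim, maxnorm):
        # Slices within the limit pass through; the rest are scaled to maxnorm.
        x = np.asarray(x, dtype=np.float32)
        moved = np.moveaxis(x, dim, 0)
        norms = np.linalg.norm(moved.reshape(moved.shape[0], -1), ord=p, axis=1)
        scale = np.where(norms > maxnorm, maxnorm / norms, 1.0)
        scale = scale.reshape((-1,) + (1,) * (moved.ndim - 1)).astype(np.float32)
        return np.moveaxis(moved * scale, 0, dim)

    x = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], dtype=np.float32)
    # Rows have L1 norms 3, 6, 9: the last two are scaled by 5/6 and 5/9.
    print(renorm(x, p=1, dim=0, maxnorm=5.0))
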
@@ -7111,10 +8294,24 @@ class Cholesky(Primitive):
 
  Refer to :func:`mindspore.ops.cholesky` for more details.
 
+ Args:
+ upper (bool, optional): Flag that indicates whether to return an upper or a lower triangular matrix.
+ Default: ``False`` .
+
+ Inputs:
+ - **input_x** (Tensor) - Tensor of shape :math:`(*, N, N)`, where :math:`*` is zero or more batch dimensions
+ consisting of symmetric positive-definite matrices, with float32 or float64 data type.
+
+ Outputs:
+ Tensor, has the same shape and data type as `input_x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor(np.array([[1.0, 1.0], [1.0, 2.0]]), mindspore.float32)
  >>> cholesky = ops.Cholesky(upper=False)
  >>> output = cholesky(input_x)
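Editor's note: the example factor can be verified by hand: with `upper=False` the op returns a lower triangular `L` with `L @ L.T` equal to `input_x`; here `L = [[1, 0], [1, 1]]`, since [[1, 0], [1, 1]] @ [[1, 1], [0, 1]] = [[1, 1], [1, 2]].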
@@ -7140,6 +8337,23 @@ class STFT(Primitive):
 
  Refer to :func:`mindspore.ops.stft` for more details.
 
+ Args:
+ n_fft (int): The size of Fourier transform.
+ hop_length (int): The distance between neighboring sliding window frames.
+ win_length (int): The size of window frame and STFT filter.
+ normalized (bool): Controls whether to return the normalized STFT results.
+ onesided (bool): Controls whether to return half of results to
+ avoid redundancy for real inputs.
+ return_complex (bool): If ``True`` , return a complex tensor. If ``False`` , return
+ a real tensor with an extra last dimension for the real and imaginary components.
+
+ Inputs:
+ - **x** (Tensor) - Time sequence of STFT, must be either a 1-D time tensor or a 2-D tensor.
+ - **window** (Tensor) - The optional window function.
+
+ Outputs:
+ Tensor, containing the result after STFT.
+
  Supported Platforms:
  ``Ascend`` ``CPU``
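Editor's note: a minimal call sketch assembled from the Args above (shapes and values are illustrative):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.rand(2, 7192), mindspore.float32)
>>> window = Tensor(np.ones(64), mindspore.float32)
>>> stft = ops.STFT(n_fft=64, hop_length=16, win_length=64, normalized=False, onesided=True, return_complex=True)
>>> out = stft(x, window)   # complex tensor; onesided=True keeps n_fft // 2 + 1 = 33 frequency bins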
 
@@ -7172,7 +8386,7 @@ class CholeskySolve(Primitive):
  Computes the solution of a set of linear equations with a positive definite matrix,
  according to its Cholesky decomposition factor `u` , and outputs the result as `c`.
 
- If `upper` is set to `True`, `u` is upper triangular and `c` is returned such that:
+ If `upper` is set to ``True`` , `u` is upper triangular and `c` is returned such that:
 
  .. math::
  c = (u^{T}u)^{-1}b
@@ -7184,7 +8398,7 @@ class CholeskySolve(Primitive):
 
  Args:
  upper (bool, optional): A flag that indicates whether to treat the Cholesky factor
- as an upper or a lower triangular matrix. Default: False.
+ as an upper or a lower triangular matrix. Default: ``False`` .
 
  Inputs:
  - **x1** (Tensor) - Tensor of shape :math:`(*, N, M)`, indicating 2D or 3D matrices,
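Editor's note: a sketch reusing the factor from the Cholesky example above; the second input entry is cut off by the hunk, so the `x2` name below simply continues the x1/x2 numbering:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x1 = Tensor(np.array([[1.0], [3.0]]), mindspore.float32)             # right-hand side b
>>> x2 = Tensor(np.array([[1.0, 0.0], [1.0, 1.0]]), mindspore.float32)   # lower factor u
>>> net = ops.CholeskySolve(upper=False)
>>> c = net(x1, x2)   # solves (u @ u.T) @ c = b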
@@ -7253,39 +8467,48 @@ class FFTWithSize(Primitive):
 
  Note:
  - FFT/IFFT requires complex64 or complex128 inputs, return complex64 or complex128 outputs.
- - RFFT requires float32 or float64 inputs, return complex64 or complex128 outputs.
+ - RFFT requires bool, uint8, int8, int16, int32, int64, float32 and float64 inputs,
+ return complex64 or complex128 outputs.
  - IRFFT requires complex64 or complex128 inputs, return float32 or float64 outputs.
 
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
  Args:
  signal_ndim (int): The number of dimensions in each signal, this controls how many dimensions
  of the fourier transform are realized, can only be 1, 2 or 3.
- inverse (bool): Whether it is the inverse transformation.
- real (bool): Whether it is the real transformation.
+ inverse (bool): Whether it is the inverse transformation, used to select from FFT and RFFT or IFFT and IRFFT.
 
- - "inverse:False real:False" corresponds to FFT.
- - "inverse:True real:False" corresponds to IFFT.
- - "inverse:False real:True" corresponds to RFFT.
- - "inverse:True real:True" corresponds to IRFFT.
+ - when set to ``True``: IFFT and IRFFT.
+ - when set to ``False``: FFT and RFFT.
 
- norm (str, optional): The normalization, optional values: ["backward", "forward", "ortho"].
- Default value: "backward".
+ real (bool): Whether it is the real transformation, combined with `inverse` to select a specific
+ transformation mode:
 
- - "backward" has the direct transforms unscaled and the inverse transforms scaled by :math:`1/n`,
+ - `inverse` is ``False`` , `real` is ``False`` : corresponds to FFT.
+ - `inverse` is ``True`` , `real` is ``False`` : corresponds to IFFT.
+ - `inverse` is ``False`` , `real` is ``True`` : corresponds to RFFT.
+ - `inverse` is ``True`` , `real` is ``True`` : corresponds to IRFFT.
+
+ norm (str, optional): The normalization, optional values: [ ``"backward"`` , ``"forward"`` , ``"ortho"`` ].
+ Default value: ``"backward"`` .
+
+ - ``"backward"`` has the direct transforms unscaled and the inverse transforms scaled by :math:`1/n`,
  where n is the input x's element numbers.
- - "ortho" has both direct and inverse transforms are scaled by :math:`1/\sqrt n`.
- - "forward" has the direct transforms scaled by :math:`1/n` and the inverse transforms unscaled.
+ - ``"ortho"`` has both direct and inverse transforms scaled by :math:`1/\sqrt n`.
+ - ``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse transforms unscaled.
 
- onesided (bool, optional): Controls whether the input is halved to avoid redundancy. Default: True.
+ onesided (bool, optional): Controls whether the input is halved to avoid redundancy. Default: ``True`` .
  signal_sizes (tuple, optional): Size of the original signal (the signal before rfft, no batch dimension),
- only in IRFFT mode and set `onesided` to True requires the parameter, the following conditions must be
- satisfied. Default: ().
+ required only in IRFFT mode with `onesided` set to ``True`` ; the following conditions must be
+ satisfied. Default: ``()`` .
 
  - The length of `signal_sizes` is equal to the signal_ndim of the IRFFT:
- :math:`len(signal_sizes)=signal_ndim`.
+ :math:`len(signal\_sizes)=signal\_ndim`.
  - The last dimension of `signal_sizes` divided by 2 is equal to
- the last dimension of the IRFFT input: :math:`signal_size[-1]/2+1=x.shape[-1]`.
+ the last dimension of the IRFFT input: :math:`signal\_sizes[-1]/2+1=x.shape[-1]`.
  - `signal_sizes` has exactly the same dimensions as the input shape
- except for the last dimension: :math:`signal_sizes[:-1]=x.shape[:-1]`.
+ except for the last dimension: :math:`signal\_sizes[:-1]=x.shape[:-1]`.
 
  Inputs:
  - **x** (Tensor) - The dimension of the input tensor must be greater than or equal to signal_ndim.
@@ -7294,17 +8517,19 @@ class FFTWithSize(Primitive):
  A tensor containing the complex-to-complex, real-to-complex or complex-to-real Fourier transform result.
 
  Raises:
- TypeError: If the input type of FFT/IFFT/IRFF is not one of: complex64, complex128.
- TypeError: If the input type of RFFT is not one of: float32, float64.
+ TypeError: If the input type of FFT/IFFT/IRFFT is not one of: complex64, complex128.
  TypeError: If the input type is not Tensor.
  ValueError: If `x` dimension is less than signal_ndim.
  ValueError: If signal_ndim is greater than 3 or less than 1.
  ValueError: If norm is none of "backward", "forward" or "ortho".
 
  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> # case FFT: signal_ndim: 1, inverse: False, real: False.
  >>> fft_in = Tensor(np.array([2, 1, 2]), mindspore.complex64)
  >>> fft_net = ops.FFTWithSize(signal_ndim=1, inverse=False, real=False)
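Editor's note: a round-trip sketch for the RFFT/IRFFT modes described in the Args above (values are illustrative; `signal_sizes` follows the three conditions listed):

>>> rfft_in = Tensor(np.array([2.0, 1.0, 2.0]), mindspore.float32)
>>> rfft_net = ops.FFTWithSize(signal_ndim=1, inverse=False, real=True)
>>> rfft_out = rfft_net(rfft_in)          # complex64, last dimension 3 // 2 + 1 = 2
>>> irfft_net = ops.FFTWithSize(signal_ndim=1, inverse=True, real=True, signal_sizes=(3,))
>>> recovered = irfft_net(rfft_out)       # float32, length 3 again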
@@ -7351,16 +8576,33 @@ class Polar(Primitive):
 
  Refer to :func:`mindspore.ops.polar` for more details.
 
+ Inputs:
+ - **abs** (Tensor) - Radial distance. Tensor of any dimension,
+ must be one of the following types: float32, float64.
+
+ - **angle** (Tensor) - Polar angle. It has the same shape and dtype as `abs`.
+
+ Outputs:
+ Tensor, has the same shape as `abs`, with complex data type (complex64 for float32
+ inputs, complex128 for float64 inputs, as the examples below show).
+
  Supported Platforms:
  ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> polar = ops.Polar()
  >>> x1 = Tensor(np.array([1, 2]), mindspore.float64)
  >>> x2 = Tensor(np.array([3, 4]), mindspore.float64)
  >>> output = polar(x1, x2)
  >>> print(output)
  [-0.9899925 +0.14112001j -1.30728724-1.51360499j]
+ >>> x1 = Tensor(2.1, mindspore.float32)
+ >>> x2 = Tensor(2.1, mindspore.float32)
+ >>> output = polar(x1, x2)
+ >>> print(output)
+ (-1.0601766+1.8127397j)
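Editor's note: the op evaluates `abs * (cos(angle) + 1j * sin(angle))`; for the first element above, 1 * (cos 3 + i sin 3) ≈ -0.98999 + 0.14112i, matching the printed output.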
  """
 
  @prim_attr_register
@@ -7374,10 +8616,11 @@ class NextAfter(Primitive):
  Returns the next representable floating-point value after `x1` towards `x2` element-wise.
 
  Say there are two float32 numbers :math:`a, b`, and let the
- representable delta of float32 datatype is :math:`eps`. If :math:`a < b`,
+ representable delta of float32 data type be :math:`eps`.
+ If :math:`a < b`,
  then the next representable of :math:`a` towards :math:`b` is :math:`a+eps`,
  If :math:`a > b`,
- the next representable of :math:`b` towards :math:`a` is :math:`b-eps`.
+ the next representable of :math:`a` towards :math:`b` is :math:`a-eps`.
 
  .. math::
 
@@ -7387,12 +8630,10 @@ class NextAfter(Primitive):
  This is an experimental API that is subject to change or deletion.
 
  Inputs:
- - **x1** (Tensor) - The shape of tensor is
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+ - **x1** (Tensor) - The input Tensor of any dimension.
  Must be one of the following types: float32, float64.
 
- - **x2** (Tensor) - The shape of tensor is
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+ - **x2** (Tensor) - The input Tensor of any dimension.
  Must be one of the following types: float32, float64.
 
  Outputs:
@@ -7408,6 +8649,9 @@ class NextAfter(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> nextafter = ops.NextAfter()
  >>> x1 = Tensor(np.asarray([0.0]), mindspore.float32)
  >>> x2 = Tensor(np.asarray([0.1]), mindspore.float32)
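Editor's note: for the example inputs, the next float32 after 0.0 towards 0.1 is the smallest positive subnormal value, :math:`2^{-149} \approx 1.4 \times 10^{-45}`.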
@@ -7435,9 +8679,9 @@ class TrilIndices(Primitive):
  Args:
  row (int): number of rows in the 2-D matrix.
  col (int): number of columns in the 2-D matrix.
- offset (int, optional): diagonal offset from the main diagonal. Default: 0.
+ offset (int, optional): diagonal offset from the main diagonal. Default: ``0`` .
  dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor.
- An optional data type of `mstype.int32` and `mstype.int64`. Default: `mstype.int32`.
+ An optional data type of ``mstype.int32`` and ``mstype.int64`` . Default: ``mstype.int32`` .
 
  Outputs:
  - **y** (Tensor) - indices of the elements in lower triangular part of matrix. The type specified by `dtype`.
@@ -7448,6 +8692,8 @@ class TrilIndices(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> from mindspore import ops
+ >>> from mindspore import dtype as mstype
  >>> net = ops.TrilIndices(4, 3, -1, mstype.int64)
  >>> output = net()
  >>> print(output)
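Editor's note: a worked check of the example: with `row=4, col=3, offset=-1` the selected positions satisfy j <= i - 1, i.e. (1,0), (2,0), (2,1), (3,0), (3,1), (3,2), so the expected output is the 2 x 6 index tensor [[1, 2, 2, 3, 3, 3], [0, 0, 1, 0, 1, 2]].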
@@ -7477,10 +8723,10 @@ class MatrixTriangularSolve(Primitive):
  Only GPU platforms now support the broadcast mechanism.
 
  Args:
- lower (bool, optional): If True, the innermost matrices in `matrix` is
- are lower triangular. Default: True.
+ lower (bool, optional): If ``True`` , the innermost matrices in `matrix`
+ are lower triangular. Default: ``True`` .
  adjoint (bool, optional): Indicates whether the adjoint of the
- matrix is used during the computation. Default: False, use its transpose instead.
+ matrix is used during the computation. Default: ``False`` , use its transpose instead.
 
  Inputs:
  - **matrix** (Tensor) - Tensor of shape :math:`(*, M, M)`,
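Editor's note: a minimal solve sketch for the Args above; the name of the second input is cut off by the hunk, so `rhs` below is only an illustrative placeholder for the right-hand-side tensor:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> matrix = Tensor(np.array([[2.0, 0.0], [1.0, 1.0]]), mindspore.float32)   # lower triangular
>>> rhs = Tensor(np.array([[2.0], [3.0]]), mindspore.float32)                # placeholder name
>>> net = ops.MatrixTriangularSolve(lower=True, adjoint=False)
>>> out = net(matrix, rhs)   # solves matrix @ out = rhs; here out = [[1.], [2.]]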
@@ -7525,7 +8771,7 @@ class CompareAndBitpack(Primitive):
  """
  Compare values of `x` to `threshold` and pack resulting bits into a `uint8`.
 
- Each comparison returns a boolean true (if x_value > threshold) or and false otherwise.
+ Each comparison returns a boolean ``True`` (if x_value > threshold) and ``False`` otherwise.
 
  Given an `x` shaped :math:`(s_0, s_1, ..., s_n)`, the output is a `uint8`
  Tensor shaped :math:`(s_0, s_1, ..., s_n / 8)`.
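Editor's note: a minimal sketch of the packing described above (the innermost dimension must be a multiple of 8, per the output shape formula; input values are illustrative):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32)
>>> threshold = Tensor(5., mindspore.float32)
>>> net = ops.CompareAndBitpack()
>>> out = net(x, threshold)   # uint8 tensor of shape (1,): the eight x > threshold booleans packed into one byte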
@@ -7574,10 +8820,28 @@ class NanToNum(Primitive):
 
  Refer to :func:`mindspore.ops.nan_to_num` for more details.
 
+ Args:
+ nan (float, optional): The value to replace `NaN`. Default value is ``0.0`` .
+ posinf (float, optional): If a Number, the value to replace positive infinity values with. If None, positive
+ infinity values are replaced with the greatest finite value representable by `x`'s dtype.
+ Default value is ``None`` .
+ neginf (float, optional): If a Number, the value to replace negative infinity values with. If None, negative
+ infinity values are replaced with the lowest finite value representable by `x`'s dtype.
+ Default value is ``None`` .
+
+ Inputs:
+ - **x** (Tensor) - Input Tensor of any dimensions. Supported data types: float32 or float16.
+
+ Outputs:
+ Tensor, has the same shape and dtype as `x`.
+
  Supported Platforms:
  ``Ascend`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> nan_to_num = ops.NanToNum()
  >>> x = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 3.14]), mindspore.float32)
  >>> output = nan_to_num(x)
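Editor's note: with the defaults above, NaN maps to 0.0 and ±inf map to the float32 extremes, so the expected output for the example is [0.0, 3.4028235e+38, -3.4028235e+38, 3.14].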
@@ -7612,10 +8876,22 @@ class Orgqr(Primitive):
 
  Refer to :func:`mindspore.ops.orgqr` for more details.
 
+ Inputs:
+ - **x** (Tensor) - Tensor of shape :math:`(*, M, N)`, indicating 2D or 3D matrices,
+ with float32, float64, complex64 and complex128 data type.
+ - **tau** (Tensor) - Indicates the reflecting coefficient in Householder transformation, it has
+ shape :math:`(*, K)`, where `K` is less than or equal to `N`, and it has the same type as `x`.
+
+ Outputs:
+ Tensor, has the same shape and data type as `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62.]]), mindspore.float32)
  >>> tau = Tensor(np.array([1.55, 1.94, 0.0]), mindspore.float32)
  >>> net = ops.Orgqr()
@@ -7645,9 +8921,9 @@ class TriuIndices(Primitive):
  Args:
  row (int): number of rows in the 2-D matrix.
  col (int): number of columns in the 2-D matrix.
- offset (int, optional): diagonal offset from the main diagonal. Default: 0.
+ offset (int, optional): diagonal offset from the main diagonal. Default: ``0`` .
  dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor.
- An optional data type of `mstype.int32` and `mstype.int64`. Default: `mstype.int32`.
+ An optional data type of ``mstype.int32`` and ``mstype.int64`` . Default: ``mstype.int32`` .
 
  Outputs:
  - **y** (Tensor) - indices of the elements in upper triangular part of matrix. The type specified by `dtype`.
@@ -7658,6 +8934,8 @@ class TriuIndices(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> from mindspore import ops
+ >>> from mindspore import dtype as mstype
  >>> net = ops.TriuIndices(5, 4, 2, mstype.int64)
  >>> output = net()
  >>> print(output)
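Editor's note: a worked check: with `row=5, col=4, offset=2` the selected positions satisfy j >= i + 2, i.e. (0,2), (0,3), (1,3), so the expected output is [[0, 0, 1], [2, 3, 3]].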
@@ -7718,6 +8996,9 @@ class Fmax(Primitive):
  ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x1 = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  >>> x2 = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  >>> fmax = ops.Fmax()
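Editor's note: for the example inputs the element-wise result is [4.0, 5.0, 6.0]; following the usual C `fmax` convention (see :func:`mindspore.ops.fmax`), a NaN in one input is generally ignored in favour of the other operand.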
@@ -7743,8 +9024,8 @@ class Eig(Primitive):
  This is an experimental API that is subject to change or deletion.
 
  Args:
- compute_v (bool, optional): If `True`, compute both eigenvalues and eigenvectors;
- If `False`, just eigenvalues will be computed. Default: False.
+ compute_v (bool, optional): If ``True`` , compute both eigenvalues and eigenvectors;
+ If ``False`` , just eigenvalues will be computed. Default: ``False`` .
  Inputs:
  - **x** (Tensor) - Square matrices of shape :math:`(*, N, N)`,
  with float32, float64, complex64 or complex128 data type.
@@ -7766,6 +9047,9 @@ class Eig(Primitive):
  ``Ascend`` ``CPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor(np.array([[1.0, 0.0], [0.0, 2.0]]), mindspore.float32)
  >>> eig = ops.Eig(compute_v=True)
  >>> u, v = eig(input_x)
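Editor's note: the example matrix is diagonal, so its eigenvalues are just the diagonal entries 1 and 2 (Eig returns them as complex values) and the eigenvectors are the standard basis vectors.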
@@ -7790,8 +9074,8 @@ class SelfAdjointEig(Primitive):
  The eigenvalues are sorted in non-decreasing order.
 
  Args:
- compute_v(bool): If `True` then eigenvectors will be computed and returned in v;
- If `False`, only the eigenvalues will be computed. Default: True.
+ compute_v(bool): If ``True`` then eigenvectors will be computed and returned in v;
+ If ``False`` , only the eigenvalues will be computed. Default: ``True`` .
 
  Inputs:
  - **x** (Tensor) - Must be one of the following types:
@@ -7832,14 +9116,14 @@ class SelfAdjointEig(Primitive):
 
 
  class Qr(Primitive):
  """
- Returns the QR decomposition of one or more matrices. If `full_matrices` is true, compute full-sized q and r,
- If False (the default), compute the P columns of q where P is minimum of the 2 innermost dimensions of x.
+ Returns the QR decomposition of one or more matrices. If `full_matrices` is ``True`` , compute full-sized q and r;
+ if ``False`` (the default), compute the P columns of q where P is the minimum of the two innermost dimensions of x.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
 
  Args:
- full_matrices (bool, optional): Whether compute full-sized QR decomposition. Default: False.
+ full_matrices (bool, optional): Whether to compute the full-sized QR decomposition. Default: ``False`` .
  Inputs:
  - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions.
@@ -7848,10 +9132,10 @@ class Qr(Primitive):
 
  Outputs:
  - **q** (Tensor) - The orthonormal matrices of x.
- If `full_matrices` is true, the shape is :math:`(m, m)`, else the shape is :math:`(m, p)`.
+ If `full_matrices` is ``True`` , the shape is :math:`(m, m)`, else the shape is :math:`(m, p)`.
  The dtype of `q` is same as `x`.
  - **r** (Tensor) - The upper triangular matrices of x.
- If `full_matrices` is true, the shape is :math:`(m, n)`, else the shape is :math:`(p, n)`.
+ If `full_matrices` is ``True`` , the shape is :math:`(m, n)`, else the shape is :math:`(p, n)`.
  The dtype of `r` is same as `x`.
 
  Raises:
@@ -7863,6 +9147,8 @@ class Qr(Primitive):
  ``Ascend`` ``GPU`` ``CPU``
 
  Examples:
+ >>> from mindspore import Tensor, ops
+ >>> from mindspore import dtype as mstype
  >>> qr_op = ops.Qr(full_matrices=False)
  >>> x = Tensor([[20., -31, 7], [4, 270, -90], [-8, 17, -32]], mstype.float32)
  >>> q, r = qr_op(x)
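Editor's note: a quick validity check for the decomposition above (a sketch; the tolerance is illustrative for float32):

>>> import numpy as np
>>> print(np.allclose(np.matmul(q.asnumpy(), r.asnumpy()), x.asnumpy(), atol=1e-3))
True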
@@ -7892,10 +9178,10 @@ class Cauchy(Primitive):
 
  Args:
  size (list[int]): The size of tensor.
- sigma (float, optional): the location parameter, specifying the location
- of the peak of the distribution. Default: 1.0.
- median (float, optional): the scale parameter which specifies the half-width
- at half-maximum. Default: 0.0.
+ median (float, optional): the location parameter, specifying the location
+ of the peak of the distribution. Default: ``0.0`` .
+ sigma (float, optional): the scale parameter which specifies the half-width
+ at half-maximum. Default: ``1.0`` .
 
  Outputs:
  Tensor with Cauchy distribution data. Tensor shape is size, and data type is float32.
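Editor's note: a minimal construction sketch from the Args above (sizes and parameters are illustrative):

>>> from mindspore import ops
>>> net = ops.Cauchy(size=[2, 3], median=0.0, sigma=1.0)
>>> out = net()   # float32 tensor of shape (2, 3) with Cauchy(median=0, sigma=1) samples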
@@ -7933,17 +9219,23 @@ class Ormqr(Primitive):
  Multiplies a (m, n) matrix C (given by other) with a matrix Q, where Q is represented using Householder
  reflectors (x, tau), which is the output of geqrf().
 
+ Refer to :func:`mindspore.ops.ormqr` for more details.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
  Args:
- left (bool, optional): controls the order of multiplication. If true, compute op(Q)*C.
- If false, compute C*op(Q). Default: True.
- transpose(bool, optional): controls whether the matrix Q is conjugate transposed or not.Default: False.
+ left (bool, optional): controls the order of multiplication. If ``True`` , compute op(Q)*C.
+ If ``False`` , compute C*op(Q). Default: ``True`` .
+ transpose(bool, optional): controls whether the matrix Q is conjugate transposed or not. Default: ``False`` .
 
  Inputs:
- - **x** (Tensor) - Tensor of shape: (*, mn, k) where mn equals to m or n depending on the the args of `left`,
+ - **x** (Tensor) - Tensor of shape :math:`(*, mn, k)` where the value of mn depends on `left`:
+ when `left` is ``True`` , the value of mn is equal to m; otherwise, the value of mn is equal to n,
  and `*` is zero or more batch dimensions.
- - **tau** (Tensor) - Tensor of shape (*, min(mn, k)) where `*` is zero or more batch dimensions,
+ - **tau** (Tensor) - Tensor of shape :math:`(*, min(mn, k))` where `*` is zero or more batch dimensions,
  and its type is the same as `x`.
- - **other** (Tensor) - Tensor of shape (*, m, n) where `*` is zero or more batch dimensions,
+ - **other** (Tensor) - Tensor of shape :math:`(*, m, n)` where `*` is zero or more batch dimensions,
  and its type is the same as `x`.
 
  Outputs:
@@ -7956,20 +9248,23 @@ class Ormqr(Primitive):
  ValueError: If rank(x) - rank(tau) != 1.
  ValueError: If tau.shape[:-2] != x.shape[:-2]
  ValueError: If other.shape[:-2] != x.shape[:-2]
- ValueError: If left == true, other.shape[-2] < tau.shape[-1].
- ValueError: If left == true, other.shape[-2] != x.shape[-2].
- ValueError: If left == false, other.shape[-1] < tau.shape[-1].
- ValueError: If left == false, other.shape[-1] != x.shape[-2].
+ ValueError: If left == True, other.shape[-2] < tau.shape[-1].
+ ValueError: If left == True, other.shape[-2] != x.shape[-2].
+ ValueError: If left == False, other.shape[-1] < tau.shape[-1].
+ ValueError: If left == False, other.shape[-1] != x.shape[-2].
 
  Supported Platforms:
  ``GPU``
 
  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62]]), mindspore.float32)
  >>> tau = Tensor(np.array([1.55, 1.94, 3.0]), mindspore.float32)
  >>> other = Tensor(np.array([[-114.6, 10.9, 1.1],
- [-0.304, 38.07, 69.38],
- [-0.45, -0.17, 62]]), mindspore.float32)
+ ... [-0.304, 38.07, 69.38],
+ ... [-0.45, -0.17, 62]]), mindspore.float32)
  >>> net = ops.Ormqr()
  >>> y = net(x, tau, other)
  >>> print(y)