mindspore-2.1.0-cp37-cp37m-win_amd64.whl → mindspore-2.2.11-cp37-cp37m-win_amd64.whl

This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.

Potentially problematic release: this version of mindspore might be problematic (details are available on the package's registry page).

Files changed (511)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +4 -1
  5. mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -1
  9. mindspore/_checkparam.py +23 -29
  10. mindspore/_extends/graph_kernel/__init__.py +0 -1
  11. mindspore/_extends/graph_kernel/model/graph_split.py +84 -76
  12. mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
  13. mindspore/_extends/graph_kernel/splitter.py +4 -11
  14. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +122 -15
  15. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +84 -67
  16. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
  17. mindspore/_extends/parallel_compile/akg_compiler/util.py +10 -7
  18. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +2 -2
  19. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +6 -5
  20. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
  21. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
  22. mindspore/_extends/parse/__init__.py +13 -15
  23. mindspore/_extends/parse/namespace.py +7 -33
  24. mindspore/_extends/parse/parser.py +67 -72
  25. mindspore/_extends/parse/resources.py +1 -1
  26. mindspore/_extends/parse/standard_method.py +86 -106
  27. mindspore/_extends/parse/trope.py +1 -1
  28. mindspore/_extends/remote/kernel_build_server.py +25 -7
  29. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  30. mindspore/_install_custom.py +43 -0
  31. mindspore/amp.py +47 -11
  32. mindspore/atlprov.dll +0 -0
  33. mindspore/boost/boost.py +1 -8
  34. mindspore/boost/boost_cell_wrapper.py +3 -2
  35. mindspore/boost/grad_accumulation.py +1 -1
  36. mindspore/boost/group_loss_scale_manager.py +8 -7
  37. mindspore/c1.dll +0 -0
  38. mindspore/c1xx.dll +0 -0
  39. mindspore/c2.dll +0 -0
  40. mindspore/common/__init__.py +5 -3
  41. mindspore/common/_jit_fallback_utils.py +6 -0
  42. mindspore/common/_register_for_adapter.py +2 -0
  43. mindspore/common/_register_for_tensor.py +2 -2
  44. mindspore/common/_stub_tensor.py +13 -0
  45. mindspore/common/_utils.py +29 -0
  46. mindspore/common/api.py +174 -259
  47. mindspore/common/auto_dynamic_shape.py +494 -0
  48. mindspore/common/dtype.py +18 -11
  49. mindspore/common/dump.py +6 -4
  50. mindspore/common/initializer.py +14 -14
  51. mindspore/common/jit_config.py +33 -15
  52. mindspore/common/lazy_inline.py +126 -7
  53. mindspore/common/mindir_util.py +101 -0
  54. mindspore/common/parameter.py +51 -41
  55. mindspore/common/seed.py +4 -4
  56. mindspore/common/sparse_tensor.py +13 -14
  57. mindspore/common/tensor.py +243 -165
  58. mindspore/communication/__init__.py +7 -4
  59. mindspore/communication/_comm_helper.py +83 -4
  60. mindspore/communication/management.py +152 -84
  61. mindspore/config/op_info.config +14 -3
  62. mindspore/context.py +152 -61
  63. mindspore/dataset/__init__.py +5 -5
  64. mindspore/dataset/audio/__init__.py +2 -2
  65. mindspore/dataset/audio/transforms.py +52 -52
  66. mindspore/dataset/callback/ds_callback.py +16 -2
  67. mindspore/dataset/core/config.py +68 -51
  68. mindspore/dataset/engine/cache_client.py +33 -7
  69. mindspore/dataset/engine/datasets.py +250 -112
  70. mindspore/dataset/engine/datasets_audio.py +43 -211
  71. mindspore/dataset/engine/datasets_standard_format.py +16 -35
  72. mindspore/dataset/engine/datasets_text.py +43 -67
  73. mindspore/dataset/engine/datasets_user_defined.py +86 -100
  74. mindspore/dataset/engine/datasets_vision.py +219 -1029
  75. mindspore/dataset/engine/iterators.py +11 -4
  76. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +4 -0
  77. mindspore/dataset/engine/obs/util.py +3 -0
  78. mindspore/dataset/engine/samplers.py +1 -1
  79. mindspore/dataset/engine/validators.py +19 -5
  80. mindspore/dataset/text/__init__.py +3 -3
  81. mindspore/dataset/text/transforms.py +101 -127
  82. mindspore/dataset/text/utils.py +205 -138
  83. mindspore/dataset/transforms/__init__.py +1 -1
  84. mindspore/dataset/transforms/py_transforms_util.py +40 -12
  85. mindspore/dataset/transforms/transforms.py +95 -40
  86. mindspore/dataset/utils/browse_dataset.py +8 -2
  87. mindspore/dataset/utils/line_reader.py +17 -19
  88. mindspore/dataset/vision/__init__.py +3 -3
  89. mindspore/dataset/vision/c_transforms.py +6 -3
  90. mindspore/dataset/vision/transforms.py +409 -287
  91. mindspore/dataset/vision/utils.py +13 -14
  92. mindspore/dataset/vision/validators.py +11 -1
  93. mindspore/dnnl.dll +0 -0
  94. mindspore/dpcmi.dll +0 -0
  95. mindspore/experimental/map_parameter.py +14 -0
  96. mindspore/{nn/optim_ex → experimental/optim}/__init__.py +30 -29
  97. mindspore/{nn/optim_ex → experimental/optim}/adam.py +60 -67
  98. mindspore/{nn/optim_ex → experimental/optim}/adamw.py +181 -203
  99. mindspore/experimental/optim/lr_scheduler.py +1427 -0
  100. mindspore/{nn/optim_ex → experimental/optim}/optimizer.py +252 -259
  101. mindspore/{nn/optim_ex → experimental/optim}/sgd.py +147 -152
  102. mindspore/gen_ops.py +273 -0
  103. mindspore/include/OWNERS +0 -1
  104. mindspore/include/api/data_type.h +2 -1
  105. mindspore/include/api/graph.h +0 -15
  106. mindspore/include/api/kernel.h +2 -0
  107. mindspore/include/api/kernel_api.h +37 -12
  108. mindspore/include/api/model.h +17 -14
  109. mindspore/include/api/status.h +8 -3
  110. mindspore/include/api/types.h +37 -4
  111. mindspore/include/c_api/ms/abstract.h +67 -0
  112. mindspore/include/c_api/ms/attribute.h +197 -0
  113. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  114. mindspore/include/c_api/ms/base/macros.h +32 -0
  115. mindspore/include/c_api/ms/base/status.h +33 -0
  116. mindspore/include/c_api/ms/base/types.h +282 -0
  117. mindspore/include/c_api/ms/context.h +102 -0
  118. mindspore/include/c_api/ms/graph.h +160 -0
  119. mindspore/include/c_api/ms/node.h +606 -0
  120. mindspore/include/c_api/ms/tensor.h +161 -0
  121. mindspore/include/c_api/ms/value.h +84 -0
  122. mindspore/include/dataset/constants.h +6 -5
  123. mindspore/include/dataset/execute.h +23 -13
  124. mindspore/include/dataset/text.h +26 -26
  125. mindspore/include/dataset/transforms.h +13 -13
  126. mindspore/include/dataset/vision.h +60 -60
  127. mindspore/include/dataset/vision_ascend.h +5 -6
  128. mindspore/include/dataset/vision_lite.h +17 -17
  129. mindspore/jpeg62.dll +0 -0
  130. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  131. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  132. mindspore/mindspore_backend.dll +0 -0
  133. mindspore/mindspore_common.dll +0 -0
  134. mindspore/mindspore_core.dll +0 -0
  135. mindspore/mindspore_glog.dll +0 -0
  136. mindspore/mindspore_shared_lib.dll +0 -0
  137. mindspore/msobj140.dll +0 -0
  138. mindspore/mspdb140.dll +0 -0
  139. mindspore/mspdbcore.dll +0 -0
  140. mindspore/mspdbst.dll +0 -0
  141. mindspore/mspft140.dll +0 -0
  142. mindspore/msvcdis140.dll +0 -0
  143. mindspore/msvcp140_1.dll +0 -0
  144. mindspore/msvcp140_2.dll +0 -0
  145. mindspore/msvcp140_atomic_wait.dll +0 -0
  146. mindspore/msvcp140_codecvt_ids.dll +0 -0
  147. mindspore/nn/__init__.py +0 -2
  148. mindspore/nn/cell.py +313 -74
  149. mindspore/nn/dynamic_lr.py +21 -21
  150. mindspore/nn/layer/activation.py +22 -30
  151. mindspore/nn/layer/basic.py +15 -13
  152. mindspore/nn/layer/channel_shuffle.py +1 -1
  153. mindspore/nn/layer/container.py +271 -9
  154. mindspore/nn/layer/conv.py +323 -204
  155. mindspore/nn/layer/dense.py +8 -5
  156. mindspore/nn/layer/embedding.py +33 -27
  157. mindspore/nn/layer/flash_attention.py +61 -95
  158. mindspore/nn/layer/image.py +8 -6
  159. mindspore/nn/layer/math.py +16 -25
  160. mindspore/nn/layer/normalization.py +107 -66
  161. mindspore/nn/layer/padding.py +1 -1
  162. mindspore/nn/layer/pooling.py +131 -109
  163. mindspore/nn/layer/rnn_cells.py +27 -22
  164. mindspore/nn/layer/rnns.py +13 -16
  165. mindspore/nn/layer/thor_layer.py +1 -1
  166. mindspore/nn/layer/transformer.py +221 -154
  167. mindspore/nn/learning_rate_schedule.py +9 -1
  168. mindspore/nn/loss/loss.py +235 -174
  169. mindspore/nn/optim/ada_grad.py +2 -1
  170. mindspore/nn/optim/adadelta.py +1 -0
  171. mindspore/nn/optim/adafactor.py +2 -1
  172. mindspore/nn/optim/adam.py +7 -4
  173. mindspore/nn/optim/adamax.py +3 -2
  174. mindspore/nn/optim/adasum.py +2 -2
  175. mindspore/nn/optim/asgd.py +2 -3
  176. mindspore/nn/optim/ftrl.py +6 -5
  177. mindspore/nn/optim/lamb.py +7 -4
  178. mindspore/nn/optim/lars.py +1 -1
  179. mindspore/nn/optim/lazyadam.py +5 -3
  180. mindspore/nn/optim/momentum.py +2 -1
  181. mindspore/nn/optim/optimizer.py +53 -4
  182. mindspore/nn/optim/proximal_ada_grad.py +3 -4
  183. mindspore/nn/optim/rmsprop.py +4 -3
  184. mindspore/nn/optim/rprop.py +23 -12
  185. mindspore/nn/optim/sgd.py +26 -11
  186. mindspore/nn/optim/thor.py +9 -7
  187. mindspore/nn/probability/bijector/bijector.py +5 -5
  188. mindspore/nn/probability/bijector/power_transform.py +27 -27
  189. mindspore/nn/probability/bijector/softplus.py +3 -3
  190. mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -3
  191. mindspore/nn/probability/distribution/bernoulli.py +5 -5
  192. mindspore/nn/probability/distribution/beta.py +3 -3
  193. mindspore/nn/probability/distribution/categorical.py +7 -7
  194. mindspore/nn/probability/distribution/cauchy.py +0 -1
  195. mindspore/nn/probability/distribution/distribution.py +3 -3
  196. mindspore/nn/probability/distribution/gamma.py +3 -3
  197. mindspore/nn/probability/distribution/geometric.py +4 -4
  198. mindspore/nn/probability/distribution/gumbel.py +4 -4
  199. mindspore/nn/probability/distribution/log_normal.py +2 -2
  200. mindspore/nn/probability/distribution/logistic.py +2 -2
  201. mindspore/nn/probability/distribution/poisson.py +4 -4
  202. mindspore/nn/probability/distribution/transformed_distribution.py +3 -3
  203. mindspore/nn/probability/distribution/uniform.py +6 -6
  204. mindspore/nn/wrap/__init__.py +4 -2
  205. mindspore/nn/wrap/cell_wrapper.py +87 -34
  206. mindspore/nn/wrap/grad_reducer.py +8 -5
  207. mindspore/nn/wrap/loss_scale.py +105 -42
  208. mindspore/numpy/array_creations.py +1 -2
  209. mindspore/numpy/array_ops.py +3 -2
  210. mindspore/numpy/utils_const.py +5 -5
  211. mindspore/opencv_core452.dll +0 -0
  212. mindspore/opencv_imgcodecs452.dll +0 -0
  213. mindspore/opencv_imgproc452.dll +0 -0
  214. mindspore/ops/_grad_experimental/__init__.py +0 -5
  215. mindspore/ops/_grad_experimental/grad_array_ops.py +2 -3
  216. mindspore/ops/_grad_experimental/grad_comm_ops.py +15 -2
  217. mindspore/ops/_grad_experimental/grad_debug_ops.py +0 -37
  218. mindspore/ops/_grad_experimental/grad_implementations.py +11 -1
  219. mindspore/ops/_grad_experimental/grad_inner_ops.py +2 -216
  220. mindspore/ops/_grad_experimental/grad_math_ops.py +19 -199
  221. mindspore/ops/_grad_experimental/grad_sparse.py +15 -0
  222. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  223. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
  224. mindspore/ops/_op_impl/aicpu/__init__.py +14 -2
  225. mindspore/ops/_op_impl/aicpu/add.py +3 -3
  226. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
  227. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  228. mindspore/ops/_op_impl/{_custom_op/flash_attention/constants.py → aicpu/eps.py} +18 -27
  229. mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
  230. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +21 -2
  231. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
  232. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
  233. mindspore/ops/_op_impl/aicpu/multinomial.py +3 -3
  234. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
  235. mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
  236. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
  237. mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
  238. mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
  239. mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
  240. mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
  241. mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -5
  242. mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -5
  243. mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
  244. mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
  245. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
  246. mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
  247. mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
  248. mindspore/ops/_op_impl/tbe/__init__.py +4 -4
  249. mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
  250. mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
  251. mindspore/ops/_primitive_cache.py +1 -1
  252. mindspore/ops/_tracefunc.py +45 -13
  253. mindspore/ops/_utils/utils.py +6 -1
  254. mindspore/ops/_vmap/vmap_array_ops.py +3 -3
  255. mindspore/ops/_vmap/vmap_base.py +3 -3
  256. mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
  257. mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
  258. mindspore/ops/_vmap/vmap_math_ops.py +5 -2
  259. mindspore/ops/_vmap/vmap_nn_ops.py +61 -7
  260. mindspore/ops/arg_dtype_cast.py +54 -0
  261. mindspore/ops/composite/base.py +37 -10
  262. mindspore/ops/composite/math_ops.py +5 -4
  263. mindspore/ops/composite/multitype_ops/_compile_utils.py +275 -73
  264. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +16 -9
  265. mindspore/ops/composite/multitype_ops/add_impl.py +43 -4
  266. mindspore/ops/composite/multitype_ops/getitem_impl.py +42 -4
  267. mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
  268. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
  270. mindspore/ops/deprecated.py +304 -0
  271. mindspore/ops/function/__init__.py +4 -1
  272. mindspore/ops/function/array_func.py +174 -193
  273. mindspore/ops/function/clip_func.py +81 -13
  274. mindspore/ops/function/debug_func.py +1 -1
  275. mindspore/ops/function/grad/grad_func.py +18 -9
  276. mindspore/ops/function/image_func.py +10 -4
  277. mindspore/ops/function/linalg_func.py +5 -5
  278. mindspore/ops/function/math_func.py +575 -386
  279. mindspore/ops/function/nn_func.py +568 -260
  280. mindspore/ops/function/random_func.py +88 -57
  281. mindspore/ops/function/sparse_func.py +1 -1
  282. mindspore/ops/function/sparse_unary_func.py +14 -12
  283. mindspore/ops/function/vmap_func.py +6 -5
  284. mindspore/ops/functional.py +15 -10
  285. mindspore/ops/op_info_register.py +244 -25
  286. mindspore/ops/operations/__init__.py +31 -19
  287. mindspore/ops/operations/_grad_ops.py +71 -7
  288. mindspore/ops/operations/_inner_ops.py +350 -17
  289. mindspore/ops/operations/_quant_ops.py +4 -8
  290. mindspore/ops/operations/_sequence_ops.py +42 -0
  291. mindspore/ops/operations/array_ops.py +68 -282
  292. mindspore/ops/operations/comm_ops.py +107 -59
  293. mindspore/ops/operations/custom_ops.py +94 -70
  294. mindspore/ops/operations/debug_ops.py +8 -4
  295. mindspore/ops/operations/image_ops.py +18 -12
  296. mindspore/ops/operations/inner_ops.py +26 -3
  297. mindspore/ops/operations/math_ops.py +192 -144
  298. mindspore/ops/operations/nn_ops.py +857 -489
  299. mindspore/ops/operations/other_ops.py +0 -22
  300. mindspore/ops/operations/random_ops.py +53 -111
  301. mindspore/ops/operations/sparse_ops.py +3 -1
  302. mindspore/ops/primitive.py +24 -18
  303. mindspore/parallel/_auto_parallel_context.py +68 -8
  304. mindspore/parallel/_cost_model_context.py +2 -2
  305. mindspore/parallel/_offload_context.py +17 -3
  306. mindspore/parallel/_parallel_serialization.py +12 -5
  307. mindspore/parallel/_ps_context.py +12 -0
  308. mindspore/parallel/_tensor.py +18 -13
  309. mindspore/parallel/_transformer/layers.py +5 -3
  310. mindspore/parallel/_transformer/loss.py +1 -0
  311. mindspore/parallel/_transformer/moe.py +2 -2
  312. mindspore/parallel/_transformer/op_parallel_config.py +12 -1
  313. mindspore/parallel/_transformer/transformer.py +23 -3
  314. mindspore/parallel/_utils.py +11 -7
  315. mindspore/parallel/algo_parameter_config.py +85 -5
  316. mindspore/parallel/checkpoint_transform.py +19 -12
  317. mindspore/parallel/shard.py +21 -14
  318. mindspore/pgodb140.dll +0 -0
  319. mindspore/pgort140.dll +0 -0
  320. mindspore/profiler/common/struct_type.py +3 -3
  321. mindspore/profiler/common/util.py +4 -2
  322. mindspore/profiler/envprofiling.py +1 -1
  323. mindspore/profiler/parser/aicpu_data_parser.py +5 -3
  324. mindspore/profiler/parser/ascend_flops_generator.py +2 -2
  325. mindspore/profiler/parser/ascend_fpbp_generator.py +1 -1
  326. mindspore/profiler/parser/ascend_hccl_generator.py +249 -12
  327. mindspore/profiler/parser/ascend_msprof_exporter.py +150 -255
  328. mindspore/profiler/parser/ascend_msprof_generator.py +204 -17
  329. mindspore/profiler/parser/ascend_op_generator.py +6 -6
  330. mindspore/profiler/parser/ascend_steptrace_generator.py +6 -4
  331. mindspore/profiler/parser/ascend_timeline_generator.py +14 -187
  332. mindspore/profiler/parser/base_timeline_generator.py +10 -8
  333. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +16 -12
  334. mindspore/profiler/parser/flops_parser.py +15 -11
  335. mindspore/profiler/parser/framework_parser.py +38 -22
  336. mindspore/profiler/parser/hccl_parser.py +16 -12
  337. mindspore/profiler/parser/integrator.py +22 -11
  338. mindspore/profiler/parser/memory_usage_parser.py +2 -2
  339. mindspore/profiler/parser/minddata_analyzer.py +12 -14
  340. mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
  341. mindspore/profiler/parser/msadvisor_parser.py +8 -4
  342. mindspore/profiler/parser/op_intermediate_parser.py +5 -2
  343. mindspore/profiler/parser/optime_parser.py +1 -1
  344. mindspore/profiler/parser/profiler_info.py +21 -2
  345. mindspore/profiler/parser/step_trace_parser.py +11 -14
  346. mindspore/profiler/profiling.py +179 -89
  347. mindspore/rewrite/api/node.py +102 -19
  348. mindspore/rewrite/api/node_type.py +5 -1
  349. mindspore/rewrite/api/pattern_engine.py +1 -1
  350. mindspore/rewrite/api/scoped_value.py +9 -17
  351. mindspore/rewrite/api/symbol_tree.py +131 -47
  352. mindspore/rewrite/ast_helpers/__init__.py +2 -1
  353. mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
  354. mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
  355. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +93 -46
  356. mindspore/rewrite/common/rewrite_elog.py +5 -1
  357. mindspore/rewrite/namer.py +33 -24
  358. mindspore/rewrite/namespace.py +14 -5
  359. mindspore/{_extends/graph_kernel/expanders/complex → rewrite/node}/__init__.py +9 -9
  360. mindspore/rewrite/node/call_function.py +79 -0
  361. mindspore/rewrite/node/cell_container.py +135 -0
  362. mindspore/rewrite/node/control_flow.py +88 -0
  363. mindspore/rewrite/{node.py → node/node.py} +273 -234
  364. mindspore/rewrite/node/node_manager.py +254 -0
  365. mindspore/rewrite/{topological_manager.py → node/node_topological_manager.py} +13 -46
  366. mindspore/rewrite/parsers/arguments_parser.py +22 -21
  367. mindspore/rewrite/parsers/assign_parser.py +216 -221
  368. mindspore/rewrite/parsers/attribute_parser.py +9 -7
  369. mindspore/rewrite/parsers/class_def_parser.py +174 -113
  370. mindspore/rewrite/parsers/constant_parser.py +9 -6
  371. mindspore/rewrite/parsers/container_parser.py +9 -7
  372. mindspore/rewrite/parsers/for_parser.py +42 -21
  373. mindspore/rewrite/parsers/function_def_parser.py +24 -16
  374. mindspore/rewrite/parsers/if_parser.py +28 -24
  375. mindspore/rewrite/parsers/module_parser.py +196 -25
  376. mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
  377. mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
  378. mindspore/rewrite/parsers/return_parser.py +6 -6
  379. mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
  380. mindspore/rewrite/sparsify/utils.py +1 -1
  381. mindspore/rewrite/symbol_tree.py +523 -578
  382. mindspore/rewrite/symbol_tree_builder.py +9 -193
  383. mindspore/rewrite/symbol_tree_dumper.py +2 -2
  384. mindspore/run_check/_check_version.py +6 -4
  385. mindspore/{ops/bprop_mindir → safeguard}/__init__.py +4 -3
  386. mindspore/safeguard/rewrite_obfuscation.py +541 -0
  387. mindspore/tbbmalloc.dll +0 -0
  388. mindspore/tinyxml2.dll +0 -0
  389. mindspore/train/_utils.py +7 -3
  390. mindspore/train/amp.py +323 -123
  391. mindspore/train/anf_ir_pb2.py +14 -2
  392. mindspore/train/callback/_backup_and_restore.py +2 -12
  393. mindspore/train/callback/_callback.py +29 -4
  394. mindspore/train/callback/_checkpoint.py +23 -8
  395. mindspore/train/callback/_early_stop.py +2 -2
  396. mindspore/train/callback/_landscape.py +4 -4
  397. mindspore/train/callback/_loss_monitor.py +2 -2
  398. mindspore/train/callback/_on_request_exit.py +2 -2
  399. mindspore/train/callback/_reduce_lr_on_plateau.py +3 -4
  400. mindspore/train/callback/_summary_collector.py +15 -8
  401. mindspore/train/callback/_time_monitor.py +58 -5
  402. mindspore/train/data_sink.py +5 -11
  403. mindspore/train/dataset_helper.py +84 -57
  404. mindspore/train/loss_scale_manager.py +2 -2
  405. mindspore/train/metrics/__init__.py +3 -3
  406. mindspore/train/metrics/cosine_similarity.py +1 -1
  407. mindspore/train/metrics/hausdorff_distance.py +3 -2
  408. mindspore/train/metrics/mean_surface_distance.py +3 -2
  409. mindspore/train/metrics/metric.py +39 -19
  410. mindspore/train/metrics/roc.py +2 -2
  411. mindspore/train/metrics/root_mean_square_surface_distance.py +4 -3
  412. mindspore/train/mind_ir_pb2.py +85 -36
  413. mindspore/train/model.py +187 -47
  414. mindspore/train/serialization.py +487 -161
  415. mindspore/train/summary/_summary_adapter.py +1 -1
  416. mindspore/train/summary/_writer_pool.py +3 -2
  417. mindspore/train/summary/summary_record.py +37 -17
  418. mindspore/train/train_thor/convert_utils.py +3 -3
  419. mindspore/train/train_thor/dataset_helper.py +1 -1
  420. mindspore/turbojpeg.dll +0 -0
  421. mindspore/vcmeta.dll +0 -0
  422. mindspore/vcruntime140.dll +0 -0
  423. mindspore/vcruntime140_1.dll +0 -0
  424. mindspore/version.py +1 -1
  425. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/METADATA +7 -4
  426. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/RECORD +429 -486
  427. mindspore/_extends/graph_kernel/expander.py +0 -80
  428. mindspore/_extends/graph_kernel/expanders/__init__.py +0 -54
  429. mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
  430. mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
  431. mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
  432. mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
  433. mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
  434. mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
  435. mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
  436. mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
  437. mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
  438. mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
  439. mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
  440. mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
  441. mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
  442. mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
  443. mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
  444. mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
  445. mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
  446. mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
  447. mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
  448. mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
  449. mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
  450. mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
  451. mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
  452. mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
  453. mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
  454. mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
  455. mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
  456. mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
  457. mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
  458. mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
  459. mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
  460. mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
  461. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
  462. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
  463. mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
  464. mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
  465. mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
  466. mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
  467. mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
  468. mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
  469. mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
  470. mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
  471. mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
  472. mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
  473. mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
  474. mindspore/dataset/datapreprocess/__init__.py +0 -20
  475. mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
  476. mindspore/include/api/net.h +0 -142
  477. mindspore/nn/lr_scheduler.py +0 -262
  478. mindspore/ops/_grad_experimental/grad_image_ops.py +0 -248
  479. mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -181
  480. mindspore/ops/_grad_experimental/grad_other_ops.py +0 -72
  481. mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
  482. mindspore/ops/_grad_experimental/grad_sequence_ops.py +0 -351
  483. mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
  484. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +0 -350
  485. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +0 -409
  486. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +0 -578
  487. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +0 -199
  488. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +0 -446
  489. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
  490. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +0 -45
  491. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +0 -67
  492. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +0 -62
  493. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -0
  494. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -0
  495. mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -0
  496. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
  497. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  498. mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -0
  499. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -0
  500. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  501. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  502. mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -0
  503. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -0
  504. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -0
  505. mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -0
  506. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -0
  507. mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
  508. mindspore/rewrite/node_visitor.py +0 -44
  509. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/WHEEL +0 -0
  510. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/entry_points.txt +0 -0
  511. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/top_level.txt +0 -0
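
The diff excerpt below appears to be from mindspore/ops/function/math_func.py (entry 278 in the list above, +575 -386).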
@@ -127,6 +127,8 @@ cast_ = P.Cast()
  #####################################
  # Private Operation Functions.
  #####################################
+ addcdiv_ = P.Addcdiv()
+ addcuml_ = P.Addcmul()
  addn_ = P.AddN()
  angle_ = Angle()
  log_ = P.Log()
@@ -142,6 +144,7 @@ asin_ = P.Asin()
  polar_ = Polar()
  acos_ = P.ACos()
  atan_ = P.Atan()
+ atan2_ = P.Atan2()
  sinh_ = P.Sinh()
  cosh_ = P.Cosh()
  tanh_ = P.Tanh()
@@ -177,6 +180,7 @@ tensor_round_ = P.Round()
  linspace_ = P.LinSpace()
  matrix_exp_ = MatrixExp()
  exp2_ = P.Pow()
+ trunc_ = P.Trunc()
  truncate_div_ = P.TruncateDiv()
  truncate_mod_ = P.TruncateMod()
  sparse_segment_mean_ = SparseSegmentMean()
@@ -188,6 +192,38 @@ cumsum_ = P.CumSum()
  shape_ = P.Shape()
  reshape_ = P.Reshape()
  dtype_ = P.DType()
+ eps_ = P.Eps()
+ rank_ = P.Rank()
+ expand_dims_ = P.ExpandDims()
+ sign_ = P.Sign()
+ nextafter_ = P.NextAfter()
+ matrix_inverse_ = P.MatrixInverse()
+ matrix_determinant_ = P.MatrixDeterminant()
+ log_matrix_determinant_ = P.LogMatrixDeterminant()
+ trace_ = P.Trace()
+ real_ = P.Real()
+ rsqrt_ = P.Rsqrt()
+ reciprocal_ = P.Reciprocal()
+ tile_ = P.Tile()
+ batch_matmul_ = P.BatchMatMul()
+ fill_v2_ = P.FillV2()
+ imag_ = P.Imag()
+ log1p_ = P.Log1p()
+ accumulate_ = P.AccumulateNV2()
+ conj_ = P.Conj()
+ erfinv_ = P.Erfinv()
+ cumprod_ = P.CumProd()
+ lgamma_ = P.Lgamma()
+ digamma_ = P.Digamma()
+ poly_gamma_ = P.Polygamma()
+ isinf_ = P.IsInf()
+ zeros_ = P.Zeros()
+ ones_ = P.Ones()
+ logical_xor_ = P.LogicalXor()
+ zeta_ = P.Zeta()
+ div_ = P.Div()
+ matmul_ = P.MatMul()
+

  #####################################
  # Element-wise Operation Functions.
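
These hunks, and most of the function-body hunks that follow, are one recurring refactor: primitives that 2.1.0 built on every call through `_get_cache_prim(P.Op)()` are now instantiated once at module import and reused. A minimal pure-Python sketch of the trade-off (the `Primitive` class below is a stand-in, not MindSpore's):

    import time

    class Primitive:
        """Stand-in for a MindSpore primitive: non-trivial to construct, cheap to call."""
        def __init__(self):
            time.sleep(0.001)  # simulate operator registration/initialization cost

        def __call__(self, x):
            return -x

    # 2.1.0 style: build (or fetch from a cache) the primitive inside the function.
    def neg_old(x):
        return Primitive()(x)  # pays construction or cache-lookup cost on every call

    # 2.2.11 style: a single module-level instance, created at import time.
    neg_ = Primitive()

    def neg_new(x):
        return neg_(x)  # hot path is just a call

Since `_get_cache_prim` presumably already memoizes instances, the practical win is removing the per-call cache lookup, paid for by constructing every primitive at import.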
@@ -284,12 +320,13 @@ def add(input, other):
  [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
  - The two inputs comply with the implicit type conversion rules to make the data types
  consistent.
+ - When input is a Tensor, its dimension should be greater than or equal to 1.

  Args:
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
  a bool or a tensor whose data type is
- `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
  other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
@@ -324,7 +361,7 @@ def add(input, other):
  >>> print(output.dtype)
  Float32
  """
- return _get_cache_prim(P.Add)()(input, other)
+ return tensor_add(input, other)


  def addcdiv(input, tensor1, tensor2, value=1):
@@ -365,7 +402,7 @@ def addcdiv(input, tensor1, tensor2, value=1):
  >>> print(y)
  [1.25 1.6666667 2.5 5. ]
  """
- return _get_cache_prim(P.Addcdiv)()(input, tensor1, tensor2, Tensor(value))
+ return addcdiv_(input, tensor1, tensor2, Tensor(value))


  def addcmul(input, tensor1, tensor2, value=1):
@@ -411,7 +448,7 @@ def addcmul(input, tensor1, tensor2, value=1):
  [ 3. 5. 7.]
  [ 4. 7. 10.]]
  """
- return _get_cache_prim(P.Addcmul)()(input, tensor1, tensor2, Tensor(value))
+ return addcuml_(input, tensor1, tensor2, Tensor(value))


  def angle(input):
@@ -489,8 +526,7 @@ def bincount(input, weights=None, minlength=0):
  raise TypeError(f"For math function 'bincount', 'weights' must be Tensor, but got {type(weights)}.")
  if not isinstance(minlength, int) or isinstance(minlength, bool):
  raise TypeError(f"For math function 'bincount', 'minlength' must be int but got {type(minlength)}.")
- rank_op = _get_cache_prim(P.Rank)()
- if rank_op(input) != 1:
+ if rank_(input) != 1:
  raise ValueError(f"For math function 'bincount', 'input' should be one-dimensional tensor.")
  if not (input >= 0).all():
  raise ValueError(f"For 'bincount', elements of 'input' should be non-negative.")
@@ -554,7 +590,7 @@ def bucketize(input, boundaries, *, right=False):

  bucketize_op = _get_cache_prim(P.Bucketize)
  epsilon_ = 0. if right else 1.e-6
- boundaries = [boundary+epsilon_ for boundary in boundaries]
+ boundaries = [boundary + epsilon_ for boundary in boundaries]
  return bucketize_op(boundaries)(input)


@@ -628,18 +664,15 @@ def argmin(input, axis=None, keepdims=False):
  return Tensor(0)
  is_axis_none = False
  if axis is None:
- input = P.Reshape()(input, (-1,))
+ input = reshape_(input, (-1,))
  axis = 0
  is_axis_none = True
  out = _get_cache_prim(P.Argmin)(axis)(input)
  if keepdims and not is_axis_none:
- out = P.ExpandDims()(out, axis)
+ out = expand_dims_(out, axis)
  return out


- neg_tensor = P.Neg()
-
-
  def neg(input):
  """
  Returns a tensor with negative values of the input tensor element-wise.
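
The rewritten `argmin` keeps the documented behavior: with `axis=None` the input is flattened first, and `keepdims=True` re-expands the reduced axis. A quick usage sketch (expected outputs follow from the semantics shown above):

    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor([[9.0, 3.0], [4.0, 6.0]], ms.float32)
    print(ops.argmin(x))                         # axis=None: flattened, min is 3.0 -> 1
    print(ops.argmin(x, axis=1))                 # per-row minima -> [1 0]
    print(ops.argmin(x, axis=1, keepdims=True))  # axis restored via ExpandDims -> [[1] [0]]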
@@ -857,8 +890,8 @@ def sub(input, other):
  Args:
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
  a bool or a tensor whose data type is
- `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
  other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
@@ -952,8 +985,8 @@ def mul(input, other):
  Args:
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
  a bool or a tensor whose data type is
- `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
  other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
@@ -1050,10 +1083,10 @@ def div(input, other, *, rounding_mode=None):
  raise ValueError("For ops.div, rounding_mode value should be None, 'floor' or 'trunc'.")

  if rounding_mode == 'floor':
- return _get_cache_prim(P.FloorDiv)()(input, other)
- output = _get_cache_prim(P.Div)()(input, other)
+ return tensor_floordiv(input, other)
+ output = div_(input, other)
  if rounding_mode == 'trunc':
- output = _get_cache_prim(P.Trunc)()(output)
+ output = trunc_(output)
  return output
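
As the branches above show, `rounding_mode='floor'` dispatches straight to `FloorDiv`, while `'trunc'` truncates the true-division result; the two only differ on negative quotients. A small sketch of the expected behavior:

    import mindspore as ms
    from mindspore import Tensor, ops

    a = Tensor([7.0, -7.0], ms.float32)
    b = Tensor([2.0, 2.0], ms.float32)
    print(ops.div(a, b))                         # true division:  [ 3.5 -3.5]
    print(ops.div(a, b, rounding_mode='floor'))  # toward -inf:    [ 3. -4.]
    print(ops.div(a, b, rounding_mode='trunc'))  # toward zero:    [ 3. -3.]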
@@ -1241,8 +1274,8 @@ def pow(input, exponent):
  Args:
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
  a bool or a tensor whose data type is
- `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
  exponent (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -1436,7 +1469,7 @@ def logdet(input):
  Calculates log determinant of one or a batch of square matrices.

  Args:
- input (Tensor): Input Tensor of any dimension.
+ input (Tensor): Tensor of shape :math:`(*, n, n)` where :math:`*` means zero or more batch dimensions.

  Returns:
  Tensor, the log determinant of `input`. If the matrix determinant is smaller than 0, nan will be returned. If
@@ -1491,8 +1524,7 @@ def floor(input):
  >>> print(output)
  [ 1. 2. -2.]
  """
- _floor = _get_cache_prim(P.Floor)()
- return _floor(input)
+ return floor_(input)


  def i0(input):
@@ -1560,9 +1592,10 @@ def inplace_add(x, v, indices):
  `indices` refers to the left-most dimension.

  Args:
- x (Tensor): The first input is a tensor whose data type is float16, float32, float64 or int32.
- v (Tensor): The second input is a tensor that has the same dimension sizes as `x` except
- the first dimension, which must be the same as indices' size. It has the same data type with `x`.
+ x (Tensor): The tensor to be added. It has shape :math:`(N,*)` where :math:`*` means
+ any number of additional dimensions.
+ v (Tensor): The value tensor to add to `x`. It has the same dimension sizes as `x` except
+ the first dimension, whose size must be the same as `indices`. It has the same data type with `x`.
  indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
  to add with `v`. It is an integer or a tuple, whose value is in [0, the first dimension size of `x`).
@@ -1650,10 +1683,10 @@ def inplace_sub(x, v, indices):
  `indices` refers to the left-most dimension.

  Args:
- x (Tensor): The first input is a tensor whose data type is float16, float32, float64 or int32.
- Tensors of arbitrary dimensions are supported.
- v (Tensor): The second input is a tensor who has the same dimension sizes as `x` except
- the first dimension, which must be the same as indices' size. It has the same data type with `x`.
+ x (Tensor): The tensor to be subtracted. It has shape :math:`(N,*)` where :math:`*` means
+ any number of additional dimensions.
+ v (Tensor): The value tensor to subtract from `x`. It has the same dimension sizes as `x` except
+ the first dimension, whose size must be the same as `indices`. It has the same data type with `x`.
  indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
  to subtract with `v`. It is an int or tuple, whose value is in [0, the first dimension size of `x`).
@@ -1891,7 +1924,7 @@ def sign(input):
  """
  if not isinstance(input, Tensor):
  raise TypeError(f"For sign, the input must be a Tensor, but got {type(input)}")
- return _get_cache_prim(ops.Sign)()(input)
+ return sign_(input)


  def signbit(input):
@@ -2295,7 +2328,7 @@ def t(input):
  if input.ndim > 2:
  raise ValueError(f"For t(), the dimension of tensor should be less than 3, but got {input.ndim}.")
  if input.ndim == 2:
- return _get_cache_prim(P.Transpose)()(input, (1, 0))
+ return transpose_(input, (1, 0))
  return input


@@ -2352,8 +2385,8 @@ def xlogy(input, other):
  Args:
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
  a bool or a tensor whose data type is
- `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
  other (Union[Tensor, number.Number, bool]): The second input is a number.Number or
  a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -2452,8 +2485,7 @@ def arctan2(input, other):
  >>> print(output)
  [0. 0.7853982]
  """
- _atan2 = _get_cache_prim(P.Atan2)()
- return _atan2(input, other)
+ return atan2_(input, other)


  def polar(abs, angle): # pylint: disable=redefined-outer-name
@@ -2857,16 +2889,17 @@ def atan2(input, other):
  - At least one of the `input` and `other` args is Tensor.

  Args:
- input (Tensor, Number.number): The input tensor or scalar.
+ input (Tensor): The input tensor with shape
+ :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  The data type should be one of the following types: float16, float32, float64
- other (Tensor, Number.number): The input tensor or scalar. It has the same shape with `input` or
+ other (Tensor): The input tensor. It has the same shape with `input` or
  its shape is able to broadcast with `input`.

  Returns:
- Tensor or scalar, the shape is the same as the one after broadcasting, and the data type is same as `input`.
+ Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.

  Raises:
- TypeError: If `input` or `other` is not a Tensor or scalar.
+ TypeError: If `input` or `other` is not a Tensor.
  RuntimeError: If the data type of `input` and `other` conversion of Parameter is required
  when data type conversion of Parameter is not supported.
@@ -2883,8 +2916,7 @@ def atan2(input, other):
  >>> print(output)
  [0. 0.7853982]
  """
- _atan2 = _get_cache_prim(P.Atan2)()
- return _atan2(input, other)
+ return atan2_(input, other)


  def bitwise_and(input, other):
@@ -3154,7 +3186,6 @@ def nextafter(input, other):
  >>> print(output_)
  [1.e-45]
  """
- nextafter_ = _get_cache_prim(P.NextAfter)()
  return nextafter_(input, other)


@@ -3218,7 +3249,7 @@ def inverse(input):
  [ 1.5 -0.5]]
  """
  _check_is_tensor("input", input, "inverse")
- return _get_cache_prim(P.MatrixInverse)()(input)
+ return matrix_inverse_(input)


  def invert(x):
@@ -3229,13 +3260,14 @@ def invert(x):
  out_i = \sim x_{i}

  Args:
- x (Tensor): The input Tensor.
+ x (Tensor): The input Tensor of shape :math:`(x_1, x_2, ..., x_R)`.
+ The data type should be one of the following types: int16, uint16.

  Returns:
  Tensor, has the same shape as `x`.

  Raises:
- TypeError: If dtype of `x` is neither int nor uint.
+ TypeError: If dtype of `x` is neither int16 nor uint16.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -3261,8 +3293,10 @@ def erf(input):
  erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt

  Args:
- input (Tensor): The input tensor of Gaussian error function. Its data type
- must be float16 float32 or float64.
+ input (Tensor): The input tensor of Gaussian error function. Supported dtypes:
+
+ - Ascend: float16, float32.
+ - GPU/CPU: float16, float32, float64.

  Returns:
  Tensor, has the same shape and dtype as the `input`.
@@ -3295,7 +3329,10 @@ def erfc(input):
  erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt

  Args:
- input (Tensor): The input tensor with a dtype of float16, float32 or float64.
+ input (Tensor): The input tensor. Supported dtypes:
+
+ - Ascend: float16, float32.
+ - GPU/CPU: float16, float32, float64.

  Returns:
  Tensor, has the same shape and dtype as `input`.
@@ -3321,7 +3358,15 @@ def erfc(input):

  def bessel_j0(x):
  r"""
- Computes the Bessel j0 function of x element-wise.
+ Computes Bessel function of the first kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ J_{0}(x) = \frac{1}{\pi} \int_{0}^{\pi} \cos (x \sin \theta) d \theta
+ =\sum_{m=0}^{\infty} \frac{(-1)^{m} x^{2 m}}{2^{2 m} (m !)^2}
+ \end{array}

  Args:
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -3350,7 +3395,15 @@ def bessel_j0(x):

  def bessel_j1(x):
  r"""
- Computes the Bessel j1 function of x element-wise.
+ Computes Bessel function of the first kind, order 1 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ J_{1}(x) = \frac{1}{\pi} \int_{0}^{\pi} \cos (x \sin \theta- \theta) d \theta
+ =\sum_{m=0}^{\infty} \frac{(-1)^{m} x^{2 m+1}}{2^{2 m+1} m !(m+1) !}
+ \end{array}

  Args:
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -3379,7 +3432,15 @@ def bessel_j1(x):

  def bessel_i0(x):
  r"""
- Computes the Bessel i0 function of x element-wise.
+ Computes modified Bessel function of the first kind, order 0 element-wise.
+
+ .. math::
+ \begin{array}{ll} \\
+ I_{0}(x)=J_{0}(\mathrm{i} x)=\sum_{m=0}^{\infty}
+ \frac{x^{2 m}}{2^{2 m} (m !)^{2}}
+ \end{array}
+
+ where :math:`J_{0}` is Bessel function of the first kind, order 0.

  Args:
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -3408,7 +3469,17 @@ def bessel_i0(x):

  def bessel_i0e(x):
  r"""
- Computes the Bessel i0e function of x element-wise.
+ Computes exponential scaled modified Bessel function of the first kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ \text I_{0}e(x)=e^{(-|x|)} * I_{0}(x)=e^{(-|x|)} * \sum_{m=0}^
+ {\infty} \frac{x^{2 m}}{2^{2 m} (m !)^{2}}
+ \end{array}
+
+ where :math:`I_{0}` is modified Bessel function of the first kind, order 0.

  Args:
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -3437,7 +3508,17 @@ def bessel_i0e(x):

  def bessel_k0(x):
  r"""
- Computes the Bessel k0 function of x element-wise.
+ Computes modified Bessel function of the second kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ K_{0}(x)= \lim_{\nu \to 0} \left(\frac{\pi}{2}\right) \frac
+ {I_{-\nu}(x)-I_{\nu}(x)}{\sin (\nu \pi)} = \int_{0}^{\infty} e^{-x \cosh t} d t
+ \end{array}
+
+ where :math:`I_{0}` is modified Bessel function of the first kind, order 0.

  Args:
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -3466,7 +3547,17 @@ def bessel_k0(x):

  def bessel_k0e(x):
  r"""
- Computes the Bessel k0e function of x element-wise.
+ Computes exponential scaled modified Bessel function of the second kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ K_{0}e(x)= e^{(-|x|)} * K_{0}(x) = e^{(-|x|)} * \int_{0}^
+ {\infty} e^{-x \cosh t} d t
+ \end{array}
+
+ where :math:`K_{0}` is modified Bessel function of the second kind, order 0.

  Args:
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -3495,7 +3586,16 @@ def bessel_k0e(x):

  def bessel_y0(x):
  r"""
- Computes the Bessel y0 function of x element-wise.
+ Computes Bessel function of the second kind, order 0 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ Y_{0}(x)=\lim_{n \to 0} \frac{J_{n}(x) \cos n \pi-J_{-n}(x)}{\sin n \pi}
+ \end{array}
+
+ where :math:`J_{0}` is Bessel function of the first kind, order 0.

  Args:
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -3524,7 +3624,16 @@ def bessel_y0(x):

  def bessel_y1(x):
  r"""
- Computes the Bessel y1 function of x element-wise.
+ Computes Bessel function of the second kind, order 1 element-wise.
+
+ The formula is defined as:
+
+ .. math::
+ \begin{array}{ll} \\
+ Y_{1}(x)=\lim_{n \to 1} \frac{J_{n}(x) \cos n \pi-J_{-n}(x)}{\sin n \pi}
+ \end{array}
+
+ where :math:`J_{1}` is Bessel function of the first kind, order 1.

  Args:
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
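
The new docstrings spell out the scaling identities, e.g. :math:`I_{0}e(x) = e^{-|x|} I_{0}(x)` for `bessel_i0e`. That identity can be sanity-checked numerically against SciPy's equivalents (this check uses `scipy.special`, not MindSpore itself):

    import numpy as np
    from scipy.special import i0, i0e

    x = np.linspace(-5.0, 5.0, 101)
    # i0e should equal exp(-|x|) * i0(x), per the formula in the docstring above.
    scaled = np.exp(-np.abs(x)) * i0(x)
    assert np.allclose(i0e(x), scaled)
    print("max deviation:", np.max(np.abs(i0e(x) - scaled)))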
@@ -3551,6 +3660,36 @@ def bessel_y1(x):
  return bessel_y1_(x)


+ def eps(x):
+ r"""
+ Create a Tensor with the same data type and shape as input, and the element value is the minimum value that the
+ corresponding data type can express.
+
+ Args:
+ x (Tensor): Tensor of any dimension used to obtain the minimum value that its data type can express.
+ The data type must be float16, float32 or float64.
+
+ Returns:
+ Tensor, has the same type and shape as `x`, but filled with `x` dtype minimum val.
+
+ Raises:
+ TypeError: If `x` is not a Tensor.
+ TypeError: If data type of `x` is neither float16, float32, nor float64.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor([4, 1, 2, 3], mindspore.float32)
+ >>> output = ops.eps(x)
+ >>> print(output)
+ [1.1920929e-07 1.1920929e-07 1.1920929e-07 1.1920929e-07]
+ """
+ return eps_(x)
+
+
  def linspace(start, end, steps):
  r"""
  Returns a Tensor whose value is `steps` evenly spaced in the interval `start` and `end` (including `start` and
@@ -3633,7 +3772,7 @@ def det(input):
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
  """
- return _get_cache_prim(P.MatrixDeterminant)()(input)
+ return matrix_determinant_(input)


  def matrix_determinant(input):
@@ -3641,7 +3780,7 @@ def matrix_determinant(input):
  `matrix_determinant` is deprecated, please use `det` instead.
  """
  logger.warning("matrix_determinant is deprecated, please use `det` instead.")
- return _get_cache_prim(P.MatrixDeterminant)()(input)
+ return matrix_determinant_(input)


  def log_matrix_determinant(input):
@@ -3649,7 +3788,7 @@ def log_matrix_determinant(input):
  `log_matrix_determinant` is deprecated, please use `matrix_solve` instead.
  """
  logger.warning("`log_matrix_determinant` is deprecated, please use `matrix_solve` instead.")
- return _get_cache_prim(P.LogMatrixDeterminant)()(input)
+ return log_matrix_determinant_(input)


  def matrix_exp(input):
@@ -3830,7 +3969,7 @@ def slogdet(input):
  >>> print(output)
  [2.80336046e+00 3.04452229e+00]
  """
- return _get_cache_prim(P.LogMatrixDeterminant)()(input)
+ return log_matrix_determinant_(input)


  def trace(input):
@@ -3870,7 +4009,6 @@ def trace(input):
  >>> print(output)
  24.0
  """
- trace_ = _get_cache_prim(P.Trace)()
  return trace_(input)


@@ -3992,7 +4130,7 @@ def trunc(input):
  >>> print(output)
  [3. 0. 0. -3.]
  """
- return _get_cache_prim(P.Trunc)()(input)
+ return trunc_(input)


  def ldexp(x, other):
@@ -4043,11 +4181,7 @@ def ldexp(x, other):
  [[2.]
  [8.]]
  """
-
- pow_ops = _get_cache_prim(P.Pow)()
- mul_ops = _get_cache_prim(P.Mul)()
-
- out = mul_ops(x, pow_ops(2.0, other))
+ out = tensor_mul(x, tensor_pow(2.0, other))
  return out
@@ -4102,11 +4236,11 @@ def logit(input, eps=None):
4102
4236
  #####################################
4103
4237
 
4104
4238
 
4105
- def less(x, y):
4239
+ def less(input, other):
4106
4240
  r"""
4107
- Computes the boolean value of :math:`x < y` element-wise.
4241
+ Computes the boolean value of :math:`input < other` element-wise.
4108
4242
 
4109
- Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
4243
+ Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
4110
4244
  The inputs must be two tensors or one tensor and one scalar.
4111
4245
  When the inputs are one tensor and one scalar,
4112
4246
  the scalar could only be a constant.
@@ -4114,21 +4248,21 @@ def less(x, y):
4114
4248
  .. math::
4115
4249
 
4116
4250
  out_{i} =\begin{cases}
4117
- & \text{True, if } x_{i}<y_{i} \\
4118
- & \text{False, if } x_{i}>=y_{i}
4251
+ & \text{True, if } input_{i}<other_{i} \\
4252
+ & \text{False, if } input_{i}>=other_{i}
4119
4253
  \end{cases}
4120
4254
 
4121
4255
  Args:
4122
- x (Union[Tensor, Number, bool]): The first input is a number or
4256
+ input (Union[Tensor, Number, bool]): The first input is a number or
4123
4257
  a bool or a tensor whose data type is number or bool.
4124
- y (Union[Tensor, Number, bool]): The second input is a number or
4258
+ other (Union[Tensor, Number, bool]): The second input is a number or
4125
4259
  a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
4126
4260
 
4127
4261
  Returns:
4128
4262
  Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4129
4263
 
4130
4264
  Raises:
4131
- TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
4265
+ TypeError: If `input` and `other` are not one of the following: Tensor, Number, bool.
4132
4266
 
4133
4267
  Supported Platforms:
4134
4268
  ``Ascend`` ``GPU`` ``CPU``
@@ -4143,7 +4277,7 @@ def less(x, y):
4143
4277
  >>> print(output)
4144
4278
  [False False True]
4145
4279
  """
4146
- return tensor_lt(x, y)
4280
+ return tensor_lt(input, other)
4147
4281
 
4148
4282
 
4149
4283
  def lt(input, other):
@@ -4156,28 +4290,29 @@ def lt(input, other):
4156
4290
  return less(input, other)
4157
4291
 
4158
4292
 
4159
- def le(x, y):
4293
+ def le(input, other):
4160
4294
  r"""
4161
- Computes the boolean value of :math:`x <= y` element-wise.
4295
+ Computes the boolean value of :math:`input <= other` element-wise.
4162
4296
 
4163
4297
  .. math::
4164
4298
 
4165
4299
  out_{i} =\begin{cases}
4166
- & \text{True, if } x_{i}<=y_{i} \\
4167
- & \text{False, if } x_{i}>y_{i}
4300
+ & \text{True, if } input_{i}<=other_{i} \\
4301
+ & \text{False, if } input_{i}>other_{i}
4168
4302
  \end{cases}
4169
4303
 
4170
4304
  .. note::
4171
- - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
4305
+ - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
4306
+ consistent.
4172
4307
  - The inputs must be two tensors or one tensor and one scalar.
4173
4308
  - When the inputs are one tensor and one scalar, the scalar could only be a constant.
4174
4309
 
4175
4310
  Args:
4176
- x (Union[Tensor, number.Number, bool]): The first input is a number.Number or
4311
+ input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
4177
4312
  a bool or a tensor whose data type is
4178
- `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
4179
- `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
4180
- y (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
4313
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
4314
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
4315
+ other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
4181
4316
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
4182
4317
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
4183
4318
 
@@ -4185,7 +4320,7 @@ def le(x, y):
4185
4320
  Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4186
4321
 
4187
4322
  Raises:
4188
- TypeError: If neither `x` nor `y` is a Tensor.
4323
+ TypeError: If neither `input` nor `other` is a Tensor.
4189
4324
 
4190
4325
  Supported Platforms:
4191
4326
  ``Ascend`` ``GPU`` ``CPU``
@@ -4200,22 +4335,23 @@ def le(x, y):
4200
4335
  >>> print(output)
4201
4336
  [ True False True]
4202
4337
  """
4203
- return tensor_le(x, y)
4338
+ return tensor_le(input, other)
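The renamed comparison functions keep their scalar-broadcasting behavior; a small sketch assuming int32 input:

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([1, 2, 3], mindspore.int32)
    >>> print(ops.less(x, 2))   # strict comparison
    [ True False False]
    >>> print(ops.le(x, 2))     # allows equality
    [ True  True False]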
4204
4339
 
4205
4340
 
4206
- def gt(x, y):
4341
+ def gt(input, other):
4207
4342
  r"""
4208
- Compare the value of the input parameters :math:`x,y` element-wise, and the output result is a bool value.
4343
+ Compares the values of the input parameters :math:`input` and :math:`other` element-wise, returning a bool Tensor.
4209
4344
 
4210
4345
  .. math::
4211
4346
 
4212
4347
  out_{i} =\begin{cases}
4213
- & \text{True, if } x_{i}>y_{i} \\
4214
- & \text{False, if } x_{i}<=y_{i}
4348
+ & \text{True, if } input_{i}>other_{i} \\
4349
+ & \text{False, if } input_{i}<=other_{i}
4215
4350
  \end{cases}
4216
4351
 
4217
4352
  Note:
4218
- - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
4353
+ - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
4354
+ consistent.
4219
4355
  - The inputs must be two tensors or one tensor and one scalar.
4220
4356
  - When the inputs are two tensors, dtypes of them cannot be bool at the same time,
4221
4357
  and the shapes of them can be broadcast.
@@ -4225,11 +4361,11 @@ def gt(x, y):
4225
4361
  in another input by copying the value of the dimension.
4226
4362
 
4227
4363
  Args:
4228
- x (Union[Tensor, number.Number, bool]): The first input is a number.Number or
4364
+ input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
4229
4365
  a bool or a tensor whose data type is
4230
- `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
4231
- `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ .
4232
- y (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
4366
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
4367
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
4368
+ other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
4233
4369
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
4234
4370
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
4235
4371
 
@@ -4237,7 +4373,7 @@ def gt(x, y):
4237
4373
  Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4238
4374
 
4239
4375
  Raises:
4240
- TypeError: If neither `x` nor `y` is a Tensor.
4376
+ TypeError: If neither `input` nor `other` is a Tensor.
4241
4377
 
4242
4378
  Supported Platforms:
4243
4379
  ``Ascend`` ``GPU`` ``CPU``
@@ -4252,16 +4388,16 @@ def gt(x, y):
4252
4388
  >>> print(output)
4253
4389
  [False True False]
4254
4390
  """
4255
- _greater = _get_cache_prim(P.Greater)()
4256
- return _greater(x, y)
4391
+ return tensor_gt(input, other)
4257
4392
 
4258
4393
 
4259
- def ge(x, y):
4394
+ def ge(input, other):
4260
4395
  r"""
4261
- Computes the boolean value of :math:`x >= y` element-wise.
4396
+ Computes the boolean value of :math:`input >= other` element-wise.
4262
4397
 
4263
4398
  Note:
4264
- - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
4399
+ - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
4400
+ consistent.
4265
4401
  - The inputs must be two tensors or one tensor and one scalar.
4266
4402
  - When the inputs are two tensors, dtypes of them cannot be bool at the same time,
4267
4403
  and the shapes of them can be broadcast.
@@ -4273,21 +4409,21 @@ def ge(x, y):
4273
4409
  .. math::
4274
4410
 
4275
4411
  out_{i} =\begin{cases}
4276
- & \text{True, if } x_{i}>=y_{i} \\
4277
- & \text{False, if } x_{i}<y_{i}
4412
+ & \text{True, if } input_{i}>=other_{i} \\
4413
+ & \text{False, if } input_{i}<other_{i}
4278
4414
  \end{cases}
4279
4415
 
4280
4416
  Args:
4281
- x (Union[Tensor, Number, bool]): The first input is a number or
4417
+ input (Union[Tensor, Number, bool]): The first input is a number or
4282
4418
  a bool or a tensor whose data type is number or bool.
4283
- y (Union[Tensor, Number, bool]): The second input is a number or
4419
+ other (Union[Tensor, Number, bool]): The second input is a number or
4284
4420
  a bool when the first input is a tensor or a tensor whose data type is number or bool.
4285
4421
 
4286
4422
  Returns:
4287
4423
  Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4288
4424
 
4289
4425
  Raises:
4290
- TypeError: If neither `x` nor `y` is a Tensor.
4426
+ TypeError: If neither `input` nor `other` is a Tensor.
4291
4427
 
4292
4428
  Supported Platforms:
4293
4429
  ``Ascend`` ``GPU`` ``CPU``
@@ -4302,14 +4438,15 @@ def ge(x, y):
4302
4438
  >>> print(output)
4303
4439
  [True True False]
4304
4440
  """
4305
- _greater_equal = _get_cache_prim(P.GreaterEqual)()
4306
- return _greater_equal(x, y)
4441
+ return tensor_ge(input, other)
4307
4442
 
4308
4443
 
4309
- def equal(input, other):
4444
+ def eq(input, other):
4310
4445
  r"""
4311
4446
  Computes the equivalence between two tensors element-wise.
4312
4447
 
4448
+ The second argument can be a number or a tensor whose shape is broadcastable with the first argument, and vice versa.
4449
+
4313
4450
  .. math::
4314
4451
 
4315
4452
  out_{i} =\begin{cases}
@@ -4319,16 +4456,14 @@ def equal(input, other):
4319
4456
 
4320
4457
  Note:
4321
4458
  - `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
4322
- - The inputs must be two tensors or one tensor and one scalar.
4323
- - When the inputs are two tensors, the shapes of them could be broadcast.
4324
- - When the inputs are one tensor and one scalar, the scalar could only be a constant.
4459
+ - The shapes of the inputs can be broadcasted to each other.
4325
4460
 
4326
4461
  Args:
4327
4462
  input (Union[Tensor, Number]): The first input is a number or
4328
4463
  a tensor whose data type is number.
4329
- other (Union[Tensor, Number]): The second input is a number
4330
- when the first input is a tensor or a tensor whose data type is number.
4331
- The data type is the same as the first input.
4464
+ other (Union[Tensor, Number]): The second input is a number or a tensor whose data
4465
+ type is the same as the first input. If the first input is a number, the second
4466
+ input must be a tensor.
4332
4467
 
4333
4468
  Returns:
4334
4469
  Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
@@ -4341,16 +4476,66 @@ def equal(input, other):
4341
4476
 
4342
4477
  Examples:
4343
4478
  >>> import mindspore
4344
- >>> import numpy as np
4345
4479
  >>> from mindspore import Tensor, ops
4346
4480
  >>> # case 1: The shapes of the two inputs are different
4347
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
4481
+ >>> x = Tensor([1, 2, 3], mindspore.float32)
4482
+ >>> output = ops.eq(x, 2.0)
4483
+ >>> print(output)
4484
+ [False True False]
4485
+ >>> # case 2: The shapes of the two inputs are the same
4486
+ >>> x = Tensor([1, 2, 3], mindspore.int32)
4487
+ >>> y = Tensor([1, 2, 4], mindspore.int32)
4488
+ >>> output = ops.eq(x, y)
4489
+ >>> print(output)
4490
+ [ True True False]
4491
+ """
4492
+ return equal_(input, other)
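`eq` is introduced here as a new entry point that dispatches to the same `equal_` primitive as the existing `equal` below, so the two calls are interchangeable:

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([1, 2, 3], mindspore.int32)
    >>> y = Tensor([1, 2, 4], mindspore.int32)
    >>> print(ops.eq(x, y))
    [ True True False]
    >>> print(ops.equal(x, y))  # same result via the older name
    [ True True False]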
4493
+
4494
+
4495
+ def equal(input, other):
4496
+ r"""
4497
+ Computes the equivalence between two tensors element-wise.
4498
+
4499
+ The second argument can be a number or a tensor whose shape is broadcastable with the first argument, and vice versa.
4500
+
4501
+ .. math::
4502
+
4503
+ out_{i} =\begin{cases}
4504
+ & \text{True, if } input_{i} = other_{i} \\
4505
+ & \text{False, if } input_{i} \ne other_{i}
4506
+ \end{cases}
4507
+
4508
+ Note:
4509
+ - `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
4510
+ - The shapes of the inputs can be broadcasted to each other.
4511
+
4512
+ Args:
4513
+ input (Union[Tensor, Number]): The first input is a number or
4514
+ a tensor whose data type is number.
4515
+ other (Union[Tensor, Number]): The second input is a number or a tensor whose data
4516
+ type is the same as the first input. If the first input is a number, the second
4517
+ input must be a tensor.
4518
+
4519
+ Returns:
4520
+ Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4521
+
4522
+ Raises:
4523
+ TypeError: If neither `input` nor `other` is a Tensor.
4524
+
4525
+ Supported Platforms:
4526
+ ``Ascend`` ``GPU`` ``CPU``
4527
+
4528
+ Examples:
4529
+ >>> import mindspore
4530
+ >>> from mindspore import Tensor, ops
4531
+ >>> # case 1: The shapes of the two inputs are different
4532
+ >>> x = Tensor([1, 2, 3], mindspore.float32)
4348
4533
  >>> output = ops.equal(x, 2.0)
4349
4534
  >>> print(output)
4350
4535
  [False True False]
4351
4536
  >>> # case 2: The shapes of the two inputs are the same
4352
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
4353
- >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
4537
+ >>> x = Tensor([1, 2, 3], mindspore.int32)
4538
+ >>> y = Tensor([1, 2, 4], mindspore.int32)
4354
4539
  >>> output = ops.equal(x, y)
4355
4540
  >>> print(output)
4356
4541
  [ True True False]
@@ -4358,12 +4543,13 @@ def equal(input, other):
4358
4543
  return equal_(input, other)
4359
4544
 
4360
4545
 
4361
- def ne(x, y):
4546
+ def ne(input, other):
4362
4547
  r"""
4363
4548
  Computes the non-equivalence of two tensors element-wise.
4364
4549
 
4365
4550
  Note:
4366
- - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
4551
+ - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
4552
+ consistent.
4367
4553
  - The inputs must be two tensors or one tensor and one scalar.
4368
4554
  - When the inputs are two tensors, the shapes of them could be broadcast.
4369
4555
  - When the inputs are one tensor and one scalar, the scalar could only be a constant.
@@ -4372,42 +4558,41 @@ def ne(x, y):
4372
4558
  .. math::
4373
4559
 
4374
4560
  out_{i} =\begin{cases}
4375
- & \text{True, if } x_{i} \ne y_{i} \\
4376
- & \text{False, if } x_{i} = y_{i}
4561
+ & \text{True, if } input_{i} \ne other_{i} \\
4562
+ & \text{False, if } input_{i} = other_{i}
4377
4563
  \end{cases}
4378
4564
 
4379
4565
  Args:
4380
- x (Union[Tensor, Number, bool]): The first input is a number or
4566
+ input (Union[Tensor, Number, bool]): The first input is a number or
4381
4567
  a bool or a tensor whose data type is number or bool.
4382
- y (Union[Tensor, Number, bool]): The second input is a number or
4568
+ other (Union[Tensor, Number, bool]): The second input is a number or
4383
4569
  a bool when the first input is a tensor or a tensor whose data type is number or bool.
4384
4570
 
4385
4571
  Returns:
4386
4572
  Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4387
4573
 
4388
4574
  Raises:
4389
- TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
4390
- TypeError: If neither `x` nor `y` is a Tensor.
4575
+ TypeError: If `input` and `other` are not one of the following: Tensor, Number, bool.
4576
+ TypeError: If neither `input` nor `other` is a Tensor.
4391
4577
 
4392
4578
  Supported Platforms:
4393
4579
  ``Ascend`` ``GPU`` ``CPU``
4394
4580
 
4395
4581
  Examples:
4396
4582
  >>> import mindspore
4397
- >>> import numpy as np
4398
4583
  >>> from mindspore import Tensor, ops
4399
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
4584
+ >>> x = Tensor([1, 2, 3], mindspore.float32)
4400
4585
  >>> output = ops.ne(x, 2.0)
4401
4586
  >>> print(output)
4402
4587
  [ True False True]
4403
4588
  >>>
4404
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
4405
- >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
4589
+ >>> x = Tensor([1, 2, 3], mindspore.int32)
4590
+ >>> y = Tensor([1, 2, 4], mindspore.int32)
4406
4591
  >>> output = ops.ne(x, y)
4407
4592
  >>> print(output)
4408
4593
  [False False True]
4409
4594
  """
4410
- return not_equal_(x, y)
4595
+ return not_equal_(input, other)
4411
4596
 
4412
4597
 
4413
4598
  def not_equal(input, other):
@@ -4508,27 +4693,27 @@ def isfinite(x):
4508
4693
  return isfinite_(x)
4509
4694
 
4510
4695
 
4511
- def isnan(x):
4696
+ def isnan(input):
4512
4697
  r"""
4513
4698
  Determines which elements are NaN for each position.
4514
4699
 
4515
4700
  .. math::
4516
4701
 
4517
4702
  out_i = \begin{cases}
4518
- & \ True,\ \text{ if } x_{i} = \text{Nan} \\
4519
- & \ False,\ \text{ if } x_{i} \ne \text{Nan}
4703
+ & \ True,\ \text{ if } input_{i} = \text{Nan} \\
4704
+ & \ False,\ \text{ if } input_{i} \ne \text{Nan}
4520
4705
  \end{cases}
4521
4706
 
4522
4707
  where :math:`Nan` means not a number.
4523
4708
 
4524
4709
  Args:
4525
- x (Tensor): The input tensor.
4710
+ input (Tensor): The input tensor.
4526
4711
 
4527
4712
  Returns:
4528
- Tensor, has the same shape of input, and the dtype is bool.
4713
+ Tensor, has the same shape as `input`, and the dtype is bool.
4529
4714
 
4530
4715
  Raises:
4531
- TypeError: If `x` is not a Tensor.
4716
+ TypeError: If `input` is not a Tensor.
4532
4717
 
4533
4718
  Supported Platforms:
4534
4719
  ``Ascend`` ``GPU`` ``CPU``
@@ -4546,34 +4731,34 @@ def isnan(x):
4546
4731
  >>> print(output)
4547
4732
  False
4548
4733
  """
4549
- return isnan_(x)
4734
+ return isnan_(input)
4550
4735
 
4551
4736
 
4552
- def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
4737
+ def isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
4553
4738
  """
4554
- Returns a new Tensor with boolean elements representing if each element of `x1`
4555
- is “close” to the corresponding element of `x2`. Closeness is defined as:
4739
+ Returns a new Tensor with boolean elements indicating whether each element of `input`
4740
+ is “close” to the corresponding element of `other`. Closeness is defined as:
4556
4741
 
4557
4742
  .. math::
4558
- x1x2∣ ≤ atol + rtol × ∣x2
4743
+ inputother∣ ≤ atol + rtol × ∣other
4559
4744
 
4560
4745
  Args:
4561
- x1 (Tensor): First Tensor to compare, with data type belongs to float32, float16, int32.
4562
- x2 (Tensor): Second Tensor to compare, with data type belongs to float32, float16, int32.
4746
+ input (Tensor): First Tensor to compare, with data type belongs to float32, float16, int32.
4747
+ other (Tensor): Second Tensor to compare, with data type belongs to float32, float16, int32.
4563
4748
  rtol (float, optional): Relative tolerance. Default: ``1e-05`` .
4564
4749
  atol (float, optional): Absolute tolerance. Default: ``1e-08`` .
4565
4750
  equal_nan (bool, optional): If True, then two NaNs will be considered equal. Default: ``False`` .
4566
4751
 
4567
4752
  Returns:
4568
- A bool Tensor, with the shape as broadcasted result of the input `x1` and `x2`.
4753
+ A bool Tensor, with its shape being the broadcast result of `input` and `other`.
4569
4754
 
4570
4755
  Raises:
4571
- TypeError: If either of `x1` and `x2` is not Tensor.
4572
- TypeError: If either of `x1` and `x2` is not float16, float32 or int32.
4756
+ TypeError: If either of `input` and `other` is not a Tensor.
4757
+ TypeError: If the dtype of `input` or `other` is not float16, float32 or int32.
4573
4758
  TypeError: If either of `atol` and `rtol` is not float.
4574
4759
  TypeError: If `equal_nan` is not bool.
4575
- TypeError: If the dtype of `x1` is not same as the `x2`.
4576
- ValueError: If `x1` and `x2` can not be broadcast.
4760
+ TypeError: If the dtype of `input` is not the same as that of `other`.
4761
+ ValueError: If `input` and `other` cannot be broadcast.
4577
4762
  ValueError: If either of `atol` and `rtol` is less than zero.
4578
4763
 
4579
4764
  Supported Platforms:
@@ -4590,7 +4775,7 @@ def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
4590
4775
  [ True False False False True]
4591
4776
  """
4592
4777
  is_close = _get_cache_prim(P.IsClose)(rtol=rtol, atol=atol, equal_nan=equal_nan)
4593
- return is_close(x1, x2)
4778
+ return is_close(input, other)
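The tolerance test is purely element-wise; a worked check of the formula with the default rtol=1e-05 and atol=1e-08 (values chosen so one element passes and one fails):

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> a = Tensor([1.0, 2.0], mindspore.float32)
    >>> b = Tensor([1.00001, 2.1], mindspore.float32)
    >>> # |1.0 - 1.00001| = 1e-5 <= 1e-8 + 1e-5 * 1.00001  -> True
    >>> # |2.0 - 2.1|     = 0.1  >  1e-8 + 1e-5 * 2.1      -> False
    >>> print(ops.isclose(a, b))
    [ True False]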
4594
4779
 
4595
4780
 
4596
4781
  def isreal(input):
@@ -4622,14 +4807,11 @@ def isreal(input):
4622
4807
  _check_is_tensor("input", input, "isreal")
4623
4808
 
4624
4809
  # Note: Integral and Floating tensor values are always real
4625
- fillv2_op = _get_cache_prim(P.FillV2)()
4626
4810
  value = Tensor(1, mstype.bool_)
4627
4811
  real_dtype = mstype.int_type + mstype.uint_type + mstype.float_type + (mstype.bool_,)
4628
4812
  if input.dtype in real_dtype:
4629
- return fillv2_op(input.shape, value)
4630
-
4631
- imag_op = _get_cache_prim(P.Imag)()
4632
- return imag_op(input) == 0
4813
+ return fill_v2_(input.shape, value)
4814
+ return imag_(input) == 0
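The refactored `isreal` keeps its two paths: tensors with a real dtype short-circuit to an all-True fill, while complex tensors are checked element-wise via `imag == 0`. For any float, int, uint or bool input the result is therefore uniformly True:

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> print(ops.isreal(Tensor([1.0, 2.0], mindspore.float32)))
    [ True True]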
4633
4815
 
4634
4816
 
4635
4817
  def is_complex(input):
@@ -4764,12 +4946,13 @@ def fmax(input, other):
4764
4946
  return fmax_(input, other)
4765
4947
 
4766
4948
 
4767
- def maximum(x, y):
4949
+ def maximum(input, other):
4768
4950
  r"""
4769
4951
  Computes the maximum of input tensors element-wise.
4770
4952
 
4771
4953
  Note:
4772
- - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
4954
+ - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
4955
+ consistent.
4773
4956
  - The inputs must be two tensors or one tensor and one scalar.
4774
4957
  - When the inputs are two tensors,
4775
4958
  dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
@@ -4779,12 +4962,12 @@ def maximum(x, y):
4779
4962
  - If one of the elements being compared is a NaN, then that element is returned.
4780
4963
 
4781
4964
  .. math::
4782
- output_i = \max(x_i, y_i)
4965
+ output_i = \max(input_i, other_i)
4783
4966
 
4784
4967
  Args:
4785
- x (Union[Tensor, Number, bool]): The first input is a number or
4968
+ input (Union[Tensor, Number, bool]): The first input is a number or
4786
4969
  a bool or a tensor whose data type is number or bool.
4787
- y (Union[Tensor, Number, bool]): The second input is a number or
4970
+ other (Union[Tensor, Number, bool]): The second input is a number or
4788
4971
  a bool when the first input is a tensor or a tensor whose data type is number or bool.
4789
4972
 
4790
4973
  Returns:
@@ -4792,8 +4975,8 @@ def maximum(x, y):
4792
4975
  and the data type is the one with higher precision or higher digits among the two inputs.
4793
4976
 
4794
4977
  Raises:
4795
- TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
4796
- ValueError: If `x` and `y` are not the same shape.
4978
+ TypeError: If `input` and `other` are not one of the following: Tensor, Number, bool.
4979
+ ValueError: If `input` and `other` are not the same shape.
4797
4980
 
4798
4981
  Supported Platforms:
4799
4982
  ``Ascend`` ``GPU`` ``CPU``
@@ -4815,7 +4998,7 @@ def maximum(x, y):
4815
4998
  >>> print(output.dtype)
4816
4999
  Float32
4817
5000
  """
4818
- return maximum_(x, y)
5001
+ return maximum_(input, other)
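The NaN-propagation rule in the note is easy to confirm: a NaN element wins the comparison regardless of the other operand (output formatting may differ slightly by backend):

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([1.0, float('nan'), 3.0], mindspore.float32)
    >>> y = Tensor([2.0, 2.0, 2.0], mindspore.float32)
    >>> print(ops.maximum(x, y))
    [ 2. nan  3.]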
4819
5002
 
4820
5003
 
4821
5004
  def fmin(input, other):
@@ -4861,12 +5044,13 @@ def fmin(input, other):
4861
5044
  return fmin_(input, other)
4862
5045
 
4863
5046
 
4864
- def minimum(x, y):
5047
+ def minimum(input, other):
4865
5048
  r"""
4866
5049
  Computes the minimum of input tensors element-wise.
4867
5050
 
4868
5051
  Note:
4869
- - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
5052
+ - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
5053
+ consistent.
4870
5054
  - The inputs must be two tensors or one tensor and one scalar.
4871
5055
  - When the inputs are two tensors, dtypes of them cannot be bool at the same time.
4872
5056
  - When the inputs are one tensor and one scalar, the scalar could only be a constant.
@@ -4874,12 +5058,12 @@ def minimum(x, y):
4874
5058
  - If one of the elements being compared is a NaN, then that element is returned.
4875
5059
 
4876
5060
  .. math::
4877
- output_i = \min(x_i, y_i)
5061
+ output_i = \min(input_i, other_i)
4878
5062
 
4879
5063
  Args:
4880
- x (Union[Tensor, Number, bool]): The first input is a number or
5064
+ input (Union[Tensor, Number, bool]): The first input is a number or
4881
5065
  a bool or a tensor whose data type is number or bool.
4882
- y (Union[Tensor, Number, bool]): The second input is a number or
5066
+ other (Union[Tensor, Number, bool]): The second input is a number or
4883
5067
  a bool when the first input is a tensor or a tensor whose data type is number or bool.
4884
5068
 
4885
5069
  Returns:
@@ -4887,8 +5071,8 @@ def minimum(x, y):
4887
5071
  and the data type is the one with higher precision or higher digits among the two inputs.
4888
5072
 
4889
5073
  Raises:
4890
- TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
4891
- ValueError: If `x` and `y` are not the same shape after broadcast.
5074
+ TypeError: If `input` and `other` are not one of the following: Tensor, Number, bool.
5075
+ ValueError: If `input` and `other` are not the same shape after broadcast.
4892
5076
 
4893
5077
  Supported Platforms:
4894
5078
  ``Ascend`` ``GPU`` ``CPU``
@@ -4910,7 +5094,7 @@ def minimum(x, y):
4910
5094
  >>> print(output.dtype)
4911
5095
  Float32
4912
5096
  """
4913
- return minimum_(x, y)
5097
+ return minimum_(input, other)
4914
5098
 
4915
5099
 
4916
5100
  def median(input, axis=-1, keepdims=False):
@@ -4993,8 +5177,8 @@ def nanmedian(input, axis=-1, keepdims=False):
4993
5177
  >>> import mindspore
4994
5178
  >>> from mindspore import Tensor, ops
4995
5179
  >>> x = Tensor([[0.57, 0.11, float("nan")],
4996
- >>> [0.38, float("nan"), float("nan")],
4997
- >>> [0.36, 0.16, float("nan")]], mindspore.float32)
5180
+ ... [0.38, float("nan"), float("nan")],
5181
+ ... [0.36, 0.16, float("nan")]], mindspore.float32)
4998
5182
  >>> y, idx = ops.nanmedian(x, axis=0, keepdims=False)
4999
5183
  >>> print(y)
5000
5184
  [0.38 0.11 nan]
@@ -5153,7 +5337,7 @@ def ormqr(input, tau, other, left=True, transpose=False):
5153
5337
  >>> import numpy as np
5154
5338
  >>> from mindspore import Tensor, ops
5155
5339
  >>> input = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62]]),
5156
- >>> mindspore.float32)
5340
+ ... mindspore.float32)
5157
5341
  >>> tau = Tensor(np.array([1.55, 1.94, 3.0]), mindspore.float32)
5158
5342
  >>> other = Tensor(np.array([[-114.6, 10.9, 1.1],
5159
5343
  ... [-0.304, 38.07, 69.38],
@@ -5507,7 +5691,7 @@ def vander(x, N=None):
5507
5691
  if N <= 0:
5508
5692
  raise ValueError(
5509
5693
  f"For vander, N must be greater than 0, but got {N}.")
5510
- exponent = ops.range(Tensor(N-1), Tensor(-1), Tensor(-1))
5694
+ exponent = ops.range(Tensor(N - 1), Tensor(-1), Tensor(-1))
5511
5695
  x = F.expand_dims(x, 1)
5512
5696
  exponent = F.expand_dims(exponent, 0)
5513
5697
  return F.tensor_pow(x, exponent)
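`vander` builds the decreasing exponent vector [N-1, ..., 0] with `ops.range` and broadcasts `tensor_pow` over a column view of the input, giving the classic Vandermonde layout. A sketch assuming float input:

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([1.0, 2.0, 3.0], mindspore.float32)
    >>> print(ops.vander(x, N=3))
    [[1. 1. 1.]
     [4. 2. 1.]
     [9. 3. 1.]]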
@@ -5629,10 +5813,10 @@ def var_mean(input, axis=None, ddof=0, keepdims=False):
5629
5813
  axis = _check_var_std_input(input, ddof, keepdims, axis, "var_mean")
5630
5814
  if ddof in (0, 1):
5631
5815
  output = _get_cache_prim(P.ReduceStd)(axis=axis, unbiased=bool(ddof), keep_dims=keepdims)(input)
5632
- return _get_cache_prim(P.Pow)()(output[0], 2), output[1]
5816
+ return tensor_pow(output[0], 2), output[1]
5633
5817
  x_mean = mean(input, axis, True)
5634
- x_sub = _get_cache_prim(P.Sub)()(input, x_mean)
5635
- x_pow = _get_cache_prim(P.Pow)()(x_sub, 2)
5818
+ x_sub = tensor_sub(input, x_mean)
5819
+ x_pow = tensor_pow(x_sub, 2)
5636
5820
  x_sum = sum(x_pow, axis, keepdims)
5637
5821
  res_mean = mean(input, axis, keepdims)
5638
5822
  nums = 1
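For ddof in {0, 1} the function squares ReduceStd's output with the cached `tensor_pow`; otherwise it computes the variance from first principles as the mean of squared deviations, with the scaling by the element count handled in the remainder of the function (cut off by this hunk). A quick population-variance check (ddof=0; scalar Tensors shown as printed):

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([1.0, 2.0, 3.0, 4.0], mindspore.float32)
    >>> var, mean = ops.var_mean(x)
    >>> print(var, mean)
    1.25 2.5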
@@ -5762,7 +5946,7 @@ def std_mean(input, axis=None, ddof=0, keepdims=False):
5762
5946
  if ddof in (0, 1):
5763
5947
  return _get_cache_prim(P.ReduceStd)(axis=axis, unbiased=bool(ddof), keep_dims=keepdims)(input)
5764
5948
  output = var_mean(input, axis, ddof, keepdims)
5765
- return _get_cache_prim(P.Pow)()(output[0], 0.5), output[1]
5949
+ return tensor_pow(output[0], 0.5), output[1]
5766
5950
 
5767
5951
 
5768
5952
  def real(input):
@@ -5791,7 +5975,7 @@ def real(input):
5791
5975
  >>> print(output)
5792
5976
  1.3
5793
5977
  """
5794
- return _get_cache_prim(ops.Real)()(input)
5978
+ return real_(input)
5795
5979
 
5796
5980
 
5797
5981
  def reciprocal(input):
@@ -5828,7 +6012,7 @@ def reciprocal(input):
5828
6012
  raise TypeError(f"For reciprocal, the input must be a Tensor, but got {type(input)}.")
5829
6013
  if not is_complex(input) and not ops.is_floating_point(input):
5830
6014
  input = ops.cast(input, mstype.float32)
5831
- return _get_cache_prim(ops.Reciprocal)()(input)
6015
+ return reciprocal_(input)
5832
6016
 
5833
6017
 
5834
6018
  def rsqrt(input):
@@ -5860,7 +6044,7 @@ def rsqrt(input):
5860
6044
  >>> print(output)
5861
6045
  [ nan 1.8349396 0.80530024 nan]
5862
6046
  """
5863
- return _get_cache_prim(ops.Rsqrt)()(input)
6047
+ return rsqrt_(input)
5864
6048
 
5865
6049
 
5866
6050
  def sqrt(x):
@@ -5971,8 +6155,7 @@ def outer(input, vec2):
5971
6155
  if len(vec2.shape) != 1:
5972
6156
  raise ValueError("the input vec2 must be a 1-D vector!")
5973
6157
  input = input.reshape(-1, 1)
5974
- mul_ops = _get_cache_prim(P.Mul)()
5975
- y = mul_ops(input, vec2)
6158
+ y = tensor_mul(input, vec2)
5976
6159
  return y
5977
6160
 
5978
6161
 
@@ -6006,10 +6189,6 @@ def mv(mat, vec):
6006
6189
  >>> print(output)
6007
6190
  [11. 13. 7.]
6008
6191
  """
6009
-
6010
- matmul_op = _get_cache_prim(P.MatMul)()
6011
- reshape_op = _get_cache_prim(P.Reshape)()
6012
-
6013
6192
  if not isinstance(mat, (Tensor, Tensor_)):
6014
6193
  raise TypeError("The input mat must be Tensor.")
6015
6194
  if not isinstance(vec, (Tensor, Tensor_)):
@@ -6020,9 +6199,9 @@ def mv(mat, vec):
6020
6199
  raise ValueError("The input vec must be 1-D Tensor.")
6021
6200
 
6022
6201
  length_vec = get_x_shape(vec.shape)
6023
- vec = reshape_op(vec, (length_vec[0], 1))
6202
+ vec = reshape_(vec, (length_vec[0], 1))
6024
6203
 
6025
- out = matmul_op(mat, vec)
6204
+ out = matmul_(mat, vec)
6026
6205
  out = out.T
6027
6206
  out = out[0]
6028
6207
  return out
@@ -6080,8 +6259,7 @@ def addbmm(input, batch1, batch2, *, beta=1, alpha=1):
6080
6259
  raise TypeError(f"For 'addbmm', parameter 'alpha' must be an int or float, but got {type(alpha)}.")
6081
6260
  if not isinstance(beta, (int, float)):
6082
6261
  raise TypeError(f"For 'addbmm', parameter 'beta' must be an int or float, but got {type(beta)}.")
6083
- bmm_op = _get_cache_prim(P.BatchMatMul)()
6084
- bmm_res = bmm_op(batch1, batch2)
6262
+ bmm_res = batch_matmul_(batch1, batch2)
6085
6263
  return beta * input + alpha * (bmm_res.sum(axis=0))
6086
6264
 
6087
6265
 
@@ -6129,8 +6307,7 @@ def addmm(input, mat1, mat2, *, beta=1, alpha=1):
6129
6307
  raise TypeError(f"For 'addmm', parameter 'alpha' must be an int or float, but got {type(alpha)}.")
6130
6308
  if not isinstance(beta, (int, float)):
6131
6309
  raise TypeError(f"For 'addmm', parameter 'beta' must be an int or float, but got {type(beta)}.")
6132
- matmul_op = _get_cache_prim(P.MatMul)()
6133
- return beta * input + alpha * (matmul_op(mat1, mat2))
6310
+ return beta * input + alpha * (matmul_(mat1, mat2))
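`addmm` evaluates beta * input + alpha * (mat1 @ mat2) with the shared MatMul primitive; a worked example where `mat1` is the identity, so the product is just `mat2`:

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> m = Tensor([[1.0, 1.0], [1.0, 1.0]], mindspore.float32)
    >>> a = Tensor([[1.0, 0.0], [0.0, 1.0]], mindspore.float32)
    >>> b = Tensor([[2.0, 2.0], [2.0, 2.0]], mindspore.float32)
    >>> print(ops.addmm(m, a, b, beta=1, alpha=2))
    [[5. 5.]
     [5. 5.]]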
6134
6311
 
6135
6312
 
6136
6313
  def addmv(input, mat, vec, *, beta=1, alpha=1):
@@ -6180,11 +6357,10 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
6180
6357
  [30. 27.]
6181
6358
  """
6182
6359
 
6183
- dtypeop = P.DType()
6184
- input_dtype = dtypeop(input)
6360
+ input_dtype = dtype_(input)
6185
6361
  if not (isinstance(input, Tensor) and isinstance(mat, Tensor) and isinstance(vec, Tensor)):
6186
6362
  raise TypeError("For Addmv, inputs must be all tensors.")
6187
- if dtypeop(mat) != dtypeop(vec):
6363
+ if dtype_(mat) != dtype_(vec):
6188
6364
  raise TypeError("For Addmv, the mat and vec should be the same dtype.")
6189
6365
  _check_input_1d(vec.shape, "vec", "Addmv")
6190
6366
  _check_input_2d(mat.shape, "mat", "Addmv")
@@ -6278,11 +6454,10 @@ def addr(x, vec1, vec2, *, beta=1, alpha=1):
6278
6454
  [ 9. 12.]]
6279
6455
  """
6280
6456
 
6281
- dtypeop = P.DType()
6282
- input_dtype = dtypeop(x)
6457
+ input_dtype = dtype_(x)
6283
6458
  if not (isinstance(x, Tensor) and isinstance(vec1, Tensor) and isinstance(vec2, Tensor)):
6284
6459
  raise TypeError("For Addr, inputs must be all tensors.")
6285
- if dtypeop(vec1) != dtypeop(vec2):
6460
+ if dtype_(vec1) != dtype_(vec2):
6286
6461
  raise TypeError("For Addr, the vec1 and vec2 should be the same dtype.")
6287
6462
  _check_input_1d(vec1.shape, "vec1", "Addr")
6288
6463
  _check_input_1d(vec2.shape, "vec2", "Addr")
@@ -6296,12 +6471,11 @@ def addr(x, vec1, vec2, *, beta=1, alpha=1):
6296
6471
  alpha = scalar_cast(alpha, mstype.int32)
6297
6472
  beta = scalar_cast(beta, mstype.int32)
6298
6473
  matmul_op = P.MatMul()
6299
- reshape_op = P.Reshape()
6300
6474
 
6301
6475
  length_vec1 = get_x_shape(vec1.shape)
6302
- vec1 = reshape_op(vec1, (length_vec1[0], 1))
6476
+ vec1 = reshape_(vec1, (length_vec1[0], 1))
6303
6477
  length_vec2 = get_x_shape(vec2.shape)
6304
- vec2 = reshape_op(vec2, (1, length_vec2[0]))
6478
+ vec2 = reshape_(vec2, (1, length_vec2[0]))
6305
6479
 
6306
6480
  out = beta * x + alpha * matmul_op(vec1, vec2)
6307
6481
  return out
@@ -6527,7 +6701,15 @@ def bernoulli(input, p=0.5, seed=None):
6527
6701
 
6528
6702
  def bessel_i1(x):
6529
6703
  r"""
6530
- Computes the Bessel i1 function of x element-wise.
6704
+ Computes the modified Bessel function of the first kind, order 1, element-wise.
6705
+
6706
+ .. math::
6707
+ \begin{array}{ll} \\
6708
+ I_{1}(x)=\mathrm{i}^{-1} J_{1}(\mathrm{i} x)=\sum_{m=0}^
6709
+ {\infty} \frac{x^{2m+1}}{2^{2m+1} m ! (m+1) !}
6710
+ \end{array}
6711
+
6712
+ where :math:`J_{1}` is the Bessel function of the first kind, order 1.
6531
6713
 
6532
6714
  Args:
6533
6715
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -6556,7 +6738,17 @@ def bessel_i1(x):
6556
6738
 
6557
6739
  def bessel_i1e(x):
6558
6740
  r"""
6559
- Computes the Bessel i1e function of x element-wise.
6741
+ Computes the exponentially scaled modified Bessel function of the first kind, order 1, element-wise.
6742
+
6743
+ The formula is defined as:
6744
+
6745
+ .. math::
6746
+ \begin{array}{ll} \\
6747
+ \text I_{1}e(x)=e^{(-|x|)} * I_{1}(x)=e^{(-|x|)} * \sum_{m=0}^
6748
+ {\infty} \frac{x^{2m+1}}{2^{2m+1} m ! (m+1) !}
6749
+ \end{array}
6750
+
6751
+ where :math:`I_{1}` is the modified Bessel function of the first kind, order 1.
6560
6752
 
6561
6753
  Args:
6562
6754
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -6585,7 +6777,17 @@ def bessel_i1e(x):
6585
6777
 
6586
6778
  def bessel_k1(x):
6587
6779
  r"""
6588
- Computes the Bessel k1 function of x element-wise.
6780
+ Computes the modified Bessel function of the second kind, order 1, element-wise.
6781
+
6782
+ The formula is defined as:
6783
+
6784
+ .. math::
6785
+ \begin{array}{ll} \\
6786
+ K_{1}(x)=\lim_{\nu \to 1} \left(\frac{\pi}{2}\right) \frac{I_{-\nu}(x)-
6787
+ I_{\nu}(x)}{\sin (\nu \pi)} = \int_{0}^{\infty} e^{-x \cosh t} \cosh (t) d t
6788
+ \end{array}
6789
+
6790
+ where :math:`I_{\nu}` is the modified Bessel function of the first kind, order :math:`\nu`.
6589
6791
 
6590
6792
  Args:
6591
6793
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
@@ -6614,7 +6816,17 @@ def bessel_k1(x):
6614
6816
 
6615
6817
  def bessel_k1e(x):
6616
6818
  r"""
6617
- Computes the Bessel k1e function of x element-wise.
6819
+ Computes the exponentially scaled modified Bessel function of the second kind, order 1, element-wise.
6820
+
6821
+ The formula is defined as:
6822
+
6823
+ .. math::
6824
+ \begin{array}{ll} \\
6825
+ K_{1}e(x)= e^{(-|x|)} * K_{1}(x) = e^{(-|x|)} * \int_{0}
6826
+ ^{\infty} e^{-x \cosh t} \cosh (t) d t
6827
+ \end{array}
6828
+
6829
+ where :math:`K_{1}` is the modified Bessel function of the second kind, order 1.
6618
6830
 
6619
6831
  Args:
6620
6832
  x (Tensor): The input tensor. The data type must be float16, float32 or float64.
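The new formulas for the i1e/k1e variants make the exponential scaling explicit: bessel_i1e(x) = exp(-|x|) * bessel_i1(x), and likewise for k1e/k1. A numeric sanity check of the first identity, assuming float32 input (`ops.exp` and `ops.abs` are the standard element-wise ops):

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([0.5, 1.0, 2.0], mindspore.float32)
    >>> lhs = ops.bessel_i1e(x)
    >>> rhs = ops.exp(-ops.abs(x)) * ops.bessel_i1(x)
    >>> print(ops.isclose(lhs, rhs, rtol=1e-5, atol=1e-6))
    [ True True True]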
@@ -6676,8 +6888,7 @@ def deg2rad(x):
6676
6888
  """
6677
6889
  if not isinstance(x, (Tensor, Tensor_)):
6678
6890
  raise TypeError("The input x must be tensor")
6679
- dtype_op = _get_cache_prim(P.DType)()
6680
- x_dtype = dtype_op(x)
6891
+ x_dtype = dtype_(x)
6681
6892
  _check_input_dtype("x", x_dtype, [mstype.float16, mstype.float32, mstype.float64], "")
6682
6893
  if x_dtype == mstype.float16:
6683
6894
  out = x * (Tensor(math.pi / 180.0).astype(mstype.float16))
@@ -6717,8 +6928,7 @@ def rad2deg(x):
6717
6928
  """
6718
6929
  if not isinstance(x, (Tensor, Tensor_)):
6719
6930
  raise TypeError("The input x must be tensor")
6720
- dtype_op = _get_cache_prim(P.DType)()
6721
- x_dtype = dtype_op(x)
6931
+ x_dtype = dtype_(x)
6722
6932
  _check_input_dtype("x", x_dtype, [mstype.float16, mstype.float32, mstype.float64], "")
6723
6933
  if x_dtype == mstype.float16:
6724
6934
  out = x * (Tensor(180.0 / math.pi).astype(mstype.float16))
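Both conversions reduce to a single scalar multiply, with the constant pre-cast to match a float16 input; `rad2deg` inverts `deg2rad` up to floating-point rounding. For example (float32 printing shown):

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> print(ops.deg2rad(Tensor([90.0, 180.0], mindspore.float32)))
    [1.5707964 3.1415927]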
@@ -6823,14 +7033,12 @@ def cummin(input, axis):
6823
7033
  if axis == 0:
6824
7034
  out1, out2 = cummin_op(input)
6825
7035
  else:
6826
- transpose = _get_cache_prim(P.Transpose)()
6827
- _shape_op = _get_cache_prim(P.Shape)()
6828
- x_shape = _shape_op(input)
7036
+ x_shape = shape_(input)
6829
7037
  prem = _create_cummin_perm(axis, x_shape)
6830
- input = transpose(input, prem)
7038
+ input = transpose_(input, prem)
6831
7039
  out1, out2 = cummin_op(input)
6832
- out1 = transpose(out1, prem)
6833
- out2 = transpose(out2, prem)
7040
+ out1 = transpose_(out1, prem)
7041
+ out2 = transpose_(out2, prem)
6834
7042
  return [out1, out2]
6835
7043
 
6836
7044
 
@@ -6896,7 +7104,8 @@ def cumsum(x, axis, dtype=None):
6896
7104
  For the case of dynamic shape, the dtype of `x` only support int32, float16 or float32.
6897
7105
 
6898
7106
  Args:
6899
- x (Tensor): The input Tensor to accumulate.
7107
+ x (Tensor): The input Tensor of shape :math:`(N, *)`, where :math:`*` means any number
7108
+ of additional dimensions.
6900
7109
  axis (int): Axis along which the cumulative sum is computed.
6901
7110
  dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If specified,
6902
7111
  the input Tensor will be cast to `dtype` before the computation. This is useful for preventing overflows.
@@ -7145,7 +7354,7 @@ def dstack(inputs):
7145
7354
  if tensor.ndim <= 1:
7146
7355
  tensor = _expand(tensor, 2)
7147
7356
  if tensor.ndim == 2:
7148
- tensor = P.ExpandDims()(tensor, 2)
7357
+ tensor = expand_dims_(tensor, 2)
7149
7358
  trans_inputs += (tensor,)
7150
7359
  if not trans_inputs:
7151
7360
  raise ValueError("For 'dstack', at least one tensor is needed to concatenate.")
@@ -7410,8 +7619,7 @@ def cartesian_prod(*inputs):
7410
7619
  meshgrid_output = meshgrid(inputs)
7411
7620
  stack = P.Stack(axis=-1)
7412
7621
  stack_output = stack(meshgrid_output)
7413
- reshape = P.Reshape()
7414
- return reshape(stack_output, (-1, len(inputs)))
7622
+ return reshape_(stack_output, (-1, len(inputs)))
7415
7623
 
7416
7624
 
7417
7625
  def atleast_3d(inputs):
@@ -7461,13 +7669,13 @@ def atleast_3d(inputs):
7461
7669
  """
7462
7670
 
7463
7671
  def _expand3(arr):
7464
- ndim = P.Rank()(arr)
7672
+ ndim = rank_(arr)
7465
7673
  if ndim == 0:
7466
- return P.Reshape()(arr, (1, 1, 1))
7674
+ return reshape_(arr, (1, 1, 1))
7467
7675
  if ndim == 1:
7468
- return P.Reshape()(arr, (1, P.Size()(arr), 1))
7676
+ return reshape_(arr, (1, P.Size()(arr), 1))
7469
7677
  if ndim == 2:
7470
- return P.Reshape()(arr, P.Shape()(arr) + (1,))
7678
+ return reshape_(arr, P.Shape()(arr) + (1,))
7471
7679
  return arr
7472
7680
 
7473
7681
  if isinstance(inputs, Tensor):
@@ -7565,7 +7773,7 @@ def vstack(inputs):
7565
7773
  ndim_diff = 2 - len(shape)
7566
7774
  if ndim_diff > 0:
7567
7775
  shape = [1] * ndim_diff + [i for i in shape]
7568
- tensor = P.Reshape()(tensor, tuple(shape))
7776
+ tensor = reshape_(tensor, tuple(shape))
7569
7777
  trans_tup += (tensor,)
7570
7778
  if not trans_tup:
7571
7779
  raise ValueError("For 'vstack', need at least one tensor to concatenate.")
@@ -7677,6 +7885,8 @@ def combinations(input, r=2, with_replacement=False):
7677
7885
  raise TypeError(f"For 'combinations', 'x' must be a tensor, but got {type(input)}")
7678
7886
  if input.ndim != 1:
7679
7887
  raise ValueError(f"For 'combinations', the dimension 'x' must be 1, but got {input.ndim}")
7888
+ if not isinstance(r, int):
7889
+ raise TypeError(f"For 'combinations', 'r' must be an integer, but got {type(r)}")
7680
7890
  comb_func = _combinations_with_replacement if with_replacement else _combinations
7681
7891
  ret = comb_func(input, r)
7682
7892
  if ret.size == 0:
@@ -7776,17 +7986,17 @@ def copysign(x, other):
7776
7986
  other = _type_convert(Tensor, other)
7777
7987
  other = _broadcast_to_shape(other, P.Shape()(x))
7778
7988
 
7779
- if _check_same_type(P.DType()(x), mstype.bool_):
7989
+ if _check_same_type(dtype_(x), mstype.bool_):
7780
7990
  raise TypeError("copysign does not accept dtype bool.")
7781
7991
 
7782
- if _check_same_type(P.DType()(x), mstype.complex64):
7992
+ if _check_same_type(dtype_(x), mstype.complex64):
7783
7993
  raise TypeError("copysign does not accept dtype complex64.")
7784
- if _check_same_type(P.DType()(other), mstype.complex64):
7994
+ if _check_same_type(dtype_(other), mstype.complex64):
7785
7995
  raise TypeError("copysign does not accept dtype complex64.")
7786
7996
 
7787
- if _check_same_type(P.DType()(x), mstype.complex128):
7997
+ if _check_same_type(dtype_(x), mstype.complex128):
7788
7998
  raise TypeError("copysign does not accept dtype complex128.")
7789
- if _check_same_type(P.DType()(other), mstype.complex128):
7999
+ if _check_same_type(dtype_(other), mstype.complex128):
7790
8000
  raise TypeError("copysign does not accept dtype complex128.")
7791
8001
 
7792
8002
  x_float = (
@@ -7796,7 +8006,7 @@ def copysign(x, other):
7796
8006
  )
7797
8007
  pos_tensor = P.Abs()(x_float)
7798
8008
  less_zero = P.Less()(other, 0)
7799
- return P.Select()(less_zero, P.Neg()(pos_tensor), pos_tensor)
8009
+ return P.Select()(less_zero, neg_tensor(pos_tensor), pos_tensor)
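Past the dtype guards, the computation is a single Select between |x| and -|x| driven by the sign of `other`:

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([1.0, -2.0, 3.0], mindspore.float32)
    >>> sign = Tensor([-1.0, 1.0, -1.0], mindspore.float32)
    >>> print(ops.copysign(x, sign))
    [-1.  2. -3.]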
7800
8010
 
7801
8011
 
7802
8012
  def hann_window(window_length, periodic=True, *, dtype=None):
@@ -7948,14 +8158,12 @@ def logsumexp(input, axis, keep_dims=False):
7948
8158
  >>> print(output.shape)
7949
8159
  (3, 1, 5, 6)
7950
8160
  """
7951
- _exp = _get_cache_prim(P.Exp)()
7952
8161
  _reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims)
7953
- _log = _get_cache_prim(P.Log)()
7954
8162
 
7955
8163
  input_max = ops.ReduceMax(keep_dims=True)(input, axis)
7956
- input_exp = _exp(input - input_max)
8164
+ input_exp = tensor_exp(input - input_max)
7957
8165
  input_sumexp = _reduce_sum(input_exp, axis)
7958
- input_logsumexp = _log(input_sumexp)
8166
+ input_logsumexp = log_(input_sumexp)
7959
8167
  if not keep_dims:
7960
8168
  input_max = input_max.squeeze(axis=axis)
7961
8169
  return input_logsumexp + input_max
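The max subtraction here is the standard stability trick, logsumexp(x) = max(x) + log(sum(exp(x - max(x)))), which keeps the exponentials bounded. For instance (float32 printing shown):

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([[1000.0, 1000.0]], mindspore.float32)
    >>> print(ops.logsumexp(x, 1))  # a naive exp(1000.) would overflow to inf
    [1000.6931]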
@@ -8467,50 +8675,53 @@ def norm(A, ord=None, dim=None, keepdim=False, *, dtype=None):
8467
8675
  ``Ascend`` ``GPU`` ``CPU``
8468
8676
 
8469
8677
  Note:
8470
- - Currently, complex numbers are not supported.
8471
- - Running on ``Ascend`` platform is not supported when ord is `2` , `-2` or `nuc` .
8678
+ Currently, complex numbers are not supported.
8472
8679
 
8473
8680
  Examples:
8474
8681
  >>> import mindspore as ms
8475
8682
  >>> import mindspore.ops as ops
8476
- >>> x = ops.arange(-12, 13, dtype=ms.float32)
8683
+ >>> data_range = ops.arange(-13, 13, dtype=ms.float32)
8684
+ >>> # Exclude 0 from the data, since 0 is an invalid input when `ord` is negative.
8685
+ >>> x = data_range[data_range != 0]
8477
8686
  >>> y = x.reshape(5, 5)
8478
8687
  >>> print(ops.norm(x))
8479
- 36.05551
8688
+ 38.327538
8480
8689
  >>> print(ops.norm(x, float('inf')))
8481
- 12.0
8690
+ 13.0
8482
8691
  >>> print(ops.norm(x, float('-inf')))
8483
- 0.0
8692
+ 1.0
8484
8693
  >>> print(ops.norm(x, 0))
8485
- 24.0
8694
+ 25.0
8486
8695
  >>> print(ops.norm(x, 1))
8487
- 156.0
8696
+ 169.0
8488
8697
  >>> print(ops.norm(x, -1))
8489
- 0.0
8698
+ 0.15915091
8490
8699
  >>> print(ops.norm(x, 2))
8491
- 36.05551
8700
+ 38.327538
8492
8701
  >>> print(ops.norm(x, -2))
8493
- 0.0
8702
+ 0.5647041
8494
8703
  >>> print(ops.norm(x, 3))
8495
- 23.000631
8704
+ 24.309084
8496
8705
  >>> print(ops.norm(x, -3))
8497
- 0.0
8706
+ 0.74708974
8498
8707
  >>> print(ops.norm(y))
8499
- 36.05551
8708
+ 38.327538
8500
8709
  >>> print(ops.norm(y, 'fro'))
8501
- 36.05551
8710
+ 38.327538
8502
8711
  >>> print(ops.norm(y, 'nuc'))
8503
- 42.42641
8712
+ 45.56681
8504
8713
  >>> print(ops.norm(y, float('inf')))
8505
- 50.0
8714
+ 55.0
8506
8715
  >>> print(ops.norm(y, float('-inf')))
8507
- 6.0
8716
+ 9.0
8508
8717
  >>> print(ops.norm(y, 1))
8509
- 32.0
8718
+ 35.0
8510
8719
  >>> print(ops.norm(y, -1))
8511
- 30.0
8720
+ 33.0
8512
8721
  >>> print(ops.norm(y, 2))
8513
- 35.355343
8722
+ 37.57774
8723
+ >>> print(ops.norm(y, -2))
8724
+ 1.590545e-07
8514
8725
  >>> m = ms.Tensor([[1., -1., 2.], [-2., 3., -4.]])
8515
8726
  >>> print(ops.norm(m, dim=0))
8516
8727
  [2.236068 3.1622777 4.472136 ]
@@ -8518,6 +8729,10 @@ def norm(A, ord=None, dim=None, keepdim=False, *, dtype=None):
8518
8729
  [2.4494898 5.3851647]
8519
8730
  >>> print(ops.norm(m, ord=1, dim=1))
8520
8731
  [4. 9.]
8732
+ >>> print(ops.norm(m, ord=-2, dim=0))
8733
+ [0.8944272 0.94868326 1.7888544 ]
8734
+ >>> print(ops.norm(m, ord=2, dim=1))
8735
+ [2.4494898 5.3851647]
8521
8736
  >>> n = ops.arange(27, dtype=ms.float32).reshape(3, 3, 3)
8522
8737
  >>> print(ops.norm(n, dim=(1, 2)))
8523
8738
  [14.282857 39.76179 66.45299 ]
@@ -9073,7 +9288,7 @@ def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
9073
9288
  """
9074
9289
  _check_logits_tensor(logits)
9075
9290
  _check_logits_shape(logits)
9076
- logits_dtype = _get_cache_prim(P.DType)()(logits)
9291
+ logits_dtype = dtype_(logits)
9077
9292
  _check_input_dtype("logits", logits_dtype, [mstype.float16, mstype.float32], "gumbel_softmax")
9078
9293
  _check_attr_dtype("tau", tau, [float], "gumbel_softmax")
9079
9294
  _check_attr_dtype("hard", hard, [bool], "gumbel_softmax")
@@ -9085,14 +9300,13 @@ def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
9085
9300
  _check_int_range(dim, -len(logits.shape),
9086
9301
  len(logits.shape), 'dim', "gumbel_softmax")
9087
9302
 
9088
- log_op = _get_cache_prim(P.Log)()
9089
9303
  const_op = _get_cache_prim(P.ScalarToTensor)()
9090
9304
 
9091
- sample_shape = _get_cache_prim(P.Shape)()(logits)
9305
+ sample_shape = shape_(logits)
9092
9306
  uniform = C.uniform(sample_shape, const_op(
9093
9307
  0.0, mstype.float32), const_op(1.0, mstype.float32))
9094
- uniform = _get_cache_prim(P.Cast)()(uniform, logits_dtype)
9095
- gumbel = neg_tensor(log_op(neg_tensor(log_op(uniform))))
9308
+ uniform = cast_(uniform, logits_dtype)
9309
+ gumbel = neg_tensor(log_(neg_tensor(log_(uniform))))
9096
9310
  gumbel = (logits + gumbel) / tau
9097
9311
  y_soft = _get_cache_prim(P.Softmax)(dim)(gumbel)
9098
9312
  if hard:
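The noise term is an inverse-transform Gumbel(0, 1) sample, g = -log(-log(u)) with u drawn from Uniform(0, 1); a scalar check of the transform:

    >>> import math
    >>> u = 0.5
    >>> print(round(-math.log(-math.log(u)), 4))
    0.3665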
@@ -9194,13 +9408,13 @@ def stft(x, n_fft, hop_length=None, win_length=None, window=None, center=True,
9194
9408
  k] \exp \left(-j \frac{2 \pi \cdot \omega k}{\text { win_length }}\right)
9195
9409
 
9196
9410
  where :math:`m` is the index of the sliding window, and
9197
- :math:`ω` is the frequency in range :math:`0 \leq \omega < \text{n\_fft}0≤ω<n_fft`.
9411
+ :math:`\omega` is the frequency in the range :math:`0 \leq \omega < \text{n\_fft}`.
9198
9412
 
9199
9413
  Args:
9200
9414
  x (Tensor): Time sequences of stft, must be either a 1-D time tensor or a 2-D tensor.
9201
9415
  n_fft (int): The size of Fourier transform.
9202
9416
  hop_length (int, optional): The distance between neighboring sliding window
9203
- frames. Default: ``None``(treated as equal to :math:`floor(n_fft / 4)`).
9417
+ frames. Default: ``None`` (treated as equal to :math:`floor(n\_fft / 4)`).
9204
9418
  win_length (int, optional): The size of window frame and STFT filter.
9205
9419
  Default: ``None`` (treated as equal to `n_fft`).
9206
9420
  window (Tensor, optional): the optional window function, 1-D tensor of size `win_length`.
@@ -9225,14 +9439,14 @@ def stft(x, n_fft, hop_length=None, win_length=None, window=None, center=True,
9225
9439
  If `return_complex` is False, it returns a real Tensor with shape :math:`(*, N, T, 2)`.
9226
9440
 
9227
9441
  `N` is size of Fourier transform, it depends on parameter `onesided`:
9228
- - If `onesided` is False, :math:`N = n_fft`.
9229
- - If `onesided` is True, :math:`N = n_fft // 2 + 1`.
9442
+ - If `onesided` is False, :math:`N = n\_fft`.
9443
+ - If `onesided` is True, :math:`N = n\_fft // 2 + 1`.
9230
9444
 
9231
9445
  `T` is the total number of frames used, calculated by this formula:
9232
- :math:`T = 1 + (len - n_fft) / hop_length`, where `len` depends on parameter `center`:
9233
- - If `center` is False, :math:`len = signal_length`.
9234
- - If `center` is True, :math:`len = signal_length + (n_fft // 2) * 2`.
9235
- where :math:`signal_length` is the signal length, it equals to :math:`x.shape[-1]`.
9446
+ :math:`T = 1 + (len - n\_fft) / hop\_length`, where `len` depends on parameter `center`:
9447
+ - If `center` is False, :math:`len = signal\_length`.
9448
+ - If `center` is True, :math:`len = signal\_length + (n\_fft // 2) * 2`.
9449
+ where :math:`signal\_length` is the signal length, it equals to :math:`x.shape[-1]`.
9236
9450
 
9237
9451
  Raises:
9238
9452
  TypeError: If `x` is not a 1-D or 2-D tensor.
@@ -9262,8 +9476,7 @@ def stft(x, n_fft, hop_length=None, win_length=None, window=None, center=True,
9262
9476
  window = ops.ones(win_length, mstype.float32)
9263
9477
 
9264
9478
  def _is_complex(x):
9265
- dtype = P.DType()
9266
- return dtype(x) in [mstype.complex64, mstype.complex128]
9479
+ return dtype_(x) in [mstype.complex64, mstype.complex128]
9267
9480
 
9268
9481
  if onesided is None:
9269
9482
  onesided = (not _is_complex(x)) and (not _is_complex(window))
@@ -9372,20 +9585,17 @@ def _check_input_2d(input_shape, param_name, func_name):
9372
9585
  @_primexpr
9373
9586
  def _expand(x, ndim):
9374
9587
  """Expand x to ndim from axis, which can be 0 or -1."""
9375
- rank_op = _get_cache_prim(P.Rank)()
9376
- expand_dims_op = _get_cache_prim(P.ExpandDims)()
9377
- while rank_op(x) < ndim:
9378
- x = expand_dims_op(x, 0)
9588
+ while rank_(x) < ndim:
9589
+ x = expand_dims_(x, 0)
9379
9590
  return x
9380
9591
 
9381
9592
 
9382
9593
  def _broadcast_to(x, shape_cur, shape_to, ndim_to):
9383
9594
  """Broadcasts x from shape_cur to shape_to."""
9384
- tile_op = _get_cache_prim(P.Tile)()
9385
9595
  tile_size_op = _get_cache_prim(TileSize)()
9386
9596
  size = tile_size_op(shape_cur, shape_to, ndim_to)
9387
9597
  F.stop_gradient(size)
9388
- return tile_op(x, size)
9598
+ return tile_(x, size)
9389
9599
 
9390
9600
 
9391
9601
  def matmul(input, other):
@@ -9449,17 +9659,13 @@ def matmul(input, other):
9449
9659
  if not (isinstance(input, Tensor) and isinstance(other, Tensor)):
9450
9660
  raise TypeError("For matmul op, inputs must be all tensors.")
9451
9661
 
9452
- rank_op = _get_cache_prim(P.Rank)()
9453
- input_rank, other_rank = rank_op(input), rank_op(other)
9662
+ input_rank, other_rank = rank_(input), rank_(other)
9454
9663
  if input_rank == 2 and other_rank == 2:
9455
9664
  _matmul = _get_cache_prim(P.MatMul)(False, False)
9456
9665
  return _matmul(input, other)
9457
9666
 
9458
- shape_op = _get_cache_prim(P.Shape)()
9459
- reshape_op = _get_cache_prim(P.Reshape)()
9460
-
9461
- ndim1_orig, ndim2_orig = rank_op(input), rank_op(other)
9462
- shape1_orig, shape2_orig = shape_op(input), shape_op(other)
9667
+ ndim1_orig, ndim2_orig = rank_(input), rank_(other)
9668
+ shape1_orig, shape2_orig = shape_(input), shape_(other)
9463
9669
  transpose_b = ndim2_orig == 1
9464
9670
  shape_backbone = _check_matmul_shapes(shape1_orig, shape2_orig, 'matmul')
9465
9671
  # infers the shape of the output
@@ -9471,21 +9677,21 @@ def matmul(input, other):
9471
9677
 
9472
9678
  input = _expand(input, 2)
9473
9679
  other = _expand(other, 2)
9474
- if rank_op(other) == 2:
9475
- if rank_op(input) > 2:
9476
- input = reshape_op(input, (-1, shape1_orig[-1]))
9680
+ if rank_(other) == 2:
9681
+ if rank_(input) > 2:
9682
+ input = reshape_(input, (-1, shape1_orig[-1]))
9477
9683
  res = _matmul(input, other)
9478
9684
  else:
9479
9685
  # broadcasts input.shape[:-2] with other.shape[:-2]
9480
9686
  ndim_aligned = _max(ndim1_orig, ndim2_orig)
9481
9687
  input = _expand(input, ndim_aligned)
9482
9688
  other = _expand(other, ndim_aligned)
9483
- shape1_aligned, shape2_aligned = shape_op(input), shape_op(other)
9689
+ shape1_aligned, shape2_aligned = shape_(input), shape_(other)
9484
9690
  input = _broadcast_to(input, shape1_aligned[:-2], shape_backbone, ndim_aligned)
9485
9691
  other = _broadcast_to(other, shape2_aligned[:-2], shape_backbone, ndim_aligned)
9486
9692
  res = _batch_matmul(input, other)
9487
9693
 
9488
- return reshape_op(res, shape_out)
9694
+ return reshape_(res, shape_out)
9489
9695
 
9490
9696
 
9491
9697
  def inner(input, other):
@@ -9605,8 +9811,7 @@ def bmm(input_x, mat2):
9605
9811
  if not (isinstance(input_x, Tensor) and isinstance(mat2, Tensor)):
9606
9812
  raise TypeError("For bmm op, inputs input_x and mat2 must be all tensors.")
9607
9813
 
9608
- bmm_op = _get_cache_prim(P.BatchMatMul)()
9609
- return bmm_op(input_x, mat2)
9814
+ return batch_matmul_(input_x, mat2)
9610
9815
 
9611
9816
 
9612
9817
  def quantile(input, q, axis=None, keepdims=False):
@@ -9770,15 +9975,14 @@ def baddbmm(input, batch1, batch2, beta=1, alpha=1):
9770
9975
  [5. 5. 5.]
9771
9976
  [5. 5. 5.]]]
9772
9977
  """
9773
- dtypeop = _get_cache_prim(P.DType)()
9774
9978
  bmmop = _get_cache_prim(P.BatchMatMul)(False, False)
9775
9979
  if not (isinstance(input, Tensor) and isinstance(batch1, Tensor) and isinstance(batch2, Tensor)):
9776
9980
  raise TypeError("For Baddbmm, inputs must be all tensors.")
9777
9981
  if len(batch1.shape) != 3 or len(batch2.shape) != 3:
9778
9982
  raise ValueError("For batch1 and batch2 must be 3-D tensors each containing the same number of matrices, "
9779
9983
  f"but got length of batch1:'{len(batch1.shape)}', length of batch2:'{len(batch2.shape)}'.")
9780
- input_dtype = dtypeop(input)
9781
- if not (input_dtype == dtypeop(batch1) and input_dtype == dtypeop(batch2)):
9984
+ input_dtype = dtype_(input)
9985
+ if not (input_dtype == dtype_(batch1) and input_dtype == dtype_(batch2)):
9782
9986
  raise TypeError("For Baddbmm, the inputs should be the same dtype.")
9783
9987
  if input_dtype in (mstype.float16, mstype.float32, mstype.float64):
9784
9988
  if not (isinstance(alpha, (int, float)) and isinstance(beta, (int, float))):
@@ -9826,10 +10030,7 @@ def log2(input):
9826
10030
  >>> print(output)
9827
10031
  [1. 2. 3.]
9828
10032
  """
9829
-
9830
- dtype_op = _get_cache_prim(P.DType)()
9831
-
9832
- x_dtype = dtype_op(input)
10033
+ x_dtype = dtype_(input)
9833
10034
  denominator = log_(_make_tensor(2, x_dtype))
9834
10035
  frac_log = log_(input)
9835
10036
  output = frac_log / denominator
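Both `log2` and `log10` rely on the change-of-base identity log_b(x) = ln(x) / ln(b) rather than a dedicated kernel:

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> print(ops.log2(Tensor([2.0, 4.0, 8.0], mindspore.float32)))
    [1. 2. 3.]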
@@ -10045,10 +10246,7 @@ def log10(input):
10045
10246
  >>> print(output)
10046
10247
  [0.301 0.602 1. ]
10047
10248
  """
10048
-
10049
- dtype_op = P.DType()
10050
-
10051
- x_dtype = dtype_op(input)
10249
+ x_dtype = dtype_(input)
10052
10250
  denominator = log_(_make_tensor(10, x_dtype))
10053
10251
  frac_log = log_(input)
10054
10252
  output = frac_log / denominator
@@ -10083,8 +10281,7 @@ def log1p(input):
10083
10281
  >>> print(output)
10084
10282
  [0.6931472 1.0986123 1.609438 ]
10085
10283
  """
10086
- _log1p = _get_cache_prim(P.Log1p)()
10087
- return _log1p(input)
10284
+ return log1p_(input)
10088
10285
 
10089
10286
 
10090
10287
  def kron(input, other):
@@ -10235,7 +10432,7 @@ def all(input, axis=None, keep_dims=False):
     if axis is None:
         axis = ()
     if input.dtype != mstype.bool_:
-        input = _get_cache_prim(P.Cast)()(input, mstype.bool_)
+        input = cast_(input, mstype.bool_)
     return _get_cache_prim(P.ReduceAll)(keep_dims)(input, axis)
 
 
@@ -10294,7 +10491,7 @@ def any(input, axis=None, keep_dims=False):
     if axis is None:
         axis = ()
     if input.dtype != mstype.bool_:
-        input = _get_cache_prim(P.Cast)()(input, mstype.bool_)
+        input = cast_(input, mstype.bool_)
     return _get_cache_prim(P.ReduceAny)(keep_dims)(input, axis)
 
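
`all` and `any` share one shape: cast non-bool inputs to `bool_`, then apply ReduceAll/ReduceAny, with `axis=None` normalized to the empty tuple, meaning "reduce over every axis". A NumPy sketch of that flow:

```python
import numpy as np

def all_ref(x, axis=None, keep_dims=False):
    """Cast to bool, then reduce - the cast_ + ReduceAll pattern above."""
    return np.all(x.astype(bool), axis=axis, keepdims=keep_dims)

def any_ref(x, axis=None, keep_dims=False):
    """Cast to bool, then reduce - the cast_ + ReduceAny pattern above."""
    return np.any(x.astype(bool), axis=axis, keepdims=keep_dims)

x = np.array([[0, 1], [2, 3]])
print(all_ref(x))          # False: the zero element fails
print(any_ref(x, axis=1))  # [ True  True]
```
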
@@ -10369,7 +10566,7 @@ def accumulate_n(x):
         ValueError: If there is an input element with a different shape.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> import mindspore
@@ -10381,8 +10578,6 @@ def accumulate_n(x):
         >>> print(output)
         [10. 14. 18.]
     """
-
-    accumulate_ = _get_cache_prim(P.AccumulateNV2)()
     return accumulate_(x)
 
 
@@ -10405,7 +10600,7 @@ def iou(anchor_boxes, gt_boxes, mode='iou'):
     Args:
         anchor_boxes (Tensor): Anchor boxes, tensor of shape :math:`(N, 4)` . "N" indicates the number of anchor boxes,
             and the value "4" refers to "x0", "y0", "x1", and "y1".
-            Data type must be either float16float32 or float64.
+            Data type must be either float16, float32 or float64.
         gt_boxes (Tensor): Ground truth boxes, tensor of shape :math:`(M, 4)` . "M" indicates the number of ground
             truth boxes, and the value "4" refers to "x0", "y0", "x1", and "y1".
             Data type must be either float16, float32 or float64.
@@ -10816,7 +11011,7 @@ def conj(input):
     """
     if not isinstance(input, (Tensor, Tensor_)):
         raise TypeError("For conj op, input must be Tensor.")
-    return _get_cache_prim(P.Conj)()(input)
+    return conj_(input)
 
 
 def cross(input, other, dim=None):
@@ -10879,7 +11074,7 @@ def _einsum_convert_num_to_char(num):
     # pylint: disable=chained-comparison
     if num >= 26 and num < 52:
         return chr(num - 26 + ord('a'))
-    raise ValueError(f"For Einsum, the number in sublist should be in range [0 52), but got {num}")
+    raise ValueError(f"For Einsum, the number in sublist should be in range [0, 52), but got {num}")
 
 
 def einsum(equation, *operands):
@@ -11000,7 +11195,10 @@ def erfinv(input):
     where :math:`x` is the `input`.
 
     Args:
-        input (Tensor): The input tensor to compute with, with data type float16, float32 or float64.
+        input (Tensor): The input tensor. Supported dtypes:
+
+            - Ascend: float16, float32.
+            - GPU/CPU: float16, float32, float64.
 
     Returns:
         Tensor, has the same shape and dtype as `input`.
@@ -11020,7 +11218,7 @@ def erfinv(input):
         >>> print(output)
         [ 0. 0.47695306 -1.1630805 ]
     """
-    return _get_cache_prim(P.Erfinv)()(input)
+    return erfinv_(input)
 
 
 def less_equal(input, other):
@@ -11042,8 +11240,8 @@ def less_equal(input, other):
     Args:
         input (Union[Tensor, Number, bool]): The first input is a Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
         other (Union[Tensor, Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -11067,7 +11265,7 @@ def less_equal(input, other):
         >>> print(output)
         [ True False True]
     """
-    return _get_cache_prim(P.LessEqual)()(input, other)
+    return tensor_le(input, other)
 
 
 def cumprod(input, dim, dtype=None):
@@ -11105,10 +11303,9 @@ def cumprod(input, dim, dtype=None):
         >>> print(output)
         [1. 2. 6.]
     """
-    cumprod_op = _get_cache_prim(P.CumProd)()
-    output = cumprod_op(input, dim)
+    output = cumprod_(input, dim)
     if dtype:
-        output = _get_cache_prim(P.Cast)()(output, dtype)
+        output = cast_(output, dtype)
     return output
 
@@ -11119,8 +11316,8 @@ def greater(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ .
+            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -11141,8 +11338,7 @@ def greater(input, other):
         >>> print(output)
         [False True False]
     """
-    greater_op = _get_cache_prim(P.Greater)()
-    return greater_op(input, other)
+    return tensor_gt(input, other)
 
 
 def greater_equal(input, other):
@@ -11152,8 +11348,8 @@ def greater_equal(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ .
+            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -11174,8 +11370,7 @@ def greater_equal(input, other):
         >>> print(output)
         [True True False]
     """
-    greater_equal_op = _get_cache_prim(P.GreaterEqual)()
-    return greater_equal_op(input, other)
+    return tensor_ge(input, other)
 
 
 def igamma(input, other):
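
The whole comparison family gets the same treatment: `less_equal`, `greater`, and `greater_equal` now delegate to shared instances named `tensor_le`, `tensor_gt`, and `tensor_ge`, presumably module-level `P.LessEqual`/`P.Greater`/`P.GreaterEqual` objects bound at import. The semantics stay ordinary broadcasted elementwise comparison, as the docstring examples above show:

```python
import numpy as np

x = np.array([1, 2, 3])
y = np.array([1, 1, 4])
print(x <= y)  # [ True False  True] - the less_equal example
print(x > y)   # [False  True False] - the greater example
print(x >= y)  # [ True  True False] - the greater_equal example
```
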
@@ -11313,8 +11508,7 @@ def lgamma(input):
         >>> print(output)
         0.045437694
     """
-    lgamma_op = _get_cache_prim(P.Lgamma)()
-    return lgamma_op(input)
+    return lgamma_(input)
 
 
 def digamma(input):
@@ -11348,8 +11542,7 @@ def digamma(input):
         >>> print(output)
         [ 0.0365 -1.964 2.14 ]
     """
-    digamma_op = _get_cache_prim(P.Digamma)()
-    return digamma_op(input)
+    return digamma_(input)
 
 
 def polygamma(n, input):
@@ -11388,8 +11581,7 @@ def polygamma(n, input):
         >>> print(output)
         [ 0.37446456 15.49884838]
     """
-    polygamma_op = _get_cache_prim(P.Polygamma)()
-    return polygamma_op(n, input)
+    return poly_gamma_(n, input)
 
 
 def isinf(input):
@@ -11430,20 +11622,19 @@ def isinf(input):
         >>> print(output)
         False
     """
-    isinf_op = _get_cache_prim(P.IsInf)()
-    return isinf_op(input)
+    return isinf_(input)
 
 
 def _is_sign_inf(x, fn):
     """Tests element-wise for infinity with sign."""
     shape = x.shape
-    zeros_tensor = _get_cache_prim(P.Zeros)()(shape, mstype.float32)
-    ones_tensor = _get_cache_prim(P.Ones)()(shape, mstype.float32)
-    is_inf = _get_cache_prim(P.IsInf)()(x)
+    zeros_tensor = zeros_(shape, mstype.float32)
+    ones_tensor = ones_(shape, mstype.float32)
+    is_inf = isinf_(x)
     is_sign = fn(x, zeros_tensor)
     res = ops.select(is_inf, ones_tensor, zeros_tensor)
    res = ops.select(is_sign, res, zeros_tensor)
-    return _get_cache_prim(P.Cast)()(res, mstype.bool_)
+    return cast_(res, mstype.bool_)
 
 
 def isposinf(input):
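
`_is_sign_inf` is the helper behind `isposinf`/`isneginf`: it combines an "is infinite" mask with a sign mask through two `select` calls and a final cast to bool. The same logic in NumPy, with `np.where` playing the role of `ops.select`:

```python
import numpy as np

def is_sign_inf_ref(x, fn):
    """Elementwise 'is infinity with the sign selected by fn'."""
    zeros = np.zeros(x.shape, dtype=np.float32)
    ones = np.ones(x.shape, dtype=np.float32)
    is_inf = np.isinf(x)
    is_sign = fn(x, zeros)               # e.g. np.greater for isposinf
    res = np.where(is_inf, ones, zeros)  # keep only the infinities
    res = np.where(is_sign, res, zeros)  # keep only the requested sign
    return res.astype(bool)

x = np.array([-np.inf, 1.0, np.inf])
print(is_sign_inf_ref(x, np.greater))  # [False False  True] -> isposinf
print(is_sign_inf_ref(x, np.less))     # [ True False False] -> isneginf
```
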
@@ -11554,8 +11745,7 @@ def logical_xor(input, other):
         input = input.astype(mstype.bool_)
     if isinstance(other, Tensor) and other.dtype != mstype.bool_:
         other = other.astype(mstype.bool_)
-    logical_xor_op = _get_cache_prim(P.LogicalXor)()
-    return logical_xor_op(input, other)
+    return logical_xor_(input, other)
 
 
 def imag(input):
@@ -11584,7 +11774,7 @@ def imag(input):
         >>> print(output)
         0.4
     """
-    return _get_cache_prim(P.Imag)()(input)
+    return imag_(input)
 
 
 @_primexpr
@@ -11651,7 +11841,7 @@ def nansum(input, axis=None, keepdims=False, *, dtype=None):
         axis = ()
     if input.dtype == mstype.bool_:
         input = input.astype(mstype.int64)
-    is_nan = _get_cache_prim(P.IsNan)()(input)
+    is_nan = isnan_(input)
     input = ops.masked_fill(input, is_nan, 0)
     input = _get_cache_prim(P.ReduceSum)(keepdims)(input, axis)
     if dtype is not None and input.dtype != dtype:
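
`nansum` keeps its three-step strategy: find the NaNs (now via the shared `isnan_`), zero them out with `masked_fill`, then run an ordinary ReduceSum. In NumPy terms:

```python
import numpy as np

def nansum_ref(x, axis=None, keepdims=False):
    """Zero out NaNs, then sum - the masked_fill + ReduceSum flow above."""
    return np.sum(np.where(np.isnan(x), 0.0, x), axis=axis, keepdims=keepdims)

x = np.array([1.0, np.nan, 2.0])
print(nansum_ref(x))  # 3.0
```
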
@@ -11709,8 +11899,8 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
     zeros = ops.Zeros()
     if not isinstance(input, (Tensor, Tensor_)):
         raise TypeError("For 'diag_embed', 'input' must be Tensor.")
-    dtypeop = P.DType()
-    input_dtype = dtypeop(input)
+
+    input_dtype = dtype_(input)
     if not (input_dtype in (mstype.int8, mstype.int16, mstype.int32, mstype.int64, mstype.uint8, mstype.uint16,
                             mstype.uint32, mstype.uint64, mstype.float16, mstype.float32, mstype.float64)):
         raise TypeError("For 'diag_embed', the dtype of 'input' must be int8, int16, int32, int64, "
@@ -11818,16 +12008,17 @@ def sum(input, dim=None, keepdim=False, *, dtype=None):
        [54.]]]
     """
     if not isinstance(input, Tensor):
-        raise TypeError("For 'sum', 'input' must be Tensor.")
-    if dim is not None:
-        if not isinstance(dim, (int, tuple, list)):
-            raise TypeError("For 'sum', 'dim' must be int, tuple(int), list(int) or None.")
+        raise TypeError(f"For 'sum', 'input' must be Tensor, but got{type(input)}")
+    if dim is not None and not isinstance(dim, (int, tuple, list)):
+        raise TypeError(f"For 'sum', 'dim' must be int, tuple(int), list(int) or None, but got {type(dim)}")
     if not isinstance(keepdim, bool):
-        raise TypeError("For 'sum', 'keepdim' must be bool.")
+        raise TypeError(f"For 'sum', 'keepdim' must be bool, but got {type(keepdim)}")
 
+    if input.dtype == mstype.bool_:
+        input = input.astype(mstype.int64)
     if dtype is not None:
         input = input.astype(dtype)
-    reduce_sum = P.ReduceSum(keep_dims=keepdim)
+    reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims=keepdim)
     if dim is not None:
         out = reduce_sum(input, dim)
     else:
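
Besides the richer error messages, this `sum` hunk changes behavior in one visible way: bool inputs are now promoted to int64 before the reduction, so `True` elements sum as ones. The new flow, sketched in NumPy:

```python
import numpy as np

def sum_ref(x, dim=None, keepdim=False, dtype=None):
    """Mirror of the new sum(): bool -> int64, optional dtype cast, reduce."""
    if x.dtype == bool:
        x = x.astype(np.int64)  # new in this version
    if dtype is not None:
        x = x.astype(dtype)
    return np.sum(x, axis=dim, keepdims=keepdim)

mask = np.array([True, False, True])
print(sum_ref(mask))  # 2
```
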
@@ -11858,9 +12049,7 @@ def tanhshrink(input):
 
     if input.dtype in mstype.int_type + mstype.uint_type:
         input = input.astype(mstype.float64)
-
-    tanh_op = _get_cache_prim(P.Tanh)()
-    return input - tanh_op(input)
+    return input - tanh_(input)
 
 
 def zeta(input, other):
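
`tanhshrink` is the identity minus tanh, `tanhshrink(x) = x - tanh(x)`, with integer inputs first promoted to float64 so the subtraction is well defined. In NumPy:

```python
import numpy as np

def tanhshrink_ref(x):
    """tanhshrink(x) = x - tanh(x), promoting integer inputs to float."""
    if np.issubdtype(x.dtype, np.integer):
        x = x.astype(np.float64)
    return x - np.tanh(x)

print(tanhshrink_ref(np.array([1.0, 2.0])))  # [0.23840584 1.03597242]
```
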
@@ -11905,17 +12094,17 @@ def zeta(input, other):
         if not isinstance(other, Tensor):
             raise TypeError(f"For 'zeta', at least one of the inputs should be Tensor.")
         _dtype = other.dtype
-        input = _get_cache_prim(P.Cast)()(input, _dtype)
+        input = cast_(input, _dtype)
     if isinstance(other, (int, float)):
         if not isinstance(input, Tensor):
             raise TypeError(f"For 'zeta', at least one of the inputs should be Tensor.")
         _dtype = input.dtype
-        other = _get_cache_prim(P.Cast)()(other, _dtype)
+        other = cast_(other, _dtype)
     if input.size < other.size:
         input = _get_cache_prim(P.BroadcastTo)(other.shape)(input)
     elif input.size > other.size:
         other = _get_cache_prim(P.BroadcastTo)(input.shape)(other)
-    output = _get_cache_prim(P.Zeta)()(input, other)
+    output = zeta_(input, other)
     return output
 
@@ -12079,7 +12268,7 @@ def _permute_input(input, input_dim, ret_dim):
     for value in ret_dim:
         is_transformed_dim[value] = True
 
-    # partition dim_permute
+    # partition dim_permute
     dim_permute_a, dim_permute_b = [], []
     for i in range(len(dim_permute)):
         value = dim_permute[i]
@@ -12111,7 +12300,7 @@ def _permute_input(input, input_dim, ret_dim):
     dim_permute = dim_permute_a + dim_permute_b
 
     # permute
-    input = P.Transpose()(input, tuple(dim_permute))
+    input = transpose_(input, tuple(dim_permute))
 
     return input, dim_permute
 
@@ -12129,7 +12318,7 @@ def _reshape_input(input, signal_ndim, batch_dims):
         i += 1
         if j >= len(batched_sizes):
             break
-    input = P.Reshape()(input, tuple(batched_sizes))
+    input = reshape_(input, tuple(batched_sizes))
     return input
 
@@ -12137,8 +12326,8 @@ def _check_fftwithsize_input(input, s, dim, norm, fft_func_name): # pylint: dis
     """Check the input of fftwithsize"""
     if not isinstance(input, (Tensor, Tensor_)):
         raise TypeError("For '{fft_func_name}', 'input' must be Tensor.")
-    dtypeop = P.DType()
-    input_dtype = dtypeop(input)
+
+    input_dtype = dtype_(input)
     if fft_func_name in ('FFTN', 'IFFTN'):
         if not input_dtype in (mstype.complex64, mstype.complex128):
             raise TypeError("For '{fft_func_name}', the dtype of 'input' must be complex64, complex128, "
@@ -12201,7 +12390,7 @@ def _handle_fftwithsize_output(out, input_dim, batch_dims, dim_permute, out_size
 
     type_size = np.dtype(mstype.dtype_to_nptype(out.dtype)).itemsize
     if out.shape != out_sizes or out.strides != out_strides:
-        out = as_strided(out, out_sizes, [int(i/type_size) for i in out_strides])
+        out = as_strided(out, out_sizes, [int(i / type_size) for i in out_strides])
     return out
 
@@ -12246,8 +12435,8 @@ def fft(input, n=None, dim=-1, norm=None): # pylint: disable=redefined-outer-na
     """
     if not isinstance(input, (Tensor, Tensor_)):
         raise TypeError("For 'FFT', 'input' must be Tensor.")
-    dtypeop = P.DType()
-    input_dtype = dtypeop(input)
+
+    input_dtype = dtype_(input)
     if not input_dtype in (mstype.complex64, mstype.complex128):
         raise TypeError("For 'FFT', the dtype of 'input' must be complex64, complex128, "
                         f"but got '{input_dtype}'.")
@@ -12417,8 +12606,8 @@ def ifft(input, n=None, dim=-1, norm=None): # pylint: disable=redefined-outer-n
     """
     if not isinstance(input, (Tensor, Tensor_)):
         raise TypeError("For 'IFFT', 'input' must be Tensor.")
-    dtypeop = P.DType()
-    input_dtype = dtypeop(input)
+
+    input_dtype = dtype_(input)
     if not input_dtype in (mstype.complex64, mstype.complex128):
         raise TypeError("For 'IFFT', the dtype of 'input' must be complex64, complex128, "
                         f"but got '{input_dtype}'.")
@@ -12554,6 +12743,7 @@ def _check_validate_axis(axis, name):
     if isinstance(axis, (tuple, list)):
         for idx, item in enumerate(axis):
             validator.check_value_type("axis[%d]" % idx, item, [int], name)
+
     _check(axis)
     axis = validator.check_value_type('axis', axis, [int, tuple, list], name)
     return axis
@@ -12697,7 +12887,6 @@ def _axes_int_check(x1_shape, x2_shape, axes, prim_name=None):
         raise ValueError(f"{msg_prefix} 'axes' cannot be greater than the length of 'x1_shape' and 'x2_shape', "
                          f"but got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.")
 
-
     if isinstance(axes, int):
         _check_lt_zero(axes)
         if axes == 0:
@@ -12831,7 +13020,6 @@ def tensor_dot(x1, x2, axes):
          [2. 2. 2]
          [2. 2. 2]]
     """
-    transpose_op = _get_cache_prim(P.Transpose)()
     matmul_op = _get_cache_prim(P.MatMul)(False, False)
     # input validity checks
     x1_shape = shape_(x1)
@@ -12844,8 +13032,8 @@ def tensor_dot(x1, x2, axes):
     x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape(x2_shape, axes, 1)
     output_shape = x1_ret + x2_ret  # combine free axes from both inputs
     # run tensor_dot op
-    x1_transposed = transpose_op(x1, x1_transpose_fwd)
-    x2_transposed = transpose_op(x2, x2_transpose_fwd)
+    x1_transposed = transpose_(x1, x1_transpose_fwd)
+    x2_transposed = transpose_(x2, x2_transpose_fwd)
     x1_reshaped = reshape_(x1_transposed, x1_reshape_fwd)
     x2_reshaped = reshape_(x2_transposed, x2_reshape_fwd)
     mul_result = matmul_op(x1_reshaped, x2_reshaped)
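
`tensor_dot` (like `batch_dot` further down) lowers a general contraction to one 2-D MatMul: transpose the free axes to the front, flatten to a matrix, multiply, and reshape back; this hunk only reroutes the transpose through the shared `transpose_` instance. A simplified NumPy sketch, where `axes` is a plain int contracting the trailing dims of `x1` with the leading dims of `x2` (so the transpose step is the identity and is omitted):

```python
import numpy as np

def tensor_dot_ref(x1, x2, axes):
    """Flatten both operands to 2-D, matmul once, reshape to the free axes."""
    free1, sum1 = x1.shape[:x1.ndim - axes], x1.shape[x1.ndim - axes:]
    sum2, free2 = x2.shape[:axes], x2.shape[axes:]
    a = x1.reshape(int(np.prod(free1)), int(np.prod(sum1)))
    b = x2.reshape(int(np.prod(sum2)), int(np.prod(free2)))
    return (a @ b).reshape(free1 + free2)

x1 = np.ones((2, 3, 4))
x2 = np.ones((4, 5))
out = tensor_dot_ref(x1, x2, 1)
print(out.shape, np.allclose(out, np.tensordot(x1, x2, 1)))  # (2, 3, 5) True
```
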
@@ -12907,7 +13095,7 @@ def vecdot(x, y, *, axis=-1):
     ndim = x.ndim if x.ndim > y.ndim else y.ndim
     if (axis < -ndim) or (axis >= ndim):
         raise ValueError(f"For vecdot, the dim is out of range.")
-    if (x.dtype == mstype.complex64) or (x.dtype == mstype.complex128):
+    if x.dtype in mstype.complex_type:
         x = x.conj()
     result = x * y
     result = result.sum(axis=axis)
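
The `vecdot` tweak swaps an explicit complex64/complex128 check for membership in `mstype.complex_type`; the computation itself is still `sum(conj(x) * y)` along `axis`. A NumPy sketch:

```python
import numpy as np

def vecdot_ref(x, y, axis=-1):
    """Conjugate the first argument when complex, multiply, then sum."""
    if np.issubdtype(x.dtype, np.complexfloating):
        x = np.conj(x)
    return np.sum(x * y, axis=axis)

x = np.array([1 + 1j, 2 + 0j])
y = np.array([1 + 1j, 3 + 0j])
print(vecdot_ref(x, y))  # (8+0j): conj(1+1j)*(1+1j) = 2, plus 2*3 = 6
```
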
@@ -13013,7 +13201,6 @@ def dot(input, other):
         >>> print(output.shape)
         (3, 2, 2, 1, 2)
     """
-    transpose_op = _get_cache_prim(P.Transpose)()
     matmul_op = _get_cache_prim(P.MatMul)(False, False)
     input_shape = shape_(input)
     other_shape = shape_(other)
@@ -13024,7 +13211,7 @@ def dot(input, other):
 
     if len(input_shape) > 2 or len(other_shape) > 2:
         other_shape_transpose = _get_transpose_shape(other_shape)
-        other_transpose = transpose_op(other, other_shape_transpose)
+        other_transpose = transpose_(other, other_shape_transpose)
         input_reshape = reshape_(input, (-1, input_shape[-1]))
         other_reshape = reshape_(other_transpose, (other_shape[-2], -1))
         mul_result = matmul_op(input_reshape, other_reshape)
@@ -13039,11 +13226,13 @@ def _get_batch_size(x1_shape, x2_shape, prim_name=None):
     """
     Get batch sizes from two inputs
     """
+
     def _check():
         msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
         if len(x1_shape) < 2 or len(x2_shape) < 2:
             raise ValueError(f"{msg_prefix} inputs x1, x2 should have 'dimension >= 2', "
                              f"but got 'len(x1_shape)': ({len(x1_shape)}) and 'len(x2_shape)': ({len(x2_shape)}).")
+
     _check()
     return x1_shape[0], x2_shape[0]
 
@@ -13235,8 +13424,6 @@ def batch_dot(x1, x2, axes=None):
     (2, 2, 5, 5)
 
     """
-    transpose_op = _get_cache_prim(P.Transpose)()
-    batch_matmul_op = _get_cache_prim(P.BatchMatMul)()
     squeeze_one_op = _get_cache_prim(P.Squeeze)(1)
     squeeze_minus_one_op = _get_cache_prim(P.Squeeze)(-1)
     # input validity checks
@@ -13266,13 +13453,13 @@ def batch_dot(x1, x2, axes=None):
     x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape_batchdot(x2_shape, axes, 1)
     output_shape = _get_output_shape(x1_batch_size, x1_ret, x2_ret)
 
-    x1_transposed = transpose_op(x1, x1_transpose_fwd)
-    x2_transposed = transpose_op(x2, x2_transpose_fwd)
+    x1_transposed = transpose_(x1, x1_transpose_fwd)
+    x2_transposed = transpose_(x2, x2_transpose_fwd)
     x1_reshaped = reshape_(x1_transposed, x1_reshape_fwd)
     x2_reshaped = reshape_(x2_transposed, x2_reshape_fwd)
 
     # Batch matmal op part
-    mul_result = batch_matmul_op(x1_reshaped, x2_reshaped)
+    mul_result = batch_matmul_(x1_reshaped, x2_reshaped)
 
     final_result = reshape_(mul_result, output_shape)
 
@@ -13361,6 +13548,7 @@ __all__ = [
     'exp',
     'tensor_expm1',
     'expm1',
+    'eq',
     'equal',
     'not_equal',
     'ne',
@@ -13559,5 +13747,6 @@ __all__ = [
     'vecdot',
     'dot',
     'batch_dot',
+    'eps'
 ]
 __all__.sort()