mindspore-2.2.14-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.
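The listing below only shows per-file change counts, so it can be useful to reproduce the comparison locally. A minimal sketch follows, assuming both wheels have already been downloaded into the current directory (the filenames are illustrative); it gives a file-level view only and does not compute the per-file +/- line counts shown in the list.

```python
# Minimal sketch: a wheel is a zip archive, so compare the file lists
# of the two versions. Filenames below are assumptions; adjust as needed.
import zipfile

OLD = "mindspore-2.2.14-cp37-cp37m-manylinux1_x86_64.whl"
NEW = "mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl"

def member_names(path):
    """Return the set of archive member paths inside a wheel."""
    with zipfile.ZipFile(path) as zf:
        return set(zf.namelist())

old_files = member_names(OLD)
new_files = member_names(NEW)

print("added:  ", len(new_files - old_files))
print("removed:", len(old_files - new_files))
print("common (possibly modified):", len(old_files & new_files))
```

For a per-file line diff of the common text files, one could extract both archives and run a recursive `diff` over the two trees; binary artifacts such as the `.so` libraries below will only ever show as changed or unchanged (`+0 -0`), not as line counts.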

Files changed (1154)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -4
  3. mindspore/_akg/akg/composite/build_module.py +155 -11
  4. mindspore/_akg/akg/config/repository.json +38 -0
  5. mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
  7. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
  8. mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
  9. mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
  10. mindspore/_akg/akg/utils/gen_random.py +14 -8
  11. mindspore/_akg/akg/utils/op_dsl.py +11 -0
  12. mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
  13. mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
  14. mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
  15. mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
  16. mindspore/_checkparam.py +58 -0
  17. mindspore/_extends/builtin_operations.py +2 -1
  18. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  19. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  20. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  21. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  22. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  23. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  24. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  25. mindspore/_extends/parse/__init__.py +18 -14
  26. mindspore/_extends/parse/compile_config.py +229 -0
  27. mindspore/_extends/parse/parser.py +155 -59
  28. mindspore/_extends/parse/resources.py +40 -7
  29. mindspore/_extends/parse/standard_method.py +124 -204
  30. mindspore/_extends/remote/kernel_build_server.py +2 -0
  31. mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
  32. mindspore/_profiler.py +30 -0
  33. mindspore/amp.py +24 -18
  34. mindspore/bin/cache_admin +0 -0
  35. mindspore/bin/cache_server +0 -0
  36. mindspore/boost/boost_cell_wrapper.py +1 -1
  37. mindspore/boost/group_loss_scale_manager.py +1 -1
  38. mindspore/common/__init__.py +3 -1
  39. mindspore/common/_jit_fallback_utils.py +2 -3
  40. mindspore/common/_register_for_adapter.py +7 -0
  41. mindspore/common/_stub_tensor.py +6 -1
  42. mindspore/common/_utils.py +5 -17
  43. mindspore/common/api.py +91 -48
  44. mindspore/common/auto_dynamic_shape.py +27 -14
  45. mindspore/common/dtype.py +5 -4
  46. mindspore/common/dump.py +5 -4
  47. mindspore/common/initializer.py +1 -1
  48. mindspore/common/jit_config.py +20 -11
  49. mindspore/common/lazy_inline.py +58 -17
  50. mindspore/common/mindir_util.py +12 -2
  51. mindspore/common/mutable.py +79 -14
  52. mindspore/common/parameter.py +19 -4
  53. mindspore/common/seed.py +9 -9
  54. mindspore/common/sparse_tensor.py +251 -18
  55. mindspore/common/symbol.py +122 -0
  56. mindspore/common/tensor.py +321 -433
  57. mindspore/communication/__init__.py +3 -3
  58. mindspore/communication/_comm_helper.py +5 -0
  59. mindspore/communication/management.py +53 -38
  60. mindspore/config/op_info.config +22 -54
  61. mindspore/context.py +167 -59
  62. mindspore/dataset/__init__.py +5 -5
  63. mindspore/dataset/audio/__init__.py +6 -6
  64. mindspore/dataset/audio/transforms.py +711 -158
  65. mindspore/dataset/callback/ds_callback.py +2 -2
  66. mindspore/dataset/engine/cache_client.py +2 -2
  67. mindspore/dataset/engine/datasets.py +72 -38
  68. mindspore/dataset/engine/datasets_audio.py +14 -14
  69. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  70. mindspore/dataset/engine/datasets_text.py +38 -38
  71. mindspore/dataset/engine/datasets_user_defined.py +7 -7
  72. mindspore/dataset/engine/datasets_vision.py +75 -71
  73. mindspore/dataset/engine/offload.py +5 -7
  74. mindspore/dataset/text/__init__.py +3 -3
  75. mindspore/dataset/text/transforms.py +408 -121
  76. mindspore/dataset/text/utils.py +9 -9
  77. mindspore/dataset/transforms/__init__.py +1 -1
  78. mindspore/dataset/transforms/transforms.py +261 -76
  79. mindspore/dataset/utils/browse_dataset.py +9 -9
  80. mindspore/dataset/vision/__init__.py +3 -3
  81. mindspore/dataset/vision/c_transforms.py +5 -5
  82. mindspore/dataset/vision/transforms.py +2264 -514
  83. mindspore/dataset/vision/utils.py +40 -9
  84. mindspore/dataset/vision/validators.py +7 -1
  85. mindspore/experimental/optim/__init__.py +12 -2
  86. mindspore/experimental/optim/adadelta.py +161 -0
  87. mindspore/experimental/optim/adagrad.py +168 -0
  88. mindspore/experimental/optim/adam.py +35 -34
  89. mindspore/experimental/optim/adamax.py +170 -0
  90. mindspore/experimental/optim/adamw.py +40 -16
  91. mindspore/experimental/optim/asgd.py +153 -0
  92. mindspore/experimental/optim/lr_scheduler.py +60 -119
  93. mindspore/experimental/optim/nadam.py +157 -0
  94. mindspore/experimental/optim/optimizer.py +15 -8
  95. mindspore/experimental/optim/radam.py +194 -0
  96. mindspore/experimental/optim/rmsprop.py +154 -0
  97. mindspore/experimental/optim/rprop.py +164 -0
  98. mindspore/experimental/optim/sgd.py +28 -19
  99. mindspore/hal/__init__.py +34 -0
  100. mindspore/hal/_ascend.py +57 -0
  101. mindspore/hal/_base.py +57 -0
  102. mindspore/hal/_cpu.py +56 -0
  103. mindspore/hal/_gpu.py +57 -0
  104. mindspore/hal/device.py +356 -0
  105. mindspore/hal/event.py +179 -0
  106. mindspore/hal/stream.py +337 -0
  107. mindspore/include/api/data_type.h +2 -2
  108. mindspore/include/api/dual_abi_helper.h +16 -3
  109. mindspore/include/api/model.h +1 -3
  110. mindspore/include/api/status.h +14 -0
  111. mindspore/include/c_api/model_c.h +173 -0
  112. mindspore/include/c_api/ms/base/types.h +1 -0
  113. mindspore/include/c_api/types_c.h +19 -0
  114. mindspore/include/dataset/execute.h +1 -3
  115. mindspore/include/mindapi/base/format.h +125 -23
  116. mindspore/include/mindapi/base/types.h +7 -0
  117. mindspore/lib/libdnnl.so.2 +0 -0
  118. mindspore/lib/libmindspore.so +0 -0
  119. mindspore/lib/libmindspore_backend.so +0 -0
  120. mindspore/lib/libmindspore_common.so +0 -0
  121. mindspore/lib/libmindspore_core.so +0 -0
  122. mindspore/lib/libmindspore_glog.so.0 +0 -0
  123. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  124. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  125. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  126. mindspore/lib/libmindspore_shared_lib.so +0 -0
  127. mindspore/lib/libmpi_adapter.so +0 -0
  128. mindspore/lib/libmpi_collective.so +0 -0
  129. mindspore/lib/libnnacl.so +0 -0
  130. mindspore/lib/libopencv_core.so.4.5 +0 -0
  131. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  132. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  133. mindspore/lib/libps_cache.so +0 -0
  134. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
  135. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
  136. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
  137. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
  138. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
  139. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
  140. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  141. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
  142. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  143. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  144. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6325 -1767
  145. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  146. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
  147. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
  148. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
  162. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  163. mindspore/lib/plugin/ascend/libakg.so +0 -0
  164. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  165. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  166. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  167. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  168. mindspore/lib/plugin/cpu/libakg.so +0 -0
  169. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  170. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  171. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  172. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  173. mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
  174. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  175. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  176. mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
  177. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  178. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  179. mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
  180. mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
  181. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  182. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  183. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  184. mindspore/mindrecord/__init__.py +5 -1
  185. mindspore/mindrecord/config.py +809 -0
  186. mindspore/mindrecord/filereader.py +25 -0
  187. mindspore/mindrecord/filewriter.py +74 -56
  188. mindspore/mindrecord/mindpage.py +40 -6
  189. mindspore/mindrecord/shardutils.py +3 -2
  190. mindspore/mindrecord/shardwriter.py +7 -0
  191. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  192. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  193. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  194. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  195. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  196. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  197. mindspore/multiprocessing/__init__.py +68 -0
  198. mindspore/nn/cell.py +86 -133
  199. mindspore/nn/dynamic_lr.py +2 -2
  200. mindspore/nn/layer/activation.py +79 -90
  201. mindspore/nn/layer/basic.py +4 -80
  202. mindspore/nn/layer/channel_shuffle.py +3 -16
  203. mindspore/nn/layer/container.py +3 -3
  204. mindspore/nn/layer/conv.py +71 -71
  205. mindspore/nn/layer/embedding.py +105 -44
  206. mindspore/nn/layer/image.py +4 -7
  207. mindspore/nn/layer/normalization.py +46 -38
  208. mindspore/nn/layer/padding.py +26 -39
  209. mindspore/nn/layer/pooling.py +13 -9
  210. mindspore/nn/layer/rnn_cells.py +5 -15
  211. mindspore/nn/layer/rnns.py +6 -5
  212. mindspore/nn/layer/thor_layer.py +1 -2
  213. mindspore/nn/layer/timedistributed.py +1 -1
  214. mindspore/nn/layer/transformer.py +52 -50
  215. mindspore/nn/learning_rate_schedule.py +6 -5
  216. mindspore/nn/loss/loss.py +43 -64
  217. mindspore/nn/optim/ada_grad.py +4 -2
  218. mindspore/nn/optim/adadelta.py +3 -1
  219. mindspore/nn/optim/adafactor.py +1 -1
  220. mindspore/nn/optim/adam.py +102 -181
  221. mindspore/nn/optim/adamax.py +4 -2
  222. mindspore/nn/optim/adasum.py +2 -2
  223. mindspore/nn/optim/asgd.py +4 -2
  224. mindspore/nn/optim/ftrl.py +31 -61
  225. mindspore/nn/optim/lamb.py +5 -3
  226. mindspore/nn/optim/lars.py +2 -2
  227. mindspore/nn/optim/lazyadam.py +6 -4
  228. mindspore/nn/optim/momentum.py +13 -25
  229. mindspore/nn/optim/optimizer.py +6 -3
  230. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  231. mindspore/nn/optim/rmsprop.py +9 -3
  232. mindspore/nn/optim/rprop.py +4 -2
  233. mindspore/nn/optim/sgd.py +6 -5
  234. mindspore/nn/optim/thor.py +2 -2
  235. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  236. mindspore/nn/probability/distribution/beta.py +2 -2
  237. mindspore/nn/probability/distribution/categorical.py +4 -6
  238. mindspore/nn/probability/distribution/cauchy.py +2 -2
  239. mindspore/nn/probability/distribution/exponential.py +1 -1
  240. mindspore/nn/probability/distribution/gumbel.py +2 -2
  241. mindspore/nn/probability/distribution/poisson.py +2 -2
  242. mindspore/nn/probability/distribution/uniform.py +2 -2
  243. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  244. mindspore/nn/wrap/__init__.py +2 -1
  245. mindspore/nn/wrap/cell_wrapper.py +33 -12
  246. mindspore/nn/wrap/grad_reducer.py +148 -8
  247. mindspore/nn/wrap/loss_scale.py +7 -7
  248. mindspore/numpy/__init__.py +2 -0
  249. mindspore/numpy/array_creations.py +2 -0
  250. mindspore/numpy/array_ops.py +1 -5
  251. mindspore/numpy/fft.py +431 -0
  252. mindspore/numpy/math_ops.py +54 -60
  253. mindspore/numpy/utils.py +3 -0
  254. mindspore/ops/__init__.py +5 -4
  255. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  256. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -22
  257. mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
  258. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  259. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  260. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  261. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  262. mindspore/ops/_op_impl/__init__.py +0 -1
  263. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  264. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  265. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  266. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  267. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  268. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  269. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  270. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  271. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  272. mindspore/ops/_vmap/vmap_array_ops.py +137 -101
  273. mindspore/ops/_vmap/vmap_base.py +8 -1
  274. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  275. mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
  276. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  277. mindspore/ops/_vmap/vmap_math_ops.py +74 -49
  278. mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
  279. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  280. mindspore/ops/auto_generate/__init__.py +31 -0
  281. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
  282. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
  283. mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
  284. mindspore/ops/auto_generate/gen_extend_func.py +130 -0
  285. mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
  286. mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
  287. mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
  288. mindspore/ops/composite/__init__.py +5 -2
  289. mindspore/ops/composite/base.py +118 -17
  290. mindspore/ops/composite/math_ops.py +9 -48
  291. mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
  292. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
  293. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  294. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  296. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  297. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  298. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  299. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  300. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  301. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  302. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  303. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  304. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  305. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  306. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  307. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  308. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  309. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  310. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  311. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  312. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  313. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  314. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  315. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  316. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  317. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  318. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  319. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  320. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  321. mindspore/ops/deprecated.py +14 -3
  322. mindspore/ops/extend/__init__.py +46 -0
  323. mindspore/ops/extend/array_func.py +152 -0
  324. mindspore/ops/extend/math_func.py +76 -0
  325. mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
  326. mindspore/ops/function/__init__.py +19 -11
  327. mindspore/ops/function/array_func.py +251 -1440
  328. mindspore/ops/function/clip_func.py +12 -13
  329. mindspore/ops/function/debug_func.py +1 -4
  330. mindspore/ops/function/fft_func.py +31 -0
  331. mindspore/ops/function/grad/grad_func.py +24 -17
  332. mindspore/ops/function/image_func.py +27 -21
  333. mindspore/ops/function/linalg_func.py +35 -68
  334. mindspore/ops/function/math_func.py +451 -2360
  335. mindspore/ops/function/nn_func.py +459 -780
  336. mindspore/ops/function/other_func.py +4 -5
  337. mindspore/ops/function/parameter_func.py +5 -93
  338. mindspore/ops/function/random_func.py +24 -80
  339. mindspore/ops/function/sparse_unary_func.py +9 -16
  340. mindspore/ops/function/spectral_func.py +1 -1
  341. mindspore/ops/function/vmap_func.py +14 -14
  342. mindspore/ops/functional.py +56 -62
  343. mindspore/ops/op_info_register.py +22 -19
  344. mindspore/ops/operations/__init__.py +19 -19
  345. mindspore/ops/operations/_grad_ops.py +20 -723
  346. mindspore/ops/operations/_inner_ops.py +178 -286
  347. mindspore/ops/operations/_scalar_ops.py +5 -480
  348. mindspore/ops/operations/_sequence_ops.py +4 -34
  349. mindspore/ops/operations/array_ops.py +99 -2491
  350. mindspore/ops/operations/comm_ops.py +38 -46
  351. mindspore/ops/operations/custom_ops.py +8 -8
  352. mindspore/ops/operations/debug_ops.py +100 -31
  353. mindspore/ops/operations/image_ops.py +1 -217
  354. mindspore/ops/operations/inner_ops.py +3 -38
  355. mindspore/ops/operations/linalg_ops.py +1 -49
  356. mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
  357. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  358. mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
  359. mindspore/ops/operations/math_ops.py +703 -4601
  360. mindspore/ops/operations/nn_ops.py +374 -1748
  361. mindspore/ops/operations/other_ops.py +50 -42
  362. mindspore/ops/operations/random_ops.py +3 -52
  363. mindspore/ops/primitive.py +196 -96
  364. mindspore/ops_generate/__init__.py +27 -0
  365. mindspore/ops_generate/arg_dtype_cast.py +248 -0
  366. mindspore/ops_generate/arg_handler.py +147 -0
  367. mindspore/ops_generate/gen_aclnn_implement.py +266 -0
  368. mindspore/ops_generate/gen_ops.py +1062 -0
  369. mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
  370. mindspore/ops_generate/gen_pyboost_func.py +932 -0
  371. mindspore/ops_generate/gen_utils.py +188 -0
  372. mindspore/ops_generate/op_proto.py +138 -0
  373. mindspore/ops_generate/pyboost_utils.py +364 -0
  374. mindspore/ops_generate/template.py +238 -0
  375. mindspore/parallel/__init__.py +5 -4
  376. mindspore/parallel/_auto_parallel_context.py +21 -76
  377. mindspore/parallel/_cell_wrapper.py +16 -9
  378. mindspore/parallel/_cost_model_context.py +1 -1
  379. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  380. mindspore/parallel/_parallel_serialization.py +30 -46
  381. mindspore/parallel/_ps_context.py +1 -1
  382. mindspore/parallel/_recovery_context.py +1 -1
  383. mindspore/parallel/_tensor.py +19 -7
  384. mindspore/parallel/_transformer/__init__.py +1 -1
  385. mindspore/parallel/_transformer/layers.py +1 -1
  386. mindspore/parallel/_transformer/loss.py +1 -1
  387. mindspore/parallel/_transformer/moe.py +1 -1
  388. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  389. mindspore/parallel/_transformer/transformer.py +1 -1
  390. mindspore/parallel/_utils.py +131 -6
  391. mindspore/parallel/algo_parameter_config.py +6 -6
  392. mindspore/parallel/checkpoint_transform.py +180 -196
  393. mindspore/parallel/cluster/__init__.py +15 -0
  394. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  395. mindspore/parallel/cluster/process_entity/_api.py +345 -0
  396. mindspore/parallel/cluster/process_entity/_utils.py +116 -0
  397. mindspore/parallel/cluster/run.py +139 -0
  398. mindspore/parallel/mpi/__init__.py +1 -1
  399. mindspore/parallel/mpi/_mpi_config.py +1 -1
  400. mindspore/parallel/parameter_broadcast.py +152 -0
  401. mindspore/parallel/shard.py +99 -2
  402. mindspore/profiler/common/util.py +20 -0
  403. mindspore/profiler/envprofiling.py +1 -1
  404. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  405. mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
  406. mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
  407. mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
  408. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
  409. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
  410. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
  411. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
  412. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  413. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
  414. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  415. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  416. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  417. mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
  418. mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
  419. mindspore/profiler/parser/ascend_msprof_generator.py +68 -285
  420. mindspore/profiler/parser/ascend_op_generator.py +75 -42
  421. mindspore/profiler/parser/ascend_timeline_generator.py +293 -135
  422. mindspore/profiler/parser/base_timeline_generator.py +6 -0
  423. mindspore/profiler/parser/framework_parser.py +3 -2
  424. mindspore/profiler/parser/integrator.py +3 -1
  425. mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
  426. mindspore/profiler/parser/msadvisor_parser.py +1 -1
  427. mindspore/profiler/parser/profiler_info.py +5 -0
  428. mindspore/profiler/profiling.py +296 -166
  429. mindspore/rewrite/__init__.py +2 -13
  430. mindspore/rewrite/api/node.py +121 -35
  431. mindspore/rewrite/api/pattern_engine.py +2 -3
  432. mindspore/rewrite/api/scoped_value.py +16 -15
  433. mindspore/rewrite/api/symbol_tree.py +45 -29
  434. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  435. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  436. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  437. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  438. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  439. mindspore/rewrite/common/__init__.py +1 -2
  440. mindspore/rewrite/common/config.py +24 -0
  441. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  442. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  443. mindspore/rewrite/common/namespace.py +118 -0
  444. mindspore/rewrite/node/__init__.py +5 -5
  445. mindspore/rewrite/node/call_function.py +23 -7
  446. mindspore/rewrite/node/cell_container.py +7 -3
  447. mindspore/rewrite/node/control_flow.py +53 -28
  448. mindspore/rewrite/node/node.py +212 -196
  449. mindspore/rewrite/node/node_manager.py +51 -22
  450. mindspore/rewrite/node/node_topological_manager.py +3 -23
  451. mindspore/rewrite/parsers/__init__.py +12 -0
  452. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  453. mindspore/rewrite/parsers/assign_parser.py +635 -413
  454. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  455. mindspore/rewrite/parsers/class_def_parser.py +107 -144
  456. mindspore/rewrite/parsers/constant_parser.py +5 -5
  457. mindspore/rewrite/parsers/container_parser.py +4 -6
  458. mindspore/rewrite/parsers/expr_parser.py +55 -0
  459. mindspore/rewrite/parsers/for_parser.py +31 -98
  460. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  461. mindspore/rewrite/parsers/if_parser.py +28 -10
  462. mindspore/rewrite/parsers/module_parser.py +8 -182
  463. mindspore/rewrite/parsers/parser.py +1 -5
  464. mindspore/rewrite/parsers/parser_register.py +1 -1
  465. mindspore/rewrite/parsers/return_parser.py +5 -10
  466. mindspore/rewrite/parsers/while_parser.py +59 -0
  467. mindspore/rewrite/sparsify/utils.py +1 -1
  468. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  469. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  470. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  471. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  472. mindspore/run_check/_check_version.py +6 -14
  473. mindspore/run_check/run_check.py +1 -1
  474. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  475. mindspore/scipy/__init__.py +2 -1
  476. mindspore/scipy/fft.py +133 -0
  477. mindspore/scipy/linalg.py +140 -55
  478. mindspore/scipy/ops.py +15 -71
  479. mindspore/scipy/ops_grad.py +5 -34
  480. mindspore/scipy/optimize/line_search.py +2 -2
  481. mindspore/scipy/optimize/minimize.py +1 -1
  482. mindspore/train/__init__.py +3 -2
  483. mindspore/train/_utils.py +178 -4
  484. mindspore/train/amp.py +167 -245
  485. mindspore/train/callback/_backup_and_restore.py +4 -4
  486. mindspore/train/callback/_callback.py +4 -4
  487. mindspore/train/callback/_checkpoint.py +39 -13
  488. mindspore/train/callback/_early_stop.py +2 -2
  489. mindspore/train/callback/_landscape.py +14 -8
  490. mindspore/train/callback/_loss_monitor.py +2 -2
  491. mindspore/train/callback/_on_request_exit.py +2 -2
  492. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  493. mindspore/train/callback/_summary_collector.py +7 -7
  494. mindspore/train/callback/_time_monitor.py +2 -2
  495. mindspore/train/data_sink.py +1 -1
  496. mindspore/train/dataset_helper.py +13 -4
  497. mindspore/train/loss_scale_manager.py +2 -2
  498. mindspore/train/metrics/accuracy.py +7 -7
  499. mindspore/train/metrics/confusion_matrix.py +8 -6
  500. mindspore/train/metrics/cosine_similarity.py +6 -4
  501. mindspore/train/metrics/error.py +2 -2
  502. mindspore/train/metrics/metric.py +3 -3
  503. mindspore/train/metrics/perplexity.py +2 -1
  504. mindspore/train/metrics/topk.py +2 -2
  505. mindspore/train/mind_ir_pb2.py +75 -6
  506. mindspore/train/model.py +24 -22
  507. mindspore/train/serialization.py +256 -132
  508. mindspore/train/summary/summary_record.py +51 -28
  509. mindspore/train/train_thor/convert_utils.py +3 -3
  510. mindspore/version.py +1 -1
  511. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
  512. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +515 -1061
  513. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
  514. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  515. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  516. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  517. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  518. mindspore/config/super_bar_config.json +0 -544
  519. mindspore/gen_ops.py +0 -273
  520. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  521. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  522. mindspore/nn/layer/flash_attention.py +0 -189
  523. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  524. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  525. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  526. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  527. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  528. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  529. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  530. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  531. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  532. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  533. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  534. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  535. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  536. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  537. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  538. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  539. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  540. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  541. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  542. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  543. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  544. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  545. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  546. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  547. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  548. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  549. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  550. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  551. mindspore/ops/_op_impl/tbe/add.py +0 -42
  552. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  553. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  554. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  555. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  556. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  557. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  558. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  559. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  560. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  561. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  562. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  563. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  564. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  565. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  566. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  567. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  568. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  569. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  570. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  571. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  572. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  573. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  574. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  575. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  576. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  577. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  578. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  579. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  580. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  581. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  582. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  583. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  584. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  585. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  586. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  587. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  588. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  589. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  590. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  591. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  592. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  593. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  594. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  595. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  596. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  597. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  598. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  599. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  600. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  601. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  602. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  603. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  604. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  605. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  606. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  607. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  608. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  609. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  610. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  611. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  612. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  613. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  614. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  615. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  616. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  617. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  618. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  619. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  620. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  621. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  622. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  623. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  624. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  625. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  626. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  627. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  628. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  629. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  630. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  631. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  632. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  633. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  634. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  635. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  636. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  637. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  638. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  639. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  640. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  641. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  642. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  643. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  644. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  645. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  646. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  647. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  648. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  649. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  650. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  651. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  652. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  653. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  654. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  655. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  656. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  657. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  658. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  659. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  660. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  661. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  662. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  663. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  664. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  665. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  666. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  667. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  668. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  669. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  670. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  671. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  672. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  673. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  674. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  675. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  676. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  677. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  678. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  679. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  680. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  681. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  682. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  683. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  684. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  685. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  686. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  687. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  688. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  689. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  690. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  691. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  692. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  693. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  694. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  695. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  696. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  697. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  698. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  699. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  700. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  701. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  702. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  703. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  704. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  705. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  706. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  707. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  708. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  709. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  710. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  711. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  712. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  713. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  714. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  715. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  716. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  717. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  718. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  719. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  720. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  721. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  722. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  723. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  724. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  725. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  726. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  727. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  728. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  729. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  730. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  731. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  732. mindspore/ops/_op_impl/tbe/div.py +0 -41
  733. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  734. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  735. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  736. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  737. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  738. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  739. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  740. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  741. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  742. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  743. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  744. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  745. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  746. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  747. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  748. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  749. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  750. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  751. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  752. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  753. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  754. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  755. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  756. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  757. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  758. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  759. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  760. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  761. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  762. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  763. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  764. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  765. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  766. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  767. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  768. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  769. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  770. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  771. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  772. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  773. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  774. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  775. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  776. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  777. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  778. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  779. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  780. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  781. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  782. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  783. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  784. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  785. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  786. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  787. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  788. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  789. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  790. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  791. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  792. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  793. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  794. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  795. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  796. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  797. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  798. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  799. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  800. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  801. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  802. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  803. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  804. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  805. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  806. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  807. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  808. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  809. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  810. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  811. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  812. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  813. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  814. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  815. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  816. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  817. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  818. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  819. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  820. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  821. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  822. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  823. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  824. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  825. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  826. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  827. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  828. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  829. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  830. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  831. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  832. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  833. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  834. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  835. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  836. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  837. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  838. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  839. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  840. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  841. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  842. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  843. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  844. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  845. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  846. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  847. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  848. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  849. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  850. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  851. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  852. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  853. mindspore/ops/_op_impl/tbe/less.py +0 -41
  854. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  855. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  856. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  857. mindspore/ops/_op_impl/tbe/log.py +0 -40
  858. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  859. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  860. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  861. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  862. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  863. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  864. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  865. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  866. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  867. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  868. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  869. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  870. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  871. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  872. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  873. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  874. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  875. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  876. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  877. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  878. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  879. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  880. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  881. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  882. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  883. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  884. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  885. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  886. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  887. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  888. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  889. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  890. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  891. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  892. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  893. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  894. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  895. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  896. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  897. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  898. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  899. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  900. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  901. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  902. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  903. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  904. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  905. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  906. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  907. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  908. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  909. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  910. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  911. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  912. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  913. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  914. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  915. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  916. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  917. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  918. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  919. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  920. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  921. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  922. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  923. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  924. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  925. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  926. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  927. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  928. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  929. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  930. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  931. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  932. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  933. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  934. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  935. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  936. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  937. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  938. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  939. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  940. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  941. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  942. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  943. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  944. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  945. mindspore/ops/_op_impl/tbe/range.py +0 -39
  946. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  947. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  948. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  949. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  950. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  951. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  952. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  953. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  954. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  955. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  956. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  957. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  958. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  959. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  960. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  961. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  962. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  963. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  964. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  965. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  966. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  967. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  968. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  969. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  970. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  971. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  972. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  973. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  974. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  975. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  976. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  977. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  978. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  979. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  980. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  981. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  982. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  983. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  984. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  985. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  986. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  987. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  988. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  989. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  990. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  991. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  992. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  993. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  994. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  995. mindspore/ops/_op_impl/tbe/round.py +0 -38
  996. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  997. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  998. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  999. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  1000. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1001. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1002. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1003. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1004. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1005. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1006. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1007. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1008. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1009. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1010. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1011. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1012. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1013. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1014. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1015. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1016. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1017. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1018. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1019. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1020. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1021. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1022. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1023. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1024. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1025. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1026. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1027. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1028. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1029. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1030. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1031. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1032. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1033. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1034. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1035. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1036. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1037. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1038. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1039. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1040. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1041. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1042. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1043. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1044. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1045. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1046. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1047. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1048. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1049. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1050. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1051. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1052. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1053. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1054. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1055. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1057. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1058. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1060. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1061. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1062. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1063. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1064. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1065. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1066. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1067. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1068. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1069. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1070. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1071. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1072. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1073. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1074. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1075. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1076. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1077. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1078. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1079. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1080. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1081. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1082. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1083. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1084. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1085. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1086. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1087. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1088. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1089. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1090. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1091. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1092. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1093. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1094. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1095. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1096. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1097. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1098. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1099. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1100. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1101. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1102. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1103. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1104. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1105. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1106. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1107. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1108. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1109. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1110. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1111. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1112. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1113. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1114. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1115. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1116. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1117. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1118. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1119. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1120. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1121. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1122. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1123. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1124. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1125. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1126. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1127. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1128. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1129. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1130. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1131. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1132. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1133. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1134. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1135. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1136. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1137. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1138. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1139. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1140. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1141. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1142. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1143. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1144. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1145. mindspore/ops/_tracefunc.py +0 -241
  1146. mindspore/ops/arg_dtype_cast.py +0 -54
  1147. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1148. mindspore/rewrite/ast_creator_register.py +0 -37
  1149. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1150. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1151. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1152. mindspore/rewrite/namespace.py +0 -53
  1153. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
  1154. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 
+# pylint: disable=unused-import
 """Defines math operators with functional form."""
 
 import collections
@@ -21,6 +22,7 @@ import math
 import numbers
 import numpy as np
 
+import mindspore as ms
 from mindspore import log as logger
 import mindspore.ops as ops
 from mindspore.common import dtype as mstype
@@ -28,13 +30,18 @@ from mindspore.ops import operations as P
 from mindspore.ops import composite as C
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.primitive import constexpr, _primexpr
-from mindspore.ops.operations._inner_ops import Cummin, TileSize
+from mindspore.ops.operations._inner_ops import TileSize
+from mindspore.ops.auto_generate import Cummin
 from mindspore.ops.operations.math_ops import STFT
-from mindspore.ops.operations.math_ops import Logit
 from mindspore.ops.operations.math_ops import LuUnpack
 from mindspore.ops.operations.math_ops import Roll
 from mindspore.ops.operations.math_ops import Ormqr
 from mindspore.ops.operations.array_ops import MatrixSetDiagV3, Transpose
+from mindspore.ops.auto_generate import (minimum, maximum, mul, sin, sinc, sinh, cummax, real, conj, add, sub, cos, cosh,
+                                         matrix_exp, sqrt, rsqrt, square, trace, nextafter, abs, acos, acosh, angle,
+                                         asin, asinh, atan, atan2, atanh, ceil, equal, erf, erfc, erfinv, exp, expm1,
+                                         floor, floor_divide, floor_mod, gcd, greater, greater_equal, less, less_equal,
+                                         log, log1p, neg, not_equal, pow, round)
 from mindspore.nn import layer
 from mindspore._checkparam import check_is_number
 from mindspore import _checkparam as validator
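Note on the import hunk above: the hand-written functional wrappers removed later in this diff (abs, add, sub, mul, pow, exp, log, sin, cos, ceil, floor, round, neg, and friends) are now pulled in as generated bindings from mindspore.ops.auto_generate, so the public mindspore.ops entry points keep their old names. A minimal sketch of the unchanged call path (illustrative only; the printed outputs are taken from the docstring examples removed further down, not re-run against this build):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
    y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
    # ops.add / ops.mul now resolve to the auto-generated kernels imported above.
    print(ops.add(x, y))   # [5. 7. 9.]
    print(ops.mul(x, y))   # [ 4. 10. 18.]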
@@ -63,7 +70,6 @@ from mindspore.ops.operations.math_ops import (
     Heaviside,
     Lcm,
     Gcd,
-    Sinc,
     Quantile,
     NanToNum,
     SparseSegmentMean,
@@ -101,128 +107,125 @@ def get_x_shape(x_shape):
 # Public Operation Functions.
 #####################################
 absolute_ = P.Abs()
-tensor_ceil = P.Ceil()
+cast_ = P.Cast()
 tensor_add = P.Add()
-neg_tensor = P.Neg()
-tensor_sub = P.Sub()
-tensor_mul = P.Mul()
+tensor_ceil = P.Ceil()
 tensor_div = P.RealDiv()
+tensor_exp = P.Exp()
+tensor_expm1 = P.Expm1()
 tensor_floordiv = P.FloorDiv()
 floordiv = tensor_floordiv
-xdivy_ = P.Xdivy()
-tensor_pow = P.Pow()
-pows = tensor_pow
+tensor_ge = P.GreaterEqual()
+tensor_gt = greater
+tensor_le = P.LessEqual()
+tensor_lt = P.Less()
 tensor_mod = P.FloorMod()
 floormod = tensor_mod
-tensor_exp = P.Exp()
-tensor_expm1 = P.Expm1()
-tensor_lt = P.Less()
-tensor_le = P.LessEqual()
-tensor_gt = P.Greater()
-tensor_ge = P.GreaterEqual()
+tensor_mul = P.Mul()
+tensor_pow = P.Pow()
+pows = tensor_pow
+tensor_sub = P.Sub()
 transpose_ = P.Transpose()
-not_equal_ = P.NotEqual()
-cast_ = P.Cast()
+xdivy_ = P.Xdivy()
 
 #####################################
 # Private Operation Functions.
 #####################################
+accumulate_ = P.AccumulateNV2()
+acos_ = P.ACos()
+acosh_ = P.Acosh()
 addcdiv_ = P.Addcdiv()
 addcuml_ = P.Addcmul()
 addn_ = P.AddN()
 angle_ = Angle()
-log_ = P.Log()
-floor_ = P.Floor()
-logical_not_ = P.LogicalNot()
-logical_or_ = P.LogicalOr()
-logical_and_ = P.LogicalAnd()
-sin_ = P.Sin()
-sinc_ = Sinc()
-cos_ = P.Cos()
-tan_ = P.Tan()
 asin_ = P.Asin()
-polar_ = Polar()
-acos_ = P.ACos()
-atan_ = P.Atan()
-atan2_ = P.Atan2()
-sinh_ = P.Sinh()
-cosh_ = P.Cosh()
-tanh_ = P.Tanh()
 asinh_ = P.Asinh()
-acosh_ = P.Acosh()
+atan2_ = P.Atan2()
+atan_ = P.Atan()
 atanh_ = P.Atanh()
-bitwise_and_ = P.BitwiseAnd()
-bitwise_or_ = P.BitwiseOr()
-bitwise_xor_ = P.BitwiseXor()
-inv_ = P.math_ops.Inv()
-invert_ = P.Invert()
-erf_ = P.Erf()
-erfc_ = P.Erfc()
-bessel_j1_ = BesselJ1()
-bessel_j0_ = BesselJ0()
+batch_matmul_ = P.BatchMatMul()
 bessel_i0_ = BesselI0()
 bessel_i0e_ = P.BesselI0e()
-bessel_k0_ = BesselK0()
-bessel_k0e_ = BesselK0e()
-bessel_y0_ = BesselY0()
-bessel_y1_ = BesselY1()
 bessel_i1_ = BesselI1()
 bessel_i1e_ = P.BesselI1e()
+bessel_j0_ = BesselJ0()
+bessel_j1_ = BesselJ1()
+bessel_k0_ = BesselK0()
+bessel_k0e_ = BesselK0e()
 bessel_k1_ = BesselK1()
 bessel_k1e_ = BesselK1e()
-equal_ = P.Equal()
-isfinite_ = P.IsFinite()
-isnan_ = P.IsNan()
-maximum_ = P.Maximum()
-minimum_ = P.Minimum()
-lerp_ = P.Lerp()
-tensor_round_ = P.Round()
-linspace_ = P.LinSpace()
-matrix_exp_ = MatrixExp()
-exp2_ = P.Pow()
-trunc_ = P.Trunc()
-truncate_div_ = P.TruncateDiv()
-truncate_mod_ = P.TruncateMod()
-sparse_segment_mean_ = SparseSegmentMean()
-lu_unpack_ = LuUnpack()
-xlogy_ = P.Xlogy()
-square_ = P.Square()
-sqrt_ = P.Sqrt()
+bessel_y0_ = BesselY0()
+bessel_y1_ = BesselY1()
+bitwise_and_ = P.BitwiseAnd()
+bitwise_or_ = P.BitwiseOr()
+bitwise_xor_ = P.BitwiseXor()
+conj_ = P.Conj()
+cumprod_ = P.CumProd()
 cumsum_ = P.CumSum()
-shape_ = P.Shape()
-reshape_ = P.Reshape()
+cumulative_logsumexp_ = CumulativeLogsumexp()
+digamma_ = P.Digamma()
+div_ = P.Div()
 dtype_ = P.DType()
 eps_ = P.Eps()
-rank_ = P.Rank()
+erf_ = P.Erf()
+erfc_ = P.Erfc()
+erfinv_ = P.Erfinv()
+exp2_ = P.Pow()
 expand_dims_ = P.ExpandDims()
-sign_ = P.Sign()
-nextafter_ = P.NextAfter()
-matrix_inverse_ = P.MatrixInverse()
-matrix_determinant_ = P.MatrixDeterminant()
-log_matrix_determinant_ = P.LogMatrixDeterminant()
-trace_ = P.Trace()
-real_ = P.Real()
-rsqrt_ = P.Rsqrt()
-reciprocal_ = P.Reciprocal()
-tile_ = P.Tile()
-batch_matmul_ = P.BatchMatMul()
 fill_v2_ = P.FillV2()
+floor_ = P.Floor()
+gcd_ = Gcd()
+igamma_ = Igamma()
+igammac_ = Igammac()
 imag_ = P.Imag()
-log1p_ = P.Log1p()
-accumulate_ = P.AccumulateNV2()
-conj_ = P.Conj()
-erfinv_ = P.Erfinv()
-cumprod_ = P.CumProd()
+inv_ = P.math_ops.Inv()
+invert_ = P.Invert()
+isfinite_ = P.IsFinite()
+isinf_ = P.IsInf()
+isnan_ = P.IsNan()
+lcm_ = Lcm()
+lerp_ = P.Lerp()
 lgamma_ = P.Lgamma()
-digamma_ = P.Digamma()
+linspace_ = P.LinSpace()
+log1p_ = P.Log1p()
+log_ = P.Log()
+log_matrix_determinant_ = P.LogMatrixDeterminant()
+logical_and_ = P.LogicalAnd()
+logical_not_ = P.LogicalNot()
+logical_or_ = P.LogicalOr()
+logical_xor_ = P.LogicalXor()
+lu_solve_ = LuSolve()
+lu_unpack_ = LuUnpack()
+matmul_ = P.MatMul()
+matrix_determinant_ = P.MatrixDeterminant()
+matrix_inverse_ = P.MatrixInverse()
+mod_ = P.Mod()
+nextafter_ = P.NextAfter()
+ones_ = P.Ones()
+polar_ = Polar()
 poly_gamma_ = P.Polygamma()
-isinf_ = P.IsInf()
+rank_ = P.Rank()
+reciprocal_ = P.Reciprocal()
+reduce_sum_ = P.ReduceSum()
+reshape_ = P.Reshape()
+select_ = P.Select()
+slice_ = P.Slice()
+size_ = P.Size()
+scalar_to_tensor_ = P.ScalarToTensor()
+shape_ = P.Shape()
+sign_ = P.Sign()
+sparse_segment_mean_ = SparseSegmentMean()
+tan_ = P.Tan()
+tanh_ = P.Tanh()
+tensor_round_ = P.Round()
+tile_ = P.Tile()
+tile_size_ = TileSize()
+trunc_ = P.Trunc()
+truncate_div_ = P.TruncateDiv()
+truncate_mod_ = P.TruncateMod()
+xlogy_ = P.Xlogy()
 zeros_ = P.Zeros()
-ones_ = P.Ones()
-logical_xor_ = P.LogicalXor()
 zeta_ = P.Zeta()
-div_ = P.Div()
-matmul_ = P.MatMul()
 
 
 #####################################
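The hunk above only alphabetizes and extends the module-level primitive-instance tables; behavior is unchanged. A minimal sketch of the pattern these tables implement (a primitive is instantiated once at import time and reused inside the functional wrappers; the names below mirror the table but are re-declared locally for illustration):

    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    # One shared instance per primitive, created once, then called like a function.
    cast_ = P.Cast()
    shape_ = P.Shape()

    x = Tensor([1.5, 2.5], mindspore.float32)
    print(shape_(x))                   # (2,)
    print(cast_(x, mindspore.int32))   # cast to int32 (truncation semantics assumed)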
@@ -262,39 +265,6 @@ def addn(x):
     return addn_(x)
 
 
-def abs(input):
-    r"""
-    Returns absolute value of a tensor element-wise.
-
-    .. math::
-
-        out_i = |input_i|
-
-    Args:
-        input (Tensor): The input tensor. The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
-        >>> output = ops.abs(input)
-        >>> print(output)
-        [1. 1. 0.]
-    """
-    return absolute_(input)
-
-
 def absolute(input):
     """
     Alias for :func:`mindspore.ops.abs` .
@@ -305,70 +275,10 @@ def absolute(input):
     return abs(input)
 
 
-def add(input, other):
-    r"""
-    Adds other value to input Tensor.
-
-    .. math::
-
-        out_{i} = input_{i} + other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-        - When input is Tensor, it's dimension should be greater than or equal to 1.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one of the input `input` , `other` after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, number.Number, bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> # case 1: x and y are both Tensor.
-        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
-        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> output = ops.add(x, y)
-        >>> print(output)
-        [5. 7. 9.]
-        >>> # case 2: x is a scalar and y is a Tensor
-        >>> x = Tensor(1, mindspore.int32)
-        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> output = ops.add(x, y)
-        >>> print(output)
-        [5. 6. 7.]
-        >>> # the data type of x is int32, the data type of y is float32,
-        >>> # and the output is the data format of higher precision float32.
-        >>> print(output.dtype)
-        Float32
-    """
-    return tensor_add(input, other)
-
-
 def addcdiv(input, tensor1, tensor2, value=1):
     r"""
     Performs the element-wise division of tensor tensor1 by tensor tensor2,
-    multiply the result by the scalar value and add it to input_data.
+    multiply the result by the scalar value and add it to input data.
 
     .. math::
         y[i] = input[i] + value[i] * (tensor1[i] / tensor2[i])
@@ -409,7 +319,7 @@ def addcdiv(input, tensor1, tensor2, value=1):
 def addcmul(input, tensor1, tensor2, value=1):
     r"""
     Performs the element-wise product of tensor tensor1 and tensor tensor2,
-    multiply the result by the scalar value and add it to input_data.
+    multiply the result by the scalar value and add it to input data.
 
     .. math::
         output[i] = input[i] + value[i] * (tensor1[i] * tensor2[i])
@@ -421,7 +331,7 @@ def addcmul(input, tensor1, tensor2, value=1):
         value (Union[Tensor, Number]): The multiplier for tensor1*tensor2. Default: ``1`` .
 
     Returns:
-        Tensor, has the same shape and dtype as x1*x2.
+        Tensor, has the same shape and dtype as tensor1*tensor2.
 
     Raises:
         TypeError: If dtype of `tensor1`, `tensor2`, `input` is not Tensor.
@@ -452,36 +362,6 @@ def addcmul(input, tensor1, tensor2, value=1):
     return addcuml_(input, tensor1, tensor2, Tensor(value))
 
 
-def angle(input):
-    """
-    Returns the element-wise argument of a complex tensor.
-    The elements in input are considered to be complex numbers of the form a+bj, where a is the real part and b
-    is the imaginary part. The argument returned by this function is of the form :math:`atan2(b, a)`.
-
-    Args:
-        input (Tensor): The input tensor. types: complex64, complex128.
-
-    Returns:
-        Tensor, has the float32 or float64 type and the same shape as input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If the dtype of `input` is not one of: complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor([-1.5 + 7.8j, 3 + 5.75j], mindspore.complex64)
-        >>> output = ops.angle(input)
-        >>> print(output)
-        [1.7607845 1.0899091]
-    """
-    return angle_(input)
-
-
 def bincount(input, weights=None, minlength=0):
     """
     Counts the number of occurrences of each value in `input`.
@@ -494,6 +374,9 @@ def bincount(input, weights=None, minlength=0):
     Each value in the output Tensor marks the number of occurrences of that index in `input`.
     If 'weights' is specified, the output results are weighted, i.e ``out[n] += weight[i]`` instead of ``out[n] += 1``.
 
+    Note:
+        If `input` contains negative value, the result will be undefined.
+
     Args:
         input (Tensor): 1-d input tensor.
         weights (Tensor, optional): Weights, a tensor of the same shape as `input`. Default: ``None`` .
@@ -505,7 +388,6 @@ def bincount(input, weights=None, minlength=0):
     Raises:
         TypeError: If `input` or `weights` is not a tensor.
         ValueError: If `input` is not one-dimensional, or if `input` and `weights` do not have the same shape.
-        ValueError: If `input` contains negative value.
        ValueError: If `minlength` is a negative integer.
 
     Supported Platforms:
@@ -529,23 +411,21 @@
        raise TypeError(f"For math function 'bincount', 'minlength' must be int but got {type(minlength)}.")
     if rank_(input) != 1:
        raise ValueError(f"For math function 'bincount', 'input' should be one-dimensional tensor.")
-    if not (input >= 0).all():
-        raise ValueError(f"For 'bincount', elements of 'input' should be non-negative.")
     if input.shape[0] == 0:
-        return Tensor([])
+        return Tensor_([])
     if minlength < 0:
        raise ValueError(f"For 'bincount', 'minlength' should be >= 0 but got {minlength}.")
     if max(input.astype(mstype.float32)) > minlength - 1:
        length = (max(input.astype(mstype.float32)) + 1).astype(mstype.int32)
     else:
-        length = P.Cast()(minlength, mstype.int32)
+        length = cast_(minlength, mstype.int32)
     idx = F.arange(length).expand_dims(-1)
-    idx_mapping = equal(input, idx)
+    idx_mapping = equal(input, idx.astype(input.dtype))
     if weights is not None:
        if input.shape != weights.shape:
            raise ValueError('for bincount `input` and `weights` must have the same length')
        idx_mapping *= weights
-    return P.ReduceSum()(idx_mapping.astype(mstype.float32), 1).ravel()
+    return reduce_sum_(idx_mapping.astype(mstype.float32), 1).ravel()
 
 
 def bucketize(input, boundaries, *, right=False):
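The bincount hunks above change three things: negative inputs are no longer rejected in Python (the new Note declares the result undefined instead), the empty-input fast path returns Tensor_([]), and the index comparison now casts idx to input.dtype before calling equal. A minimal usage sketch under the new semantics (the expected value is derived from the documented counting rule, not captured from this build):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([2, 4, 1, 0, 0]), mindspore.int32)
    # Counts occurrences of 0..max(x): index 0 appears twice, index 3 never.
    print(ops.bincount(x))   # expected [2. 1. 1. 0. 1.]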
@@ -674,38 +554,6 @@ def argmin(input, axis=None, keepdims=False):
     return out
 
 
-def neg(input):
-    """
-    Returns a tensor with negative values of the input tensor element-wise.
-
-    .. math::
-
-        out_{i} = - input_{i}
-
-    Args:
-        input (Tensor): The input tensor with a dtype of Number.
-
-    Returns:
-        Tensor, has the same shape and dtype as input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
-        >>> output = ops.neg(input)
-        >>> print(output)
-        [-1. -2. 1. -2. 0. 3.5]
-    """
-    return neg_tensor(input)
-
-
 def negative(input):
     r"""
     Alias for :func:`mindspore.ops.neg` .
@@ -713,7 +561,7 @@ def negative(input):
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
     """
-    return neg_tensor(input)
+    return neg(input)
 
 
 def positive(input):
@@ -778,7 +626,7 @@ def permute(input, axis):
 
     Args:
         input (Tensor): Input Tensor.
-        axis (Union[tuple(int), int]): Permute will permute the tensor to the input `axis` order.
+        axis (tuple(int)): Permute will permute the tensor to the input `axis` order.
 
     Returns:
         Tensor, has the same dimension as input tensor, with `axis` suitably permuted.
@@ -807,135 +655,22 @@ def permute(input, axis):
     return transpose_(input, axis)
 
 
-def ceil(input):
+def subtract(input, other, *, alpha=1):
     r"""
-    Rounds a tensor up to the closest integer element-wise.
+    Performs the element-wise subtract of input tensors.
 
     .. math::
-
-        out_i = \lceil x_i \rceil = \lfloor x_i \rfloor + 1
+        output[i] = input[i] - alpha * other[i]
 
     Args:
-        input (Tensor): The input tensor with a dtype of float16 or float32.
+        input (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
+        other (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
+
+    Keyword Args:
+        alpha (Number): The multiplier for :math:`other`. Default: ``1`` .
 
     Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
-        >>> output = ops.ceil(x)
-        >>> print(output)
-        [ 2. 3. -1.]
-    """
-    return tensor_ceil(input)
-
-
-def round(input):
-    r"""
-    Returns half to even of a tensor element-wise.
-
-    .. math::
-
-        out_i \approx input_i
-
-    Args:
-        input (Tensor): The input tensor.
-
-    Returns:
-        Tensor, has the same shape and type as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
-        >>> output = ops.round(input)
-        >>> print(output)
-        [ 1. 2. 2. 2. -4.]
-    """
-    return tensor_round_(input)
-
-
-def sub(input, other):
-    r"""
-    Subtracts the second input tensor from the first input tensor element-wise.
-
-    .. math::
-
-        out_{i} = input_{i} - other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` are not number.Number or bool or Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> other = Tensor(np.array([4, 5, 6]), mindspore.int32)
-        >>> output = ops.sub(input, other)
-        >>> print(output)
-        [-3 -3 -3]
-    """
-    return tensor_sub(input, other)
-
-
-def subtract(input, other, *, alpha=1):
-    r"""
-    Performs the element-wise subtract of input tensors.
-
-    .. math::
-        output[i] = input[i] - alpha * other[i]
-
-    Args:
-        input (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
-        other (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
-
-    Keyword Args:
-        alpha (Number): The multiplier for :math:`other`. Default: ``1`` .
-
-    Returns:
-        Tensor, has the same shape and dtype as input tensors.
+        Tensor, has the same shape and dtype as input tensors.
 
     Raises:
         TypeError: `input` or `other` is neither Tensor nor number.Number.
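subtract now occupies the spot where ceil used to be defined; its contract is output[i] = input[i] - alpha * other[i]. A small worked sketch of the alpha keyword (outputs follow directly from that formula):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
    y = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
    # 4 - 2*1, 5 - 2*2, 6 - 2*3
    print(ops.subtract(x, y, alpha=2))   # [2. 1. 0.]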
@@ -967,55 +702,6 @@ def true_divide(dividend, divisor):
     return div(dividend, divisor, rounding_mode=None)
 
 
-def mul(input, other):
-    r"""
-    Multiplies two tensors element-wise.
-
-    .. math::
-
-        out_{i} = input_{i} * other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, number.Number, bool.
-        ValueError: If `input` and `other` are not the same shape.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
-        >>> output = ops.mul(x, y)
-        >>> print(output)
-        [ 4. 10. 18.]
-    """
-    return tensor_mul(input, other)
-
-
 def multiply(input, other):
     r"""
     Alias for :func:`mindspore.ops.asinh`.
@@ -1030,18 +716,17 @@ def div(input, other, *, rounding_mode=None):
     r"""
     Divides the first input tensor by the second input tensor in floating-point type element-wise.
 
+    .. math::
+
+        out_{i} = input_{i} / other_{i}
+
     Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
+        - When the two inputs have different shapes, they must be able to broadcast to a common shape.
         - The two inputs can not be bool type at the same time,
          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
        - The two inputs comply with the implicit type conversion rules to make the data types
          consistent.
 
-    .. math::
-
-        out_{i} = input_{i} / other_{i}
-
     Args:
         input (Union[Tensor, Number, bool]): The first input is a number or
            a bool or a tensor whose data type is number or bool.
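The div hunk above only moves the formula ahead of the Note; the rounding_mode keyword in the signature is untouched. A hedged sketch of the modes (None and "trunc" are grounded by the true_divide and fmod bodies elsewhere in this diff; "floor" is an assumption about this build, and the outputs are expected values, not captured ones):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([7.0, -7.0]), mindspore.float32)
    y = Tensor(np.array([2.0, 2.0]), mindspore.float32)
    print(ops.div(x, y))                          # [ 3.5 -3.5]  true division
    print(ops.div(x, y, rounding_mode="floor"))   # [ 3. -4.]    round toward -inf
    print(ops.div(x, y, rounding_mode="trunc"))   # [ 3. -3.]    round toward zero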
@@ -1162,60 +847,6 @@ def floor_div(x, y):
     return tensor_floordiv(x, y)
 
 
-def floor_divide(input, other):
-    """
-    Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
-
-    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are two tensors,
-    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. math::
-
-        out_{i} = \\text{floor}( \\frac{x_i}{y_i})
-
-    where the :math:`floor` indicates the Floor operator, for more details,
-    please refer to the :class:`mindspore.ops.Floor` operator.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> import numpy as np
-        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
-        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
-        >>> output = ops.floor_divide(x, y)
-        >>> print(output)
-        [ 0 1 -1]
-        >>> x = Tensor(2.0, mindspore.float32)
-        >>> y = Tensor(2.0, mindspore.float32)
-        >>> output = ops.floor_divide(x, y)
-        >>> print(output)
-        1.0
-    """
-    return tensor_floordiv(input, other)
-
-
 def fmod(input, other):
     """
     Computes the floating-point remainder of the division operation input/other.
@@ -1257,214 +888,6 @@ def fmod(input, other):
     return input - div(input, other, rounding_mode="trunc") * other
 
 
-def pow(input, exponent):
-    r"""
-    Calculates the `exponent` power of each element in `input`.
-
-    .. math::
-
-        out_{i} = input_{i} ^{ exponent_{i}}
-
-    .. note::
-        - Inputs of `input` and `exponent` comply with the implicit type conversion rules to make the
-          data types consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are two tensors,
-          dtypes of them cannot be bool at the same time, and the shapes of them can be broadcast.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        exponent (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `exponent` is not one of the following: Tensor, number.Number or bool.
-        ValueError: If the shape of `input` and `exponent` are different.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> y = 3.0
-        >>> output = ops.pow(x, y)
-        >>> print(output)
-        [ 1. 8. 64.]
-        >>>
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
-        >>> output = ops.pow(x, y)
-        >>> print(output)
-        [ 1. 16. 64.]
-    """
-    return tensor_pow(input, exponent)
-
-
-def floor_mod(x, y):
-    r"""
-    Computes the remainder of division element-wise. It's a flooring divide.
-    E.g. :math:`floor(x / y) * y + mod(x, y) = x`.
-
-    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are two tensors,
-    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. math::
-
-        out_{i} =\text{floor}(x_{i} // y_{i})
-
-    where the :math:`floor` indicates the Floor operator, for more details,
-    please refer to the :class:`mindspore.ops.Floor` operator.
-
-    .. warning::
-        - Data of input `y` should not be 0, or the maximum value of its dtype will be returned.
-        - When the elements of input exceeds 2048 , the accuracy of operator cannot guarantee the requirement of
-          double thousandths in the mini form.
-        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
-        - If shape is expressed as :math:`(D1, D2 ..., Dn)`, then D1\*D2... \*DN<=1000000,n<=8.
-
-    Args:
-        x (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        y (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision of the two inputs.
-
-    Raises:
-        TypeError: If neither `x` nor `y` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
-        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
-        >>> output = ops.floor_mod(x, y)
-        >>> print(output)
-        [2 1 2]
-    """
-    return tensor_mod(x, y)
-
-
-def exp(input):
-    r"""
-    Returns exponential of a tensor element-wise.
-
-    .. math::
-
-        out_i = e^{x_i}
-
-    Args:
-        input (Tensor): The input tensor.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.exp(x)
-        >>> print(output)
-        [ 2.718282 7.389056 54.598152]
-    """
-    return tensor_exp(input)
-
-
-def expm1(input):
-    r"""
-    Returns exponential then minus 1 of a tensor element-wise.
-
-    .. math::
-
-        out_i = e^{x_i} - 1
-
-    Args:
-        input (Tensor): The input Tensor.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.expm1(x)
-        >>> print(output)
-        [ 0. 1.718282 6.389056 53.598152]
-    """
-    return tensor_expm1(input)
-
-
-def log(input):
-    r"""
-    Returns the natural logarithm of a tensor element-wise.
-
-    .. math::
-        y_i = \log_e(x_i)
-
-    .. warning::
-        If the input value of operator Log is within the range (0, 0.01] or [0.95, 1.05], the output accuracy may
-        be affacted.
-
-    Args:
-        input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.log(x)
-        >>> print(output)
-        [0. 0.6931472 1.3862944]
-    """
-    return log_(input)
-
-
 def logdet(input):
     r"""
     Calculates log determinant of one or a batch of square matrices.
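fmod above is kept as input - div(input, other, rounding_mode="trunc") * other, while the removed floor_mod wrapper used flooring division; the two conventions differ on mixed signs. A sketch contrasting them (the floor_mod output is copied from the removed docstring example; the fmod output is derived from the trunc formula, not captured from this build):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([2, 4, -1]), mindspore.int32)
    y = Tensor(np.array([3, 3, 3]), mindspore.int32)
    print(ops.floor_mod(x, y))   # [2 1 2]     remainder follows the divisor's sign
    print(ops.fmod(x, y))        # [ 2  1 -1]  remainder follows the input's sign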
@@ -1494,40 +917,6 @@ def logdet(input):
     return log_(det_x)
 
 
-def floor(input):
-    r"""
-    Rounds a tensor down to the closest integer element-wise.
-
-    .. math::
-
-        out_i = \lfloor x_i \rfloor
-
-    Args:
-        input (Tensor): The input tensor, its data type must be float16,
-            float32 or float64.
-
-    Returns:
-        Tensor, has the same shape as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not in [float16, float32, float64].
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
-        >>> output = ops.floor(x)
-        >>> print(output)
-        [ 1. 2. -2.]
-    """
-    return floor_(input)
-
-
 def i0(input):
     r"""
     Alias for :func:`mindspore.ops.bessel_i0` .
@@ -1730,7 +1119,7 @@ def logical_not(input):
        out_{i} = \\neg input_{i}
 
     Args:
-        input (Tensor): The input tensor, the dtype must be bool.
+        input (Tensor): The input tensor.
 
     Returns:
         Tensor, the shape is the same as the `input`, and the dtype is bool.
@@ -1750,8 +1139,6 @@ def logical_not(input):
         >>> print(output)
         [False True False]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
     return logical_not_(input)
 
 
@@ -1761,17 +1148,17 @@ def logical_or(input, other):
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one bool.
-    When the inputs are two tensors, the shapes of them could be broadcast,
-    and the data types of them must be bool.
-    When the inputs are one tensor and one bool, the bool object could only be a constant,
-    and the data type of the tensor must be bool.
+
+    When the inputs are two tensors, the shapes of them could be broadcast.
+
+    When the inputs are one tensor and one bool, the bool object could only be a constant.
 
     .. math::
 
-        out_{i} = x_{i} \\vee y_{i}
+        out_{i} = input_{i} \\vee other_{i}
 
     Note:
-        LogicalOr supports broadcasting.
+        logical_or supports broadcasting.
 
     Args:
         input (Union[Tensor, bool]): The first input is a bool or a tensor whose data type can be implicitly
@@ -1782,9 +1169,6 @@ def logical_or(input, other):
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
 
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1813,10 +1197,6 @@ def logical_or(input, other):
         >>> print(output)
         [True True]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
-    if isinstance(other, Tensor) and other.dtype != mstype.bool_:
-        other = other.astype(mstype.bool_)
     return logical_or_(input, other)
 
 
@@ -1826,17 +1206,17 @@ def logical_and(input, other):
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one bool.
-    When the inputs are two tensors, the shapes of them could be broadcast,
-    and the data types of them must be bool.
-    When the inputs are one tensor and one bool, the bool object could only be a constant,
-    and the data type of the tensor must be bool.
+
+    When the inputs are two tensors, the shapes of them could be broadcast.
+
+    When the inputs are one tensor and one bool, the bool object could only be a constant.
 
     .. math::
 
        out_{i} = input_{i} \wedge other_{i}
 
     Note:
-        LogicalAnd supports broadcasting.
+        logical_and supports broadcasting.
 
     Args:
         input (Union[Tensor, bool]): The first input is a bool or a tensor whose data type can be implicitly
@@ -1878,10 +1258,6 @@ def logical_and(input, other):
         >>> print(output)
         [True False]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
-    if isinstance(other, Tensor) and other.dtype != mstype.bool_:
-        other = other.astype(mstype.bool_)
     return logical_and_(input, other)
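The logical_not/logical_or/logical_and hunks all make the same move: the Python-side astype(mstype.bool_) coercion is deleted and the bool-dtype requirement is dropped from the docstrings, so dtype handling now lives in the underlying primitives. The output dtype stays bool. A short sketch (outputs follow the truth tables in the surviving docstring examples):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    a = Tensor(np.array([True, False, True]))
    b = Tensor(np.array([True, True, False]))
    print(ops.logical_and(a, b))   # [ True False False]
    print(ops.logical_or(a, b))    # [ True  True  True]
    print(ops.logical_not(a))      # [False  True False]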
@@ -1983,131 +1359,26 @@ def sgn(input):
1983
1359
  Supported Platforms:
1984
1360
  ``Ascend`` ``GPU`` ``CPU``
1985
1361
 
1986
- Examples:
1987
- >>> import mindspore as ms
1988
- >>> import mindspore.ops as ops
1989
- >>> input = ms.Tensor([[3 + 4j, 7 - 24j, 0, 6 + 8j, 8], [15 + 20j, 7 - 24j, 0, 3 + 4j, 20]], dtype=ms.complex64)
1990
- >>> output = ops.sgn(input)
1991
- >>> print(output)
1992
- [[0.6 +0.8j 0.28-0.96j 0. +0.j 0.6 +0.8j 1. +0.j ]
1993
- [0.6 +0.8j 0.28-0.96j 0. +0.j 0.6 +0.8j 1. +0.j ]]
1994
- """
1995
- if not isinstance(input, Tensor):
1996
- raise TypeError(f"For sgn, the input must be a Tensor, but got {type(input)}")
1997
- if not ops.is_complex(input):
1998
- return ops.sign(input)
1999
- modulus = ops.ComplexAbs()(input)
2000
- zeros_mask = modulus.equal(0)
2001
- non_zero_modulus = ops.masked_fill(modulus, zeros_mask, 1)
2002
- zeros_modulus = ops.zeros_like(non_zero_modulus)
2003
- complex_modulus = ops.Complex()(non_zero_modulus, zeros_modulus)
2004
- res = input / complex_modulus
2005
- return res
2006
-
2007
-
2008
- def sin(input):
2009
- r"""
2010
- Computes sine of the input element-wise.
2011
-
2012
- .. math::
2013
-
2014
- out_i = \sin(input_i)
2015
-
2016
- Args:
2017
- input (Tensor): The shape of tensor is
2018
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2019
-
2020
- Returns:
2021
- Tensor, has the same shape and dtype as `input`.
2022
-
2023
- Raises:
2024
- TypeError: If `input` is not a Tensor.
2025
- TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
2026
-
2027
- Supported Platforms:
2028
- ``Ascend`` ``GPU`` ``CPU``
2029
-
2030
- Examples:
2031
- >>> import mindspore
2032
- >>> import numpy as np
2033
- >>> from mindspore import Tensor, ops
2034
- >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
2035
- >>> output = ops.sin(input)
2036
- >>> print(output)
2037
- [0.5810352 0.27635565 0.41687083 0.5810352]
2038
- """
2039
- return sin_(input)
2040
-
2041
-
2042
- def sinc(input):
2043
- r"""
2044
- Computes the normalized sinc of input.
2045
-
2046
- .. math::
2047
-
2048
- out_i = \begin{cases} \frac{sin(\pi input_i)}{\pi input_i} & input_i\neq 0\\
2049
- 1 & input_i=0 \end{cases}
2050
-
2051
- Args:
2052
- input (Tensor): The input Tensor.
2053
-
2054
- Returns:
2055
- Tensor, has the same shape as the `input`. The dtype of output is float32 when dtype of `input` is in
2056
- [int, bool]. Otherwise output has the same dtype as the `input`.
2057
-
2058
- Raises:
2059
- TypeError: If `input` is not a Tensor.
2060
-
2061
- Supported Platforms:
2062
- ``Ascend`` ``GPU`` ``CPU``
2063
-
2064
- Examples:
2065
- >>> import mindspore
2066
- >>> import numpy as np
2067
- >>> from mindspore import Tensor, ops
2068
- >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
2069
- >>> output = ops.sinc(input)
2070
- >>> print(output)
2071
- [0.47735003 0.8759357 0.7224278 0.47735003]
2072
- """
2073
- return sinc_(input)
2074
-
2075
-
2076
- def cos(input):
2077
- r"""
2078
- Computes cosine of input element-wise.
2079
-
2080
- .. math::
2081
- out_i = \cos(x_i)
2082
-
2083
- .. warning::
2084
- Supported dtypes are float16 and float32, and using float64 may
2085
- cause a problem of missing precision.
2086
-
2087
- Args:
2088
- input (Tensor): The shape of tensor is
2089
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2090
-
2091
- Returns:
2092
- Tensor, has the same shape and dtype as `input`.
2093
-
2094
- Raises:
2095
- TypeError: If `input` is not a Tensor.
2096
- TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
2097
-
2098
- Supported Platforms:
2099
- ``Ascend`` ``GPU`` ``CPU``
2100
-
2101
- Examples:
2102
- >>> import mindspore
2103
- >>> import numpy as np
2104
- >>> from mindspore import Tensor, ops
2105
- >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
2106
- >>> output = ops.cos(x)
1362
+ Examples:
1363
+ >>> import mindspore as ms
1364
+ >>> import mindspore.ops as ops
1365
+ >>> input = ms.Tensor([[3 + 4j, 7 - 24j, 0, 6 + 8j, 8], [15 + 20j, 7 - 24j, 0, 3 + 4j, 20]], dtype=ms.complex64)
1366
+ >>> output = ops.sgn(input)
2107
1367
  >>> print(output)
2108
- [0.971338 0.6748758 0.95233357 0.9959527]
1368
+ [[0.6 +0.8j 0.28-0.96j 0. +0.j 0.6 +0.8j 1. +0.j ]
1369
+ [0.6 +0.8j 0.28-0.96j 0. +0.j 0.6 +0.8j 1. +0.j ]]
2109
1370
  """
2110
- return cos_(input)
1371
+ if not isinstance(input, Tensor):
1372
+ raise TypeError(f"For sgn, the input must be a Tensor, but got {type(input)}")
1373
+ if not ops.is_complex(input):
1374
+ return ops.sign(input)
1375
+ modulus = ops.ComplexAbs()(input)
1376
+ zeros_mask = modulus.equal(0)
1377
+ non_zero_modulus = ops.masked_fill(modulus, zeros_mask, ops.cast(1, modulus.dtype))
1378
+ zeros_modulus = ops.zeros_like(non_zero_modulus)
1379
+ complex_modulus = ops.Complex()(non_zero_modulus, zeros_modulus)
1380
+ res = input / complex_modulus
1381
+ return res
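
Note: the reworked ``sgn`` divides a complex input by its modulus, first replacing zero moduli with a dtype-matched 1 (the new ``ops.cast(1, modulus.dtype)`` fill), so zero elements map to 0+0j instead of producing a 0/0 NaN. A minimal sketch of that zero-handling path, using only the API shown above:

>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> x = ms.Tensor([3 + 4j, 0], dtype=ms.complex64)
>>> out = ops.sgn(x)   # expected values: [0.6+0.8j, 0+0j]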
2111
1382
 
2112
1383
 
2113
1384
  def cosine_similarity(x1, x2, dim=1, eps=1e-08):
@@ -2222,7 +1493,7 @@ def cov(input, *, correction=1, fweights=None, aweights=None):
2222
1493
  Default: ``None`` .
2223
1494
 
2224
1495
  Returns:
2225
- Tensor, The covariance matrix Tensor of `input`.
1496
+ Tensor, the covariance matrix Tensor of `input`.
2226
1497
 
2227
1498
  Raises:
2228
1499
  ValueError: If the dimensions of input is greater than 2.
@@ -2309,9 +1580,6 @@ def t(input):
2309
1580
  Returns:
2310
1581
  Tensor, the transpose of `input` .
2311
1582
 
2312
- Raises:
2313
- ValueError: If the dimension of `input` is larger than 2.
2314
-
2315
1583
  Supported Platforms:
2316
1584
  ``Ascend`` ``GPU`` ``CPU``
2317
1585
 
@@ -2326,8 +1594,6 @@ def t(input):
2326
1594
  [2. 3.]
2327
1595
  [3. 4.]]
2328
1596
  """
2329
- if input.ndim > 2:
2330
- raise ValueError(f"For t(), the dimension of tensor should be less than 3, but got {input.ndim}.")
2331
1597
  if input.ndim == 2:
2332
1598
  return transpose_(input, (1, 0))
2333
1599
  return input
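
Note: with the rank check removed, ``t`` transposes only 2-D inputs and returns anything else unchanged, so higher-rank tensors no longer raise a ValueError here. A quick sketch of the new behavior:

>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> ops.t(ms.Tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])).shape   # 2-D: transposed
(3, 2)
>>> ops.t(ms.Tensor([1.0, 2.0, 3.0])).shape   # 1-D: returned as-is
(3,)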
@@ -2386,8 +1652,8 @@ def xlogy(input, other):
2386
1652
  Args:
2387
1653
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
2388
1654
  a bool or a tensor whose data type is
2389
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
2390
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
1655
+ `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ or
1656
+ `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_.
2391
1657
  other (Union[Tensor, number.Number, bool]): The second input is a number.Number or
2392
1658
  a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
2393
1659
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -2535,389 +1801,73 @@ def polar(abs, angle): # pylint: disable=redefined-outer-name
2535
1801
  return polar_(abs, angle)
2536
1802
 
2537
1803
 
2538
- def asin(input):
2539
- r"""
2540
- Computes arcsine of input tensors element-wise.
2541
-
2542
- .. math::
2543
-
2544
- out_i = \sin^{-1}(input_i)
2545
-
2546
- Args:
2547
- input (Tensor): The shape of tensor is
2548
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2549
-
2550
- Returns:
2551
- Tensor, has the same shape and dtype as `input`.
2552
-
2553
- Raises:
2554
- TypeError: If `input` is not a Tensor.
2555
- TypeError: If dtype of `input` is not float16, float32, float64, complex64, complex128.
2556
-
2557
- Supported Platforms:
2558
- ``Ascend`` ``GPU`` ``CPU``
2559
-
2560
- Examples:
2561
- >>> import mindspore
2562
- >>> import numpy as np
2563
- >>> from mindspore import Tensor, ops
2564
- >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
2565
- >>> output = ops.asin(x)
2566
- >>> print(output)
2567
- [0.8330704 0.04001067 0.30469266 0.5943858 ]
2568
- """
2569
- return asin_(input)
2570
-
2571
-
2572
- def acos(input):
2573
- r"""
2574
- Computes arccosine of input tensors element-wise.
2575
-
2576
- .. math::
2577
-
2578
- out_i = \cos^{-1}(input_i)
2579
-
2580
- Args:
2581
- input (Tensor): The shape of tensor is
2582
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2583
-
2584
- Returns:
2585
- Tensor, has the same shape and dtype as `input`.
2586
-
2587
- Raises:
2588
- TypeError: If `input` is not a Tensor.
2589
- TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
2590
-
2591
- Supported Platforms:
2592
- ``Ascend`` ``GPU`` ``CPU``
2593
-
2594
- Examples:
2595
- >>> import mindspore
2596
- >>> import numpy as np
2597
- >>> from mindspore import Tensor, ops
2598
- >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
2599
- >>> output = ops.acos(input)
2600
- >>> print(output)
2601
- [0.737726 1.5307857 1.2661036 0.9764105]
2602
- """
2603
- return acos_(input)
2604
-
2605
-
2606
1804
  def arccos(input):
2607
1805
  """
2608
1806
  Alias for :func:`mindspore.ops.acos` .
2609
-
2610
- Supported Platforms:
2611
- ``Ascend`` ``GPU`` ``CPU``
2612
- """
2613
- return acos(input)
2614
-
2615
-
2616
- def atan(input):
2617
- r"""
2618
- Computes the trigonometric inverse tangent of the input element-wise.
2619
-
2620
- .. math::
2621
-
2622
- out_i = \tan^{-1}(input_i)
2623
-
2624
- Args:
2625
- input (Tensor): The shape of tensor is
2626
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2627
- The data type should be one of the following types: float16, float32.
2628
-
2629
- Returns:
2630
- A Tensor, has the same type as the input.
2631
-
2632
- Raises:
2633
- TypeError: If `input` is not a Tensor.
2634
- TypeError: If dtype of `input` is not float16 or float32.
2635
-
2636
- Supported Platforms:
2637
- ``Ascend`` ``GPU`` ``CPU``
2638
-
2639
- Examples:
2640
- >>> import mindspore
2641
- >>> import numpy as np
2642
- >>> from mindspore import Tensor, ops
2643
- >>> x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
2644
- >>> output = ops.atan(x)
2645
- >>> print(output)
2646
- [0.7853982 0. ]
2647
- """
2648
- return atan_(input)
2649
-
2650
-
2651
- def sinh(input):
2652
- r"""
2653
- Computes hyperbolic sine of the input element-wise.
2654
-
2655
- .. math::
2656
-
2657
- out_i = \sinh(input_i)
2658
-
2659
- Args:
2660
- input (Tensor): The input tensor of hyperbolic sine function.
2661
-
2662
- Returns:
2663
- Tensor, has the same shape as `input`.
2664
-
2665
- Raises:
2666
- TypeError: If `input` is not a Tensor.
2667
-
2668
- Supported Platforms:
2669
- ``Ascend`` ``GPU`` ``CPU``
2670
-
2671
- Examples:
2672
- >>> import mindspore
2673
- >>> import numpy as np
2674
- >>> from mindspore import Tensor, ops
2675
- >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
2676
- >>> output = ops.sinh(input)
2677
- >>> print(output)
2678
- [0.6604918 0.28367308 0.44337422 0.6604918 ]
2679
- """
2680
- return sinh_(input)
2681
-
2682
-
2683
- def cosh(input):
2684
- r"""
2685
- Computes hyperbolic cosine of input element-wise.
2686
-
2687
- .. math::
2688
-
2689
- out_i = \cosh(input_i)
2690
-
2691
- Args:
2692
- input (Tensor): The input tensor of hyperbolic cosine function, its data type
2693
- must be float16, float32, float64, complex64 or complex128.
2694
-
2695
- Returns:
2696
- Tensor, has the same shape as `input`.
2697
-
2698
- Raises:
2699
- TypeError: If the dtype of `input` is not one of the following types:
2700
- float16, float32, float64, complex64, complex128.
2701
- TypeError: If `input` is not a Tensor.
2702
-
2703
- Supported Platforms:
2704
- ``Ascend`` ``GPU`` ``CPU``
2705
-
2706
- Examples:
2707
- >>> import mindspore
2708
- >>> import numpy as np
2709
- >>> from mindspore import Tensor, ops
2710
- >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
2711
- >>> output = ops.cosh(x)
2712
- >>> print(output)
2713
- [1.0289385 1.364684 1.048436 1.0040528]
2714
- >>> x = Tensor(2.1, mindspore.float32)
2715
- >>> output = ops.cosh(x)
2716
- >>> print(output)
2717
- 4.144313
2718
- """
2719
- return cosh_(input)
2720
-
2721
-
2722
- def tanh(input):
2723
- r"""
2724
- Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
2725
-
2726
- .. math::
2727
-
2728
- tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
2729
-
2730
- where :math:`x_i` is an element of the input Tensor.
2731
-
2732
- Args:
2733
- input (Tensor): Input of Tanh.
2734
-
2735
- Returns:
2736
- Tensor, with the same type and shape as the `input`.
2737
-
2738
- Raises:
2739
- TypeError: If `input` is not a Tensor.
2740
-
2741
- Supported Platforms:
2742
- ``Ascend`` ``GPU`` ``CPU``
2743
-
2744
- Examples:
2745
- >>> import mindspore
2746
- >>> import numpy as np
2747
- >>> from mindspore import Tensor, ops
2748
- >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
2749
- >>> output = ops.tanh(input)
2750
- >>> print(output)
2751
- [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
2752
- """
2753
- return tanh_(input)
2754
-
2755
-
2756
- def asinh(input):
2757
- r"""
2758
- Computes inverse hyperbolic sine of the input element-wise.
2759
-
2760
- .. math::
2761
-
2762
- out_i = \sinh^{-1}(input_i)
2763
-
2764
- Args:
2765
- input (Tensor): The input tensor of inverse hyperbolic sine function.
2766
-
2767
- Returns:
2768
- Tensor, has the same shape and type as `input`.
2769
-
2770
- Raises:
2771
- TypeError: If `input` is not a Tensor.
2772
-
2773
- Supported Platforms:
2774
- ``Ascend`` ``GPU`` ``CPU``
2775
-
2776
- Examples:
2777
- >>> import mindspore
2778
- >>> import numpy as np
2779
- >>> from mindspore import Tensor, ops
2780
- >>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
2781
- >>> output = ops.asinh(input)
2782
- >>> print(output)
2783
- [-2.3124382 1.1947632 1.8184465 5.298342 ]
2784
- """
2785
- return asinh_(input)
2786
-
2787
-
2788
- def arcsinh(input):
2789
- r"""
2790
- Alias for :func:`mindspore.ops.asinh`.
2791
-
2792
- Supported Platforms:
2793
- ``Ascend`` ``GPU`` ``CPU``
2794
- """
2795
- return asinh(input)
2796
-
2797
-
2798
- def arctanh(input):
2799
- r"""
2800
- Alias for :func:`mindspore.ops.atanh`.
2801
-
2802
- Supported Platforms:
2803
- ``Ascend`` ``GPU`` ``CPU``
2804
- """
2805
- return atanh(input)
2806
-
2807
-
2808
- def acosh(input):
2809
- r"""
2810
- Computes inverse hyperbolic cosine of the inputs element-wise.
2811
-
2812
- .. math::
2813
-
2814
- out_i = \cosh^{-1}(input_i)
2815
-
2816
- .. warning::
2817
- Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
2818
- Input range is [1, inf].
2819
-
2820
- Args:
2821
- input (Tensor): The input tensor of inverse hyperbolic cosine function.
2822
-
2823
- Returns:
2824
- Tensor, has the same shape and type as `input`.
2825
-
2826
- Raises:
2827
- TypeError: If `input` is not a Tensor.
2828
-
2829
- Supported Platforms:
2830
- ``Ascend`` ``GPU`` ``CPU``
2831
-
2832
- Examples:
2833
- >>> import mindspore
2834
- >>> import numpy as np
2835
- >>> from mindspore import Tensor, ops
2836
- >>> x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
2837
- >>> output = ops.acosh(x)
2838
- >>> print(output)
2839
- [0. 0.9624237 1.7627472 5.298292 ]
1807
+
1808
+ Supported Platforms:
1809
+ ``Ascend`` ``GPU`` ``CPU``
2840
1810
  """
2841
- return acosh_(input)
1811
+ return acos(input)
2842
1812
 
2843
1813
 
2844
- def atanh(input):
1814
+ def tanh(input):
2845
1815
  r"""
2846
- Computes inverse hyperbolic tangent of the input element-wise.
1816
+ Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
2847
1817
 
2848
1818
  .. math::
2849
1819
 
2850
- out_i = \tanh^{-1}(input_{i})
1820
+ tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
1821
+
1822
+ where :math:`x_i` is an element of the input Tensor.
1823
+
1824
+ Tanh Activation Function Graph:
1825
+
1826
+ .. image:: ../images/Tanh.png
1827
+ :align: center
2851
1828
 
2852
1829
  Args:
2853
- input (Tensor): The shape of tensor is
2854
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2855
- The data type should be one of the following types: float16, float32.
1830
+ input (Tensor): Input of Tanh.
2856
1831
 
2857
1832
  Returns:
2858
- A Tensor, has the same type as the input.
1833
+ Tensor, with the same type and shape as the `input`.
2859
1834
 
2860
1835
  Raises:
2861
1836
  TypeError: If `input` is not a Tensor.
2862
- TypeError: If dtype of `input` is not float16 or float32.
2863
1837
 
2864
1838
  Supported Platforms:
2865
- ``Ascend`` ``GPU`` ``CPU``
1839
+ ``Ascend`` ``GPU`` ``CPU``
2866
1840
 
2867
1841
  Examples:
2868
1842
  >>> import mindspore
2869
1843
  >>> import numpy as np
2870
1844
  >>> from mindspore import Tensor, ops
2871
- >>> input = Tensor(np.array([0, -0.5]), mindspore.float32)
2872
- >>> output = ops.atanh(input)
1845
+ >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
1846
+ >>> output = ops.tanh(input)
2873
1847
  >>> print(output)
2874
- [ 0. -0.54930615]
1848
+ [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
2875
1849
  """
2876
- return atanh_(input)
1850
+ return tanh_(input)
2877
1851
 
2878
1852
 
2879
- def atan2(input, other):
1853
+ def arcsinh(input):
2880
1854
  r"""
2881
- Returns arctangent of input/other element-wise.
2882
-
2883
- It returns :math:`\theta\ \in\ [-\pi, \pi]`
2884
- such that :math:`input = r*\sin(\theta), other = r*\cos(\theta)`, where :math:`r = \sqrt{input^2 + other^2}`.
2885
-
2886
- Note:
2887
- - Arg `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
2888
- If they have different data types, the lower precision data type will be converted to relatively the
2889
- highest precision data type.
2890
- - At least one of the `input` and `other` args is Tensor.
1855
+ Alias for :func:`mindspore.ops.asinh`.
2891
1856
 
2892
- Args:
2893
- input (Tensor): The input tensor with shape
2894
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2895
- The data type should be one of the following types: float16, float32, float64
2896
- other (Tensor): The input tensor. It has the same shape with `input` or
2897
- its shape is able to broadcast with `input`.
1857
+ Supported Platforms:
1858
+ ``Ascend`` ``GPU`` ``CPU``
1859
+ """
1860
+ return asinh(input)
2898
1861
 
2899
- Returns:
2900
- Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
2901
1862
 
2902
- Raises:
2903
- TypeError: If `input` or `other` is not a Tensor.
2904
- RuntimeError: If the data type of `input` and `other` conversion of Parameter is required
2905
- when data type conversion of Parameter is not supported.
1863
+ def arctanh(input):
1864
+ r"""
1865
+ Alias for :func:`mindspore.ops.atanh`.
2906
1866
 
2907
1867
  Supported Platforms:
2908
1868
  ``Ascend`` ``GPU`` ``CPU``
2909
-
2910
- Examples:
2911
- >>> import mindspore
2912
- >>> import numpy as np
2913
- >>> from mindspore import Tensor, ops
2914
- >>> input = Tensor(np.array([0, 1]), mindspore.float32)
2915
- >>> other = Tensor(np.array([1, 1]), mindspore.float32)
2916
- >>> output = ops.atan2(input, other)
2917
- >>> print(output)
2918
- [0. 0.7853982]
2919
1869
  """
2920
- return atan2_(input, other)
1870
+ return atanh(input)
2921
1871
 
2922
1872
 
2923
1873
  def bitwise_and(input, other):
@@ -3145,51 +2095,6 @@ def bitwise_right_shift(input, other):
3145
2095
  return rs(input, other)
3146
2096
 
3147
2097
 
3148
- def nextafter(input, other):
3149
- """
3150
- Returns the next representable floating-point value after `input` towards `other` element-wise.
3151
-
3152
- Say there are two float32 numbers :math:`a`, :math:`b`, and let the
3153
- representable delta of float32 datatype is :math:`eps`. If :math:`a < b`,
3154
- then the next representable of :math:`a` towards :math:`b` is :math:`a+eps`,
3155
- the next representable of :math:`b` towards :math:`a` is :math:`b-eps`.
3156
-
3157
- .. math::
3158
-
3159
- out_{i} = nextafter({input_{i}, other_{i}})
3160
-
3161
- Args:
3162
- input (Tensor): The first input tensor. The shape of tensor is :math:`(N,*)` where :math:`*` means,
3163
- any number of additional dimensions. Must be one of the following types: float32, float64.
3164
-
3165
- other (Tensor): The second input tensor. The shape of tensor is :math:`(N,*)` where :math:`*` means,
3166
- any number of additional dimensions. Must be one of the following types: float32, float64.
3167
-
3168
- Returns:
3169
- Tensor, has the same shape and data type as `input`.
3170
-
3171
- Raises:
3172
- TypeError: If neither `input` nor `other` is a Tensor.
3173
- TypeError: If the dtype of `input` and `other` is not one of: float32, float64.
3174
- TypeError: If the dtypes of `input` and `other` are not same.
3175
- ValueError: If `input`'s shape is not the same as `other`.
3176
-
3177
- Supported Platforms:
3178
- ``Ascend`` ``GPU`` ``CPU``
3179
-
3180
- Examples:
3181
- >>> import mindspore
3182
- >>> import numpy as np
3183
- >>> from mindspore import Tensor, ops
3184
- >>> input_ = Tensor(np.asarray([0.0]), mindspore.float32)
3185
- >>> other_ = Tensor(np.asarray([0.1]), mindspore.float32)
3186
- >>> output_ = ops.nextafter(input_, other_)
3187
- >>> print(output_)
3188
- [1.e-45]
3189
- """
3190
- return nextafter_(input, other)
3191
-
3192
-
3193
2098
  def inv(x):
3194
2099
  r"""
3195
2100
  Computes Reciprocal of input tensor element-wise.
@@ -3285,78 +2190,6 @@ def invert(x):
3285
2190
  return invert_(x)
3286
2191
 
3287
2192
 
3288
- def erf(input):
3289
- r"""
3290
- Computes the Gauss error function of `input` element-wise.
3291
-
3292
- .. math::
3293
-
3294
- erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
3295
-
3296
- Args:
3297
- input (Tensor): The input tensor of Gaussian error function. Supported dtypes:
3298
-
3299
- - Ascend: float16, float32.
3300
- - GPU/CPU: float16, float32, float64.
3301
-
3302
- Returns:
3303
- Tensor, has the same shape and dtype as the `input`.
3304
-
3305
- Raises:
3306
- TypeError: If `input` is not a Tensor.
3307
- TypeError: If dtype of `input` is neither float16 float32 or float64.
3308
-
3309
- Supported Platforms:
3310
- ``Ascend`` ``GPU`` ``CPU``
3311
-
3312
- Examples:
3313
- >>> import mindspore
3314
- >>> import numpy as np
3315
- >>> from mindspore import Tensor, ops
3316
- >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
3317
- >>> output = ops.erf(x)
3318
- >>> print(output)
3319
- [-0.8427168 0. 0.8427168 0.99530876 0.99997765]
3320
- """
3321
- return erf_(input)
3322
-
3323
-
3324
- def erfc(input):
3325
- r"""
3326
- Computes the complementary error function of `input` element-wise.
3327
-
3328
- .. math::
3329
-
3330
- erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
3331
-
3332
- Args:
3333
- input (Tensor): The input tensor. Supported dtypes:
3334
-
3335
- - Ascend: float16, float32.
3336
- - GPU/CPU: float16, float32, float64.
3337
-
3338
- Returns:
3339
- Tensor, has the same shape and dtype as `input`.
3340
-
3341
- Raises:
3342
- TypeError: If `input` is not a Tensor.
3343
- TypeError: If dtype of `input` is not float16, float32 or float64.
3344
-
3345
- Supported Platforms:
3346
- ``Ascend`` ``GPU`` ``CPU``
3347
-
3348
- Examples:
3349
- >>> import mindspore
3350
- >>> import numpy as np
3351
- >>> from mindspore import Tensor, ops
3352
- >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
3353
- >>> output = ops.erfc(x)
3354
- >>> print(output)
3355
- [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
3356
- """
3357
- return erfc_(input)
3358
-
3359
-
3360
2193
  def bessel_j0(x):
3361
2194
  r"""
3362
2195
  Computes Bessel function of the first kind, order 0 element-wise.
@@ -3792,46 +2625,6 @@ def log_matrix_determinant(input):
3792
2625
  return log_matrix_determinant_(input)
3793
2626
 
3794
2627
 
3795
- def matrix_exp(input):
3796
- r"""
3797
- Computes the exponential of a single or a batch of square matrices.
3798
-
3799
- .. math::
3800
-
3801
- matrix\_exp(x) = \sum_{k=0}^{\infty} \frac{1}{k !} x^{k} \in \mathbb{K}^{n \times n}
3802
-
3803
- where :math:`x` corresponds to `input` .
3804
-
3805
- Args:
3806
- input (Tensor): The shape of tensor is :math:`(*, n, n)` where * is zero or more batch dimensions.
3807
- Must be one of the following types: float16, float32, float64, complex64, complex128.
3808
-
3809
- Returns:
3810
- Tensor, has the same shape and dtype as the `input`.
3811
-
3812
- Raises:
3813
- TypeError: If `input` is not a Tensor.
3814
- TypeError: If the dtype of `input` is not one of the following dtype:
3815
- float16, float32, float64, complex64, complex128.
3816
- ValueError: If the rank of `input` is less than 2.
3817
- ValueError: If the size of last two dimensions of `input` are not equal.
3818
-
3819
- Supported Platforms:
3820
-
3821
-
3822
- Examples:
3823
- >>> import mindspore
3824
- >>> import numpy as np
3825
- >>> from mindspore import Tensor, ops
3826
- >>> input = Tensor(np.array([[1, 2], [0, 1]]), mindspore.float32)
3827
- >>> output = ops.matrix_exp(input)
3828
- >>> print(output)
3829
- [[2.7182817 5.436563 ]
3830
- [0. 2.7182817]]
3831
- """
3832
- return matrix_exp_(input)
3833
-
3834
-
3835
2628
  def lu_solve(b, LU_data, LU_pivots):
3836
2629
  r"""
3837
2630
  Computes the solution y to the system of linear equations :math:`Ay = b` ,
@@ -3879,7 +2672,6 @@ def lu_solve(b, LU_data, LU_pivots):
3879
2672
  [-1.4000001]
3880
2673
  [ 0.6 ]]
3881
2674
  """
3882
- lu_solve_ = _get_cache_prim(LuSolve)()
3883
2675
  out = lu_solve_(b, LU_data, LU_pivots)
3884
2676
  return out
3885
2677
 
@@ -3973,53 +2765,12 @@ def slogdet(input):
3973
2765
  return log_matrix_determinant_(input)
3974
2766
 
3975
2767
 
3976
- def trace(input):
3977
- """
3978
- Returns a new tensor that is the sum of the `input` main trace.
3979
-
3980
- Note:
3981
- Input must be matrix, and complex number is not supported at present.
3982
-
3983
- Args:
3984
- input (Tensor): A matrix to be calculated. The matrix must be two dimensional.
3985
-
3986
- Returns:
3987
- Tensor, with the same data type as input `input`, and size equals to 1.
3988
-
3989
- Raises:
3990
- TypeError: If `input` is not a Tensor.
3991
- ValueError: If the dimension of `input` is not equal to 2.
3992
-
3993
- Supported Platforms:
3994
- ``Ascend`` ``GPU`` ``CPU``
3995
-
3996
- Examples:
3997
- >>> import mindspore
3998
- >>> import numpy as np
3999
- >>> from mindspore import Tensor, ops
4000
- >>> input = Tensor(np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]]), mindspore.float32)
4001
- >>> output = ops.trace(input)
4002
- >>> print(output)
4003
- 42.0
4004
- >>> input = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
4005
- >>> output = ops.trace(input)
4006
- >>> print(output)
4007
- 18.0
4008
- >>> input = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
4009
- >>> output = ops.trace(input)
4010
- >>> print(output)
4011
- 24.0
4012
- """
4013
- return trace_(input)
4014
-
4015
-
4016
2768
  def truncate_div(x, y):
4017
2769
  """
4018
2770
  Divides the first input tensor by the second input tensor element-wise and rounds the results
4019
2771
  of division towards zero. Equivalent to C-style integer division.
4020
2772
 
4021
2773
  Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
4022
- The inputs must be two tensors or one tensor and one scalar.
4023
2774
  When the inputs are two tensors,
4024
2775
  dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
4025
2776
  When the inputs are one tensor and one scalar,
@@ -4062,7 +2813,6 @@ def truncate_mod(x, y):
4062
2813
  Returns the remainder of division element-wise.
4063
2814
 
4064
2815
  Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
4065
- The inputs must be two tensors or one tensor and one scalar.
4066
2816
  When the inputs are two tensors,
4067
2817
  dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
4068
2818
  When the inputs are one tensor and one scalar,
@@ -4188,8 +2938,7 @@ def ldexp(x, other):
4188
2938
 
4189
2939
  def logit(input, eps=None):
4190
2940
  r"""
4191
- Calculate the logit of a tensor element-wise. When eps is not None, element in `input` is clamped to [eps, 1-eps].
4192
- When eps is None, input `input` is not clamped.
2941
+ Calculate the logit of a tensor element-wise.
4193
2942
 
4194
2943
  .. math::
4195
2944
  \begin{align}
@@ -4205,7 +2954,7 @@ def logit(input, eps=None):
4205
2954
  Args:
4206
2955
  input (Tensor): The input tensor of type float16, float32 or float64.
4207
2956
  eps (float, optional): The epsilon. If eps is not None, the input clamp bound is defined as [eps, 1-eps],
4208
- otherwise, the input `input` is not clamped. Default: ``None`` .
2957
+ otherwise, the `input` is not clamped. Default: ``None`` .
4209
2958
 
4210
2959
  Returns:
4211
2960
  Tensor, with the same shape and dtype as the `input`.
@@ -4228,59 +2977,14 @@ def logit(input, eps=None):
4228
2977
  """
4229
2978
  if eps is None:
4230
2979
  eps = -1.0
4231
- logit_ = _get_cache_prim(Logit)(eps)
2980
+ logit_ = _get_cache_prim(P.Logit)(eps)
4232
2981
  return logit_(input)
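
Note: ``eps`` is forwarded as the primitive's clamp bound: when set, inputs are clamped to ``[eps, 1 - eps]`` before the log-odds transform ln(x / (1 - x)), so endpoint values stay finite; ``eps=None`` becomes -1.0, which the primitive treats as "no clamping". A small sketch:

>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> x = ms.Tensor([0.0, 0.5, 1.0], ms.float32)
>>> ops.logit(x, eps=1e-5)   # endpoints clamp to 1e-5 and 1 - 1e-5, all finite
>>> ops.logit(x)             # unclamped: [-inf, 0., inf]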
4233
2982
 
4234
-
4235
2983
  #####################################
4236
2984
  # Comparison Operation Functions.
4237
2985
  #####################################
4238
2986
 
4239
2987
 
4240
- def less(input, other):
4241
- r"""
4242
- Computes the boolean value of :math:`input < other` element-wise.
4243
-
4244
- Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
4245
- The inputs must be two tensors or one tensor and one scalar.
4246
- When the inputs are one tensor and one scalar,
4247
- the scalar could only be a constant.
4248
-
4249
- .. math::
4250
-
4251
- out_{i} =\begin{cases}
4252
- & \text{True, if } input_{i}<other_{i} \\
4253
- & \text{False, if } input_{i}>=other_{i}
4254
- \end{cases}
4255
-
4256
- Args:
4257
- input (Union[Tensor, Number, bool]): The first input is a number or
4258
- a bool or a tensor whose data type is number or bool.
4259
- other (Union[Tensor, Number, bool]): The second input is a number or
4260
- a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
4261
-
4262
- Returns:
4263
- Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
4264
-
4265
- Raises:
4266
- TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
4267
-
4268
- Supported Platforms:
4269
- ``Ascend`` ``GPU`` ``CPU``
4270
-
4271
- Examples:
4272
- >>> import mindspore
4273
- >>> import numpy as np
4274
- >>> from mindspore import Tensor, ops
4275
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
4276
- >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
4277
- >>> output = ops.less(x, y)
4278
- >>> print(output)
4279
- [False False True]
4280
- """
4281
- return tensor_lt(input, other)
4282
-
4283
-
4284
2988
  def lt(input, other):
4285
2989
  """
4286
2990
  Alias for :func:`mindspore.ops.less` .
@@ -4311,8 +3015,8 @@ def le(input, other):
4311
3015
  Args:
4312
3016
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
4313
3017
  a bool or a tensor whose data type is
4314
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
4315
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
3018
+ `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ or
3019
+ `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_.
4316
3020
  other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
4317
3021
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
4318
3022
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -4320,9 +3024,6 @@ def le(input, other):
4320
3024
  Returns:
4321
3025
  Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4322
3026
 
4323
- Raises:
4324
- TypeError: If neither `input` nor `other` is a Tensor.
4325
-
4326
3027
  Supported Platforms:
4327
3028
  ``Ascend`` ``GPU`` ``CPU``
4328
3029
 
@@ -4364,8 +3065,8 @@ def gt(input, other):
4364
3065
  Args:
4365
3066
  input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
4366
3067
  a bool or a tensor whose data type is
4367
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
4368
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
3068
+ `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ or
3069
+ `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ .
4369
3070
  other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
4370
3071
  the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
4371
3072
  When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -4410,61 +3111,15 @@ def ge(input, other):
4410
3111
  .. math::
4411
3112
 
4412
3113
  out_{i} =\begin{cases}
4413
- & \text{True, if } input_{i}>=other_{i} \\
4414
- & \text{False, if } input_{i}<other_{i}
4415
- \end{cases}
4416
-
4417
- Args:
4418
- input (Union[Tensor, Number, bool]): The first input is a number or
4419
- a bool or a tensor whose data type is number or bool.
4420
- other (Union[Tensor, Number, bool]): The second input is a number or
4421
- a bool when the first input is a tensor or a tensor whose data type is number or bool.
4422
-
4423
- Returns:
4424
- Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
4425
-
4426
- Raises:
4427
- TypeError: If neither `input` nor `other` is a Tensor.
4428
-
4429
- Supported Platforms:
4430
- ``Ascend`` ``GPU`` ``CPU``
4431
-
4432
- Examples:
4433
- >>> import mindspore
4434
- >>> import numpy as np
4435
- >>> from mindspore import Tensor, ops
4436
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
4437
- >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
4438
- >>> output = ops.ge(x, y)
4439
- >>> print(output)
4440
- [True True False]
4441
- """
4442
- return tensor_ge(input, other)
4443
-
4444
-
4445
- def eq(input, other):
4446
- r"""
4447
- Computes the equivalence between two tensors element-wise.
4448
-
4449
- The second argument can be a number or a tensor whose shape is broadcastable with the first argument and vise versa.
4450
-
4451
- .. math::
4452
-
4453
- out_{i} =\begin{cases}
4454
- & \text{True, if } input_{i} = other_{i} \\
4455
- & \text{False, if } input_{i} \ne other_{i}
3114
+ & \text{True, if } input_{i}>=other_{i} \\
3115
+ & \text{False, if } input_{i}<other_{i}
4456
3116
  \end{cases}
4457
3117
 
4458
- Note:
4459
- - `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
4460
- - The shapes of the inputs can be broadcasted to each other.
4461
-
4462
3118
  Args:
4463
- input (Union[Tensor, Number]): The first input is a number or
4464
- a tensor whose data type is number.
4465
- other (Union[Tensor, Number]): The second input is a number when the first input is a tensor.
4466
- The data type is the same as the first input. If the first input is a number,
4467
- the second input should be a tensor.
3119
+ input (Union[Tensor, Number, bool]): The first input is a number or
3120
+ a bool or a tensor whose data type is number or bool.
3121
+ other (Union[Tensor, Number, bool]): The second input is a number or
3122
+ a bool when the first input is a tensor or a tensor whose data type is number or bool.
4468
3123
 
4469
3124
  Returns:
4470
3125
  Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
@@ -4477,23 +3132,18 @@ def eq(input, other):
4477
3132
 
4478
3133
  Examples:
4479
3134
  >>> import mindspore
3135
+ >>> import numpy as np
4480
3136
  >>> from mindspore import Tensor, ops
4481
- >>> # case 1: The shape of two inputs are different
4482
- >>> x = Tensor([1, 2, 3], mindspore.float32)
4483
- >>> output = ops.eq(x, 2.0)
4484
- >>> print(output)
4485
- [False True False]
4486
- >>> # case 2: The shape of two inputs are the same
4487
- >>> x = Tensor([1, 2, 3], mindspore.int32)
4488
- >>> y = Tensor([1, 2, 4], mindspore.int32)
4489
- >>> output = ops.eq(x, y)
3137
+ >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3138
+ >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3139
+ >>> output = ops.ge(x, y)
4490
3140
  >>> print(output)
4491
- [ True True False]
3141
+ [True True False]
4492
3142
  """
4493
- return equal_(input, other)
3143
+ return tensor_ge(input, other)
4494
3144
 
4495
3145
 
4496
- def equal(input, other):
3146
+ def eq(input, other):
4497
3147
  r"""
4498
3148
  Computes the equivalence between two tensors element-wise.
4499
3149
 
@@ -4512,7 +3162,7 @@ def equal(input, other):
4512
3162
 
4513
3163
  Args:
4514
3164
  input (Union[Tensor, Number]): The first input is a number or
4515
- a tensor whose data type is number.query.dtye
3165
+ a tensor whose data type is number.
4516
3166
  other (Union[Tensor, Number]): The second input is a number when the first input is a tensor.
4517
3167
  The data type is the same as the first input. If the first input is a number,
4518
3168
  the second input should be a tensor.
@@ -4531,17 +3181,17 @@ def equal(input, other):
4531
3181
  >>> from mindspore import Tensor, ops
4532
3182
  >>> # case 1: The shape of two inputs are different
4533
3183
  >>> x = Tensor([1, 2, 3], mindspore.float32)
4534
- >>> output = ops.equal(x, 2.0)
3184
+ >>> output = ops.eq(x, 2.0)
4535
3185
  >>> print(output)
4536
3186
  [False True False]
4537
3187
  >>> # case 2: The shape of two inputs are the same
4538
3188
  >>> x = Tensor([1, 2, 3], mindspore.int32)
4539
3189
  >>> y = Tensor([1, 2, 4], mindspore.int32)
4540
- >>> output = ops.equal(x, y)
3190
+ >>> output = ops.eq(x, y)
4541
3191
  >>> print(output)
4542
3192
  [ True True False]
4543
3193
  """
4544
- return equal_(input, other)
3194
+ return equal(input, other)
4545
3195
 
4546
3196
 
4547
3197
  def ne(input, other):
@@ -4551,7 +3201,6 @@ def ne(input, other):
4551
3201
  Note:
4552
3202
  - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
4553
3203
  consistent.
4554
- - The inputs must be two tensors or one tensor and one scalar.
4555
3204
  - When the inputs are two tensors, the shapes of them could be broadcast.
4556
3205
  - When the inputs are one tensor and one scalar, the scalar could only be a constant.
4557
3206
  - Broadcasting is supported.
@@ -4574,7 +3223,6 @@ def ne(input, other):
4574
3223
 
4575
3224
  Raises:
4576
3225
  TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
4577
- TypeError: If neither `input` nor `other` is a Tensor.
4578
3226
 
4579
3227
  Supported Platforms:
4580
3228
  ``Ascend`` ``GPU`` ``CPU``
@@ -4593,17 +3241,7 @@ def ne(input, other):
4593
3241
  >>> print(output)
4594
3242
  [False False True]
4595
3243
  """
4596
- return not_equal_(input, other)
4597
-
4598
-
4599
- def not_equal(input, other):
4600
- r"""
4601
- Alias for :func:`mindspore.ops.ne` .
4602
-
4603
- Supported Platforms:
4604
- ``Ascend`` ``GPU`` ``CPU``
4605
- """
4606
- return ne(input, other)
3244
+ return not_equal(input, other)
4607
3245
 
4608
3246
 
4609
3247
  def approximate_equal(x, y, tolerance=1e-5):
@@ -4651,7 +3289,7 @@ def approximate_equal(x, y, tolerance=1e-5):
4651
3289
  >>> print(output)
4652
3290
  [ True False False]
4653
3291
  """
4654
- return P.ApproximateEqual(tolerance)(x, y)
3292
+ return _get_cache_prim(P.ApproximateEqual)(tolerance)(x, y)
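
Note: routing construction through ``_get_cache_prim`` lets repeated calls with the same ``tolerance`` reuse one cached ``ApproximateEqual`` primitive instead of instantiating it on every call. Usage is unchanged:

>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> x = ms.Tensor(np.array([1e6, 2e6, 3e6]), ms.float32)
>>> y = ms.Tensor(np.array([1e6, 2e6 + 10, 3e6]), ms.float32)
>>> ops.approximate_equal(x, y, tolerance=1e-5)   # expected: [True, False, True]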
4655
3293
 
4656
3294
 
4657
3295
  def isfinite(x):
@@ -4741,7 +3379,7 @@ def isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
4741
3379
  is “close” to the corresponding element of `other`. Closeness is defined as:
4742
3380
 
4743
3381
  .. math::
4744
- ∣input−other∣ ≤ atol + rtol × ∣other∣
3382
+ |input-other| ≤ atol + rtol × |other|
4745
3383
 
4746
3384
  Args:
4747
3385
  input (Tensor): First Tensor to compare, with a data type of float32, float16 or int32.
@@ -4947,61 +3585,6 @@ def fmax(input, other):
4947
3585
  return fmax_(input, other)
4948
3586
 
4949
3587
 
4950
- def maximum(input, other):
4951
- r"""
4952
- Computes the maximum of input tensors element-wise.
4953
-
4954
- Note:
4955
- - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
4956
- consistent.
4957
- - The inputs must be two tensors or one tensor and one scalar.
4958
- - When the inputs are two tensors,
4959
- dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
4960
- - When the inputs are one tensor and one scalar,
4961
- the scalar could only be a constant.
4962
- - Broadcasting is supported.
4963
- - If one of the elements being compared is a NaN, then that element is returned.
4964
-
4965
- .. math::
4966
- output_i = \max(input_i, other_i)
4967
-
4968
- Args:
4969
- input (Union[Tensor, Number, bool]): The first input is a number or
4970
- a bool or a tensor whose data type is number or bool.
4971
- other (Union[Tensor, Number, bool]): The second input is a number or
4972
- a bool when the first input is a tensor or a tensor whose data type is number or bool.
4973
-
4974
- Returns:
4975
- Tensor, the shape is the same as the one after broadcasting,
4976
- and the data type is the one with higher precision or higher digits among the two inputs.
4977
-
4978
- Raises:
4979
- TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
4980
- ValueError: If `input` and `other` are not the same shape.
4981
-
4982
- Supported Platforms:
4983
- ``Ascend`` ``GPU`` ``CPU``
4984
-
4985
- Examples:
4986
- >>> import mindspore
4987
- >>> import numpy as np
4988
- >>> from mindspore import Tensor, ops
4989
- >>> # case 1 : same data type
4990
- >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
4991
- >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
4992
- >>> output = ops.maximum(x, y)
4993
- >>> print(output)
4994
- [4. 5. 6.]
4995
- >>> # case 2 : different data type
4996
- >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
4997
- >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
4998
- >>> output = ops.maximum(x, y)
4999
- >>> print(output.dtype)
5000
- Float32
5001
- """
5002
- return maximum_(input, other)
5003
-
5004
-
5005
3588
  def fmin(input, other):
5006
3589
  r"""
5007
3590
  Computes the minimum of input tensors element-wise.
@@ -5045,59 +3628,6 @@ def fmin(input, other):
5045
3628
  return fmin_(input, other)
5046
3629
 
5047
3630
 
5048
- def minimum(input, other):
5049
- r"""
5050
- Computes the minimum of input tensors element-wise.
5051
-
5052
- Note:
5053
- - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
5054
- consistent.
5055
- - The inputs must be two tensors or one tensor and one scalar.
5056
- - When the inputs are two tensors, dtypes of them cannot be bool at the same time.
5057
- - When the inputs are one tensor and one scalar, the scalar could only be a constant.
5058
- - Shapes of them are supposed to be broadcast.
5059
- - If one of the elements being compared is a NaN, then that element is returned.
5060
-
5061
- .. math::
5062
- output_i = \min(input_i, other_i)
5063
-
5064
- Args:
5065
- input (Union[Tensor, Number, bool]): The first input is a number or
5066
- a bool or a tensor whose data type is number or bool.
5067
- other (Union[Tensor, Number, bool]): The second input is a number or
5068
- a bool when the first input is a tensor or a tensor whose data type is number or bool.
5069
-
5070
- Returns:
5071
- Tensor, the shape is the same as the one after broadcasting,
5072
- and the data type is the one with higher precision or higher digits among the two inputs.
5073
-
5074
- Raises:
5075
- TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
5076
- ValueError: If `input` and `other` are not the same shape after broadcast.
5077
-
5078
- Supported Platforms:
5079
- ``Ascend`` ``GPU`` ``CPU``
5080
-
5081
- Examples:
5082
- >>> import mindspore
5083
- >>> import numpy as np
5084
- >>> from mindspore import Tensor, ops
5085
- >>> # case 1 : same data type
5086
- >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
5087
- >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
5088
- >>> output = ops.minimum(x, y)
5089
- >>> print(output)
5090
- [1. 2. 3.]
5091
- >>> # case 2 : different data type
5092
- >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
5093
- >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
5094
- >>> output = ops.minimum(x, y)
5095
- >>> print(output.dtype)
5096
- Float32
5097
- """
5098
- return minimum_(input, other)
5099
-
5100
-
5101
3631
  def median(input, axis=-1, keepdims=False):
5102
3632
  r"""
5103
3633
  Computes the median and indices of input tensor.
@@ -5228,6 +3758,8 @@ def nanmean(input, axis=None, keepdims=False, *, dtype=None):
5228
3758
  """
5229
3759
  _check_is_tensor("input", input, "nanmean")
5230
3760
  _check_repeat_in_axis(axis, input.ndim, "nanmean")
3761
+ if input.dtype not in mstype.float_type:
3762
+ raise TypeError(f"For 'nanmean', input should be a floating point dtype, but got {input.dtype}.")
5231
3763
  nan_sum = nansum(input, axis, keepdims)
5232
3764
  is_num = isnan(input).logical_not()
5233
3765
  is_num = is_num.sum(axis=axis, keepdims=keepdims)
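
Note: the added guard makes ``nanmean`` reject non-floating inputs up front (NaN is only representable in floating-point dtypes) instead of silently running the nansum/count path on integers. Sketch of the new contract:

>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> x = ms.Tensor([[1.0, float('nan')], [3.0, 4.0]], ms.float32)
>>> ops.nanmean(x)   # mean of the three non-NaN values: ~2.6666667
>>> # ops.nanmean(ms.Tensor([1, 2, 3], ms.int32)) now raises TypeError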
@@ -5323,7 +3855,7 @@ def ormqr(input, tau, other, left=True, transpose=False):
5323
3855
  TypeError: If dtype of `input` or `tau` or `other` is not one of: float64, float32, complex64, complex128.
5324
3856
  ValueError: If the dimension of `input` or `other` is less than 2D.
5325
3857
  ValueError: If rank(`input`) - rank(`tau`) != 1.
5326
- ValueError: If tau.shape[:-2] != input.shape[:-2]
3858
+ ValueError: If tau.shape[:-1] != input.shape[:-2]
5327
3859
  ValueError: If other.shape[:-2] != input.shape[:-2]
5328
3860
  ValueError: If left == true, other.shape[-2] < tau.shape[-1].
5329
3861
  ValueError: If left == true, other.shape[-2] != input.shape[-2].
@@ -5397,11 +3929,11 @@ def heaviside(input, values):
5397
3929
  Computes the Heaviside step function for each element in input.
5398
3930
 
5399
3931
  .. math::
5400
- \text { heaviside }(\text { input, values })=\left\{\begin{array}{ll}
5401
- 0, & \text { if input }<0 \\
5402
- \text { values, } & \text { if input }=0 \\
5403
- 1, & \text { if input }>0
5404
- \end{array}\right.
3932
+ \text { heaviside }(\text { input, values })=\left\{\begin{array}{ll}
3933
+ 0, & \text { if input }<0 \\
3934
+ \text { values, } & \text { if input }=0 \\
3935
+ 1, & \text { if input }>0
3936
+ \end{array}\right.
5405
3937
 
5406
3938
  Args:
5407
3939
  input (Tensor): The input tensor. With real number data type.
@@ -5489,9 +4021,6 @@ def logspace(start, end, steps, base=10, *, dtype=mstype.float32):
5489
4021
  &output = [base^{start}, base^{start + 1 * step}, ... , base^{start + (steps-2) * step}, base^{end}]
5490
4022
  \end{aligned}
5491
4023
 
5492
- Note:
5493
- - Input `base` must be integer.
5494
-
5495
4024
  Args:
5496
4025
  start (Union[float, Tensor]): Start value of interval.
5497
4026
  end (Union[float, Tensor]): End value of interval.
@@ -5533,6 +4062,8 @@ def logspace(start, end, steps, base=10, *, dtype=mstype.float32):
5533
4062
  def logaddexp(input, other):
5534
4063
  r"""
5535
4064
  Computes the logarithm of the sum of exponentiations of the inputs.
4065
+ This function is useful in statistics where the calculated probabilities of events may be
4066
+ so small as to exceed the range of normal floating point numbers.
5536
4067
 
5537
4068
  .. math::
5538
4069
 
@@ -5573,7 +4104,7 @@ def logaddexp(input, other):
5573
4104
  f"but got {input.dtype} and {other.dtype}.")
5574
4105
  m = maximum(input, other)
5575
4106
  abs_val = abs(input - other)
5576
- exp_val = tensor_exp(neg_tensor(abs_val))
4107
+ exp_val = tensor_exp(neg(abs_val))
5577
4108
  y = m + log1p(exp_val)
5578
4109
  return y
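
Note: the body uses the standard numerically stable rewrite log(e^a + e^b) = max(a, b) + log1p(e^-|a - b|), so ``exp`` only ever sees a non-positive argument and cannot overflow where a naive ``log(exp(a) + exp(b))`` would. For example:

>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> a = ms.Tensor([1000.0], ms.float32)
>>> b = ms.Tensor([1000.0], ms.float32)
>>> ops.logaddexp(a, b)   # ~1000.6931, while exp(1000.) alone overflows to inf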
5579
4110
 
@@ -5619,7 +4150,7 @@ def logaddexp2(input, other):
5619
4150
 
5620
4151
  m = maximum(input, other)
5621
4152
  abs_val = abs(input - other)
5622
- exp2_val = pows(2., neg_tensor(abs_val))
4153
+ exp2_val = pows(2., neg(abs_val))
5623
4154
  y = m + log2(1. + exp2_val)
5624
4155
  return y
5625
4156
 
@@ -5950,35 +4481,6 @@ def std_mean(input, axis=None, ddof=0, keepdims=False):
5950
4481
  return tensor_pow(output[0], 0.5), output[1]
5951
4482
 
5952
4483
 
5953
- def real(input):
5954
- r"""
5955
- Returns a Tensor that is the real part of the input.
5956
- If input is real, it is returned unchanged.
5957
-
5958
- Args:
5959
- input (Tensor): The input tensor to compute to.
5960
-
5961
- Returns:
5962
- Tensor, the shape is the same as the `input`.
5963
-
5964
- Raises:
5965
- TypeError: If `input` is not a Tensor.
5966
-
5967
- Supported Platforms:
5968
- ``Ascend`` ``GPU`` ``CPU``
5969
-
5970
- Examples:
5971
- >>> import mindspore as ms
5972
- >>> import mindspore.ops as ops
5973
- >>> import numpy as np
5974
- >>> input = ms.Tensor(np.asarray(np.complex(1.3+0.4j)), ms.complex64)
5975
- >>> output = ops.real(input)
5976
- >>> print(output)
5977
- 1.3
5978
- """
5979
- return real_(input)
5980
-
5981
-
5982
4484
  def reciprocal(input):
5983
4485
  r"""
5984
4486
  Returns reciprocal of a tensor element-wise.
@@ -5989,7 +4491,6 @@ def reciprocal(input):
5989
4491
 
5990
4492
  Args:
5991
4493
  input (Tensor): The input tensor.
5992
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
5993
4494
 
5994
4495
  Returns:
5995
4496
  Tensor, has the same shape as the `input`.
@@ -6009,108 +4510,9 @@ def reciprocal(input):
6009
4510
  >>> print(output)
6010
4511
  [1. 0.5 0.25]
6011
4512
  """
6012
- if not isinstance(input, Tensor):
6013
- raise TypeError(f"For reciprocal, the input must be a Tensor, but got {type(input)}.")
6014
- if not is_complex(input) and not ops.is_floating_point(input):
6015
- input = ops.cast(input, mstype.float32)
6016
4513
  return reciprocal_(input)
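
Note: the Python-level type check and the implicit int-to-float32 cast are gone, so ``reciprocal`` now forwards straight to the primitive and dtype handling is left to it. Basic usage is unchanged:

>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> x = ms.Tensor(np.array([1.0, 2.0, 4.0]), ms.float32)
>>> ops.reciprocal(x)   # expected: [1., 0.5, 0.25]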
6017
4514
 
6018
4515
 
6019
- def rsqrt(input):
6020
- r"""
6021
- Computes reciprocal of square root of input tensor element-wise.
6022
-
6023
- .. math::
6024
-
6025
- out_{i} = \frac{1}{\sqrt{input_{i}}}
6026
-
6027
- Args:
6028
- input (Tensor): The input of rsqrt. Its each element must be a non-negative
6029
- number, if an element is negative, the calculation result is nan.
6030
-
6031
- Returns:
6032
- Tensor, has the same shape and dtype as the `input`.
6033
-
6034
- Raises:
6035
- TypeError: If `input` is not a Tensor.
6036
-
6037
- Supported Platforms:
6038
- ``Ascend`` ``GPU`` ``CPU``
6039
-
6040
- Examples:
6041
- >>> import mindspore as ms
6042
- >>> import mindspore.ops as ops
6043
- >>> input = ms.Tensor([-0.0370, 0.2970, 1.5420, -0.9105])
6044
- >>> output = ops.rsqrt(input)
6045
- >>> print(output)
6046
- [ nan 1.8349396 0.80530024 nan]
6047
- """
6048
- return rsqrt_(input)
6049
-
6050
-
6051
- def sqrt(x):
6052
- """
6053
- Returns sqrt of a tensor element-wise.
6054
-
6055
- .. math::
6056
-
6057
- out_{i} = \\sqrt{x_{i}}
6058
-
6059
- Args:
6060
- x (Tensor): The input tensor with a dtype of number.Number.
6061
- Returns:
6062
- Tensor, has the same shape and dtype as the `x`.
6063
-
6064
- Raises:
6065
- TypeError: If `x` is not a Tensor.
6066
-
6067
- Supported Platforms:
6068
- ``Ascend`` ``GPU`` ``CPU``
6069
-
6070
- Examples:
6071
- >>> import mindspore
6072
- >>> import numpy as np
6073
- >>> from mindspore import Tensor, ops
6074
- >>> x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
6075
- >>> output = ops.sqrt(x)
6076
- >>> print(output)
6077
- [1. 2. 3.]
6078
- """
6079
- return sqrt_(x)
6080
-
6081
-
6082
- def square(input):
6083
- """
6084
- Returns square of a tensor element-wise.
6085
-
6086
- .. math::
6087
-
6088
- y_i = input_i ^ 2
6089
-
6090
- Args:
6091
- input (Tensor): The input tensor with a dtype of Number.
6092
-
6093
- Returns:
6094
- Tensor, has the same shape and dtype as the `input`.
6095
-
6096
- Raises:
6097
- TypeError: If `input` is not a Tensor.
6098
-
6099
- Supported Platforms:
6100
- ``Ascend`` ``GPU`` ``CPU``
6101
-
6102
- Examples:
6103
- >>> import mindspore
6104
- >>> import numpy as np
6105
- >>> from mindspore import Tensor, ops
6106
- >>> input = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
6107
- >>> output = ops.square(input)
6108
- >>> print(output)
6109
- [1. 4. 9.]
6110
- """
6111
- return square_(input)
6112
-
6113
-
6114
4516
  def outer(input, vec2):
6115
4517
  """
6116
4518
  Return outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
@@ -6128,7 +4530,6 @@ def outer(input, vec2):
6128
4530
 
6129
4531
  Raises:
6130
4532
  TypeError: If `input` or `vec2` is not a Tensor.
6131
- ValueError: If `input` or `vec2` is not an 1-D Tensor.
6132
4533
 
6133
4534
  Supported Platforms:
6134
4535
  ``Ascend`` ``GPU`` ``CPU``
@@ -6151,10 +4552,6 @@ def outer(input, vec2):
6151
4552
  raise TypeError("the input input must be Tensor!")
6152
4553
  if not isinstance(vec2, (Tensor, Tensor_)):
6153
4554
  raise TypeError("the input vec2 must be Tensor!")
6154
- if len(input.shape) != 1:
6155
- raise ValueError("the input input must be a 1-D vector!")
6156
- if len(vec2.shape) != 1:
6157
- raise ValueError("the input vec2 must be a 1-D vector!")
6158
4555
  input = input.reshape(-1, 1)
6159
4556
  y = tensor_mul(input, vec2)
6160
4557
  return y
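
Note: with the explicit 1-D checks dropped, ``outer`` relies on ``reshape(-1, 1)``, which flattens whatever it is given, so non-1-D inputs are now implicitly flattened rather than rejected. Sketch of the vector case:

>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> a = ms.Tensor([1.0, 2.0, 3.0], ms.float32)
>>> b = ms.Tensor([1.0, 10.0], ms.float32)
>>> ops.outer(a, b)   # expected 3x2 result: [[1, 10], [2, 20], [3, 30]]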
@@ -6194,10 +4591,6 @@ def mv(mat, vec):
6194
4591
  raise TypeError("The input mat must be Tensor.")
6195
4592
  if not isinstance(vec, (Tensor, Tensor_)):
6196
4593
  raise TypeError("The input vec must be Tensor.")
6197
- if len(mat.shape) != 2:
6198
- raise ValueError("The input mat must be 2-D Tensor.")
6199
- if len(vec.shape) != 1:
6200
- raise ValueError("The input vec must be 1-D Tensor.")
6201
4594
 
6202
4595
  length_vec = get_x_shape(vec.shape)
6203
4596
  vec = reshape_(vec, (length_vec[0], 1))
@@ -6252,10 +4645,6 @@ def addbmm(input, batch1, batch2, *, beta=1, alpha=1):
6252
4645
  [1285. 1377. 1469.]
6253
4646
  [1621. 1745. 1869.]]
6254
4647
  """
6255
- dim1 = batch1.ndim
6256
- dim2 = batch2.ndim
6257
- if dim1 != 3 or dim2 != 3:
6258
- raise ValueError(f"For 'addbmm', 'batch1' and 'batch2' must be 3D, but got {dim1} and {dim2} respectively.")
6259
4648
  if not isinstance(alpha, (int, float)):
6260
4649
  raise TypeError(f"For 'addbmm', parameter 'alpha' must be an int or float, but got {type(alpha)}.")
6261
4650
  if not isinstance(beta, (int, float)):
@@ -6340,7 +4729,7 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
6340
4729
 
6341
4730
  Raises:
6342
4731
  TypeError: If `mat`, `vec`, `input` is not a Tensor.
6343
- TypeError: If inputs `mat`, 'vec' are not the same dtype.
4732
+ TypeError: If inputs `mat`, `vec` are not the same dtype.
6344
4733
  ValueError: If `mat` is not a 2-D Tensor.
6345
4734
  ValueError: If `vec` is not a 1-D Tensor.
6346
4735
 
@@ -6363,17 +4752,14 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
6363
4752
  raise TypeError("For Addmv, inputs must be all tensors.")
6364
4753
  if dtype_(mat) != dtype_(vec):
6365
4754
  raise TypeError("For Addmv, the mat and vec should be the same dtype.")
6366
- _check_input_1d(vec.shape, "vec", "Addmv")
6367
- _check_input_2d(mat.shape, "mat", "Addmv")
6368
4755
  _check_input_dtype("input", input_dtype,
6369
4756
  [mstype.float16, mstype.float32, mstype.float64,
6370
4757
  mstype.int16, mstype.int32, mstype.int64], "Addmv")
6371
4758
  _check_attr_dtype("alpha", alpha, [int, float, bool], "Addmv")
6372
4759
  _check_attr_dtype("beta", beta, [int, float, bool], "Addmv")
6373
4760
  if input_dtype in (mstype.int16, mstype.int32, mstype.int64):
6374
- scalar_cast = P.ScalarCast()
6375
- alpha = scalar_cast(alpha, mstype.int32)
6376
- beta = scalar_cast(beta, mstype.int32)
4761
+ alpha = ops.scalar_cast(alpha, mstype.int64)
4762
+ beta = ops.scalar_cast(beta, mstype.int64)
6377
4763
  out = beta * input + alpha * mv(mat, vec)
6378
4764
  return out
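
Note: for integer dtypes, ``alpha`` and ``beta`` now go through the functional ``ops.scalar_cast`` to int64 rather than a freshly built ``P.ScalarCast`` primitive casting to int32; the call shape is unchanged:

>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> inp = ms.Tensor(np.array([1, 1]), ms.int64)
>>> mat = ms.Tensor(np.array([[1, 2], [3, 4]]), ms.int64)
>>> vec = ms.Tensor(np.array([1, 1]), ms.int64)
>>> ops.addmv(inp, mat, vec, beta=2, alpha=1)   # expected: [5, 9]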
6379
4765
 
@@ -6404,7 +4790,11 @@ def adjoint(x):
6404
4790
  [[0.-0.j 2.-2.j]
6405
4791
  [1.-1.j 3.-3.j]]
6406
4792
  """
6407
- return x.swapaxes(-1, -2).conj()
4793
+ _dtype = x.dtype
4794
+ _t = x.swapaxes(-1, -2)
4795
+ if _dtype in mstype.complex_type:
4796
+ return _t.conj()
4797
+ return _t
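
Note: ``adjoint`` now branches on dtype: complex tensors get swap-axes plus ``conj()``, while real tensors take the plain transpose path (conjugation is a no-op for them, so only redundant work is trimmed). Behavior sketch:

>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> r = ms.Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), ms.float32)
>>> ops.adjoint(r)   # real input: just the transpose
>>> c = ms.Tensor(np.array([[0. + 0.j, 1. + 1.j], [2. + 2.j, 3. + 3.j]]), ms.complex64)
>>> ops.adjoint(c)   # complex input: transpose and conjugate, as in the docstring example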
6408
4798
 
6409
4799
 
6410
4800
  def addr(x, vec1, vec2, *, beta=1, alpha=1):
@@ -6460,25 +4850,21 @@ def addr(x, vec1, vec2, *, beta=1, alpha=1):
6460
4850
  raise TypeError("For Addr, inputs must be all tensors.")
6461
4851
  if dtype_(vec1) != dtype_(vec2):
6462
4852
  raise TypeError("For Addr, the vec1 and vec2 should be the same dtype.")
6463
- _check_input_1d(vec1.shape, "vec1", "Addr")
6464
- _check_input_1d(vec2.shape, "vec2", "Addr")
6465
4853
  _check_input_dtype("x", input_dtype,
6466
4854
  [mstype.float16, mstype.float32, mstype.float64,
6467
4855
  mstype.int16, mstype.int32, mstype.int64], "Addr")
6468
4856
  _check_attr_dtype("alpha", alpha, [int, float, bool], "Addr")
6469
4857
  _check_attr_dtype("beta", beta, [int, float, bool], "Addr")
6470
4858
  if input_dtype in (mstype.int16, mstype.int32, mstype.int64):
6471
- scalar_cast = P.ScalarCast()
6472
- alpha = scalar_cast(alpha, mstype.int32)
6473
- beta = scalar_cast(beta, mstype.int32)
6474
- matmul_op = P.MatMul()
4859
+ alpha = ops.scalar_cast(alpha, mstype.int64)
4860
+ beta = ops.scalar_cast(beta, mstype.int64)
6475
4861
 
6476
4862
  length_vec1 = get_x_shape(vec1.shape)
6477
4863
  vec1 = reshape_(vec1, (length_vec1[0], 1))
6478
4864
  length_vec2 = get_x_shape(vec2.shape)
6479
4865
  vec2 = reshape_(vec2, (1, length_vec2[0]))
6480
4866
 
6481
- out = beta * x + alpha * matmul_op(vec1, vec2)
4867
+ out = beta * x + alpha * matmul_(vec1, vec2)
6482
4868
  return out
6483
4869
 
6484
4870
 
@@ -6498,7 +4884,7 @@ def lcm(input, other):
6498
4884
 
6499
4885
  Raises:
6500
4886
  TypeError: If data type `input` or `other` is not int32 or int64.
6501
- ValueError: If shape of two inputs are not broadcastable.
4887
+ ValueError: If shapes of two inputs are not broadcastable.
6502
4888
 
6503
4889
  Supported Platforms:
6504
4890
  ``Ascend`` ``GPU`` ``CPU``
@@ -6512,8 +4898,6 @@ def lcm(input, other):
6512
4898
  >>> print(y)
6513
4899
  [14 24 36]
6514
4900
  """
6515
-
6516
- lcm_ = _get_cache_prim(Lcm)()
6517
4901
  return lcm_(input, other)
6518
4902
 
6519
4903
 
@@ -6542,46 +4926,11 @@ def cdist(x1, x2, p=2.0):
6542
4926
  TypeError: If `x1` or `x2` is not Tensor.
6543
4927
  TypeError: If dtype of `x1` or `x2` is not listed in the "Note" above.
6544
4928
  TypeError: If `p` is not float32.
6545
- ValueError: If `p` is negative.
6546
- ValueError: If dimension of `x1` is not the same as `x2`.
6547
- ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
6548
- ValueError: If the batch shape of `x1` is not the same as the shape of `x2`.
6549
- ValueError: If the number of columns of `x1` is not the same as the number of `x2`.
6550
-
6551
- Supported Platforms:
6552
- ``Ascend`` ``GPU`` ``CPU``
6553
-
6554
- Examples:
6555
- >>> import numpy as np
6556
- >>> from mindspore import Tensor, ops
6557
- >>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
6558
- >>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
6559
- >>> output = ops.cdist(x, y, 2.0)
6560
- >>> print(output)
6561
- [[[2.8284273 2.8284273]
6562
- [1.4142137 1.4142137]]]
6563
- """
6564
- cdist_ = _get_cache_prim(P.Cdist)(p)
6565
- return cdist_(x1, x2)
6566
-
6567
-
6568
- def gcd(input, other):
6569
- """
6570
- Computes greatest common divisor of input tensors element-wise.
6571
- The shape of two inputs should be broadcastable, and data type of them should be
6572
- one of: int32, int64
6573
-
6574
- Args:
6575
- input (Tensor): The first input tensor.
6576
- other (Tensor): The second input tensor.
6577
-
6578
- Returns:
6579
- Tensor, the shape is the same as the one after broadcasting, and the data type is one
6580
- with higher digits in the two inputs.
6581
-
6582
- Raises:
6583
- TypeError: If data type `input` or `other` is not int32 or int64.
6584
- ValueError: If shape of two inputs are not broadcastable.
4929
+ ValueError: If `p` is negative.
4930
+ ValueError: If dimension of `x1` is not the same as `x2`.
4931
+ ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
4932
+ ValueError: If the batch shape of `x1` is not the same as the shape of `x2`.
4933
+ ValueError: If the number of columns of `x1` is not the same as that of `x2`.
6585
4934
 
6586
4935
  Supported Platforms:
6587
4936
  ``Ascend`` ``GPU`` ``CPU``
@@ -6589,15 +4938,15 @@ def gcd(input, other):
6589
4938
  Examples:
6590
4939
  >>> import numpy as np
6591
4940
  >>> from mindspore import Tensor, ops
6592
- >>> x1 = Tensor(np.array([7, 8, 9]))
6593
- >>> x2 = Tensor(np.array([14, 6, 12]))
6594
- >>> y = ops.gcd(x1, x2)
6595
- >>> print(y)
6596
- [7 2 3]
4941
+ >>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
4942
+ >>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
4943
+ >>> output = ops.cdist(x, y, 2.0)
4944
+ >>> print(output)
4945
+ [[[2.8284273 2.8284273]
4946
+ [1.4142137 1.4142137]]]
6597
4947
  """
6598
-
6599
- gcd_ = _get_cache_prim(Gcd)()
6600
- return gcd_(input, other)
4948
+ cdist_ = _get_cache_prim(P.Cdist)(p)
4949
+ return cdist_(x1, x2)
6601
4950
 
6602
4951
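A minimal NumPy sketch of the batched pairwise p-norm distance that `cdist` computes (illustrative only, `cdist_sketch` is a hypothetical name):

    import numpy as np

    def cdist_sketch(x1, x2, p=2.0):
        # x1: (B, P, M), x2: (B, R, M) -> distances of shape (B, P, R)
        diff = np.abs(x1[..., :, None, :] - x2[..., None, :, :])
        return (diff ** p).sum(-1) ** (1.0 / p)

    x = np.array([[[1., 1.], [2., 2.]]], dtype=np.float32)
    y = np.array([[[3., 3.], [3., 3.]]], dtype=np.float32)
    print(cdist_sketch(x, y))  # [[[2.828... 2.828...] [1.414... 1.414...]]]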
 
6603
4952
  def lerp(input, end, weight):
@@ -6964,8 +5313,7 @@ def frac(x):
6964
5313
  >>> print(output)
6965
5314
  [ 0. 0.1992 -0.5 ]
6966
5315
  """
6967
- frac_op = P.Mod()
6968
- return frac_op(x, 1)
5316
+ return mod_(x, 1)
6969
5317
 
6970
5318
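Mod(x, 1) keeps the sign of the dividend, so the fractional part of a negative input stays negative; NumPy's fmod (not mod) reproduces this (illustrative only):

    import numpy as np

    x = np.array([2.0, 4.2, -2.5], dtype=np.float16)
    print(np.fmod(x, 1))  # [ 0.      0.1992 -0.5   ]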
 
6971
5319
  #####################################
@@ -7014,6 +5362,7 @@ def cummin(input, axis):
7014
5362
 
7015
5363
  Raises:
7016
5364
  TypeError: If `input` is not a Tensor.
5365
+ TypeError: If `input` is a Tensor, but its data type is complex or bool.
7017
5366
  TypeError: If `axis` is not an int.
7018
5367
  ValueError: If `axis` is out of the range `[-input.ndim, input.ndim - 1]`.
7019
5368
 
@@ -7030,6 +5379,8 @@ def cummin(input, axis):
7030
5379
  >>> print(output[1])
7031
5380
  [0 1 1 1 4 4]
7032
5381
  """
5382
+ if isinstance(axis, bool):
5383
+ raise TypeError(f"For 'cummin', the date type of 'axis' must be Int, but got {axis}.")
7033
5384
  cummin_op = _get_cache_prim(Cummin)(axis=0)
7034
5385
  if axis == 0:
7035
5386
  out1, out2 = cummin_op(input)
@@ -7043,55 +5394,6 @@ def cummin(input, axis):
7043
5394
  return [out1, out2]
7044
5395
 
7045
5396
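The new `isinstance(axis, bool)` guard is needed because `bool` is a subclass of `int` in Python, so `True` would otherwise pass an integer check for `axis`. For the semantics themselves, a minimal NumPy sketch of a 1-D cumulative minimum with indices (illustrative only; tie handling may differ from the primitive):

    import numpy as np

    a = np.array([-0.2284, -0.6628, 0.0975, 0.2680, -0.9213, -0.5950])
    values = np.minimum.accumulate(a)            # running minimum
    # keep positions where a new minimum appears, carry them forward
    hits = np.where(a == values, np.arange(a.size), 0)
    indices = np.maximum.accumulate(hits)
    print(values)   # [-0.2284 -0.6628 -0.6628 -0.6628 -0.9213 -0.9213]
    print(indices)  # [0 1 1 1 4 4]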
 
7046
- def cummax(input, axis):
7047
- r"""
7048
- Returns a tuple (values,indices) where 'values' is the cumulative maximum value of input Tensor `input`
7049
- along the dimension `axis`, and `indices` is the index location of each maximum value.
7050
-
7051
- .. math::
7052
- \begin{array}{ll} \\
7053
- y_{i} = \max(x_{1}, x_{2}, ... , x_{i})
7054
- \end{array}
7055
-
7056
- Args:
7057
- input (Tensor): The input Tensor, rank of `input` > 0.
7058
- axis (int): The dimension to do the operation over. The value of `axis` must be in the range
7059
- `[-input.ndim, input.ndim - 1]`.
7060
-
7061
- Returns:
7062
- tuple [Tensor], tuple of 2 Tensors, containing the cumulative maximum of elements and the index.
7063
- The shape of each output tensor is the same as input `input`.
7064
-
7065
- Raises:
7066
- TypeError: If `input` is not a Tensor.
7067
- TypeError: If `axis` is not an int.
7068
- ValueError: If `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
7069
-
7070
- Supported Platforms:
7071
- ``GPU`` ``CPU``
7072
-
7073
- Examples:
7074
- >>> import mindspore
7075
- >>> import numpy as np
7076
- >>> from mindspore import Tensor
7077
- >>> import mindspore.ops as ops
7078
- >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
7079
- >>> output = ops.cummax(x, axis=0)
7080
- >>> print(output[0])
7081
- [[ 3. 4. 6. 10.]
7082
- [ 3. 6. 7. 10.]
7083
- [ 4. 6. 8. 10.]
7084
- [ 4. 6. 8. 10.]]
7085
- >>> print(output[1])
7086
- [[0 0 0 0]
7087
- [0 1 1 0]
7088
- [2 1 2 0]
7089
- [2 1 2 0]]
7090
- """
7091
- _cummax = _get_cache_prim(ops.Cummax)(axis=axis)
7092
- return _cummax(input)
7093
-
7094
-
7095
5397
  def cumsum(x, axis, dtype=None):
7096
5398
  """
7097
5399
  Computes the cumulative sum of input Tensor along `axis`.
@@ -7105,7 +5407,7 @@ def cumsum(x, axis, dtype=None):
7105
5407
  For the case of dynamic shape, the dtype of `x` only support int32, float16 or float32.
7106
5408
 
7107
5409
  Args:
7108
- x (Tensor): The input Tensor of shape :math:`(N,*)` where :math:`*` means, any number
5410
+ x (Tensor): The input Tensor of shape :math:`(N, *)` where :math:`*` means, any number
7109
5411
  of additional dimensions.
7110
5412
  axis (int): Axis along which the cumulative sum is computed.
7111
5413
  dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If specified,
@@ -7176,8 +5478,8 @@ def sparse_segment_mean(x, indices, segment_ids):
7176
5478
  TypeError: If the dtype of `x` is not one of the following dtype: float16, float32, float64.
7177
5479
  TypeError: If the dtype of `indices` and `segment_ids` are not one of the following dtype: int32, int64.
7178
5480
  TypeError: If the dtype of `indices` and `segment_ids` are not the same.
7179
- ValueError: If the shape of `x`, 'indices' or `segment_ids` don't meet the parameter description.
7180
- ValueError: If the size of 'indices' and `segment_ids` are not the same.
5481
+ ValueError: If the shape of `x`, `indices` or `segment_ids` don't meet the parameter description.
5482
+ ValueError: If the size of `indices` and `segment_ids` are not the same.
7181
5483
 
7182
5484
  Supported Platforms:
7183
5485
  ``GPU`` ``CPU``
@@ -7259,6 +5561,8 @@ def block_diag(*inputs):
7259
5561
  f"{ary.ndim}"
7260
5562
  )
7261
5563
 
5564
+ if not inputs:
5565
+ raise RuntimeError("For 'block_diag', the input is empty.")
7262
5566
  arys = [to_2d(ary) for ary in inputs]
7263
5567
  matrix = [ops.concat(to_col_block(arys, idx, ary)) for idx, ary in enumerate(arys)]
7264
5568
  return ops.concat(matrix, 1)
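A minimal NumPy sketch of the block-diagonal assembly, including the new empty-input guard (illustrative only, `block_diag_sketch` is a hypothetical name):

    import numpy as np

    def block_diag_sketch(*arys):
        if not arys:
            raise RuntimeError("the input is empty")  # same guard as above
        arys = [np.atleast_2d(a) for a in arys]
        rows = sum(a.shape[0] for a in arys)
        cols = sum(a.shape[1] for a in arys)
        out = np.zeros((rows, cols), dtype=arys[0].dtype)
        r = c = 0
        for a in arys:                       # place each block on the diagonal
            out[r:r + a.shape[0], c:c + a.shape[1]] = a
            r, c = r + a.shape[0], c + a.shape[1]
        return out

    print(block_diag_sketch(np.array([[1, 2]]), np.array([[3], [4]])))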
@@ -7277,7 +5581,7 @@ def atleast_1d(inputs):
7277
5581
  Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 1`.
7278
5582
 
7279
5583
  Raises:
7280
- TypeError: If the `input` is not a tensor or a list of tensors.
5584
+ TypeError: If the `inputs` is not a tensor or a list of tensors.
7281
5585
 
7282
5586
  Supported Platforms:
7283
5587
  ``Ascend`` ``GPU`` ``CPU``
@@ -7359,7 +5663,7 @@ def dstack(inputs):
7359
5663
  trans_inputs += (tensor,)
7360
5664
  if not trans_inputs:
7361
5665
  raise ValueError("For 'dstack', at least one tensor is needed to concatenate.")
7362
- return P.Concat(2)(trans_inputs)
5666
+ return _get_cache_prim(P.Concat)(2)(trans_inputs)
7363
5667
 
7364
5668
 
7365
5669
  @_primexpr
@@ -7377,7 +5681,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None):
7377
5681
 
7378
5682
  Note:
7379
5683
  Zero-shaped Tensor is not supported, a value error is raised if
7380
- an empty Tensor is encountered. Any dimension of an Tensor is 0 is considered
5684
+ an empty Tensor is encountered. A Tensor with any dimension of size 0 is considered
7381
5685
  an empty Tensor. Tensor with shape of :math:`(0,)`, :math:`(1, 2, 0, 4)` are all
7382
5686
  empty Tensor.
7383
5687
 
@@ -7556,7 +5860,7 @@ def atleast_2d(inputs):
7556
5860
  Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 2` .
7557
5861
 
7558
5862
  Raises:
7559
- TypeError: If the `input` is not a tensor or a list of tensors.
5863
+ TypeError: If the `inputs` is not a tensor or a list of tensors.
7560
5864
 
7561
5865
  Supported Platforms:
7562
5866
  ``Ascend`` ``GPU`` ``CPU``
@@ -7616,9 +5920,9 @@ def cartesian_prod(*inputs):
7616
5920
  >>> print(len(out))
7617
5921
  60
7618
5922
  """
7619
- meshgrid = P.Meshgrid(indexing="ij")
5923
+ meshgrid = _get_cache_prim(P.Meshgrid)(indexing="ij")
7620
5924
  meshgrid_output = meshgrid(inputs)
7621
- stack = P.Stack(axis=-1)
5925
+ stack = _get_cache_prim(P.Stack)(axis=-1)
7622
5926
  stack_output = stack(meshgrid_output)
7623
5927
  return reshape_(stack_output, (-1, len(inputs)))
7624
5928
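The three steps above (meshgrid with "ij" indexing, stack on a new last axis, flatten) translate directly to NumPy (illustrative only, `cartesian_prod_sketch` is a hypothetical name):

    import numpy as np

    def cartesian_prod_sketch(*arrays):
        grids = np.meshgrid(*arrays, indexing="ij")
        return np.stack(grids, axis=-1).reshape(-1, len(arrays))

    print(cartesian_prod_sketch(np.array([1, 2]), np.array([5, 6])))
    # [[1 5]
    #  [1 6]
    #  [2 5]
    #  [2 6]]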
 
@@ -7639,7 +5943,7 @@ def atleast_3d(inputs):
7639
5943
  a 2-D Tensor of shape :math:`(M, N)` becomes a tensor of shape :math:`(M, N, 1)`.
7640
5944
 
7641
5945
  Raises:
7642
- TypeError: If the `input` is not a tensor or a list of tensors.
5946
+ TypeError: If the `inputs` is not a tensor or a list of tensors.
7643
5947
 
7644
5948
  Supported Platforms:
7645
5949
  ``Ascend`` ``GPU`` ``CPU``
@@ -7674,9 +5978,9 @@ def atleast_3d(inputs):
7674
5978
  if ndim == 0:
7675
5979
  return reshape_(arr, (1, 1, 1))
7676
5980
  if ndim == 1:
7677
- return reshape_(arr, (1, P.Size()(arr), 1))
5981
+ return reshape_(arr, (1, size_(arr), 1))
7678
5982
  if ndim == 2:
7679
- return reshape_(arr, P.Shape()(arr) + (1,))
5983
+ return reshape_(arr, shape_(arr) + (1,))
7680
5984
  return arr
7681
5985
 
7682
5986
  if isinstance(inputs, Tensor):
@@ -7768,7 +6072,7 @@ def vstack(inputs):
7768
6072
  msg = f"For 'vstack', Tensor is required, but got {type(tensor)}"
7769
6073
  raise TypeError(msg)
7770
6074
  if tensor.ndim <= 1:
7771
- shape = P.Shape()(tensor)
6075
+ shape = shape_(tensor)
7772
6076
  if isinstance(shape, int):
7773
6077
  shape = (shape,)
7774
6078
  ndim_diff = 2 - len(shape)
@@ -7778,7 +6082,7 @@ def vstack(inputs):
7778
6082
  trans_tup += (tensor,)
7779
6083
  if not trans_tup:
7780
6084
  raise ValueError("For 'vstack', need at least one tensor to concatenate.")
7781
- out = P.Concat(0)(trans_tup)
6085
+ out = _get_cache_prim(P.Concat)(0)(trans_tup)
7782
6086
  return out
7783
6087
 
7784
6088
 
@@ -7796,8 +6100,8 @@ def combinations(input, r=2, with_replacement=False):
7796
6100
  r"""
7797
6101
  Returns all r-length subsequences of input Tensor.
7798
6102
 
7799
- When `with_replacement` is set to `False`, it works similar to Python's
7800
- `itertools.combinations`, and when `with_replacement` is set to `True`,
6103
+ When `with_replacement` is set to ``False``, it works similarly to Python's
6104
+ `itertools.combinations`, and when `with_replacement` is set to ``True``,
7801
6105
  it behaves like `itertools.combinations_with_replacement`.
7802
6106
 
7803
6107
  Args:
@@ -7860,7 +6164,7 @@ def combinations(input, r=2, with_replacement=False):
7860
6164
  return None
7861
6165
 
7862
6166
  def _combinations_with_replacement(iterable, r):
7863
- lst = Tensor([])
6167
+ lst = Tensor_([])
7864
6168
  pool = tuple(iterable)
7865
6169
  n = len(pool)
7866
6170
  if not n and r:
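The two modes described in the docstring above mirror the Python standard library directly (illustrative comparison only):

    from itertools import combinations, combinations_with_replacement

    pool = [1, 3, -1]
    print(list(combinations(pool, 2)))                   # with_replacement=False
    # [(1, 3), (1, -1), (3, -1)]
    print(list(combinations_with_replacement(pool, 2)))  # with_replacement=True
    # [(1, 1), (1, 3), (1, -1), (3, 3), (3, -1), (-1, -1)]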
@@ -7974,7 +6278,7 @@ def copysign(x, other):
7974
6278
  """Broadcasts x from current shape to shape"""
7975
6279
  ndim_to = len(shape)
7976
6280
  x = _expand(x, ndim_to)
7977
- return _broadcast_to(x, P.Shape()(x), shape, ndim_to)
6281
+ return _broadcast_to(x, shape_(x), shape, ndim_to)
7978
6282
 
7979
6283
  if not isinstance(x, Tensor):
7980
6284
  raise TypeError("Tensor is expected, but got " + f"{type(x)}")
@@ -7985,7 +6289,7 @@ def copysign(x, other):
7985
6289
 
7986
6290
  if not isinstance(other, Tensor):
7987
6291
  other = _type_convert(Tensor, other)
7988
- other = _broadcast_to_shape(other, P.Shape()(x))
6292
+ other = _broadcast_to_shape(other, shape_(x))
7989
6293
 
7990
6294
  if _check_same_type(dtype_(x), mstype.bool_):
7991
6295
  raise TypeError("copysign does not accept dtype bool.")
@@ -8005,9 +6309,9 @@ def copysign(x, other):
8005
6309
  if x.dtype in (mstype.float16, mstype.float32, mstype.float64)
8006
6310
  else x.astype("float32")
8007
6311
  )
8008
- pos_tensor = P.Abs()(x_float)
8009
- less_zero = P.Less()(other, 0)
8010
- return P.Select()(less_zero, neg_tensor(pos_tensor), pos_tensor)
6312
+ pos_tensor = absolute_(x_float)
6313
+ less_zero = tensor_lt(other, 0)
6314
+ return select_(less_zero, neg(pos_tensor), pos_tensor)
8011
6315
 
8012
6316
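The abs / less-than-zero / select sequence above is the magnitude-and-sign decomposition of copysign; a minimal NumPy sketch (illustrative only):

    import numpy as np

    def copysign_sketch(x, other):
        # |x| where other >= 0, -|x| where other < 0
        pos = np.abs(np.asarray(x, dtype=np.float32))
        return np.where(np.asarray(other) < 0, -pos, pos)

    print(copysign_sketch([0.3, -0.7], [-1.0, 2.0]))  # [-0.3  0.7]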
 
8013
6317
  def hann_window(window_length, periodic=True, *, dtype=None):
@@ -8067,7 +6371,7 @@ def hann_window(window_length, periodic=True, *, dtype=None):
8067
6371
  w = 0.5 - 0.5 * np.cos(2 * math.pi / (window_length - 1) * n)
8068
6372
 
8069
6373
  if dtype is not None:
8070
- w = P.Cast()(w, dtype)
6374
+ w = cast_(ms.tensor(w), dtype)
8071
6375
  return Tensor(w[:-1]) if periodic else Tensor(w)
8072
6376
 
8073
6377
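A periodic window is a symmetric window one sample longer with its last sample dropped, which is what the `w[:-1]` above does. A minimal NumPy sketch (illustrative only, `hann_window_sketch` is a hypothetical name):

    import math
    import numpy as np

    def hann_window_sketch(window_length, periodic=True):
        length = window_length + 1 if periodic else window_length
        n = np.arange(length)
        w = 0.5 - 0.5 * np.cos(2 * math.pi / (length - 1) * n)
        return w[:-1] if periodic else w

    print(np.round(hann_window_sketch(5), 4))  # [0. 0.3455 0.9045 0.9045 0.3455]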
 
@@ -8091,7 +6395,7 @@ def logcumsumexp(input, axis):
8091
6395
  Args:
8092
6396
  input (Tensor): The input tensor. Must be one of the following types: float16, float32, float64.
8093
6397
  axis (int): The dimension along which to compute the cumulative log-sum-exp.
8094
- Must be in the range [-rank(x), rank(x)).
6398
+ Must be in the range [-rank(input), rank(input)).
8095
6399
 
8096
6400
  Returns:
8097
6401
  Tensor, has the same dtype and shape as the `input`.
@@ -8118,8 +6422,7 @@ def logcumsumexp(input, axis):
8118
6422
  raise TypeError(
8119
6423
  f"For 'logcumsumexp', 'axis' must be int type, but got {type(axis)}"
8120
6424
  )
8121
- logcumsumexp_ = _get_cache_prim(CumulativeLogsumexp)()
8122
- return logcumsumexp_(input, Tensor(axis))
6425
+ return cumulative_logsumexp_(input, Tensor(axis))
8123
6426
 
8124
6427
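A numerically stable reference for the cumulative log-sum-exp: subtract the maximum before exponentiating, then shift back in log space (illustrative NumPy sketch only):

    import numpy as np

    def logcumsumexp_sketch(x, axis):
        m = x.max(axis=axis, keepdims=True)
        return m + np.log(np.cumsum(np.exp(x - m), axis=axis))

    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(logcumsumexp_sketch(x, 0))  # [1. 2.3132617 3.4076061]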
 
8125
6428
  def logsumexp(input, axis, keep_dims=False):
@@ -8176,34 +6479,40 @@ def amin(input, axis=None, keepdims=False, *, initial=None, where=None):
8176
6479
  reduce a dimension of `input` along specified `axis`. `keepdims` determines whether the dimensions of
8177
6480
  output and input are the same.
8178
6481
 
6482
+ Note:
6483
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6484
+
8179
6485
  Args:
8180
6486
  input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
8181
6487
  :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
8182
- axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
8183
- Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
8184
- keepdims (bool): If true, keep these reduced dimensions and the length is 1. If false, don't keep
6488
+ axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
6489
+ dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
6490
+ keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
8185
6491
  these dimensions. Default: ``False`` .
8186
6492
 
8187
6493
  Keyword Args:
8188
6494
  initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
8189
6495
  on empty slice. Default: ``None`` .
8190
- where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
8191
- with the value in `initial`. If True, do not replace, otherwise replace. For the index of True in `where`,
8192
- the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates True by default.
6496
+ where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input` with the
6497
+ value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True`` in `where`,
6498
+ the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates ``True`` by
6499
+ default.
8193
6500
 
8194
6501
  Returns:
8195
6502
  Tensor, has the same data type as input tensor.
8196
6503
 
8197
- - If `axis` is None, and `keepdims` is False,
6504
+ - If `axis` is ``None`` , and `keepdims` is ``False`` ,
8198
6505
  the output is a 0-D tensor representing the minimum of all elements in the input tensor.
8199
- - If `axis` is int, set as 1, and `keepdims` is False,
6506
+ - If `axis` is int, set as 1, and `keepdims` is ``False`` ,
8200
6507
  the shape of output is :math:`(x_0, x_2, ..., x_R)`.
8201
- - If `axis` is tuple(int), set as (1, 2), and `keepdims` is False,
6508
+ - If `axis` is tuple(int), set as (1, 2), and `keepdims` is ``False`` ,
6509
+ the shape of output is :math:`(x_0, x_3, ..., x_R)`.
6510
+ - If `axis` is 1-D Tensor, set as [1, 2], and `keepdims` is ``False`` ,
8202
6511
  the shape of output is :math:`(x_0, x_3, ..., x_R)`.
8203
6512
 
8204
6513
  Raises:
8205
6514
  TypeError: If `input` is not a Tensor.
8206
- TypeError: If `axis` is not one of the following: int, tuple or list.
6515
+ TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
8207
6516
  TypeError: If `keepdims` is not a bool.
8208
6517
  ValueError: If `axis` is out of range.
8209
6518
 
@@ -8280,33 +6589,39 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
8280
6589
  reduce a dimension of `input` along specified `axis`. `keepdims` determines whether the dimensions of
8281
6590
  output and input are the same.
8282
6591
 
6592
+ Note:
6593
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6594
+
8283
6595
  Args:
8284
6596
  input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
8285
6597
  :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
8286
- axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
8287
- Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
8288
- keepdims (bool): If true, keep these reduced dimensions and the length is 1. If false, don't keep these
8289
- dimensions. Default: ``False`` .
6598
+ axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
6599
+ dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
6600
+ keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
6601
+ these dimensions. Default: ``False`` .
8290
6602
 
8291
6603
  Keyword Args:
8292
6604
  initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
8293
6605
  on empty slice. Default: ``None`` .
8294
- where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
8295
- with the value in `initial`. If True, do not replace, otherwise replace. For the index of True in `where`,
8296
- the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates True by default.
6606
+ where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input` with the
6607
+ value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True`` in `where`,
6608
+ the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates ``True`` by
6609
+ default.
8297
6610
 
8298
6611
  Returns:
8299
6612
  Tensor, has the same data type as input tensor.
8300
6613
 
8301
- - If `axis` is None, and `keepdims` is False, the output is a 0-D tensor representing the product of all
8302
- elements in the input tensor.
8303
- - If `axis` is int, set as 1, and `keepdims` is False, the shape of output is :math:`(x_0, x_2, ..., x_R)`.
8304
- - If `axis` is tuple(int), set as (1, 2), and `keepdims` is False, the shape of output is
6614
+ - If `axis` is ``None`` , and `keepdims` is ``False`` , the output is a 0-D tensor representing the maximum of
6615
+ all elements in the input tensor.
6616
+ - If `axis` is int, set as 1, and `keepdims` is ``False`` , the shape of output is :math:`(x_0, x_2, ..., x_R)`.
6617
+ - If `axis` is tuple(int), set as (1, 2), and `keepdims` is ``False`` , the shape of output is
6618
+ :math:`(x_0, x_3, ..., x_R)`.
6619
+ - If `axis` is 1-D Tensor, set as [1, 2], and `keepdims` is ``False`` , the shape of output is
8305
6620
  :math:`(x_0, x_3, ..., x_R)`.
8306
6621
 
8307
6622
  Raises:
8308
6623
  TypeError: If `input` is not a Tensor.
8309
- TypeError: If `axis` is not one of the following: int, tuple or list.
6624
+ TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
8310
6625
  TypeError: If `keepdims` is not a bool.
8311
6626
  ValueError: If `axis` is out of range.
8312
6627
 
@@ -8365,30 +6680,36 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
8365
6680
  def mean(x, axis=None, keep_dims=False):
8366
6681
  r"""
8367
6682
  Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
8368
- And reduce a dimension of `x` along the specified `axis`. `keep_dims`
6683
+ And reduce a dimension of `input` along the specified `axis`. `keep_dims`
8369
6684
  determines whether the dimensions of the output and input are the same.
8370
6685
 
6686
+ Note:
6687
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6688
+
8371
6689
  Args:
8372
6690
  x (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
8373
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
8374
- axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
8375
- Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
8376
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
8377
- If false, don't keep these dimensions. Default: ``False`` .
6691
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6692
+ axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
6693
+ reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
6694
+ and the value range is [-r,r).
6695
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
6696
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
8378
6697
 
8379
6698
  Returns:
8380
6699
  Tensor, has the same data type as input tensor.
8381
6700
 
8382
- - If `axis` is None, and `keep_dims` is False,
8383
- the output is a 0-D tensor representing the product of all elements in the input tensor.
8384
- - If `axis` is int, set as 1, and `keep_dims` is False,
8385
- the shape of output is :math:`(x_0, x_2, ..., x_R)`.
6701
+ - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
6702
+ the output is a 0-D tensor representing the mean of all elements in the input tensor.
6703
+ - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
6704
+ the shape of output is :math:`(x_0, x_2, ..., x_R)`.
8386
6705
  - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
8387
- the shape of output is :math:`(x_0, x_3, ..., x_R)`.
6706
+ the shape of output is :math:`(x_0, x_3, ..., x_R)`.
6707
+ - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
6708
+ the shape of output is :math:`(x_0, x_3, ..., x_R)`.
8388
6709
 
8389
6710
  Raises:
8390
6711
  TypeError: If `x` is not a Tensor.
8391
- TypeError: If `axis` is not one of the following: int, tuple or list.
6712
+ TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
8392
6713
  TypeError: If `keep_dims` is not a bool.
8393
6714
  ValueError: If `axis` is out of range.
8394
6715
 
@@ -8418,26 +6739,26 @@ def mean(x, axis=None, keep_dims=False):
8418
6739
  >>> output = ops.mean(x, 0, True)
8419
6740
  >>> print(output)
8420
6741
  [[[4. 4. 4. 4. 4. 4.]
8421
- [5. 5. 5. 5. 5. 5.]
8422
- [6. 6. 6. 6. 6. 6.]]]
6742
+ [5. 5. 5. 5. 5. 5.]
6743
+ [6. 6. 6. 6. 6. 6.]]]
8423
6744
  >>> # case 3: Reduces a dimension along the axis 1
8424
6745
  >>> output = ops.mean(x, 1, True)
8425
6746
  >>> print(output)
8426
6747
  [[[2. 2. 2. 2. 2. 2.]]
8427
- [[5. 5. 5. 5. 5. 5.]]
8428
- [[8. 8. 8. 8. 8. 8.]]]
6748
+ [[5. 5. 5. 5. 5. 5.]]
6749
+ [[8. 8. 8. 8. 8. 8.]]]
8429
6750
  >>> # case 4: Reduces a dimension along the axis 2
8430
6751
  >>> output = ops.mean(x, 2, True)
8431
6752
  >>> print(output)
8432
6753
  [[[ 2.]
8433
- [ 2.]
8434
- [ 2.]]
8435
- [[ 4.]
8436
- [ 5.]
8437
- [ 6.]]
8438
- [[ 6.]
8439
- [ 8.]
8440
- [10.]]]
6754
+ [ 2.]
6755
+ [ 2.]]
6756
+ [[ 4.]
6757
+ [ 5.]
6758
+ [ 6.]]
6759
+ [[ 6.]
6760
+ [ 8.]
6761
+ [10.]]]
8441
6762
  """
8442
6763
  if axis is None:
8443
6764
  axis = ()
@@ -8447,30 +6768,35 @@ def mean(x, axis=None, keep_dims=False):
8447
6768
  def prod(input, axis=None, keep_dims=False):
8448
6769
  r"""
8449
6770
  Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
8450
- reduce a dimension of `input` along the axis. Determine whether the dimensions of the output and input are the same
8451
- by controlling `keep_dims`.
6771
+ reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
6772
+ same by controlling `keep_dims`.
6773
+
6774
+ Note:
6775
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
8452
6776
 
8453
6777
  Args:
8454
6778
  input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
8455
- :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
8456
- axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
8457
- Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
8458
- keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
8459
- If false, don't keep these dimensions. Default: ``False`` .
6779
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6780
+ axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
6781
+ dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
6782
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
6783
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
8460
6784
 
8461
6785
  Returns:
8462
6786
  Tensor, has the same data type as input tensor.
8463
6787
 
8464
- - If `axis` is None, and `keep_dims` is False,
6788
+ - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
8465
6789
  the output is a 0-D tensor representing the product of all elements in the input tensor.
8466
- - If `axis` is int, set as 1, and `keep_dims` is False,
6790
+ - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
8467
6791
  the shape of output is :math:`(input_0, input_2, ..., input_R)`.
8468
- - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is False,
6792
+ - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
6793
+ the shape of output is :math:`(input_0, input_3, ..., input_R)`.
6794
+ - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
8469
6795
  the shape of output is :math:`(input_0, input_3, ..., input_R)`.
8470
6796
 
8471
6797
  Raises:
8472
6798
  TypeError: If `input` is not a Tensor.
8473
- TypeError: If `axis` is not one of the following: int, tuple or list.
6799
+ TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
8474
6800
  TypeError: If `keep_dims` is not a bool.
8475
6801
  ValueError: If `axis` is out of range.
8476
6802
 
@@ -9252,7 +7578,7 @@ def _check_logits_shape(logits):
9252
7578
  raise ValueError("For gumbel_softmax, the 0-D input is not supported.")
9253
7579
 
9254
7580
 
9255
- def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
7581
+ def gumbel_softmax(logits, tau=1.0, hard=False, dim=-1):
9256
7582
  r"""
9257
7583
  Returns the samples from the Gumbel-Softmax distribution and optionally discretizes. If `hard = True`, the returned
9258
7584
  samples will be one-hot, otherwise it will be probability distributions that sum to 1 across `dim`.
@@ -9270,9 +7596,9 @@ def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
9270
7596
  Raises:
9271
7597
  TypeError: If `logits` is not a Tensor.
9272
7598
  TypeError: If dtype of `logits` is not one of: float16, float32.
9273
- TypeError: If `tau` is not an float.
7599
+ TypeError: If `tau` is not a float.
9274
7600
  TypeError: If `hard` is not a bool.
9275
- TypeError: If `dim` is not a int.
7601
+ TypeError: If `dim` is not an int.
9276
7602
  ValueError: If `tau` is not positive.
9277
7603
 
9278
7604
  Supported Platforms:
@@ -9301,13 +7627,11 @@ def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
9301
7627
  _check_int_range(dim, -len(logits.shape),
9302
7628
  len(logits.shape), 'dim', "gumbel_softmax")
9303
7629
 
9304
- const_op = _get_cache_prim(P.ScalarToTensor)()
9305
-
9306
7630
  sample_shape = shape_(logits)
9307
- uniform = C.uniform(sample_shape, const_op(
9308
- 0.0, mstype.float32), const_op(1.0, mstype.float32))
7631
+ uniform = C.uniform(sample_shape, scalar_to_tensor_(
7632
+ 0.0, mstype.float32), scalar_to_tensor_(1.0, mstype.float32))
9309
7633
  uniform = cast_(uniform, logits_dtype)
9310
- gumbel = neg_tensor(log_(neg_tensor(log_(uniform))))
7634
+ gumbel = neg(log_(neg(log_(uniform))))
9311
7635
  gumbel = (logits + gumbel) / tau
9312
7636
  y_soft = _get_cache_prim(P.Softmax)(dim)(gumbel)
9313
7637
  if hard:
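The sampling above is the standard Gumbel-softmax trick: `-log(-log(U))` turns uniform noise into Gumbel(0, 1) noise, which is added to the logits before a temperature-scaled softmax. A forward-only NumPy sketch (illustrative; `gumbel_softmax_sketch` is a hypothetical name, and the straight-through gradient handling behind `hard=True` is not shown):

    import numpy as np

    def gumbel_softmax_sketch(logits, tau=1.0, hard=False, rng=None):
        rng = np.random.default_rng() if rng is None else rng
        u = rng.uniform(1e-12, 1.0, size=logits.shape)  # avoid log(0)
        y = (logits - np.log(-np.log(u))) / tau         # logits + Gumbel noise
        y = np.exp(y - y.max(-1, keepdims=True))
        y_soft = y / y.sum(-1, keepdims=True)           # softmax over last dim
        if hard:                                        # discretize to one-hot
            y_hard = np.zeros_like(y_soft)
            np.put_along_axis(y_hard, y_soft.argmax(-1)[..., None], 1.0, -1)
            return y_hard
        return y_soft

    print(gumbel_softmax_sketch(np.array([[1.0, -1.0]]), hard=True))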
@@ -9388,7 +7712,7 @@ def kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None):
9388
7712
  beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)
9389
7713
  ) / np.i0(float(beta))
9390
7714
  if dtype is not None:
9391
- w = P.Cast()(w, dtype)
7715
+ w = cast_(ms.tensor(w), dtype)
9392
7716
  out = Tensor(w[:-1]) if periodic else Tensor(w)
9393
7717
  return out
9394
7718
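The formula above is exactly the I0-based Kaiser window that np.kaiser evaluates, with the same periodic handling as hann_window (illustrative sketch only):

    import numpy as np

    def kaiser_window_sketch(window_length, periodic=True, beta=12.0):
        length = window_length + 1 if periodic else window_length
        # i0(beta * sqrt(1 - ((n - alpha)/alpha)**2)) / i0(beta)
        w = np.kaiser(length, beta)
        return w[:-1] if periodic else w

    print(kaiser_window_sketch(5).round(4))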
 
@@ -9541,18 +7865,6 @@ def _check_value(items, max_size, msg_prefix, shape1, shape2):
9541
7865
  def _check_matmul_shapes(shape1, shape2, prim_name=None):
9542
7866
  """Checks shape1 and shape2 are valid to perform matmul, and returns output shape after broadcasting."""
9543
7867
  msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
9544
-
9545
- def _check(shape1, shape2):
9546
- ndim1, ndim2 = len(shape1), len(shape2)
9547
- if ndim1 < 1 or ndim2 < 1:
9548
- raise ValueError(f"{msg_prefix} dimension of input operands must be at least 1, but got "
9549
- f"the length of shape1: {ndim1}, the length of shape2: {ndim2}.")
9550
- if ndim2 >= 2 and shape1[-1] != shape2[-2]:
9551
- raise ValueError(f"{msg_prefix} shape1[-1] must be equal to shape2[-2] when the length of shape2 "
9552
- f"is greater than or equal to 2, but got shape1[-1]: {shape1[-1]}, "
9553
- f"shape2[-2]: {shape2[-2]}.")
9554
-
9555
- _check(shape1, shape2)
9556
7868
  shape_out = list()
9557
7869
  r_shape1 = shape1[:-2]
9558
7870
  r_shape2 = shape2[:-2]
@@ -9571,18 +7883,6 @@ def _check_need_broadcast(shape1, shape2):
9571
7883
  return shape1[:-2] != shape2[:-2]
9572
7884
 
9573
7885
 
9574
- @_primexpr
9575
- def _check_input_1d(input_shape, param_name, func_name):
9576
- if len(input_shape) != 1:
9577
- raise ValueError(f"{func_name} {param_name} should be 1d, but got shape {input_shape}")
9578
-
9579
-
9580
- @_primexpr
9581
- def _check_input_2d(input_shape, param_name, func_name):
9582
- if len(input_shape) != 2:
9583
- raise ValueError(f"{func_name} {param_name} should be 2d, but got shape {input_shape}")
9584
-
9585
-
9586
7886
  @_primexpr
9587
7887
  def _expand(x, ndim):
9588
7888
  """Expand x to ndim from axis, which can be 0 or -1."""
@@ -9593,8 +7893,7 @@ def _expand(x, ndim):
9593
7893
 
9594
7894
  def _broadcast_to(x, shape_cur, shape_to, ndim_to):
9595
7895
  """Broadcasts x from shape_cur to shape_to."""
9596
- tile_size_op = _get_cache_prim(TileSize)()
9597
- size = tile_size_op(shape_cur, shape_to, ndim_to)
7896
+ size = tile_size_(shape_cur, shape_to, ndim_to)
9598
7897
  F.stop_gradient(size)
9599
7898
  return tile_(x, size)
9600
7899
 
@@ -9612,11 +7911,11 @@ def matmul(input, other):
9612
7911
 
9613
7912
  Args:
9614
7913
  input (Tensor): Input tensor, scalar not allowed.
9615
- The last dimension of `input` must be the same size as the second last dimension of `other`.
9616
- And the shape of input and other could be broadcast.
7914
+ The last dimension of `input` must be the same size as the second last dimension of `other`.
7915
+ And the shape of input and other could be broadcast.
9617
7916
  other (Tensor): Input tensor, scalar not allowed.
9618
- The last dimension of `input` must be the same size as the second last dimension of `other`.
9619
- And the shape of input and other could be broadcast.
7917
+ The last dimension of `input` must be the same size as the second last dimension of `other`.
7918
+ And the shape of input and other could be broadcast.
9620
7919
 
9621
7920
  Returns:
9622
7921
  Tensor or scalar, the matrix product of the inputs. This is a scalar only
@@ -9979,9 +8278,6 @@ def baddbmm(input, batch1, batch2, beta=1, alpha=1):
9979
8278
  bmmop = _get_cache_prim(P.BatchMatMul)(False, False)
9980
8279
  if not (isinstance(input, Tensor) and isinstance(batch1, Tensor) and isinstance(batch2, Tensor)):
9981
8280
  raise TypeError("For Baddbmm, inputs must be all tensors.")
9982
- if len(batch1.shape) != 3 or len(batch2.shape) != 3:
9983
- raise ValueError("For batch1 and batch2 must be 3-D tensors each containing the same number of matrices, "
9984
- f"but got length of batch1:'{len(batch1.shape)}', length of batch2:'{len(batch2.shape)}'.")
9985
8281
  input_dtype = dtype_(input)
9986
8282
  if not (input_dtype == dtype_(batch1) and input_dtype == dtype_(batch2)):
9987
8283
  raise TypeError("For Baddbmm, the inputs should be the same dtype.")
@@ -10173,11 +8469,9 @@ def xdivy(x, y):
10173
8469
  Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
10174
8470
 
10175
8471
  Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
10176
- The inputs must be two tensors or one tensor and one scalar.
10177
8472
  When the inputs are two tensors,
10178
8473
  dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
10179
- When the inputs are one tensor and one scalar,
10180
- the scalar could only be a constant.
8474
+ If one of the inputs is a scalar, the scalar could only be a constant.
10181
8475
 
10182
8476
  .. note::
10183
8477
  When `x` and `y` are both of datatype complex, they should be both complex64 or complex128 at the same time.
@@ -10193,7 +8487,7 @@ def xdivy(x, y):
10193
8487
 
10194
8488
  Raises:
10195
8489
  TypeError: If `x` and `y` are not one of the following: Tensor, Number, bool.
10196
- TypeError: If dtype of `x` and 'y' is not in [float16, float32, float64, complex64, complex128, bool].
8490
+ TypeError: If dtype of `x` and `y` is not in [float16, float32, float64, complex64, complex128, bool].
10197
8491
  ValueError: If `x` could not be broadcast to a tensor with shape of `y`.
10198
8492
  RuntimeError: If data type conversion between Parameter inputs `x` and `y` is required
10199
8493
  but data type conversion of Parameter is not supported.
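A minimal NumPy sketch of the zero-preserving division described above (illustrative only; the inner where() keeps the 0/0 case out of the division itself):

    import numpy as np

    def xdivy_sketch(x, y):
        x = np.asarray(x, dtype=np.float32)
        y = np.asarray(y, dtype=np.float32)
        # return 0 wherever x == 0, even if y == 0 at the same position
        return np.where(x == 0, np.float32(0), x / np.where(x == 0, np.float32(1), y))

    print(xdivy_sketch([2., 4., -1.], [2., 2., 2.]))  # [ 1.   2.  -0.5]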
@@ -10254,37 +8548,6 @@ def log10(input):
10254
8548
  return output
10255
8549
 
10256
8550
 
10257
- def log1p(input):
10258
- r"""
10259
- Returns the natural logarithm of one plus the input tensor element-wise.
10260
-
10261
- .. math::
10262
- out_i = {log_e}(input_i + 1)
10263
-
10264
- Args:
10265
- input (Tensor): The input tensor. The value must be greater than -1.
10266
-
10267
- Returns:
10268
- Tensor, has the same shape as the `input`.
10269
-
10270
- Raises:
10271
- TypeError: If `input` is not a Tensor.
10272
-
10273
- Supported Platforms:
10274
- ``Ascend`` ``GPU`` ``CPU``
10275
-
10276
- Examples:
10277
- >>> import mindspore
10278
- >>> import numpy as np
10279
- >>> from mindspore import Tensor, ops
10280
- >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
10281
- >>> output = ops.log1p(x)
10282
- >>> print(output)
10283
- [0.6931472 1.0986123 1.609438 ]
10284
- """
10285
- return log1p_(input)
10286
-
10287
-
10288
8551
  def kron(input, other):
10289
8552
  """
10290
8553
  Computes the Kronecker product :math:`input ⊗ other`, denoted by ⊗, of `input` and `other`.
@@ -10381,31 +8644,37 @@ def _check_is_tensor(param_name, input, cls_name):
10381
8644
  def all(input, axis=None, keep_dims=False):
10382
8645
  r"""
10383
8646
  Reduces a dimension of `input` by the "logical AND" of all elements in the dimension, by default. And also can
10384
- reduce a dimension of `input` along the axis. Determine whether the dimensions of the output and input are the same
10385
- by controlling `keep_dims`.
8647
+ reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
8648
+ same by controlling `keep_dims`.
8649
+
8650
+ Note:
8651
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
10386
8652
 
10387
8653
  Args:
10388
8654
  input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
10389
8655
  any number of additional dimensions.
10390
- axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce. Suppose the rank of `input` is
10391
- r, axis must be in the range [-rank(input), rank(input)). Default: ``None`` , all dimensions are reduced.
10392
- keep_dims (bool, optional): If true, keep these reduced dimensions and the length is 1.
10393
- If false, don't keep these dimensions. Default : ``False`` .
8656
+ axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
8657
+ Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
8658
+ Default: ``None`` , all dimensions are reduced.
8659
+ keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
8660
+ If ``False`` , don't keep these dimensions. Default : ``False`` .
10394
8661
 
10395
8662
  Returns:
10396
8663
  Tensor, the dtype is bool.
10397
8664
 
10398
- - If `axis` is None, and `keep_dims` is ``False`` ,
8665
+ - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
10399
8666
  the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
10400
8667
  - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
10401
8668
  the shape of output is :math:`(input_1, input_3, ..., input_R)`.
10402
- - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is False,
8669
+ - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
8670
+ the shape of output is :math:`(input_1, input_4, ..., input_R)`.
8671
+ - If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
10403
8672
  the shape of output is :math:`(input_1, input_4, ..., input_R)`.
10404
8673
 
10405
8674
  Raises:
10406
8675
  TypeError: If `keep_dims` is not a bool.
10407
8676
  TypeError: If `input` is not a Tensor.
10408
- TypeError: If `axis` is not one of the following: int, tuple or list.
8677
+ TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
10409
8678
 
10410
8679
  Supported Platforms:
10411
8680
  ``Ascend`` ``GPU`` ``CPU``
@@ -10440,31 +8709,37 @@ def all(input, axis=None, keep_dims=False):
10440
8709
  def any(input, axis=None, keep_dims=False):
10441
8710
  r"""
10442
8711
  Reduces a dimension of `input` by the "logical OR" of all elements in the dimension, by default. And also can
10443
- reduce a dimension of `input` along the axis. Determine whether the dimensions of the output and input are the same
10444
- by controlling `keep_dims`.
8712
+ reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
8713
+ same by controlling `keep_dims`.
8714
+
8715
+ Note:
8716
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
10445
8717
 
10446
8718
  Args:
10447
8719
  input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
10448
8720
  any number of additional dimensions.
10449
- axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce. Suppose the rank of `input` is r,
10450
- axis must be in the range [-rank(input), rank(input)). Default: ``None`` , all dimensions are reduced.
10451
- keep_dims (bool, optional): If true, keep these reduced dimensions and the length is 1.
10452
- If false, don't keep these dimensions. Default : ``False`` .
8721
+ axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
8722
+ Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
8723
+ Default: ``None`` , all dimensions are reduced.
8724
+ keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
8725
+ If ``False`` , don't keep these dimensions. Default : ``False`` .
10453
8726
 
10454
8727
  Returns:
10455
8728
  Tensor, the dtype is bool.
10456
8729
 
10457
- - If `axis` is None, and `keep_dims` is ``False`` ,
8730
+ - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
10458
8731
  the output is a 0-D Tensor representing the "logical OR" of all elements in the input Tensor.
10459
8732
  - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
10460
8733
  the shape of output is :math:`(input_1, input_3, ..., input_R)`.
10461
8734
  - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
10462
8735
  the shape of output is :math:`(input_1, input_4, ..., input_R)`.
8736
+ - If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
8737
+ the shape of output is :math:`(input_1, input_4, ..., input_R)`.
10463
8738
 
10464
8739
  Raises:
10465
8740
  TypeError: If `keep_dims` is not a bool.
10466
8741
  TypeError: If `input` is not a Tensor.
10467
- TypeError: If `axis` is not one of the following: int, tuple or list.
8742
+ TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
10468
8743
 
10469
8744
  Supported Platforms:
10470
8745
  ``Ascend`` ``GPU`` ``CPU``
@@ -10488,11 +8763,8 @@ def any(input, axis=None, keep_dims=False):
10488
8763
  >>> print(output)
10489
8764
  [ True True]
10490
8765
  """
10491
- _check_is_tensor("input", input, "any")
10492
8766
  if axis is None:
10493
8767
  axis = ()
10494
- if input.dtype != mstype.bool_:
10495
- input = cast_(input, mstype.bool_)
10496
8768
  return _get_cache_prim(P.ReduceAny)(keep_dims)(input, axis)
10497
8769
 
10498
8770
 
@@ -10599,21 +8871,21 @@ def iou(anchor_boxes, gt_boxes, mode='iou'):
10599
8871
  and width are scaled by 0.2 internally.
10600
8872
 
10601
8873
  Args:
10602
- anchor_boxes (Tensor): Anchor boxes, tensor of shape :math:`(N, 4)` . "N" indicates the number of anchor boxes,
10603
- and the value "4" refers to "x0", "y0", "x1", and "y1".
10604
- Data type must be either float16, float32 or float64.
10605
- gt_boxes (Tensor): Ground truth boxes, tensor of shape :math:`(M, 4)` . "M" indicates the number of ground
10606
- truth boxes, and the value "4" refers to "x0", "y0", "x1", and "y1".
10607
- Data type must be either float16, float32 or float64.
8874
+ anchor_boxes (Tensor): Anchor boxes, tensor of shape :math:`(N, 4)` . :math:`N` indicates the number of
8875
+ anchor boxes, and the value :math:`4` refers to four boundary coordinates of the predicted area
8876
+ "x0", "y0", "x1", and "y1". Data type must be either float16, float32 or float64.
8877
+ gt_boxes (Tensor): Ground truth boxes, tensor of shape :math:`(M, 4)` . :math:`M` indicates the number
8878
+ of ground truth boxes, and the value :math:`4` refers to four boundary coordinates of the truth
8879
+ area "x0", "y0", "x1", and "y1". Data type must be either float16, float32 or float64.
10608
8880
  mode (string): The mode is used to specify the calculation method,
10609
8881
  now supporting 'iou' (intersection over union) or 'iof' (intersection over foreground) mode.
10610
8882
  Default: ``'iou'`` .
10611
8883
 
10612
8884
  Returns:
10613
- Tensor, the 'iou' values, tensor of shape :math:`(M, N)` , with the same data type as `anchor_boxes`.
8885
+ Tensor, the IOU/IOF values, tensor of shape :math:`(M, N)` , with the same data type as `anchor_boxes`.
10614
8886
 
10615
8887
  Raises:
10616
- KeyError: When `mode` is not 'iou' or 'iof'.
8888
+ KeyError: When `mode` is not ``'iou'`` or ``'iof'``.
10617
8889
 
10618
8890
  Supported Platforms:
10619
8891
  ``Ascend`` ``GPU`` ``CPU``
@@ -10659,8 +8931,8 @@ def _check_dim_in_range(dim, ndim):
10659
8931
 
10660
8932
 
10661
8933
  def dotrapezoid(y, dx, dim):
10662
- y_left = select_(y, dim, 0)
10663
- y_right = select_(y, dim, -1)
8934
+ y_left = _select(y, dim, 0)
8935
+ y_right = _select(y, dim, -1)
10664
8936
  y_sum = y.sum(dim)
10665
8937
  return (y_sum - (y_left + y_right) * 0.5) * dx
10666
8938
 
@@ -10670,10 +8942,10 @@ def dotrapezoid_tensor(y, dx, dim):
10670
8942
  y_start_dim_left = tuple(y_start_dim_left)
10671
8943
  y_start_dim_right = [0 for _ in range(y.ndim - dim - 1)]
10672
8944
  y_start_dim_right = tuple(y_start_dim_right)
10673
- y_slice_size = _tuple_setitem(P.Shape()(y), dim, P.Shape()(y)[dim] - 1)
10674
- y_slice_left = P.Slice()(y, y_start_dim_left + (0,) + y_start_dim_right, y_slice_size)
10675
- y_slice_right = P.Slice()(y, y_start_dim_left + (1,) + y_start_dim_right, y_slice_size)
10676
- return (P.Add()(y_slice_left, y_slice_right) * dx).sum(dim) / 2.
8945
+ y_slice_size = _tuple_setitem(shape_(y), dim, shape_(y)[dim] - 1)
8946
+ y_slice_left = slice_(y, y_start_dim_left + (0,) + y_start_dim_right, y_slice_size)
8947
+ y_slice_right = slice_(y, y_start_dim_left + (1,) + y_start_dim_right, y_slice_size)
8948
+ return (tensor_add(y_slice_left, y_slice_right) * dx).sum(dim) / 2.
10677
8949
 
10678
8950
 
10679
8951
  def add_padding_to_shape(curr_shape, target_n_dim):
@@ -10706,8 +8978,8 @@ def trapezoid_tensor(y, x, dim):
10706
8978
  x_start_dim_right = [0 for _ in range(x.ndim - dim - 1)]
10707
8979
  x_start_dim_right = tuple(x_start_dim_right)
10708
8980
  x_slice_size = _tuple_setitem(x.shape, dim, x.shape[dim] - 1)
10709
- x_left = P.Slice()(x, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
10710
- x_right = P.Slice()(x, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
8981
+ x_left = slice_(x, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
8982
+ x_right = slice_(x, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
10711
8983
  dx = x_right - x_left
10712
8984
  new_sizes = add_padding_to_shape(dx.shape, y.ndim)
10713
8985
  dx = dx.view(tuple(new_sizes))
@@ -10725,8 +8997,8 @@ def trapezoid_tensor(y, x, dim):
10725
8997
  x_start_dim_right = [0 for _ in range(x_viewed.ndim - dim - 1)]
10726
8998
  x_start_dim_right = tuple(x_start_dim_right)
10727
8999
  x_slice_size = _tuple_setitem(x_viewed.shape, dim, x_viewed.shape[dim] - 1)
10728
- x_left = P.Slice()(x_viewed, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
10729
- x_right = P.Slice()(x_viewed, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
9000
+ x_left = slice_(x_viewed, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
9001
+ x_right = slice_(x_viewed, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
10730
9002
  dx = x_right - x_left
10731
9003
  return dotrapezoid_tensor(y, dx, dim)
10732
9004
 
@@ -10745,12 +9017,12 @@ def get(ts, depth, dim, index, r):
10745
9017
  return get(item, depth + 1, dim, index, r)
10746
9018
 
10747
9019
 
10748
- def select_(feat, dim, index):
9020
+ def _select(feat, dim, index):
10749
9021
  select_shape = feat.shape
10750
9022
  select_shape = list(select_shape)
10751
9023
  select_shape[dim] = 1
10752
9024
  new_shape = feat.shape[:dim] + feat.shape[dim + 1:]
10753
- indexes = P.Ones()(tuple(select_shape), mstype.int32) * (index)
9025
+ indexes = ones_(tuple(select_shape), mstype.int32) * (index)
10754
9026
  return feat.gather_elements(dim, indexes).reshape(new_shape)
10755
9027
 
10756
9028
 
@@ -10809,14 +9081,14 @@ def trapz(y, x=None, *, dx=1.0, dim=-1):
10809
9081
  if not isinstance(dim, int):
10810
9082
  raise TypeError(f"For `trapz`, the input `dim` must be int, but get {type(dim)}.")
10811
9083
  if not _check_is_float(y.dtype):
10812
- y = P.Cast()(y, mstype.float32)
9084
+ y = cast_(y, mstype.float32)
10813
9085
  _check_dim_in_range(dim, y.ndim)
10814
9086
  dim = dim + y.ndim if dim < 0 else dim
10815
9087
  if x is None:
10816
9088
  return trapezoid(y, dx, dim)
10817
9089
  if not isinstance(x, (Tensor, Tensor_)):
10818
9090
  raise TypeError(f"For `trapz`, the input `x` must be Tensor, but get {type(x)}.")
10819
- x = P.Cast()(x, mstype.float32)
9091
+ x = cast_(x, mstype.float32)
10820
9092
  return trapezoid_tensor(y, x, dim)
10821
9093
 
10822
9094
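The helpers above implement the trapezoid rule, i.e. the sum over i of 0.5 * (y[i] + y[i+1]) * (x[i+1] - x[i]); NumPy's reference implementation gives the same result (illustrative only):

    import numpy as np

    y = np.array([1., 2., 3.], dtype=np.float32)
    x = np.array([4., 6., 9.], dtype=np.float32)
    # 0.5*(1+2)*2 + 0.5*(2+3)*3 = 3 + 7.5
    print(np.trapz(y, x))  # 10.5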
 
@@ -10979,42 +9251,6 @@ def cholesky_solve(input, input2, upper=False):
10979
9251
  return _get_cache_prim(P.CholeskySolve)(upper)(input, input2)
10980
9252
 
10981
9253
 
10982
- def conj(input):
10983
- r"""
10984
- Returns a tensor of complex numbers that are the complex conjugate of each element in input.
10985
- The complex numbers in input must be of the form a + bj, where a is the real part and b is the imaginary part.
10986
-
10987
- The complex conjugate returned by this operation is of the form a - bj.
10988
-
10989
- If `input` is real, it is returned unchanged.
10990
-
10991
- Args:
10992
- input (Tensor): The input tensor to compute to. Must have numeric type.
10993
-
10994
- Returns:
10995
- Tensor, has the same dtype as the `input`.
10996
-
10997
- Raises:
10998
- TypeError: If the dtype of `input` is not a numeric type.
10999
- TypeError: If the `input` is not a Tensor.
11000
-
11001
- Supported Platforms:
11002
- ``Ascend`` ``GPU`` ``CPU``
11003
-
11004
- Examples:
11005
- >>> import mindspore
11006
- >>> import numpy as np
11007
- >>> from mindspore import Tensor, ops
11008
- >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
11009
- >>> output = ops.conj(x)
11010
- >>> print(output)
11011
- (1.3-0.4j)
11012
- """
11013
- if not isinstance(input, (Tensor, Tensor_)):
11014
- raise TypeError("For conj op, input must be Tensor.")
11015
- return conj_(input)
11016
-
11017
-
11018
9254
  def cross(input, other, dim=None):
11019
9255
  r"""
11020
9256
  Computes the cross product of `input` and `other` in dimension `dim`.
@@ -11184,91 +9420,6 @@ def einsum(equation, *operands):
11184
9420
  return _get_cache_prim(P.Einsum)(equation)(operands)
11185
9421
 
11186
9422
 
11187
- def erfinv(input):
11188
- r"""
11189
- Returns the result of the inverse error function with `input`, which is defined in the
11190
- range `(-1, 1)` as:
11191
-
11192
- .. math::
11193
-
11194
- erfinv(erf(x)) = x
11195
-
11196
- where :math:`x` is the `input`.
11197
-
11198
- Args:
11199
- input (Tensor): The input tensor. Supported dtypes:
11200
-
11201
- - Ascend: float16, float32.
11202
- - GPU/CPU: float16, float32, float64.
11203
-
11204
- Returns:
11205
- Tensor, has the same shape and dtype as `input`.
11206
-
11207
- Raises:
11208
- TypeError: If dtype of `input` is not float16, float32 or float64.
11209
-
11210
- Supported Platforms:
11211
- ``Ascend`` ``GPU`` ``CPU``
11212
-
11213
- Examples:
11214
- >>> import mindspore
11215
- >>> import numpy as np
11216
- >>> from mindspore import Tensor, ops
11217
- >>> x = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
11218
- >>> output = ops.erfinv(x)
11219
- >>> print(output)
11220
- [ 0. 0.47695306 -1.1630805 ]
11221
- """
11222
- return erfinv_(input)
11223
-
11224
-
11225
- def less_equal(input, other):
11226
- r"""
11227
- Computes the boolean value of :math:`input <= other` element-wise.
11228
-
11229
- .. math::
11230
- out_{i} =\begin{cases}
11231
- & \text{True, if } input_{i}<=other_{i} \\
11232
- & \text{False, if } input_{i}>other_{i}
11233
- \end{cases}
11234
-
11235
- .. note::
11236
- - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
11237
- consistent.
11238
- - The inputs must be two tensors or one tensor and one scalar.
11239
- - When the inputs are one tensor and one scalar, the scalar could only be a constant.
11240
-
11241
- Args:
11242
- input (Union[Tensor, Number, bool]): The first input is a Number or
11243
- a bool or a tensor whose data type is
11244
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
11245
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
11246
- other (Union[Tensor, Number, bool]): The second input, when the first input is a Tensor,
11247
- the second input should be a Number or bool value, or a Tensor whose data type is number or bool\_.
11248
- When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
11249
-
11250
- Returns:
11251
- Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
11252
-
11253
- Raises:
11254
- TypeError: If neither `input` nor `other` is a Tensor.
11255
-
11256
- Supported Platforms:
11257
- ``Ascend`` ``GPU`` ``CPU``
11258
-
11259
- Examples:
11260
- >>> import mindspore
11261
- >>> import numpy as np
11262
- >>> from mindspore import Tensor, ops
11263
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
11264
- >>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
11265
- >>> output = ops.less_equal(x, other)
11266
- >>> print(output)
11267
- [ True False True]
11268
- """
11269
- return tensor_le(input, other)
11270
-
11271
-
11272
9423
  def cumprod(input, dim, dtype=None):
11273
9424
  r"""
11274
9425
  Computes the cumulative product of the `input` tensor along dimension `dim`.
@@ -11310,70 +9461,6 @@ def cumprod(input, dim, dtype=None):
11310
9461
  return output
11311
9462
 
11312
9463
 
11313
-def greater(input, other):
-    r"""
-    Computes the boolean value of :math:`input > other` element-wise.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.greater(x, y)
-        >>> print(output)
-        [False  True False]
-    """
-    return tensor_gt(input, other)
-
-
-def greater_equal(input, other):
-    r"""
-    Computes the boolean value of :math:`input \geq other` element-wise.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.greater_equal(x, y)
-        >>> print(output)
-        [ True  True False]
-    """
-    return tensor_ge(input, other)
-
-
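Both removed wrappers were thin delegations to the comparison primitives (`tensor_gt`, `tensor_ge`). The same elementwise comparisons remain reachable through the Tensor operator overloads; a minimal sketch, assuming the `>` and `>=` overloads dispatch to the same primitives:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor
    >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
    >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
    >>> print(x > y)
    [False  True False]
    >>> print(x >= y)
    [ True  True False]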
 def igamma(input, other):
     r"""
     Calculates lower regularized incomplete Gamma function.
@@ -11421,8 +9508,7 @@ def igamma(input, other):
         >>> print(output)
         [0.593994 0.35276785 0.21486944 0.13337152]
     """
-    igamma_op = _get_cache_prim(Igamma)()
-    return igamma_op(input, other)
+    return igamma_(input, other)


 def igammac(input, other):
@@ -11472,8 +9558,7 @@ def igammac(input, other):
         >>> print (output)
         [0.40600586 0.6472318 0.7851304 0.8666283]
     """
-    igammac_op = _get_cache_prim(Igammac)()
-    return igammac_op(input, other)
+    return igammac_(input, other)


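Both functions now call module-level cached primitive instances (`igamma_`, `igammac_`) instead of rebuilding the primitive on every call; results are unchanged. Since `igamma` is the lower regularized incomplete Gamma function P(a, x) and `igammac` the upper one Q(a, x), the two sum to 1, which gives a quick sanity check. The inputs below are reconstructed from the printed outputs above, so treat them as an assumption:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]), mindspore.float32)
    >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]), mindspore.float32)
    >>> # P(a, x) + Q(a, x) == 1 by definition of the regularized pair.
    >>> print(np.allclose((ops.igamma(a, x) + ops.igammac(a, x)).asnumpy(), 1.0))
    True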
 def lgamma(input):
@@ -11700,7 +9785,7 @@ def logical_xor(input, other):

     .. math::

-        out_{i} = x_{i} \oplus y_{i}
+        out_{i} = input_{i} \oplus other_{i}

     Args:
         input (Tensor): The first input is a tensor whose data type can be implicitly converted to bool.
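The formula change only renames the operands to match the parameter names `input` and `other`; the computation is untouched. A minimal usage sketch:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> input = Tensor(np.array([True, False, True]), mindspore.bool_)
    >>> other = Tensor(np.array([True, True, False]), mindspore.bool_)
    >>> # XOR is True exactly where the two inputs differ.
    >>> print(ops.logical_xor(input, other))
    [False  True  True]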
@@ -11843,7 +9928,7 @@ def nansum(input, axis=None, keepdims=False, *, dtype=None):
     if input.dtype == mstype.bool_:
         input = input.astype(mstype.int64)
     is_nan = isnan_(input)
-    input = ops.masked_fill(input, is_nan, 0)
+    input = ops.masked_fill(input, is_nan, ops.cast(0, input.dtype))
     input = _get_cache_prim(P.ReduceSum)(keepdims)(input, axis)
     if dtype is not None and input.dtype != dtype:
         input = input.astype(dtype)
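The fix casts the fill value to the input dtype before `masked_fill`, presumably so reduced-precision inputs such as float16 do not hit a dtype mismatch with a plain Python `0`. The user-visible behavior, treating NaN as zero, is unchanged; a sketch:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.array([1.0, np.nan, 2.0]), mindspore.float32)
    >>> # The NaN entry is masked to 0 before the reduction.
    >>> print(ops.nansum(x))
    3.0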
@@ -11937,7 +10022,7 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
     diag_plane = (dsize, dsize)
     output_shape_trans = batch_shape + diag_plane
     output = zeros(output_shape_trans, input.dtype)
-    k = P.Cast()(offset, mstype.int32)
+    k = cast_(offset, mstype.int32)
     output = matrix_set_diag_op(output, input, k)
     dim = 0
     perm = ()
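`cast_` is the functional counterpart of `P.Cast()`, so `offset` is still converted to int32 before `matrix_set_diag_op`. For orientation, `diag_embed` embeds the input values along a diagonal of a higher-rank output; a minimal sketch:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
    >>> # With the default offset=0, the vector becomes the main diagonal of a 3x3 matrix.
    >>> print(ops.diag_embed(x))
    [[1. 0. 0.]
     [0. 2. 0.]
     [0. 0. 3.]]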
@@ -11956,25 +10041,28 @@ def sum(input, dim=None, keepdim=False, *, dtype=None):
     """
     Calculate sum of Tensor elements over a given dim.

+    Note:
+        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+
     Args:
         input (Tensor): The input tensor.
-        dim (Union[None, int, tuple(int), list(int)]): Dimensions along which a sum is performed.
-            If None, sum all the elements of the input tensor.
+        dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
+            If ``None`` , sum all the elements of the input tensor.
             If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
-            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None``.
+            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
         keepdim (bool): Whether the output tensor has dim retained or not.
-            If True, keep these reduced dimensions and the length is 1.
-            If False, don't keep these dimensions. Default: ``False``.
+            If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .

     Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None``.
+        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .

     Returns:
-        A Tensor, sum of elements over a given dim in `input`.
+        A Tensor, sum of elements over a given `dim` in `input`.

     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `dim` is not an int, tulpe(int), list(int) or None.
+        TypeError: If `dim` is not an int, tuple(int), list(int), Tensor or None.
         ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
         TypeError: If `keepdim` is not a bool.

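The signature now also tolerates a Tensor-valued `dim` for backward compatibility, although the new Note discourages it. A minimal sketch of the documented usage with the recommended argument types:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.ones((2, 3)), mindspore.float32)
    >>> # Reduce over dim 1 and keep it as a length-1 axis.
    >>> print(ops.sum(x, dim=1, keepdim=True))
    [[3.]
     [3.]]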
@@ -12229,6 +10317,8 @@ def _canonicalize_fft_shape_and_dim(input, shape, dim):
 def as_strided(x, shape=None, strides=None):
     n = np.dtype(mstype.dtype_to_nptype(x.dtype)).itemsize
     strides = tuple(np.array(strides) * n)
+    if x.dtype == mstype.bfloat16:
+        return Tensor(np.lib.stride_tricks.as_strided(x.float().asnumpy(), shape, strides, False, True), dtype=x.dtype)
     return Tensor(np.lib.stride_tricks.as_strided(x.asnumpy(), shape, strides, False, True), dtype=x.dtype)


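The new branch exists because NumPy has no native bfloat16 dtype: the tensor is upcast with `x.float()` so `asnumpy()` can hand NumPy a real array, and the result is cast back to bfloat16. For reference, the underlying helper builds views purely from byte strides (plain NumPy, independent of MindSpore):

    >>> import numpy as np
    >>> a = np.arange(6, dtype=np.float32)  # itemsize is 4 bytes
    >>> # Overlapping 2x3 sliding windows: each row starts one element later.
    >>> print(np.lib.stride_tricks.as_strided(a, shape=(2, 3), strides=(4, 4), writeable=False))
    [[0. 1. 2.]
     [1. 2. 3.]]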
@@ -12250,13 +10340,13 @@ def _resize_input(input, input_dim, ret_dim, ret_shape, input_sizes):
         if input_sizes[value] > ret_shape[i]:
             start_index = [0] * input_dim
             input_sizes[value] = ret_shape[i]
-            input = P.Slice()(input, start_index, input_sizes)
+            input = slice_(input, start_index, input_sizes)

     if must_copy:
         paddings = np.reshape(paddings, (input_dim, 2)).tolist()
         paddings.reverse()
         paddings = (*paddings,)
-        input = P.Pad(paddings)(input)
+        input = _get_cache_prim(P.Pad)(paddings)(input)

     return input

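Both replacements avoid constructing a fresh primitive on every call: `slice_` is a module-level `P.Slice()` instance, and `_get_cache_prim` returns a cached `P.Pad` for each distinct `paddings` value. A simplified, hypothetical sketch of the caching idea, not MindSpore's actual implementation:

    # Hypothetical re-implementation for illustration only.
    _prim_cache = {}

    def get_cache_prim(prim_cls):
        def build(*init_args):
            # One shared instance per (class, init args); init args must be
            # hashable, which holds here since paddings is a tuple.
            key = (prim_cls, init_args)
            if key not in _prim_cache:
                _prim_cache[key] = prim_cls(*init_args)
            return _prim_cache[key]
        return build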
@@ -12762,7 +10852,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):

     Args:
         x (Tensor): Input data is used to count non-zero numbers. With shape
-            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+            :math:`(*)` where :math:`*` means any number of additional dimensions.
         axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
             Default: ``()`` , reduce all dimensions.
         keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
@@ -12821,7 +10911,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
     reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims)

     tensor_0 = ops.zeros(x.shape, x.dtype)
-    nonzero_bool = not_equal_(x, tensor_0)
+    nonzero_bool = not_equal(x, tensor_0)
     # ReduceSum only support float16 or float32 tensor.
     nonzero_val = cast_(nonzero_bool, mstype.float32)
     nonzero_num = cast_(reduce_sum(nonzero_val, axis), dtype)
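`not_equal` (the functional API) replaces the raw primitive handle `not_equal_`; the counting pipeline (compare against zeros, cast to float32 for `ReduceSum`, then cast to the requested `dtype`) is unchanged. A minimal sketch:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.array([[0, 1, 0], [2, 0, 3]]), mindspore.int32)
    >>> # Three entries are non-zero; with the default axis=() all dimensions are reduced.
    >>> print(ops.count_nonzero(x))
    3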
@@ -13048,7 +11138,8 @@ def vecdot(x, y, *, axis=-1):
     Calculates the dot product of two batches of vectors across the specified dimension.

     The formula of calculation is as follows.
-    :math:`\bar{x_{i}}` represents the conjugate for complex vectors, and it is the raw value for real vectors.
+    :math:`\bar{x_{i}}` represents the conjugate for complex vectors,
+    and :math:`\bar{x_{i}}` is the raw value for real vectors.

     .. math::

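In other words, conjugation only matters on the complex path; for real inputs `vecdot` reduces to a plain sum of elementwise products over `axis`. A minimal sketch:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.array([[1.0, 2.0, 3.0]]), mindspore.float32)
    >>> y = Tensor(np.array([[4.0, 5.0, 6.0]]), mindspore.float32)
    >>> # Real vectors: conj(x) == x, so this is sum(x * y) along the last axis.
    >>> print(ops.vecdot(x, y, axis=-1))
    [32.]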
@@ -13358,7 +11449,6 @@ def _get_output_shape(batch_size, x1_ret, x2_ret):

 def batch_dot(x1, x2, axes=None):
     """
-    Computation of batch dot product between samples in two tensors containing batch dims.
+    Computation of batch dot product between samples in two tensors containing batch dims, i.e. the first
+    dimension of `x1` and `x2` is the batch size.

     .. math::
         output = x1[batch, :] * x2[batch, :]
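With the clarified wording, the batch dimension is always the leading one. A shape-level sketch in the style of the documented examples (the all-ones inputs are illustrative):

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x1 = Tensor(np.ones(shape=[2, 2, 3]), mindspore.float32)
    >>> x2 = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)
    >>> # Contract x1's last axis with x2's second-to-last axis within each of the 2 batches.
    >>> output = ops.batch_dot(x1, x2, (-1, -2))
    >>> print(output.shape)
    (2, 2, 2)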
@@ -13492,7 +11584,6 @@ __all__ = [
     'arctan',
     'arctan2',
     'bincount',
-    'neg_tensor',
     'neg',
     'negative',
     'tensor_lt',
@@ -13750,6 +11841,6 @@ __all__ = [
     'vecdot',
     'dot',
     'batch_dot',
-    'eps'
+    'eps',
 ]
 __all__.sort()