mindspore 2.2.11-cp39-cp39-win_amd64.whl → 2.3.0-cp39-cp39-win_amd64.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release: this version of mindspore has been flagged as potentially problematic.
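Given that flag, it is worth sanity-checking any upgrade. A minimal sketch using `mindspore.run_check()`, MindSpore's built-in installation verifier (both `version.py` and `run_check/run_check.py` are touched in this release, per the file list below):

```python
# Post-upgrade sanity check: run after installing the 2.3.0 wheel.
import mindspore as ms

# version.py changed in this release; confirm the expected wheel is active.
print(ms.__version__)  # expect '2.3.0'

# run_check() executes a small computation and reports whether the
# installation works correctly on this machine.
ms.run_check()
```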

Files changed (1151)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +7 -5
  3. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +76 -18
  7. mindspore/_extends/builtin_operations.py +2 -1
  8. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  9. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  10. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  11. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  12. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  13. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  14. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  15. mindspore/_extends/parse/__init__.py +18 -14
  16. mindspore/_extends/parse/compile_config.py +258 -0
  17. mindspore/_extends/parse/namespace.py +2 -2
  18. mindspore/_extends/parse/parser.py +174 -62
  19. mindspore/_extends/parse/resources.py +45 -14
  20. mindspore/_extends/parse/standard_method.py +142 -240
  21. mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
  22. mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
  23. mindspore/_extends/remote/kernel_build_server.py +2 -0
  24. mindspore/_profiler.py +30 -0
  25. mindspore/amp.py +51 -24
  26. mindspore/avcodec-59.dll +0 -0
  27. mindspore/avdevice-59.dll +0 -0
  28. mindspore/avfilter-8.dll +0 -0
  29. mindspore/avformat-59.dll +0 -0
  30. mindspore/avutil-57.dll +0 -0
  31. mindspore/boost/adasum.py +1 -1
  32. mindspore/boost/base.py +1 -1
  33. mindspore/boost/boost_cell_wrapper.py +2 -2
  34. mindspore/boost/grad_freeze.py +2 -2
  35. mindspore/boost/group_loss_scale_manager.py +1 -1
  36. mindspore/boost/less_batch_normalization.py +9 -6
  37. mindspore/common/__init__.py +15 -4
  38. mindspore/common/_jit_fallback_utils.py +2 -3
  39. mindspore/common/_register_for_adapter.py +7 -0
  40. mindspore/common/_register_for_recompute.py +48 -0
  41. mindspore/common/_register_for_tensor.py +8 -9
  42. mindspore/common/_stub_tensor.py +7 -1
  43. mindspore/common/_utils.py +5 -17
  44. mindspore/common/api.py +411 -106
  45. mindspore/common/auto_dynamic_shape.py +27 -14
  46. mindspore/common/dtype.py +17 -10
  47. mindspore/common/dump.py +6 -8
  48. mindspore/common/file_system.py +48 -0
  49. mindspore/common/generator.py +260 -0
  50. mindspore/common/hook_handle.py +51 -4
  51. mindspore/common/initializer.py +1 -1
  52. mindspore/common/jit_config.py +34 -14
  53. mindspore/common/lazy_inline.py +72 -19
  54. mindspore/common/mindir_util.py +12 -2
  55. mindspore/common/mutable.py +79 -14
  56. mindspore/common/no_inline.py +54 -0
  57. mindspore/common/np_dtype.py +25 -0
  58. mindspore/common/parameter.py +30 -11
  59. mindspore/common/recompute.py +262 -0
  60. mindspore/common/seed.py +9 -9
  61. mindspore/common/sparse_tensor.py +272 -24
  62. mindspore/common/symbol.py +122 -0
  63. mindspore/common/tensor.py +468 -496
  64. mindspore/communication/__init__.py +6 -11
  65. mindspore/communication/_comm_helper.py +5 -0
  66. mindspore/communication/comm_func.py +1140 -0
  67. mindspore/communication/management.py +118 -102
  68. mindspore/config/op_info.config +22 -54
  69. mindspore/context.py +378 -65
  70. mindspore/dataset/__init__.py +5 -5
  71. mindspore/dataset/audio/__init__.py +6 -6
  72. mindspore/dataset/audio/transforms.py +711 -158
  73. mindspore/dataset/callback/ds_callback.py +2 -2
  74. mindspore/dataset/engine/cache_client.py +2 -2
  75. mindspore/dataset/engine/datasets.py +163 -83
  76. mindspore/dataset/engine/datasets_audio.py +14 -14
  77. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  78. mindspore/dataset/engine/datasets_text.py +38 -38
  79. mindspore/dataset/engine/datasets_user_defined.py +78 -59
  80. mindspore/dataset/engine/datasets_vision.py +77 -73
  81. mindspore/dataset/engine/offload.py +5 -7
  82. mindspore/dataset/engine/queue.py +56 -38
  83. mindspore/dataset/engine/validators.py +11 -5
  84. mindspore/dataset/text/__init__.py +3 -3
  85. mindspore/dataset/text/transforms.py +408 -121
  86. mindspore/dataset/text/utils.py +9 -9
  87. mindspore/dataset/transforms/__init__.py +1 -1
  88. mindspore/dataset/transforms/transforms.py +261 -76
  89. mindspore/dataset/utils/browse_dataset.py +9 -9
  90. mindspore/dataset/vision/__init__.py +8 -8
  91. mindspore/dataset/vision/c_transforms.py +10 -10
  92. mindspore/dataset/vision/py_transforms_util.py +3 -3
  93. mindspore/dataset/vision/transforms.py +2844 -549
  94. mindspore/dataset/vision/utils.py +161 -10
  95. mindspore/dataset/vision/validators.py +14 -2
  96. mindspore/dnnl.dll +0 -0
  97. mindspore/experimental/optim/__init__.py +12 -2
  98. mindspore/experimental/optim/adadelta.py +161 -0
  99. mindspore/experimental/optim/adagrad.py +168 -0
  100. mindspore/experimental/optim/adam.py +35 -34
  101. mindspore/experimental/optim/adamax.py +170 -0
  102. mindspore/experimental/optim/adamw.py +40 -16
  103. mindspore/experimental/optim/asgd.py +153 -0
  104. mindspore/experimental/optim/lr_scheduler.py +71 -127
  105. mindspore/experimental/optim/nadam.py +157 -0
  106. mindspore/experimental/optim/optimizer.py +15 -8
  107. mindspore/experimental/optim/radam.py +194 -0
  108. mindspore/experimental/optim/rmsprop.py +154 -0
  109. mindspore/experimental/optim/rprop.py +164 -0
  110. mindspore/experimental/optim/sgd.py +28 -19
  111. mindspore/hal/__init__.py +40 -0
  112. mindspore/hal/_ascend.py +57 -0
  113. mindspore/hal/_base.py +57 -0
  114. mindspore/hal/_cpu.py +56 -0
  115. mindspore/hal/_gpu.py +57 -0
  116. mindspore/hal/device.py +356 -0
  117. mindspore/hal/event.py +179 -0
  118. mindspore/hal/memory.py +326 -0
  119. mindspore/hal/stream.py +339 -0
  120. mindspore/include/api/data_type.h +2 -2
  121. mindspore/include/api/dual_abi_helper.h +16 -3
  122. mindspore/include/api/model.h +4 -3
  123. mindspore/include/api/status.h +14 -0
  124. mindspore/include/c_api/model_c.h +173 -0
  125. mindspore/include/c_api/ms/base/types.h +1 -0
  126. mindspore/include/c_api/types_c.h +19 -0
  127. mindspore/include/dataset/execute.h +1 -3
  128. mindspore/include/dataset/vision.h +54 -2
  129. mindspore/jpeg62.dll +0 -0
  130. mindspore/log.py +2 -2
  131. mindspore/mindrecord/__init__.py +5 -1
  132. mindspore/mindrecord/config.py +809 -0
  133. mindspore/mindrecord/filereader.py +25 -0
  134. mindspore/mindrecord/filewriter.py +76 -58
  135. mindspore/mindrecord/mindpage.py +40 -6
  136. mindspore/mindrecord/shardutils.py +3 -2
  137. mindspore/mindrecord/shardwriter.py +7 -0
  138. mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
  139. mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
  140. mindspore/mindrecord/tools/csv_to_mr.py +7 -17
  141. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  142. mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
  143. mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
  144. mindspore/mindspore_backend.dll +0 -0
  145. mindspore/mindspore_common.dll +0 -0
  146. mindspore/mindspore_core.dll +0 -0
  147. mindspore/mindspore_glog.dll +0 -0
  148. mindspore/mindspore_np_dtype.dll +0 -0
  149. mindspore/mindspore_shared_lib.dll +0 -0
  150. mindspore/mint/__init__.py +1137 -0
  151. mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
  152. mindspore/mint/nn/__init__.py +512 -0
  153. mindspore/mint/nn/functional.py +573 -0
  154. mindspore/mint/optim/__init__.py +24 -0
  155. mindspore/mint/optim/adamw.py +185 -0
  156. mindspore/multiprocessing/__init__.py +72 -0
  157. mindspore/nn/__init__.py +1 -0
  158. mindspore/nn/cell.py +213 -257
  159. mindspore/nn/dynamic_lr.py +2 -2
  160. mindspore/nn/extend/__init__.py +29 -0
  161. mindspore/nn/extend/basic.py +140 -0
  162. mindspore/nn/extend/embedding.py +143 -0
  163. mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
  164. mindspore/nn/extend/layer/normalization.py +109 -0
  165. mindspore/nn/extend/pooling.py +117 -0
  166. mindspore/nn/layer/activation.py +84 -94
  167. mindspore/nn/layer/basic.py +177 -82
  168. mindspore/nn/layer/channel_shuffle.py +3 -16
  169. mindspore/nn/layer/container.py +3 -3
  170. mindspore/nn/layer/conv.py +75 -66
  171. mindspore/nn/layer/embedding.py +103 -45
  172. mindspore/nn/layer/embedding_service.py +531 -0
  173. mindspore/nn/layer/embedding_service_layer.py +393 -0
  174. mindspore/nn/layer/image.py +4 -7
  175. mindspore/nn/layer/math.py +1 -1
  176. mindspore/nn/layer/normalization.py +52 -66
  177. mindspore/nn/layer/padding.py +30 -39
  178. mindspore/nn/layer/pooling.py +18 -9
  179. mindspore/nn/layer/rnn_cells.py +6 -16
  180. mindspore/nn/layer/rnns.py +6 -5
  181. mindspore/nn/layer/thor_layer.py +1 -2
  182. mindspore/nn/layer/timedistributed.py +1 -1
  183. mindspore/nn/layer/transformer.py +52 -50
  184. mindspore/nn/learning_rate_schedule.py +6 -5
  185. mindspore/nn/loss/loss.py +63 -84
  186. mindspore/nn/optim/ada_grad.py +6 -4
  187. mindspore/nn/optim/adadelta.py +3 -1
  188. mindspore/nn/optim/adafactor.py +1 -1
  189. mindspore/nn/optim/adam.py +102 -181
  190. mindspore/nn/optim/adamax.py +4 -2
  191. mindspore/nn/optim/adasum.py +3 -3
  192. mindspore/nn/optim/asgd.py +4 -2
  193. mindspore/nn/optim/ftrl.py +31 -61
  194. mindspore/nn/optim/lamb.py +5 -3
  195. mindspore/nn/optim/lars.py +2 -2
  196. mindspore/nn/optim/lazyadam.py +6 -4
  197. mindspore/nn/optim/momentum.py +13 -25
  198. mindspore/nn/optim/optimizer.py +6 -3
  199. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  200. mindspore/nn/optim/rmsprop.py +9 -3
  201. mindspore/nn/optim/rprop.py +4 -2
  202. mindspore/nn/optim/sgd.py +7 -4
  203. mindspore/nn/optim/thor.py +2 -2
  204. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  205. mindspore/nn/probability/distribution/beta.py +2 -2
  206. mindspore/nn/probability/distribution/categorical.py +4 -6
  207. mindspore/nn/probability/distribution/cauchy.py +2 -2
  208. mindspore/nn/probability/distribution/exponential.py +2 -2
  209. mindspore/nn/probability/distribution/geometric.py +1 -1
  210. mindspore/nn/probability/distribution/gumbel.py +2 -2
  211. mindspore/nn/probability/distribution/logistic.py +1 -1
  212. mindspore/nn/probability/distribution/poisson.py +2 -2
  213. mindspore/nn/probability/distribution/uniform.py +2 -2
  214. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  215. mindspore/nn/wrap/__init__.py +2 -1
  216. mindspore/nn/wrap/cell_wrapper.py +58 -13
  217. mindspore/nn/wrap/grad_reducer.py +148 -8
  218. mindspore/nn/wrap/loss_scale.py +32 -9
  219. mindspore/numpy/__init__.py +2 -0
  220. mindspore/numpy/array_creations.py +2 -0
  221. mindspore/numpy/array_ops.py +6 -6
  222. mindspore/numpy/dtypes.py +3 -3
  223. mindspore/numpy/fft.py +431 -0
  224. mindspore/numpy/math_ops.py +61 -67
  225. mindspore/numpy/utils.py +3 -0
  226. mindspore/opencv_core452.dll +0 -0
  227. mindspore/opencv_imgcodecs452.dll +0 -0
  228. mindspore/opencv_imgproc452.dll +0 -0
  229. mindspore/ops/__init__.py +8 -4
  230. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
  231. mindspore/ops/_grad_experimental/grad_comm_ops.py +93 -36
  232. mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
  233. mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
  234. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  235. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  236. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  237. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  238. mindspore/ops/_op_impl/__init__.py +0 -1
  239. mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
  240. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  241. mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
  242. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  243. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  244. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  245. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  246. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  247. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  248. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  249. mindspore/ops/_vmap/vmap_array_ops.py +164 -101
  250. mindspore/ops/_vmap/vmap_base.py +8 -1
  251. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  252. mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
  253. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  254. mindspore/ops/_vmap/vmap_math_ops.py +130 -58
  255. mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
  256. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  257. mindspore/ops/auto_generate/__init__.py +31 -0
  258. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
  259. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
  260. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  261. mindspore/ops/auto_generate/gen_extend_func.py +980 -0
  262. mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
  263. mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
  264. mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
  265. mindspore/ops/composite/__init__.py +5 -2
  266. mindspore/ops/composite/base.py +121 -23
  267. mindspore/ops/composite/math_ops.py +10 -49
  268. mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
  269. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
  270. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  271. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  272. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  273. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  274. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  275. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  276. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  277. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  278. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  279. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  280. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  281. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  282. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  283. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  284. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  285. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  286. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  287. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  288. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  289. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  290. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  291. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  292. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  293. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  294. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  296. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  297. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  298. mindspore/ops/deprecated.py +14 -3
  299. mindspore/ops/extend/__init__.py +53 -0
  300. mindspore/ops/extend/array_func.py +218 -0
  301. mindspore/ops/extend/math_func.py +76 -0
  302. mindspore/ops/extend/nn_func.py +308 -0
  303. mindspore/ops/function/__init__.py +31 -11
  304. mindspore/ops/function/array_func.py +848 -1736
  305. mindspore/ops/function/clip_func.py +19 -31
  306. mindspore/ops/function/debug_func.py +2 -5
  307. mindspore/ops/function/fft_func.py +31 -0
  308. mindspore/ops/function/grad/grad_func.py +27 -20
  309. mindspore/ops/function/image_func.py +27 -21
  310. mindspore/ops/function/linalg_func.py +30 -53
  311. mindspore/ops/function/math_func.py +916 -2791
  312. mindspore/ops/function/nn_func.py +1445 -889
  313. mindspore/ops/function/other_func.py +6 -7
  314. mindspore/ops/function/parameter_func.py +6 -92
  315. mindspore/ops/function/random_func.py +254 -108
  316. mindspore/ops/function/reshard_func.py +102 -0
  317. mindspore/ops/function/sparse_func.py +4 -4
  318. mindspore/ops/function/sparse_unary_func.py +11 -18
  319. mindspore/ops/function/spectral_func.py +1 -1
  320. mindspore/ops/function/vmap_func.py +15 -14
  321. mindspore/ops/functional.py +342 -343
  322. mindspore/ops/op_info_register.py +16 -43
  323. mindspore/ops/operations/__init__.py +32 -23
  324. mindspore/ops/operations/_embedding_cache_ops.py +1 -1
  325. mindspore/ops/operations/_grad_ops.py +21 -853
  326. mindspore/ops/operations/_infer_ops.py +19 -0
  327. mindspore/ops/operations/_inner_ops.py +155 -511
  328. mindspore/ops/operations/_quant_ops.py +4 -4
  329. mindspore/ops/operations/_rl_inner_ops.py +3 -3
  330. mindspore/ops/operations/_scalar_ops.py +5 -480
  331. mindspore/ops/operations/_sequence_ops.py +6 -36
  332. mindspore/ops/operations/_tensor_array.py +8 -8
  333. mindspore/ops/operations/array_ops.py +112 -2698
  334. mindspore/ops/operations/comm_ops.py +801 -118
  335. mindspore/ops/operations/custom_ops.py +62 -121
  336. mindspore/ops/operations/debug_ops.py +105 -36
  337. mindspore/ops/operations/image_ops.py +3 -219
  338. mindspore/ops/operations/inner_ops.py +54 -40
  339. mindspore/ops/operations/linalg_ops.py +1 -49
  340. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  341. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  342. mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
  343. mindspore/ops/operations/math_ops.py +621 -4654
  344. mindspore/ops/operations/nn_ops.py +316 -2226
  345. mindspore/ops/operations/other_ops.py +53 -45
  346. mindspore/ops/operations/random_ops.py +4 -51
  347. mindspore/ops/operations/reshard_ops.py +53 -0
  348. mindspore/ops/operations/sparse_ops.py +8 -8
  349. mindspore/ops/primitive.py +204 -103
  350. mindspore/ops/silent_check.py +162 -0
  351. mindspore/ops_generate/__init__.py +27 -0
  352. mindspore/ops_generate/arg_dtype_cast.py +250 -0
  353. mindspore/ops_generate/arg_handler.py +197 -0
  354. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  355. mindspore/ops_generate/gen_ops.py +1084 -0
  356. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  357. mindspore/ops_generate/gen_pyboost_func.py +968 -0
  358. mindspore/ops_generate/gen_utils.py +209 -0
  359. mindspore/ops_generate/op_proto.py +138 -0
  360. mindspore/ops_generate/pyboost_utils.py +354 -0
  361. mindspore/ops_generate/template.py +239 -0
  362. mindspore/parallel/__init__.py +7 -4
  363. mindspore/parallel/_auto_parallel_context.py +155 -6
  364. mindspore/parallel/_cell_wrapper.py +16 -9
  365. mindspore/parallel/_cost_model_context.py +1 -1
  366. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  367. mindspore/parallel/_parallel_serialization.py +62 -14
  368. mindspore/parallel/_ps_context.py +1 -1
  369. mindspore/parallel/_recovery_context.py +1 -1
  370. mindspore/parallel/_tensor.py +18 -9
  371. mindspore/parallel/_transformer/__init__.py +1 -1
  372. mindspore/parallel/_transformer/layers.py +1 -1
  373. mindspore/parallel/_transformer/loss.py +1 -1
  374. mindspore/parallel/_transformer/moe.py +1 -1
  375. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  376. mindspore/parallel/_transformer/transformer.py +10 -10
  377. mindspore/parallel/_utils.py +161 -6
  378. mindspore/parallel/algo_parameter_config.py +6 -8
  379. mindspore/parallel/checkpoint_transform.py +369 -64
  380. mindspore/parallel/cluster/__init__.py +15 -0
  381. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  382. mindspore/parallel/cluster/process_entity/_api.py +344 -0
  383. mindspore/parallel/cluster/process_entity/_utils.py +126 -0
  384. mindspore/parallel/cluster/run.py +136 -0
  385. mindspore/parallel/mpi/__init__.py +1 -1
  386. mindspore/parallel/mpi/_mpi_config.py +1 -1
  387. mindspore/parallel/parameter_broadcast.py +152 -0
  388. mindspore/parallel/shard.py +128 -17
  389. mindspore/profiler/__init__.py +3 -2
  390. mindspore/profiler/common/process_pool.py +41 -0
  391. mindspore/profiler/common/singleton.py +28 -0
  392. mindspore/profiler/common/util.py +125 -0
  393. mindspore/profiler/envprofiling.py +2 -2
  394. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  395. mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
  396. mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
  397. mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
  398. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
  399. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
  400. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
  401. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
  402. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  403. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
  404. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  405. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  406. mindspore/profiler/parser/ascend_flops_generator.py +27 -5
  407. mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
  408. mindspore/profiler/parser/ascend_hccl_generator.py +31 -280
  409. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  410. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  411. mindspore/profiler/parser/ascend_msprof_exporter.py +151 -126
  412. mindspore/profiler/parser/ascend_msprof_generator.py +75 -274
  413. mindspore/profiler/parser/ascend_op_generator.py +94 -36
  414. mindspore/profiler/parser/ascend_timeline_generator.py +297 -131
  415. mindspore/profiler/parser/base_timeline_generator.py +17 -3
  416. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
  417. mindspore/profiler/parser/framework_parser.py +11 -4
  418. mindspore/profiler/parser/integrator.py +3 -1
  419. mindspore/profiler/parser/memory_usage_parser.py +8 -2
  420. mindspore/profiler/parser/minddata_analyzer.py +8 -2
  421. mindspore/profiler/parser/minddata_parser.py +73 -4
  422. mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
  423. mindspore/profiler/parser/msadvisor_parser.py +10 -4
  424. mindspore/profiler/parser/profiler_info.py +16 -1
  425. mindspore/profiler/profiling.py +522 -195
  426. mindspore/rewrite/__init__.py +2 -13
  427. mindspore/rewrite/api/node.py +123 -37
  428. mindspore/rewrite/api/pattern_engine.py +2 -3
  429. mindspore/rewrite/api/scoped_value.py +16 -15
  430. mindspore/rewrite/api/symbol_tree.py +46 -30
  431. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  432. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  433. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  434. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  435. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  436. mindspore/rewrite/common/__init__.py +1 -2
  437. mindspore/rewrite/common/config.py +24 -0
  438. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  439. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  440. mindspore/rewrite/common/namespace.py +118 -0
  441. mindspore/rewrite/node/__init__.py +5 -5
  442. mindspore/rewrite/node/call_function.py +23 -7
  443. mindspore/rewrite/node/cell_container.py +7 -3
  444. mindspore/rewrite/node/control_flow.py +53 -28
  445. mindspore/rewrite/node/node.py +212 -196
  446. mindspore/rewrite/node/node_manager.py +51 -22
  447. mindspore/rewrite/node/node_topological_manager.py +3 -23
  448. mindspore/rewrite/parsers/__init__.py +12 -0
  449. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  450. mindspore/rewrite/parsers/assign_parser.py +637 -413
  451. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  452. mindspore/rewrite/parsers/class_def_parser.py +115 -148
  453. mindspore/rewrite/parsers/constant_parser.py +5 -5
  454. mindspore/rewrite/parsers/container_parser.py +4 -6
  455. mindspore/rewrite/parsers/expr_parser.py +55 -0
  456. mindspore/rewrite/parsers/for_parser.py +31 -98
  457. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  458. mindspore/rewrite/parsers/if_parser.py +28 -10
  459. mindspore/rewrite/parsers/module_parser.py +8 -182
  460. mindspore/rewrite/parsers/parser.py +1 -5
  461. mindspore/rewrite/parsers/parser_register.py +1 -1
  462. mindspore/rewrite/parsers/return_parser.py +5 -10
  463. mindspore/rewrite/parsers/while_parser.py +59 -0
  464. mindspore/rewrite/sparsify/utils.py +1 -1
  465. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  466. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  467. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  468. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  469. mindspore/run_check/_check_version.py +6 -14
  470. mindspore/run_check/run_check.py +1 -1
  471. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  472. mindspore/swresample-4.dll +0 -0
  473. mindspore/swscale-6.dll +0 -0
  474. mindspore/tinyxml2.dll +0 -0
  475. mindspore/train/__init__.py +6 -5
  476. mindspore/train/_utils.py +178 -4
  477. mindspore/train/amp.py +167 -245
  478. mindspore/train/anf_ir_pb2.py +14 -2
  479. mindspore/train/callback/__init__.py +5 -2
  480. mindspore/train/callback/_backup_and_restore.py +5 -5
  481. mindspore/train/callback/_callback.py +4 -4
  482. mindspore/train/callback/_checkpoint.py +151 -37
  483. mindspore/train/callback/_cluster_monitor.py +201 -0
  484. mindspore/train/callback/_early_stop.py +2 -2
  485. mindspore/train/callback/_flops_collector.py +238 -0
  486. mindspore/train/callback/_landscape.py +16 -11
  487. mindspore/train/callback/_loss_monitor.py +2 -2
  488. mindspore/train/callback/_mindio_ttp.py +443 -0
  489. mindspore/train/callback/_on_request_exit.py +2 -2
  490. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  491. mindspore/train/callback/_summary_collector.py +13 -14
  492. mindspore/train/callback/_time_monitor.py +3 -3
  493. mindspore/train/data_sink.py +6 -5
  494. mindspore/train/dataset_helper.py +66 -21
  495. mindspore/train/loss_scale_manager.py +2 -2
  496. mindspore/train/metrics/accuracy.py +7 -7
  497. mindspore/train/metrics/confusion_matrix.py +8 -6
  498. mindspore/train/metrics/cosine_similarity.py +6 -4
  499. mindspore/train/metrics/error.py +2 -2
  500. mindspore/train/metrics/metric.py +3 -3
  501. mindspore/train/metrics/perplexity.py +2 -1
  502. mindspore/train/metrics/topk.py +2 -2
  503. mindspore/train/mind_ir_pb2.py +89 -15
  504. mindspore/train/model.py +298 -56
  505. mindspore/train/serialization.py +501 -221
  506. mindspore/train/summary/_summary_adapter.py +1 -1
  507. mindspore/train/summary/_writer_pool.py +1 -1
  508. mindspore/train/summary/summary_record.py +56 -34
  509. mindspore/train/train_thor/convert_utils.py +3 -3
  510. mindspore/turbojpeg.dll +0 -0
  511. mindspore/version.py +1 -1
  512. {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
  513. mindspore-2.3.0.dist-info/RECORD +1400 -0
  514. {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
  515. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  516. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  517. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  518. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  519. mindspore/gen_ops.py +0 -273
  520. mindspore/nn/layer/flash_attention.py +0 -189
  521. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  522. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  523. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  524. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  525. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  526. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  527. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  528. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  529. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  530. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  531. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  532. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  533. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  534. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  535. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  536. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  537. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  538. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  539. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  540. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  541. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  542. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  543. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  544. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  545. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  546. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  547. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  548. mindspore/ops/_op_impl/tbe/add.py +0 -42
  549. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  550. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  551. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  552. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  553. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  554. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  555. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  556. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  557. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  558. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  559. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  560. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  561. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  562. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  563. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  564. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  565. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  566. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  567. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  568. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  569. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  570. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  571. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  572. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  573. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  574. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  575. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  576. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  577. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  578. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  579. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  580. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  581. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  582. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  583. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  584. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  585. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  586. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  587. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  588. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  589. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  590. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  591. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  592. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  593. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  594. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  595. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  596. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  597. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  598. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  599. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  600. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  601. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  602. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  603. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  604. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  605. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  606. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  607. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  608. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  609. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  610. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  611. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  612. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  613. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  614. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  615. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  616. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  617. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  618. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  619. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  620. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  621. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  622. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  623. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  624. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  625. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  626. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  627. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  628. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  629. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  630. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  631. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  632. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  633. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  634. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  635. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  636. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  637. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  638. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  639. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  640. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  641. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  642. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  643. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  644. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  645. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  646. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  647. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  648. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  649. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  650. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  651. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  652. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  653. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  654. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  655. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  656. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  657. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  658. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  659. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  660. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  661. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  662. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  663. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  664. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  665. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  666. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  667. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  668. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  669. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  670. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  671. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  672. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  673. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  674. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  675. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  676. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  677. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  678. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  679. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  680. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  681. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  682. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  683. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  684. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  685. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  686. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  687. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  688. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  689. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  690. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  691. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  692. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  693. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  694. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  695. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  696. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  697. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  698. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  699. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  700. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  701. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  702. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  703. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  704. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  705. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  706. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  707. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  708. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  709. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  710. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  711. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  712. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  713. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  714. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  715. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  716. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  717. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  718. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  719. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  720. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  721. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  722. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  723. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  724. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  725. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  726. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  727. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  728. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  729. mindspore/ops/_op_impl/tbe/div.py +0 -41
  730. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  731. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  732. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  733. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  734. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  735. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  736. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  737. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  738. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  739. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  740. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  741. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  742. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  743. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  744. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  745. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  746. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  747. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  748. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  749. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  750. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  751. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  752. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  753. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  754. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  755. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  756. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  757. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  758. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  759. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  760. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  761. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  762. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  763. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  764. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  765. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  766. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  767. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  768. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  769. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  770. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  771. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  772. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  773. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  774. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  775. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  776. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  777. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  778. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  779. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  780. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  781. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  782. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  783. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  784. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  785. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  786. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  787. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  788. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  789. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  790. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  791. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  792. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  793. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  794. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  795. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  796. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  797. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  798. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  799. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  800. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  801. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  802. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  803. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  804. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  805. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  806. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  807. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  808. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  809. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  810. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  811. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  812. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  813. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  814. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  815. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  816. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  817. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  818. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  819. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  820. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  821. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  822. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  823. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  824. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  825. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  826. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  827. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  828. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  829. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  830. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  831. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  832. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  833. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  834. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  835. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  836. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  837. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  838. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  839. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  840. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  841. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  842. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  843. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  844. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  845. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  846. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  847. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  848. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  849. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  850. mindspore/ops/_op_impl/tbe/less.py +0 -41
  851. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  852. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  853. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  854. mindspore/ops/_op_impl/tbe/log.py +0 -40
  855. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  856. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  857. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  858. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  859. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  860. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  861. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  862. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  863. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  864. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  865. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  866. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  867. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  868. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  869. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  870. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  871. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  872. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  873. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  874. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  875. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  876. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  877. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  878. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  879. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  880. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  881. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  882. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  883. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  884. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  885. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  886. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  887. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  888. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  889. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  890. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  891. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  892. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  893. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  894. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  895. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  896. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  897. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  898. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  899. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  900. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  901. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  902. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  903. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  904. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  905. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  906. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  907. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  908. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  909. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  910. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  911. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  912. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  913. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  914. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  915. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  916. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  917. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  918. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  919. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  920. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  921. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  922. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  923. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  924. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  925. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  926. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  927. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  928. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  929. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  930. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  931. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  932. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  933. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  934. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  935. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  936. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  937. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  938. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  939. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  940. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  941. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  942. mindspore/ops/_op_impl/tbe/range.py +0 -39
  943. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  944. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  945. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  946. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  947. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  948. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  949. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  950. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  951. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  952. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  953. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  954. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  955. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  956. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  957. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  958. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  959. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  960. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  961. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  962. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  963. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  964. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  965. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  966. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  967. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  968. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  969. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  970. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  971. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  972. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  973. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  974. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  975. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  976. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  977. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  978. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  979. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  980. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  981. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  982. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  983. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  984. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  985. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  986. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  987. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  988. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  989. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  990. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  991. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  992. mindspore/ops/_op_impl/tbe/round.py +0 -38
  993. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  994. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  995. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  996. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  997. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  998. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  999. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1000. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1001. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1002. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1003. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1004. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1005. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1006. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1007. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1008. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1009. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1010. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1011. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1012. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1013. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1014. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1015. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1016. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1017. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1018. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1019. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1020. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1021. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1022. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1023. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1024. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1025. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1026. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1027. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1028. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1029. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1030. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1031. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1032. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1033. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1034. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1035. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1036. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1037. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1038. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1039. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1040. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1041. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1042. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1043. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1044. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1045. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1046. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1047. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1048. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1049. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1050. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1051. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1052. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1053. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1054. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1055. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1057. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1058. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1060. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1061. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1062. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1063. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1064. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1065. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1066. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1067. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1068. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1069. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1070. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1071. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1072. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1073. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1074. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1075. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1076. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1077. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1078. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1079. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1080. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1081. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1082. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1083. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1084. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1085. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1086. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1087. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1088. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1089. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1090. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1091. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1092. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1093. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1094. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1095. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1096. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1097. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1098. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1099. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1100. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1101. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1102. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1103. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1104. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1105. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1106. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1107. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1108. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1109. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1110. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1111. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1112. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1113. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1114. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1115. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1116. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1117. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1118. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1119. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1120. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1121. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1122. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1123. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1124. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1125. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1126. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1127. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1128. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1129. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1130. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1131. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1132. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1133. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1134. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1135. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1136. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1137. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1138. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1139. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1140. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1141. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1142. mindspore/ops/_tracefunc.py +0 -241
  1143. mindspore/ops/arg_dtype_cast.py +0 -54
  1144. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1145. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1146. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1147. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1148. mindspore/rewrite/namespace.py +0 -53
  1149. mindspore-2.2.11.dist-info/RECORD +0 -1920
  1150. {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
  1151. {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2020-2022 Huawei Technologies Co., Ltd
+ # Copyright 2020-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -29,15 +29,21 @@ from mindspore.common._utils import is_shape_unknown, is_dim_unknown
  from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
  from mindspore import _checkparam as validator
  from mindspore._checkparam import _check_3d_int_or_tuple
- from mindspore.ops._tracefunc import PackFunc
  from mindspore.common import dtype as mstype
  from mindspore.common._decorator import deprecated
- from mindspore.common.parameter import Parameter
  from mindspore.common import Tensor, CSRTensor, COOTensor
  from mindspore._c_expression import Tensor as Tensor_
  from mindspore._c_expression import CSRTensor as CSRTensor_
  from mindspore._c_expression import COOTensor as COOTensor_
-
+ from ..auto_generate import (ExpandDims, Reshape, TensorShape, Transpose, Gather,
+                              OnesLike, ZerosLike, Argmax, ArgMaxExt,
+                              ReverseV2, Diag, Eye, ScatterNd, ResizeNearestNeighborV2,
+                              GatherNd, GatherD, Range, MaskedFill, RightShift, NonZero,
+                              ResizeNearestNeighbor, Identity, Split, CumSum, CumProd,
+                              Cummax, Cummin, Argmin, Concat, UnsortedSegmentSum, ScalarToTensor,
+                              Triu, BroadcastTo, StridedSlice, Select, TopkExt, SearchSorted)
+ from .manually_defined import Rank, Shape, Tile, Cast, Ones, Zeros
+ from ..auto_generate import ArgMaxWithValue, ArgMinWithValue

  class _ScatterOp(PrimitiveWithInfer):
      """
@@ -187,50 +193,6 @@ class Expand(Primitive):
          self.init_prim_io_names(inputs=['x', 'shape'], outputs=['y'])


- class ExpandDims(PrimitiveWithCheck):
-     """
-     Adds an additional dimension to `input_x` at the given axis, the dimension of
-     `input_x` should be greater than or equal to 1.
-
-     Refer to :func:`mindspore.ops.expand_dims` for more details.
-
-     Inputs:
-         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-         - **axis** (int) - Specifies the dimension index at which to expand
-           the shape of `input_x`. The value of axis must be in the range
-           `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
-
-     Outputs:
-         Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
-         value of `axis` is 0. It has the same data type as `input_x`.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-         >>> expand_dims = ops.ExpandDims()
-         >>> output = expand_dims(input_tensor, 0)
-         >>> print(output)
-         [[[2. 2.]
-           [2. 2.]]]
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize ExpandDims"""
-         self.init_prim_io_names(inputs=['x', 'axis'], outputs=['output'])
-
-     def infer_value(self, input_x, axis):
-         value = None
-         if input_x is not None and axis is not None:
-             value = Tensor(np.expand_dims(input_x.asnumpy(), axis))
-         return value
-
-
  class DType(Primitive):
      """
      Returns the data type of the input tensor as mindspore.dtype.
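
The removed `infer_value` above folded constant inputs through `np.expand_dims`; the same shape rule can be checked directly in numpy (illustrative sketch):

    import numpy as np

    x = np.array([[2., 2.], [2., 2.]], dtype=np.float32)
    print(np.expand_dims(x, 0).shape)  # (1, 2, 2), as in the removed docstring
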
@@ -300,85 +262,6 @@ class CheckNumerics(Primitive):
          self.init_prim_io_names(inputs=['x'], outputs=['y'])


- class Cast(PrimitiveWithCheck):
-     """
-     Returns a tensor with the new specified data type.
-
-     Note:
-         When converting complex numbers to boolean type, the imaginary part of the complex number is not
-         taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
-
-     Inputs:
-         - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-           The tensor to be cast.
-         - **type** (dtype.Number) - The valid data type of the output tensor. Only constant value is allowed.
-
-     Outputs:
-         Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
-
-     Raises:
-         TypeError: If `input_x` is neither Tensor nor Number.
-         TypeError: If `type` is not a Number.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
-         >>> input_x = Tensor(input_np)
-         >>> type_dst = mindspore.int32
-         >>> cast = ops.Cast()
-         >>> output = cast(input_x, type_dst)
-         >>> print(output.dtype)
-         Int32
-         >>> print(output.shape)
-         (2, 3, 4, 5)
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize Cast"""
-         self.init_prim_io_names(inputs=['x', 'dst_type'], outputs=['output'])
-
-     def check_elim(self, x, dtype):
-         if isinstance(x, (Tensor, numbers.Number, Parameter)):
-             if isinstance(x, Parameter):
-                 data = x.data
-                 if data.dtype == dtype:
-                     return (True, x)
-             if isinstance(x, Tensor) and x.dtype == dtype and not PackFunc.is_tracing():
-                 x = Tensor(x)
-                 x.set_cast_dtype()
-                 return (True, x)
-             if isinstance(x, numbers.Number):
-                 return (True, Tensor(x, dtype=dtype))
-         return (False, None)
-
-     def infer_value(self, x, dst_type):
-         if x is None:
-             return None
-         src_type = mstype.get_py_obj_dtype(x)
-         validator.check_subclass("input_x", src_type,
-                                  [mstype.tensor_type, mstype.number], self.name)
-         validator.check_subclass("type", dst_type, mstype.number, self.name)
-
-         if isinstance(src_type, type(mstype.tensor_type)):
-             src_type = src_type.element_type()
-         if isinstance(dst_type, type(mstype.tensor_type)):
-             dst_type = dst_type.element_type()
-
-         value = None
-         np_dst_type = mstype.dtype_to_nptype(dst_type)
-         if isinstance(x, (int, float)):
-             value = Tensor(np.array(x).astype(np_dst_type), dtype=dst_type)
-         else:
-             value = Tensor(x.asnumpy().astype(np_dst_type), dtype=dst_type)
-         return value
-
-
  class Im2Col(Primitive):
      r"""
      Extracts sliding local blocks from a batched input tensor.
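
The removed `Cast.check_elim`/`infer_value` pair implemented a constant-folding rule: a cast to the tensor's existing dtype is elided, and casts of constants are evaluated eagerly through numpy. A hedged sketch of that folding outside the framework (`fold_cast` is a hypothetical helper, not a MindSpore API):

    import numpy as np

    def fold_cast(value, np_dst_type):
        # Constants are cast eagerly via numpy, as the removed infer_value did.
        return np.asarray(value).astype(np_dst_type)

    print(fold_cast(1.5, np.int32))  # 1
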
@@ -427,7 +310,6 @@ class Im2Col(Primitive):

              - If one int, :math:`pad\_height = pad\_width`.
              - If two int, :math:`pad\_height = pads[0]`, :math:`pad\_width = pads[1]`.
-             - If four int, :math:`pads = [pad\_height\_top, pad\_height\_bottom, pad\_width\_left, pad\_width\_right]`.

      Inputs:
          - **x** (Tensor) - input tensor, only 4-D input tensors (batched image-like tensors) are supported.
@@ -492,11 +374,10 @@

  class Col2Im(Primitive):
      r"""
-     Combines an array of sliding local blocks into a large containing tensor. It is
+     Rearranges a row vector to an image. It is
      usually used to reconstruct an image from a set of image patches(or sliding local blocks).

-     Consider a batched :attr:`input` tensor containing sliding local blocks,
-     e.g., patches of images, of shape :math:`(N, C, \prod(\text{kernel_size}), L)`,
+     Consider an input Tensor of shape :math:`(N, C, \prod(\text{kernel_size}), L)`,
      where :math:`N` is batch dimension, :math:`C` is channel dimension,
      :math:`\prod(\text{kernel_size})` is the block size, and
      :math:`L` is the total number of blocks. This operation combines these
@@ -583,149 +464,6 @@ class Col2Im(Primitive):
          self.add_prim_attr('stride', self.stride)


- class Reshape(PrimitiveWithCheck):
-     """
-     Rearranges the input Tensor based on the given shape.
-
-     Refer to :func:`mindspore.ops.reshape` for more details.
-
-     Inputs:
-         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-         - **input_shape** (tuple[int]) - The input tuple is constructed by multiple
-           integers, i.e., :math:`(y_1, y_2, ..., y_S)`.
-
-     Outputs:
-         Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-         >>> reshape = ops.Reshape()
-         >>> output = reshape(input_x, (3, 2))
-         >>> print(output)
-         [[-0.1  0.3]
-          [ 3.6  0.4]
-          [ 0.5 -3.2]]
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize Reshape"""
-         self.init_prim_io_names(inputs=['tensor', 'shape'], outputs=['output'])
-
-     def infer_value(self, x, shape):
-         """infer value"""
-         # for shape is not constant
-         if shape is None or self.none_in_tuple_or_list(shape) or x is None:
-             return None
-
-         if isinstance(shape, (Tensor, Tensor_)):
-             validator.check_tensor_dtype_valid("shape", mstype.TensorType(shape.dtype),
-                                                [mstype.int32, mstype.int64], self.name)
-             shape = shape.asnumpy().tolist()
-         else:
-             validator.check_value_type("shape", shape, [tuple], self.name)
-             shape = list(shape)
-
-         neg_index = -1
-         dim_prod = 1
-         for i, shp_i in enumerate(shape):
-             validator.check_value_type("shape[%d]" % i, shp_i, [int], self.name)
-             if shp_i == -1:
-                 if neg_index != -1:
-                     raise ValueError(f"For '{self.name}', there can be at most one '-1' in 'input_shape', "
-                                      f"but got {shape}.")
-                 neg_index = i
-             else:
-                 dim_prod *= shp_i
-         out = None
-         if not is_shape_unknown(x.shape):
-             x_shp = x.shape
-             if dim_prod <= 0:
-                 raise ValueError(f"For '{self.name}', the shape of 'input_x' is {x_shp}, "
-                                  f"the value of 'input_shape' is {shape}. "
-                                  f"The product of 'input_shape' should > 0, but got {dim_prod}.")
-             arr_prod = np.prod(x_shp)
-             if neg_index != -1:
-                 shape[neg_index] = int(arr_prod // dim_prod)
-                 dim_prod *= shape[neg_index]
-             if dim_prod != arr_prod:
-                 raise ValueError(f"For '{self.name}', the product of the 'input_x' shape "
-                                  f"should be equal to product of 'input_shape', but got product of the"
-                                  f" shape of 'input_x': {arr_prod}, product of 'input_shape': {dim_prod}.")
-             out = Tensor(x.asnumpy().reshape(shape))
-         return out
-
-     def none_in_tuple_or_list(self, x):
-         return isinstance(x, (tuple, list)) and None in x
-
-
- class Shape(Primitive):
-     """
-     Returns the shape of the input tensor.
-
-     Refer to :func:`mindspore.ops.shape` for more details.
-
-     Inputs:
-         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
-     Outputs:
-         tuple[int], the output tuple is constructed by multiple integers,
-         :math:`(x_1, x_2, ..., x_R)`.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
-         >>> shape = ops.Shape()
-         >>> output = shape(input_x)
-         >>> print(output)
-         (3, 2, 1)
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize Shape"""
-
-     def __call__(self, x):
-         if isinstance(x, (Tensor, COOTensor, CSRTensor, Tensor_)):
-             return x.shape
-         raise TypeError(f"For primitive[{self.name}], the input argument must be Tensor, but got {type(x)}.")
-
-
- class TensorShape(Primitive):
-     """
-     Returns the shape of the input tensor.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
-         >>> shape = ops.TensorShape()
-         >>> output = shape(input_x)
-         >>> print(output)
-         [3 2 1]
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """init Shape"""
-         self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
-
-
  class Unsqueeze(PrimitiveWithCheck):
      """Unsqueeze"""

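The removed `Reshape.infer_value` resolved a single `-1` in the target shape from the element count (`arr_prod // dim_prod`) and rejected mismatched products. A standalone sketch of that resolution logic (`resolve_reshape` is illustrative, not a MindSpore API):

    import numpy as np

    def resolve_reshape(in_shape, target):
        target = list(target)
        if target.count(-1) > 1:
            raise ValueError("at most one -1 is allowed in the target shape")
        total = int(np.prod(in_shape))
        known = int(np.prod([d for d in target if d != -1]))
        if -1 in target:
            # Infer the free dimension from the total element count.
            target[target.index(-1)] = total // known
        if int(np.prod(target)) != total:
            raise ValueError("element counts do not match")
        return tuple(target)

    print(resolve_reshape((2, 3), (3, -1)))  # (3, 2)
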
@@ -781,48 +519,6 @@ class Squeeze(Primitive):
          self.add_prim_attr("axis", (axis,))


- class Transpose(Primitive):
-     """
-     Permutes the dimensions of the input tensor according to input permutation.
-
-     Refer to :func:`mindspore.ops.transpose` for more details.
-
-     Inputs:
-         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-         - **input_perm** (tuple[int]) - The permutation to be converted. The elements in `input_perm` are composed of
-           the indexes of each dimension of `input_x`. The length of `input_perm` and the shape of `input_x` must be
-           the same. Only constant value is allowed. Must be in the range [0, rank(input_x)).
-
-     Outputs:
-         Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the
-         shape of `input_x` and the value of `input_perm`.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
-         >>> input_perm = (0, 2, 1)
-         >>> transpose = ops.Transpose()
-         >>> output = transpose(input_x, input_perm)
-         >>> print(output)
-         [[[ 1.  4.]
-           [ 2.  5.]
-           [ 3.  6.]]
-          [[ 7. 10.]
-           [ 8. 11.]
-           [ 9. 12.]]]
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize Transpose"""
-         self.init_prim_io_names(inputs=['x', 'perm'], outputs=['output'])
-
-
  class ConjugateTranspose(Primitive):
      """
      Calculate the conjugate matrix of input x which has been transposed according to input perm.
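
The permutation semantics documented in the removed `Transpose` class match numpy's; a quick check of the shape rule (illustrative):

    import numpy as np

    x = np.arange(12).reshape(2, 2, 3)
    # perm (0, 2, 1) swaps the last two axes, as in the removed example.
    print(x.transpose(0, 2, 1).shape)  # (2, 3, 2)
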
@@ -992,99 +688,6 @@ class UniqueConsecutive(Primitive):
          self.add_prim_attr("axis", axis)


- class Gather(Primitive):
-     r"""
-     Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
-
-     Refer to :func:`mindspore.ops.gather` for more details.
-
-     Args:
-         batch_dims (int, optional): Specifies the number of batch dimensions.
-             It must be less than or equal to the rank of `input_indices`. Default: ``0`` .
-
-     Inputs:
-         - **input_params** (Tensor) - The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-         - **input_indices** (Tensor) - Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-           Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
-         - **axis** (Union(int, Tensor[int])) - Specifies the dimension index to gather indices.
-           When axis is Tensor, the size must be 1.
-
-     Outputs:
-         Tensor, the shape of tensor is
-         :math:`input\_params.shape[:axis] + input\_indices.shape + input\_params.shape[axis + 1:]`.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> # case1: input_indices is a Tensor with shape (5, ).
-         >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
-         >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
-         >>> axis = 0
-         >>> output = ops.Gather()(input_params, input_indices, axis)
-         >>> print(output)
-         [1. 3. 5. 3. 7.]
-         >>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
-         the output shape is equal to the input_indices shape.
-         >>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
-         >>> axis = 0
-         >>> output = ops.Gather()(input_params, input_indices, axis)
-         >>> print(output)
-         [[ 1. 3.]
-          [ 3. 7.]]
-         >>> # case3: input_indices is a Tensor with shape (2, ). input_params is a Tensor with shape (3, 4) and axis is 0.
-         >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
-         >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
-         >>> axis = 0
-         >>> output = ops.Gather()(input_params, input_indices, axis)
-         >>> print(output)
-         [[1. 2. 3. 4.]
-          [9. 10. 11. 12.]]
-         >>> # case4: input_indices is a Tensor with shape (2, ).
-         >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
-         >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
-         >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
-         >>> axis = 1
-         >>> batch_dims = 1
-         >>> output = ops.Gather(batch_dims)(input_params, input_indices, axis)
-         >>> print(output)
-         [ 1.  7. 10.]
-     """
-
-     @prim_attr_register
-     def __init__(self, batch_dims=0):
-         """Initialize Gather"""
-         validator.check_value_type("batch_dims", batch_dims, [int], self.name)
-         self.add_prim_attr("batch_dims", batch_dims)
-         self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
-
-
- class GatherV2(PrimitiveWithCheck):
-     """
-     Same as operator Gather. GatherV2 will be deprecated in the future.
-     Please use Gather instead.
-     """
-
-     @deprecated("1.1", "Gather", True)
-     @prim_attr_register
-     def __init__(self):
-         """Initialize GatherV2"""
-         self.add_prim_attr("batch_dims", 0)
-         self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
-
-     def __check__(self, params, indices, axis):
-         validator.check_subclass("params", params['dtype'], mstype.tensor_type, self.name)
-         validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
-         validator.check_subclass("axis", axis['dtype'], [mstype.number], self.name)
-         axis_v = axis['value']
-         validator.check_value_type('axis', axis_v, [int], self.name)
-         rank = len(params['shape'])
-         validator.check_int_range(axis_v, -rank, rank, validator.INC_LEFT, "axis", self.name)
-
-
  class SparseGatherV2(Primitive):
      """
      Returns a slice of input tensor based on the specified indices and axis.
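
The output-shape rule quoted in the removed `Gather` docstring, `input_params.shape[:axis] + input_indices.shape + input_params.shape[axis + 1:]`, can be verified with numpy's `take` (illustrative sketch):

    import numpy as np

    params = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=np.float32)
    indices = np.array([0, 2])
    axis = 0
    out = np.take(params, indices, axis=axis)
    # shape[:axis] + indices.shape + shape[axis + 1:] == (2, 4)
    print(out.shape)
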
@@ -1207,100 +810,6 @@ class UniqueWithPad(Primitive):
          self.init_prim_io_names(inputs=['x', 'pad_num'], outputs=['y', 'idx'])


- class Split(Primitive):
-     r"""
-     Splits the input tensor into output_num of tensors along the given axis and output numbers.
-
-     Refer to :func:`mindspore.ops.split` for more details.
-
-     Args:
-         axis (int): Index of the split position. Default: ``0`` .
-         output_num (int): The number of output tensors. Must be positive int. Default: ``1`` .
-
-     Inputs:
-         - **input_x** (Tensor) - The shape of tensor is :math:`(x_0, x_1, ..., x_{R-1})`, R >= 1.
-
-     Outputs:
-         tuple[Tensor], the shape of each output tensor is the same, which is
-         :math:`(x_0, x_1, ..., x_{axis}/{output\_num}, ..., x_{R-1})`.
-         And the data type is the same as `input_x`.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> split = ops.Split(1, 2)
-         >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
-         >>> print(x)
-         [[1 1 1 1]
-          [2 2 2 2]]
-         >>> output = split(x)
-         >>> print(output)
-         (Tensor(shape=[2, 2], dtype=Int32, value=
-         [[1, 1],
-          [2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value=
-         [[1, 1],
-          [2, 2]]))
-         >>> split = ops.Split(1, 4)
-         >>> output = split(x)
-         >>> print(output)
-         (Tensor(shape=[2, 1], dtype=Int32, value=
-         [[1],
-          [2]]), Tensor(shape=[2, 1], dtype=Int32, value=
-         [[1],
-          [2]]), Tensor(shape=[2, 1], dtype=Int32, value=
-         [[1],
-          [2]]), Tensor(shape=[2, 1], dtype=Int32, value=
-         [[1],
-          [2]]))
-     """
-
-     @prim_attr_register
-     def __init__(self, axis=0, output_num=1):
-         """Initialize Split"""
-         validator.check_value_type("axis", axis, [int], self.name)
-         validator.check_value_type("output_num", output_num, [int], self.name)
-         validator.check_positive_int(output_num, "output_num", self.name)
-         self.axis = axis
-         self.output_num = output_num
-         self.add_prim_attr('num_split', self.output_num)
-
-
- class Rank(Primitive):
-     """
-     Returns the rank of a tensor.
-
-     Refer to :func:`mindspore.ops.rank` for more details.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-         >>> rank = ops.Rank()
-         >>> output = rank(input_tensor)
-         >>> print(output)
-         2
-         >>> print(type(output))
-         <class 'int'>
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize Rank"""
-
-     def __call__(self, x):
-         if not isinstance(x, (Tensor, Tensor_)):
-             raise TypeError("the input x must be Tensor!")
-         return len(x.shape)
-
-
  class Size(Primitive):
      r"""
      Returns a Scalar of type int that represents the size of the input Tensor and the total number of elements in the
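
The removed `Split(axis, output_num)` divides the chosen axis into `output_num` equal parts; numpy's `split` follows the same rule (illustrative sketch mirroring the removed `ops.Split(1, 2)` example):

    import numpy as np

    x = np.array([[1, 1, 1, 1], [2, 2, 2, 2]], dtype=np.int32)
    parts = np.split(x, 2, axis=1)
    print([p.tolist() for p in parts])  # [[[1, 1], [2, 2]], [[1, 1], [2, 2]]]
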
@@ -1310,7 +819,7 @@ class Size(Primitive):

      Inputs:
          - **input_x** (Tensor) - Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-           `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
+           `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.

      Outputs:
          int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1497,7 +1006,7 @@ class MatrixDiagPartV3(Primitive):
  class MatrixSetDiagV3(Primitive):
      r"""
      Updates the diagonal part of a batched tensor.
-     It takes an Tensor `x` and `diagonal` as input and returns a Tensor in which
+     It takes a Tensor `x` and `diagonal` as input and returns a Tensor in which
      the specified diagonal values in the innermost matrices will be replaced
      by the values in the `diagonal`.

@@ -1763,186 +1272,49 @@ class FillV2(PrimitiveWithCheck):
          self.init_prim_io_names(inputs=['shape', 'value'], outputs=['y'])

      def check_elim(self, dims, x):
-         x_is_invalid = x is None or (not isinstance(x, (Tensor, Tensor_))) or (x.shape != ())
-         dims_is_invalid = dims is None or (isinstance(dims, (tuple, list)) and dims) or\
-             isinstance(dims, (Tensor, Tensor_))
-         if x_is_invalid or dims_is_invalid:
+         if x is None or (not isinstance(x, (Tensor, Tensor_))) or (x.shape != ()) or \
+                 dims is None or (isinstance(dims, (tuple, list)) and dims) or \
+                 isinstance(dims, (Tensor, Tensor_)):
              return (False, None)
          return (True, x)

      def infer_value(self, dims, x):
-         dims_is_invalid = dims is None or\
-             (isinstance(dims, (tuple, list)) and dims) or\
-             isinstance(dims, (Tensor, Tensor_))
-         if x is None or dims_is_invalid:
+         if x is None or dims is None or isinstance(dims, (Tensor, Tensor_)):
              return None
-         return x
+         if isinstance(dims, (tuple, list)) and None in dims:
+             return None
+         if 0 in dims:
+             init_func = Zero()
+             init_func.__enable_zero_dim__ = True
+             out = Tensor(shape=dims, dtype=x.dtype, init=init_func)
+             return out
+         return Tensor(np.full(dims, x.asnumpy()))


- class Ones(Primitive):
-     r"""
-     Creates a tensor filled with value ones.
+ class TupleToArray(PrimitiveWithInfer):
+     """
+     Converts a tuple to a tensor.

-     Refer to :func:`mindspore.ops.ones` for more details.
+     Refer to :func:`mindspore.ops.tuple_to_array` for more details.

      Inputs:
-         - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.
-         - **type** (:class:`mindspore.dtype`) - The specified type of output tensor.
+         - **input_x** (tuple) - A tuple of numbers. These numbers have the same type.
+           The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.

      Outputs:
-         Tensor, has the same type and shape as input shape value.
+         Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is :math:`(N,)`.

      Supported Platforms:
          ``Ascend`` ``GPU`` ``CPU``

      Examples:
-         >>> import mindspore
          >>> from mindspore import ops
-         >>> ones = ops.Ones()
-         >>> output = ones((2, 2), mindspore.float32)
-         >>> print(output)
-         [[1. 1.]
-          [1. 1.]]
-         >>> output = ones((3, 3), mindspore.float32)
-         >>> print(output)
-         [[1. 1. 1.]
-          [1. 1. 1.]
-          [1. 1. 1.]]
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize Ones"""
-
-
- class Zeros(Primitive):
-     r"""
-     Zeros will be deprecated in the future. Please use class `mindspore.ops.zeros` instead.
-
-     Creates a tensor filled with value zeros.
-
-     Creates a tensor with shape described by the first argument and
-     fills it with value zeros in type of the second argument.
-
-     Inputs:
-         - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.
-         - **type** (mindspore.dtype) - The specified type of output tensor.
-
-     Outputs:
-         Tensor, has the same type and shape as input shape value.
-
-     Raises:
-         TypeError: If `shape` is neither int nor tuple.
-         TypeError: If `shape` is a tuple whose elements are not all int.
-
-     Supported Platforms:
-         Deprecated
-
-     Examples:
-         >>> import mindspore
-         >>> from mindspore import ops
-         >>> zeros = ops.Zeros()
-         >>> output = zeros((2, 2), mindspore.float32)
-         >>> print(output)
-         [[0. 0.]
-          [0. 0.]]
-
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize Zeros"""
-
-
- class OnesLike(Primitive):
-     """
-     Returns a Tensor with a value of 1 and its shape and data type is the same as the input.
-
-     Refer to :func:`mindspore.ops.ones_like` for more details.
-
-     Inputs:
-         - **input_x** (Tensor) - Tensor of any dimension.
-
-     Outputs:
-         Tensor, has the same shape and type as `input_x` but filled with ones.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> oneslike = ops.OnesLike()
-         >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
-         >>> output = oneslike(input_x)
-         >>> print(output)
-         [[1 1]
-          [1 1]]
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize OnesLike"""
-         self.init_prim_io_names(inputs=['x'], outputs=['y'])
-
-
- class ZerosLike(Primitive):
-     """
-     Returns a Tensor with a value of 0 and its shape and data type is the same as the input.
-
-     Inputs:
-         - **input_x** (Tensor) - Input Tensor of any dimension.
-
-     Outputs:
-         Tensor, has the same shape and data type as `input_x` but filled with zeros.
-
-     Raises:
-         TypeError: If `input_x` is not a Tensor.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> zeroslike = ops.ZerosLike()
-         >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-         >>> output = zeroslike(input_x)
-         >>> print(output)
-         [[0. 0.]
-          [0. 0.]]
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         """Initialize ZerosLike"""
-         self.init_prim_io_names(inputs=['x'], outputs=['y'])
-
-
- class TupleToArray(PrimitiveWithInfer):
-     """
-     Converts a tuple to a tensor.
-
-     Refer to :func:`mindspore.ops.tuple_to_array` for more details.
-
-     Inputs:
-         - **input_x** (tuple) - A tuple of numbers. These numbers have the same type.
-           The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
-
-     Outputs:
-         Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is :math:`(N,)`.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> from mindspore import ops
-         >>> input_x = (1,2,3)
-         >>> print(type(input_x))
-         <class 'tuple'>
-         >>> output = ops.TupleToArray()(input_x)
-         >>> print(type(output))
-         <class 'mindspore.common.tensor.Tensor'>
+         >>> input_x = (1,2,3)
+         >>> print(type(input_x))
+         <class 'tuple'>
+         >>> output = ops.TupleToArray()(input_x)
+         >>> print(type(output))
+         <class 'mindspore.common.tensor.Tensor'>
          >>> print(output)
          [1 2 3]
      """
@@ -1975,42 +1347,6 @@ class TupleToArray(PrimitiveWithInfer):
          return _run_op(self, self.name, args)


- class ScalarToTensor(PrimitiveWithInfer):
-     """
-     Converts a scalar to a `Tensor`, and converts the data type to the specified type.
-
-     Refer to :func:`mindspore.ops.scalar_to_tensor` for more details.
-
-     Inputs:
-         - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.
-         - **dtype** (mindspore.dtype) - The target data type. Default: ``mindspore.float32`` . Only
-           constant value is allowed.
-
-     Outputs:
-         Tensor. 0-D Tensor and the content is the input.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> from mindspore import ops
-         >>> op = ops.ScalarToTensor()
-         >>> data = 1
-         >>> output = op(data, mindspore.float32)
-         >>> print(output)
-         1.0
-     """
-
-     @prim_attr_register
-     def __init__(self):
-         self.init_prim_io_names(inputs=['input_scalar', 'dtype'], outputs=['output_data'])
-
-     def __call__(self, x, dtype=mstype.float32):
-         validator.check_value_type("x", x, [bool, int, float], self.name)
-         validator.check_subclass("dtype", dtype, mstype.number, self.name)
-         data_type = mstype.dtype_to_nptype(dtype)
-         return Tensor(np.array(x, data_type), dtype=dtype)


  class InvertPermutation(PrimitiveWithInfer):
@@ -2092,94 +1428,6 @@ class InvertPermutation(PrimitiveWithInfer):
                  'value': tuple(y)}


- class Argmax(Primitive):
-     """
-     Returns the indices of the maximum value along a specified `axis` of a Tensor.
-
-     Refer to :func:`mindspore.ops.argmax` for more details.
-
-     Args:
-         axis (int): Axis where the Argmax operation applies to. Default: ``-1`` .
-         output_type (:class:`mindspore.dtype`): Output data type.
-             Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
-
-     Inputs:
-         - **input_x** (Tensor) - The input tensor. :math:`(N, *)` where :math:`*` means, any number of additional
-           dimensions.
-
-     Outputs:
-         Tensor, indices of the max value of input tensor across the axis.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
-         >>> output = ops.Argmax(output_type=mindspore.int32)(input_x)
-         >>> print(output)
-         [1 0 0]
-     """
-
-     @prim_attr_register
-     def __init__(self, axis=-1, output_type=mstype.int32):
-         """Initialize Argmax"""
-         self.init_prim_io_names(inputs=['x'], outputs=['output'])
-         validator.check_value_type("axis", axis, [int], self.name)
-         validator.check_types_same_and_valid({'output': output_type}, [mstype.int32, mstype.int64], self.name)
-         self.axis = axis
-         self.add_prim_attr('output_type', output_type)
-
-
- class Argmin(Primitive):
-     """
-     Returns the indices of the minimum value along a specified `axis` of a Tensor.
-
-     If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
-     :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
-
-     Args:
-         axis (int): Axis where the Argmin operation applies to. Default: ``-1`` .
-         output_type (:class:`mindspore.dtype`): Output data type.
-             Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
-
-     Inputs:
-         - **input_x** (Tensor) - Input tensor.
-           The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-
-     Outputs:
-         Tensor, whose dtype is determined by `output_type`.
-
-     Raises:
-         TypeError: If `axis` is not an int.
-         TypeError: If `output_type` is neither int32 nor int64.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> import numpy as np
-         >>> from mindspore import Tensor, ops
-         >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
-         >>> index = ops.Argmin()(input_x)
-         >>> print(index)
-         2
-     """
-
-     @prim_attr_register
-     def __init__(self, axis=-1, output_type=mstype.int32):
-         """Initialize Argmin"""
-         self.init_prim_io_names(inputs=['x'], outputs=['output'])
-         validator.check_value_type("axis", axis, [int], self.name)
-         validator.check_type_name("output_type", output_type, [mstype.int32, mstype.int64], self.name)
-         self.axis = axis
-         self.add_prim_attr('output_type', output_type)
-         self.add_prim_attr('axis', axis)
-
-
  class ArgminV2(Primitive):
      """
      Returns the indices of the minimum value of a tensor across the axis.
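
The removed `Argmax` defaults to `axis=-1`; its docstring example is reproduced by numpy's `argmax` (illustrative check):

    import numpy as np

    x = np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]], dtype=np.float32)
    print(x.argmax(axis=-1))  # [1 0 0]
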
@@ -2238,328 +1486,6 @@ class ArgminV2(Primitive):
2238
1486
  return output
2239
1487
 
2240
1488
 
2241
- class ArgMaxWithValue(Primitive):
2242
- """
2243
- Calculates the maximum value along with the given axis for the input tensor, and returns the maximum values and
2244
- indices.
2245
-
2246
- Note:
2247
- In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
2248
-
2249
- .. warning::
2250
- - If there are multiple maximum values, the index of the first maximum value is used.
2251
- - The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
2252
-
2253
- Also see :func:`mindspore.ops.max`.
2254
-
2255
- Args:
2256
- axis (int): The dimension to reduce. Default: ``0`` .
2257
- keep_dims (bool): Whether to reduce dimension, if ``True`` , the output will keep same dimension with the
2258
- input, the output will reduce dimension if ``false`` . Default: ``False`` .
2259
-
2260
- Inputs:
2261
- - **x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
2262
- :math:`(x_1, x_2, ..., x_N)`.
2263
-
2264
- Outputs:
2265
- tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
2266
- tensor.
2267
-
2268
- - **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keep_dims`
2269
- is ``True`` , the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
2270
- Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
2271
- - **values** (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as x.
2272
-
2273
- Raises:
2274
- TypeError: If `x` is not Tensor.
2275
- TypeError: If `keep_dims` is not a bool.
2276
- TypeError: If `axis` is not an int.
2277
-
2278
- Supported Platforms:
2279
- ``Ascend`` ``GPU`` ``CPU``
2280
-
2281
- Examples:
2282
- >>> import mindspore
2283
- >>> import numpy as np
2284
- >>> from mindspore import Tensor, ops
2285
- >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
2286
- >>> index, output = ops.ArgMaxWithValue()(input_x)
2287
- >>> print(index, output)
2288
- 3 0.7
2289
- >>> index, output = ops.ArgMaxWithValue(keep_dims=True)(input_x)
2290
- >>> print(index, output)
2291
- [3] [0.7]
2292
- """
2293
-
2294
- @prim_attr_register
2295
- def __init__(self, axis=0, keep_dims=False):
2296
- """Initialize ArgMaxWithValue"""
2297
- self.init_prim_io_names(inputs=['x'], outputs=['index', 'values'])
2298
- validator.check_value_type("axis", axis, [int], self.name)
2299
- validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
2300
- self.axis = axis
2301
- self.keep_dims = keep_dims
2302
- self.add_prim_attr('dimension', self.axis)
2303
-
2304
-
2305
- class ArgMinWithValue(Primitive):
2306
- """
2307
- Calculates the minimum value along with the given axis for the input tensor, and returns the minimum values and
2308
- indices.
2309
-
2310
- Note:
2311
- In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
2312
-
2313
- .. warning::
2314
- - If there are multiple minimum values, the index of the first minimum value is used.
2315
- - The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
2316
-
2317
- Also see :func:`mindspore.ops.min`.
2318
-
2319
- Args:
2320
- axis (int): The dimension to reduce. Default: ``0`` .
2321
- keep_dims (bool): Whether to reduce dimension, if ``True`` the output will keep the same dimension as the
2322
- input, the output will reduce dimension if ``false`` . Default: ``False`` .
2323
-
2324
- Inputs:
2325
- - **x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
2326
- :math:`(x_1, x_2, ..., x_N)` .Complex tensor is not supported.
2327
-
2328
- Outputs:
2329
- tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
2330
- tensor.
2331
-
2332
- - **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int32. If `keep_dims`
2333
- is ``True`` , the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
2334
- Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
2335
- - **values** (Tensor) - The minimum value of input tensor, with the same
2336
- shape as `index`, and same dtype as `x`.
2337
-
2338
- Raises:
2339
- TypeError: If `x` is not Tensor.
2340
- TypeError: If `keep_dims` is not a bool.
2341
- TypeError: If `axis` is not an int.
2342
-
2343
- Supported Platforms:
2344
- ``Ascend`` ``GPU`` ``CPU``
2345
-
2346
- Examples:
2347
- >>> import mindspore
2348
- >>> import numpy as np
2349
- >>> from mindspore import Tensor, ops
2350
- >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
2351
- >>> index, output = ops.ArgMinWithValue()(x)
2352
- >>> print(index, output)
2353
- 0 0.0
2354
- >>> index, output = ops.ArgMinWithValue(keep_dims=True)(x)
2355
- >>> print(index, output)
2356
- [0] [0.0]
2357
- """
2358
-
2359
- @prim_attr_register
2360
- def __init__(self, axis=0, keep_dims=False):
2361
- """Initialize ArgMinWithValue"""
2362
- self.init_prim_io_names(inputs=['x'], outputs=['index', 'values'])
2363
- validator.check_value_type("axis", axis, [int], self.name)
2364
- validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
2365
- self.axis = axis
2366
- self.keep_dims = keep_dims
2367
- self.add_prim_attr('dimension', self.axis)
2368
-
2369
-
2370
- class Tile(PrimitiveWithInfer):
2371
- r"""
2372
- Replicates an input tensor with given multiples times.
2373
-
2374
- Refer to :func:`mindspore.ops.tile` for more details.
2375
-
2376
- Inputs:
2377
- - **input_x** (Tensor) - 1-D or higher dimensional Tensor. Set the shape of input tensor as
2378
- :math:`(x_1, x_2, ..., x_S)` .
2379
- - **multiples** (tuple[int]) - The parameter that specifies the number of replications,
2380
- the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
2381
- The length of `multiples` cannot be smaller than the length of the shape of `input_x`.
2382
- Only constant value is allowed.
2383
-
2384
- Outputs:
2385
- Tensor, has the same data type as the `input_x`. Suppose the length of `multiples` is `d`,
2386
- the dimension of `input_x` is `input_x.dim`, and the shape of `input_x` is :math:`(x_1, x_2, ..., x_S)`.
2387
-
2388
- - If `input_x.dim = d`, then the shape of their corresponding positions can be multiplied, and
2389
- the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
2390
- - If `input_x.dim < d`, fill in multiple 1 in the length of the shape of `input_x` until their
2391
- lengths are consistent. Such as set the shape of `input_x` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
2392
- then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
2393
- :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
2394
-
2395
- Supported Platforms:
2396
- ``Ascend`` ``GPU`` ``CPU``
2397
-
2398
- Examples:
2399
- >>> import mindspore
2400
- >>> import numpy as np
2401
- >>> from mindspore import Tensor, ops
2402
- >>> tile = ops.Tile()
2403
- >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
2404
- >>> multiples = (2, 3)
2405
- >>> output = tile(input_x, multiples)
2406
- >>> print(output)
2407
- [[1. 2. 1. 2. 1. 2.]
2408
- [3. 4. 3. 4. 3. 4.]
2409
- [1. 2. 1. 2. 1. 2.]
2410
- [3. 4. 3. 4. 3. 4.]]
2411
- >>> multiples = (2, 3, 2)
2412
- >>> output = tile(input_x, multiples)
2413
- >>> print(output)
2414
- [[[1. 2. 1. 2.]
2415
- [3. 4. 3. 4.]
2416
- [1. 2. 1. 2.]
2417
- [3. 4. 3. 4.]
2418
- [1. 2. 1. 2.]
2419
- [3. 4. 3. 4.]]
2420
- [[1. 2. 1. 2.]
2421
- [3. 4. 3. 4.]
2422
- [1. 2. 1. 2.]
2423
- [3. 4. 3. 4.]
2424
- [1. 2. 1. 2.]
2425
- [3. 4. 3. 4.]]]
2426
- """
2427
-
2428
- @prim_attr_register
2429
- def __init__(self):
2430
- """Initialize Tile"""
2431
- self.init_prim_io_names(inputs=['x', 'multiples'], outputs=['output'])
2432
-
2433
- def check_elim(self, *args):
2434
- base_tensor, multiplier = args
2435
- if PackFunc.is_tracing() and not PackFunc.current.is_pynative_mode:
2436
- return (False, None)
2437
- if not isinstance(base_tensor, Tensor):
2438
- raise TypeError(f"For '{self.name}', the type of 'input_x' must be Tensor, "
2439
- f"but got {type(base_tensor).__name__}.")
2440
- if not isinstance(multiplier, tuple):
2441
- raise TypeError(f"For '{self.name}', the type of 'multiplier' must be tuple, "
2442
- f"but got {type(multiplier).__name__}.")
2443
-
2444
- if all(v == 1 for v in multiplier) and len(base_tensor.shape) >= len(multiplier):
2445
-            ret = Identity()(base_tensor)
-            return (True, ret)
-        return (False, None)
-
-    def _get_shape_and_range(self, x, multiples):
-        """calculate tile shape and value"""
-        x_shp = x['shape']
-        if is_dim_unknown(x_shp):
-            return {'shape': x_shp}, None
-        multiples_v = multiples['value']
-        value = None
-        len_sub = len(multiples_v) - len(x_shp)
-        multiples_w = None
-        if len_sub == 0:
-            multiples_w = multiples_v
-        if len_sub > 0:
-            for _ in range(0, len_sub):
-                x_shp.insert(0, 1)
-            multiples_w = multiples_v
-        elif len_sub < 0:
-            raise ValueError(f"For '{self.name}', the length of 'multiples' can not be smaller than "
-                             f"the dimension of 'input_x', but got length of 'multiples': {len(multiples_v)} "
-                             f"and dimension of 'input_x': {len(x_shp)}.")
-
-        for i, a in enumerate(multiples_w):
-            if x_shp[i] >= 0:
-                x_shp[i] *= a
-        if x['value'] is not None:
-            value = Tensor(np.tile(x['value'].asnumpy(), multiples_w))
-        out_shape = {
-            'shape': x_shp
-        }
-        return out_shape, value
-
-    def __infer__(self, x, multiples):
-        multiples_v = multiples['value']
-        if multiples_v is None or None in multiples_v:
-            if 'max_value' not in multiples or 'min_value' not in multiples:
-                if multiples_v is not None:
-                    shape = [len(multiples['shape'])]
-                else:
-                    shape = multiples['shape']
-                if len(shape) != 1:
-                    raise ValueError(f'For \'{self.name}\', the dim of multiples must be 1.')
-                rank = max(len(x['shape']), shape[0])
-                out_shape = [-1] * rank
-                if -2 in x['shape']:
-                    out_shape = [-2]
-                return {
-                    'shape': out_shape,
-                    'dtype': x['dtype'],
-                    'value': None
-                }
-            out_shape, value = self._get_shape_and_range(x, multiples)
-            shape = out_shape.get('shape', None)
-            out = {'shape': shape,
-                   'dtype': x['dtype'],
-                   'value': value}
-            return out
-
-        validator.check_value_type(
-            "multiples", multiples_v, [tuple], self.name)
-        for i, multiple in enumerate(multiples_v):
-            validator.check_positive_int(
-                multiple, "multiples[%d]" % i, self.name)
-        validator.check_value_type(
-            "x[\'dtype\']", x["dtype"], mstype.TensorType, self.name)
-        out_shp, value = self._get_shape_and_range(x, multiples)
-        shp = out_shp.get('shape', None)
-        out = {'shape': shp,
-               'dtype': x['dtype'],
-               'value': value}
-        return out
-
-
-class UnsortedSegmentSum(Primitive):
-    r"""
-    Computes the sum of a tensor along segments.
-
-    Refer to :func:`mindspore.ops.unsorted_segment_sum` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - Input Tensor contains the data to be summed.
-          The shape is :math:`(x_1, x_2, ..., x_R)`.
-        - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
-          Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
-        - **num_segments** (int) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
-
-    Outputs:
-        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> from mindspore import Tensor
-        >>> from mindspore import ops
-        >>> import mindspore
-        >>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
-        >>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
-        >>> num_segments = 4
-        >>> output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)
-        >>> print(output)
-        [3. 3. 4. 0.]
-        >>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)
-        >>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)
-        >>> num_segments = 6
-        >>> output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)
-        >>> print(output)
-        [3. 3. 4. 2. 5. 0.]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize UnsortedSegmentSum"""
-        self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
-
-
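This hunk drops the hand-written `Tile` shape/value inference and the `UnsortedSegmentSum` primitive from this module. The public interface appears to survive the 2.3.0 refactor — the definitions were presumably relocated (e.g. into generated-operator modules) rather than deleted. A minimal sanity check, reusing the removed docstring's own example and assuming `ops.UnsortedSegmentSum` is still exported:

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([1, 2, 3, 4], mindspore.float32)          # data to be summed
>>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)  # segment label per element
>>> print(ops.UnsortedSegmentSum()(x, segment_ids, 4))   # 4 output segments
[3. 3. 4. 0.]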
 class UnsortedSegmentMin(PrimitiveWithCheck):
     r"""
     Computes the minimum of a tensor along segments.
@@ -2571,10 +1497,10 @@ class UnsortedSegmentMin(PrimitiveWithCheck):
           The data type must be float16, float32 or int32.
         - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
           Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
-        - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.
+        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.

     Outputs:
-        Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -2633,10 +1559,10 @@ class UnsortedSegmentMax(PrimitiveWithCheck):
           The data type must be float16, float32 or int32.
         - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
           Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
-        - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.
+        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.

     Outputs:
-        Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -2752,13 +1678,12 @@ class UnsortedSegmentProd(Primitive):
     Inputs:
         - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
           With float16, float32 or int32 data type.
-        - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be non-negative tensor.
-          Data type must be int32.
-        - **num_segments** (int) - The value specifies the number of distinct `segment_ids`,
-          must be greater than 0.
+        - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
+          Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. Data type must be int32.
+        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.

     Outputs:
-        Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
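The three hunks above make the same documentation change across the `UnsortedSegment*` family: `num_segments` is now documented as `Union[int, Tensor]`, and the output shape is restated in terms of :math:`z`. A sketch of what the relaxed signature permits, assuming the 2.3.0 runtime matches the new docstring (the values below are illustrative, not from the package):

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([[1., 2., 3.], [4., 5., 6.]], mindspore.float32)
>>> segment_ids = Tensor([0, 1], mindspore.int32)
>>> num_segments = Tensor(2, mindspore.int32)  # a 0-D Tensor, now accepted alongside a plain int
>>> print(ops.UnsortedSegmentMin()(x, segment_ids, num_segments).shape)
(2, 3)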
@@ -2783,62 +1708,6 @@ class UnsortedSegmentProd(Primitive):
         self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])


-class Concat(PrimitiveWithCheck):
-    r"""
-    Connect tensor in the specified axis.
-
-    Refer to :func:`mindspore.ops.concat` for more details.
-
-    Args:
-        axis (int, optional): The specified axis. Default: ``0`` .
-
-    Inputs:
-        - **input_x** (Union[tuple, list]) - A tuple or a list of input tensors.
-          Suppose there are two tensors in this tuple or list, namely x1 and x2.
-          To perform `Concat` in the axis 0 direction, except for the 0th axis, all other axes should be equal,
-          that is, :math:`x1.shape[1] == x2.shape[1], x1.shape[2] == x2.shape[2], ..., x1.shape[R] == x2.shape[R]`,
-          where the :math:`R` indicates the last axis.
-
-    Outputs:
-        - Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
-          The data type is the same with `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> op = ops.Concat()
-        >>> output = op((input_x1, input_x2))
-        >>> print(output)
-        [[0. 1.]
-         [2. 1.]
-         [0. 1.]
-         [2. 1.]]
-        >>> op = ops.Concat(1)
-        >>> output = op((input_x1, input_x2))
-        >>> print(output)
-        [[0. 1. 0. 1.]
-         [2. 1. 2. 1.]]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=0):
-        """Initialize Concat"""
-        self.axis = axis
-        validator.check_value_type("axis", axis, [int], self.name)
-
-    def infer_value(self, input_x):
-        """Implement Concat infer value"""
-        value = None
-        if input_x is not None and None not in input_x:
-            value = Tensor(np.concatenate([x.asnumpy() for x in input_x], axis=self.axis))
-        return value
-
-
 class ConcatOffsetV1(Primitive):
     r"""
     primitive for computing Concat’s gradient.
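`Concat` is removed from this file here (only its gradient helper `ConcatOffsetV1` stays). The same operation remains reachable through the functional API that the removed docstring itself references, assuming `ops.concat` keeps its documented signature:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> print(ops.concat((x1, x2), axis=1))  # join along axis 1; all other axes must match
[[0. 1. 0. 1.]
 [2. 1. 2. 1.]]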
@@ -2952,7 +1821,7 @@ def _get_stack_shape(value, x_shape, x_type, axis, prim_name):

     out_n = len(x_shape)
     for i in range(1, out_n):
-        if x_type[i] != x_type[i-1]:
+        if x_type[i] != x_type[i - 1]:
             raise TypeError(f"For {prim_name}, all types should be same, but got {x_type}")

     new_x_shape = []
@@ -3040,6 +1909,7 @@ class Stack(PrimitiveWithInfer):
         tuple_value = value['value']
         input_array = []
         infered_value = None
+        dtype = x_type[0]
        if tuple_value is not None and None not in tuple_value:
             for item in tuple_value:
                 npy_item = item.asnumpy()
@@ -3048,23 +1918,9 @@ class Stack(PrimitiveWithInfer):

         shape = all_shape.get('shape') if isinstance(all_shape, dict) else all_shape
         out = {'shape': shape,
-               'dtype': x_type[0],
+               'dtype': dtype,
                'value': infered_value}

-        def unpack(x):
-            if isinstance(x, (tuple, list)) and len(x) == 1:
-                return unpack(x[0])
-            return x
-
-        if 'shape_value' in value and value['shape_value'] is not None:
-            input_shape_value = []
-            for item in value['shape_value']:
-                item = unpack(item)
-                item = np.array(item)
-                input_shape_value.append(item)
-            infered_shape_value = np.stack(input_shape_value, axis=self.axis)
-            infered_shape_value = tuple(infered_shape_value.tolist())
-            out['shape_value'] = infered_shape_value
         return out


@@ -3217,61 +2073,6 @@ class Coalesce(Primitive):
                                 outputs=['y_indices', 'y_values', 'y_shape'])


-class ReverseV2(Primitive):
-    """
-    Reverses specific dimensions of a tensor.
-
-    .. warning::
-        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
-
-    Args:
-        axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
-
-    Inputs:
-        - **input_x** (Tensor) - The target tensor.
-          The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-
-    Outputs:
-        Tensor, has the same shape and type as `input_x`.
-
-    Raises:
-        TypeError: If `axis` is neither list nor tuple.
-        TypeError: If element of `axis` is not an int.
-        ValueError: There are multiple identical axes in `axis`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> op = ops.ReverseV2(axis=[1])
-        >>> output = op(input_x)
-        >>> print(output)
-        [[4 3 2 1]
-         [8 7 6 5]]
-        >>> op = ops.ReverseV2(axis=[1, 0])
-        >>> output = op(input_x)
-        >>> print(output)
-        [[8 7 6 5]
-         [4 3 2 1]]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis):
-        """Initialize ReverseV2."""
-        validator.check_value_type('axis', axis, [list, tuple], self.name)
-        for i, each in enumerate(axis):
-            validator.check_value_type(f'axis[{i}]', each, [int], self.name)
-        self.axis = axis
-        if isinstance(axis, list):
-            self.axis = tuple(axis)
-        self.add_prim_attr('axis', self.axis)
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-
-
 class Rint(Primitive):
     """
     Returns an integer that is closest to `input_x` element-wise.
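`ReverseV2` is likewise deleted from this module in the hunk above. Its documented behavior, for reference (the call below is the removed docstring's own example, and it assumes the primitive is still exported from `ops` in 2.3.0):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
>>> print(ops.ReverseV2(axis=[1])(x))  # reverse along dimension 1 only
[[4 3 2 1]
 [8 7 6 5]]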
@@ -3311,54 +2112,6 @@ class Rint(Primitive):
         self.init_prim_io_names(inputs=['x'], outputs=['output'])


-class Select(Primitive):
-    r"""
-    The conditional tensor determines whether the corresponding element in the output must be
-    selected from `x` (if True) or `y` (if False) based on the value of each
-    element.
-
-    It can be defined as:
-
-    .. math::
-        out_i = \begin{cases}
-        x_i, & \text{if } condition_i \\
-        y_i, & \text{otherwise}
-        \end{cases}
-
-    Inputs:
-        - **condition** (Tensor[bool]) - The condition tensor, decides which element is chosen.
-          The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-        - **x** (Tensor) - The first tensor to be selected and the shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-        - **y** (Tensor) - The second tensor to be selected and the shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-
-    Outputs:
-        Tensor, has the same shape as `condition`.
-
-    Raises:
-        TypeError: If `x` or `y` is not a Tensor.
-        ValueError: If shape of the three inputs are different.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> select = ops.Select()
-        >>> input_cond = Tensor([True, False])
-        >>> input_x = Tensor([2,3], mindspore.float32)
-        >>> input_y = Tensor([1,2], mindspore.float32)
-        >>> output = select(input_cond, input_x, input_y)
-        >>> print(output)
-        [2. 2.]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Select."""
-        self.init_prim_io_names(inputs=['condition', 'x', 'y'], outputs=['output'])
-
-
 class StridedSliceV2(Primitive):
     r"""
     StridedSliceV2 will be deprecated by StridedSlice in the future.
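`Select` gets the same treatment: the class body disappears from this file while the element-wise ternary semantics (:math:`out_i = x_i` if :math:`condition_i` else :math:`y_i`) are unchanged. Reusing the removed docstring's example, assuming `ops.Select` remains available:

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> cond = Tensor([True, False])           # pick from x where True, from y where False
>>> x = Tensor([2, 3], mindspore.float32)
>>> y = Tensor([1, 2], mindspore.float32)
>>> print(ops.Select()(cond, x, y))
[2. 2.]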
@@ -3374,151 +2127,32 @@ class StridedSliceV2(Primitive):

     Inputs:
         - **input_x** (Tensor) - The input Tensor.
-        - **begin** (tuple[int]) - A tuple which represents the location where to start. Only
-          constant value is allowed.
-        - **end** (tuple[int]) - A tuple or which represents the maximum location where to end.
-          Only constant value is allowed.
-        - **strides** (tuple[int]) - A tuple which represents the stride is continuously added
-          before reaching the maximum location. Only constant value is allowed.
-
-    Outputs:
-        Tensor, The output is explained by following example.
-
-    Raises:
-        TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is not an int.
-        TypeError: If `begin`, `end` or `strides` is not a tuple.
-        ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is less than 0.
-
-    Supported Platforms:
-        ``Ascend`` ``CPU``
-
-    Examples:
-        >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
-        ...                   [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
-        >>> strided_slice_v2 = ops.StridedSliceV2()
-        >>> output = strided_slice_v2(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
-        >>> print(output)
-        [[[3.]]
-         [[5.]]]
-    """
-
-    @prim_attr_register
-    def __init__(self,
-                 begin_mask=0,
-                 end_mask=0,
-                 ellipsis_mask=0,
-                 new_axis_mask=0,
-                 shrink_axis_mask=0):
-        """Initialize StridedSliceV2"""
-        self.init_prim_io_names(inputs=['x', 'begin', 'end', 'strides'], outputs=['output'])
-
-
-class StridedSlice(PrimitiveWithInfer):
-    r"""
-
-    Extracts a strided slice of a tensor.
-
-    Refer to :func:`mindspore.ops.strided_slice` for more details.
-
-    Args:
-        begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
-        end_mask (int, optional): Ending index of the slice. Default: ``0`` .
-        ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
-        new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
-        shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input Tensor to be extracted from.
-        - **begin** (tuple[int]) - A tuple which represents the location where to start.
+        - **begin** (tuple[int]) - A tuple which represents the location where to start. Only
+          constant value is allowed.
         - **end** (tuple[int]) - A tuple or which represents the maximum location where to end.
-        - **strides** (tuple[int]) - A tuple which represents the strides is continuously added
-          before reaching the maximum location. Only int is allowed, it can be negative
-          which results in reversed slicing.
+          Only constant value is allowed.
+        - **strides** (tuple[int]) - A tuple which represents the stride is continuously added
+          before reaching the maximum location. Only constant value is allowed.

     Outputs:
-        Tensor, return the extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
+        Tensor, The output is explained by following example.
+
+    Raises:
+        TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is not an int.
+        TypeError: If `begin`, `end` or `strides` is not a tuple.
+        ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is less than 0.

     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        ``Ascend`` ``CPU``

     Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
         >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
         ...                   [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
-        >>> # [[[1. 1. 1.]
-        >>> #   [2. 2. 2.]]
-        >>> #
-        >>> #  [[3. 3. 3.]
-        >>> #   [4. 4. 4.]]
-        >>> #
-        >>> #  [[5. 5. 5.]
-        >>> #   [6. 6. 6.]]]
-        >>> # In order to visually view the multi-dimensional array, write the above as follows
-        >>> # [
-        >>> #     [
-        >>> #         [1,1,1]
-        >>> #         [2,2,2]
-        >>> #     ]
-        >>> #     [
-        >>> #         [3,3,3]
-        >>> #         [4,4,4]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5,5,5]
-        >>> #         [6,6,6]
-        >>> #     ]
-        >>> # ]
-        >>> strided_slice = ops.StridedSlice()
-        >>> output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
-        >>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
-        >>> # start = [1, 0, 2] , end = [3, 1, 3], stride = [1, 1, 1], Find a segment of (start, end),
-        >>> # note that end is an open interval
-        >>> # To facilitate understanding, this operator can be divided into three steps:
-        >>> # Step 1: Calculation of the first dimension:
-        >>> # start = 1, end = 3, stride = 1, So can take 1st, 2nd rows, and then gets the final output at this time.
-        >>> # output_1th =
-        >>> # [
-        >>> #     [
-        >>> #         [3,3,3]
-        >>> #         [4,4,4]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5,5,5]
-        >>> #         [6,6,6]
-        >>> #     ]
-        >>> # ]
-        >>> # Step 2: Calculation of the second dimension
-        >>> # 2nd dimension, start = 0, end = 1, stride = 1. So only 0th rows can be taken, and the output at this time.
-        >>> # output_2nd =
-        >>> # [
-        >>> #     [
-        >>> #         [3,3,3]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5,5,5]
-        >>> #     ]
-        >>> # ]
-        >>> # Step 3: Calculation of the third dimension
-        >>> # 3nd dimension,start = 2, end = 3, stride = 1, So can take 2th cols,
-        >>> # and you get the final output at this time.
-        >>> # output_3ed =
-        >>> # [
-        >>> #     [
-        >>> #         [3]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5]
-        >>> #     ]
-        >>> # ]
-        >>> # The final output after finishing is:
+        >>> strided_slice_v2 = ops.StridedSliceV2()
+        >>> output = strided_slice_v2(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
         >>> print(output)
         [[[3.]]
          [[5.]]]
-        >>> # another example like :
-        >>> output = strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
-        >>> print(output)
-        [[[3. 3. 3.]]]
     """

     @prim_attr_register
@@ -3528,407 +2162,9 @@ class StridedSlice(PrimitiveWithInfer):
                  ellipsis_mask=0,
                  new_axis_mask=0,
                  shrink_axis_mask=0):
-        """Initialize StridedSlice"""
+        """Initialize StridedSliceV2"""
         self.init_prim_io_names(inputs=['x', 'begin', 'end', 'strides'], outputs=['output'])

-        validator.check_non_negative_int(begin_mask, 'begin_mask', self.name)
-        validator.check_non_negative_int(end_mask, 'end_mask', self.name)
-        validator.check_non_negative_int(ellipsis_mask, 'ellipsis_mask', self.name)
-        if len(tuple(filter(lambda x: x == '1', bin(ellipsis_mask)[-1:1:-1]))) > 1:
-            raise ValueError(f"For '{self.name}', only support one ellipsis in the index, but got {ellipsis_mask}.")
-        validator.check_non_negative_int(new_axis_mask, 'new_axis_mask', self.name)
-        validator.check_non_negative_int(shrink_axis_mask, 'shrink_axis_mask',
-                                         self.name)
-
-    def __infer__(self, x, begin, end, strides):
-        begin_v, begin_len = self._check_and_get_value(begin, 'begin')
-        end_v, end_len = self._check_and_get_value(end, 'end')
-        strides_v, strides_len = self._check_and_get_value(strides, 'strides')
-
-        is_dynamic_tuple = (self._is_none_in_tuple(begin_v.get('value'))
-                            or self._is_none_in_tuple(end_v.get('value'))
-                            or self._is_none_in_tuple(strides_v.get('value')))
-        is_dynamic = None in (begin_v.get('value'), end_v.get('value'), strides_v.get('value'))
-
-        if not is_dynamic and (begin_len != strides_len or end_len != strides_len):
-            raise ValueError(
-                f"For '{self.name}', 'begin', 'end' and 'strides' must be the same length, but got "
-                f"'begin' length: {begin_len}, 'end' length: {end_len}, 'strides' length: {strides_len}."
-            )
-
-        if is_dynamic or is_dynamic_tuple or is_shape_unknown(x['shape']):
-            ret_shape = self._compute_dynamic_slicing_shape(x, begin_v, end_v, strides_v, begin_len)
-            rets = {'shape': ret_shape,
-                    'dtype': x['dtype'],
-                    'value': None}
-            return rets
-
-        ret_shape = self._compute_slicing_shape(x['shape'], begin_v['value'], end_v['value'], strides_v['value'])
-        if all(ret_shape):
-            value = None
-        else:
-            init_func = Zero()
-            init_func.__enable_zero_dim__ = True
-            value = Tensor(dtype=x['dtype'].element_type(), shape=ret_shape, init=init_func)
-
-        if "max_value" in x and "min_value" in x:
-            validator.check_value_type("min_value", x["min_value"], [tuple, list], self.name)
-            validator.check_value_type("max_value", x["max_value"], [tuple, list], self.name)
-            max_value_slice = self._compute_dynamic_slicing_value(x["max_value"], begin_v, end_v, strides_v)
-            min_value_slice = self._compute_dynamic_slicing_value(x["min_value"], begin_v, end_v, strides_v)
-            return {'shape': ret_shape,
-                    'dtype': x['dtype'],
-                    'value': value,
-                    'max_value': max_value_slice,
-                    'min_value': min_value_slice}
-
-        if "shape_value" in x:
-            validator.check_value_type("shape_value", x["shape_value"], [tuple], self.name)
-            shape_value_slice = self._compute_dynamic_slicing_value(x["shape_value"], begin_v, end_v, strides_v)
-            return {'shape': ret_shape,
-                    'dtype': x['dtype'],
-                    'shape_value': shape_value_slice,
-                    'value': value}
-        return {'shape': ret_shape,
-                'dtype': x['dtype'],
-                'value': value}
-
-    @staticmethod
-    def _compute_slicing_len_for_positive_stride(begin, end, stride, x_dim):
-        """Compute slice length for positive stride."""
-        if x_dim == -1:
-            if begin >= end:
-                # When slicing forward, if begin >= end, the length of the slicing is 0.
-                slicing_length = 0
-            else:
-                slicing_length = -1
-            return slicing_length
-        # When slicing forward, convert begin and end to positive numbers.
-        if begin >= x_dim or end < -x_dim:
-            # When slicing forward, if begin >= x_dim or end < -x_dim, the length of the slicing is 0.
-            slicing_length = 0
-        else:
-            if -x_dim <= begin < 0:
-                begin += x_dim
-            if begin < -x_dim:
-                # When slicing forward, if begin < -x_dim, set begin = 0, which means start from the 0th element.
-                begin = 0
-            if -x_dim <= end < 0:
-                end += x_dim
-            if end > x_dim:
-                # When slicing forward, if end > x_dim, set end = x_dims, which means slice to the last element.
-                end = x_dim
-            if begin >= end:
-                # When slicing forward, if begin >= end, the length of the slicing is 0.
-                slicing_length = 0
-            else:
-                slicing_length = 1 + (end - 1 - begin) // stride
-        return slicing_length
-
-    @staticmethod
-    def _compute_slicing_len_for_negative_stride(begin, end, stride, x_dim):
-        """Compute slice length for negative stride."""
-        if x_dim == -1:
-            if begin <= end:
-                slicing_length = 0
-            else:
-                slicing_length = -1
-            return slicing_length
-        # When slicing backward, convert begin and end to negative numbers.
-        if begin < -x_dim or end >= x_dim:
-            # When slicing backward, if begin < -x_dim or end >= x_dim, the length of the slicing is 0.
-            slicing_length = 0
-        else:
-            if 0 <= begin < x_dim:
-                begin += -x_dim
-            if begin >= x_dim:
-                begin = -1
-            if 0 <= end < x_dim:
-                end += -x_dim
-            if end < -x_dim - 1:
-                # Slicing to the 0th element.
-                end = -x_dim - 1
-            if begin <= end:
-                slicing_length = 0
-            else:
-                slicing_length = 1 + (end + 1 - begin) // stride
-        return slicing_length
-
-    @staticmethod
-    def _get_slice_value(begin_v, end_v, strides_v):
-        """Get the slice value from value or shape_value."""
-        begin_value = begin_v['value']
-        end_value = end_v['value']
-        strides_value = strides_v['value']
-        if begin_value is None:
-            begin_value = begin_v['shape_value']
-        if end_value is None:
-            end_value = end_v['shape_value']
-        if strides_value is None:
-            strides_value = strides_v['shape_value']
-        return begin_value, end_value, strides_value
-
-    def _is_none_in_tuple(self, x):
-        return isinstance(x, tuple) and None in x
-
-    def _compute_slicing_length(self, begin, end, stride, x_dim):
-        """Computes the length of the slicing."""
-        if stride > 0:
-            slicing_length = self._compute_slicing_len_for_positive_stride(begin, end, stride, x_dim)
-        else:
-            slicing_length = self._compute_slicing_len_for_negative_stride(begin, end, stride, x_dim)
-        return slicing_length
-
-    def _compute_slicing_shape(self, x_shape, begin_v, end_v, strides_v):
-        """Computes the shape of the slicing."""
-        x_rank = len(x_shape)
-        slice_len = len(begin_v)
-
-        # After the integer is converted to binary, it is a str and the first two chars are the flag char '0b'.
-        begin_pos = bin(self.begin_mask)[-1:1:-1]
-        end_pos = bin(self.end_mask)[-1:1:-1]
-        ellipsis_pos = bin(self.ellipsis_mask)[-1:1:-1]
-        new_axis_pos = bin(self.new_axis_mask)[-1:1:-1]
-        shrink_axis_pos = bin(self.shrink_axis_mask)[-1:1:-1]
-
-        ret_shape = []
-        i, j = 0, 0
-        has_ellipsis = False
-        while i < x_rank or j < slice_len:
-            if j < slice_len:
-                begin, end, stride = begin_v[j], end_v[j], strides_v[j]
-
-                if j < len(ellipsis_pos) and ellipsis_pos[j] == '1':
-                    # When there is ellipsis, the latter part of the ellipsis will be processed separately.
-                    has_ellipsis = True
-                    break
-                if j < len(begin_pos) and begin_pos[j] == '1':
-                    begin = -1 if strides_v[j] < 0 else 0
-                if j < len(end_pos) and end_pos[j] == '1':
-                    end = -(x_shape[i] + 1) if strides_v[j] < 0 else x_shape[i]
-                if j < len(new_axis_pos) and new_axis_pos[j] == '1':
-                    ret_shape.append(1)
-                    j += 1
-                    continue
-                if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
-                    if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
-                        raise IndexError(f"For '{self.name}', the 'strides[{i}]' cannot be negative number and "
-                                         f"'begin[{i}]' must be in [-{x_shape[i]}, {x_shape[i]}) "
-                                         f"when 'shrink_axis_mask' is greater than 0, "
-                                         f"but got 'shrink_axis_mask': {self.shrink_axis_mask}, "
-                                         f"'strides[{i}]': {stride}, 'begin[{i}]': {begin}.")
-                    j += 1
-                    i += 1
-                    continue
-            else:
-                begin, end, stride = 0, x_shape[i], 1
-
-            slicing_length = self._compute_slicing_length(begin, end, stride, x_shape[i])
-            ret_shape.append(slicing_length)
-            i += 1
-            j += 1
-        if has_ellipsis:
-            # When there is ellipsis, handle the second half of the ellipsis split.
-            ellipsis_occupied_dims = x_rank - i - (slice_len - (j + 1)) + \
-                len(tuple(filter(lambda x: x == '1', new_axis_pos[j + 1:slice_len])))
-            ret_shape.extend(x_shape[i:i + ellipsis_occupied_dims])
-            j += 1
-            i += ellipsis_occupied_dims
-
-            while i < x_rank or j < slice_len:
-                begin, end, stride = begin_v[j], end_v[j], strides_v[j]
-
-                if j < len(begin_pos) and begin_pos[j] == '1':
-                    begin = -1 if strides_v[j] < 0 else 0
-                if j < len(end_pos) and end_pos[j] == '1':
-                    end = -(x_shape[i] + 1) if strides_v[j] < 0 else x_shape[i]
-                if j < len(new_axis_pos) and new_axis_pos[j] == '1':
-                    ret_shape.append(1)
-                    j += 1
-                    continue
-                if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
-                    if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
-                        raise IndexError(f"For '{self.name}', the 'strides[{i}]' can not be negative number and "
-                                         f"'begin[{i}]' must be in [-{x_shape[i]}, {x_shape[i]}) "
-                                         f"when 'shrink_axis_mask' is greater than 0, "
-                                         f"but got 'shrink_axis_mask': {self.shrink_axis_mask}, "
-                                         f"'strides[{i}]': {stride}, 'begin[{i}]': {begin}.")
-                    j += 1
-                    i += 1
-                    continue
-
-                slicing_length = self._compute_slicing_length(begin, end, stride, x_shape[i])
-                ret_shape.append(slicing_length)
-                i += 1
-                j += 1
-        return ret_shape
-
-    def _compute_dynamic_slicing_value(self, shape_value, begin_v, end_v, strides_v):
-        """Computes the length of the slicing for dynamic shape."""
-        shape_value_np = np.array(shape_value)
-        slice_index = []
-        for begin_i, end_i, strides_i in zip(begin_v['value'], end_v['value'], strides_v['value']):
-            s = slice(begin_i, end_i, strides_i)
-            slice_index.append(s)
-        slice_index = tuple(slice_index)
-        shape_value_slice = shape_value_np[slice_index]
-        shape_value_slice = tuple(shape_value_slice.tolist())
-        return shape_value_slice
-
-    def _compute_dynamic_slicing_length(self, begin, end, stride, x_dim):
-        """Computes the length of the slicing for dynamic shape."""
-        slicing_length = -1
-        if None in (begin, end, stride) or -1 in (begin, end, stride):
-            return slicing_length
-        slicing_length = self._compute_slicing_length(begin, end, stride, x_dim)
-        return slicing_length
-
-    def _compute_dynamic_slicing_shape(self, x, begin_v, end_v, strides_v, slice_len):
-        """Computes the shape of the slicing for dynamic shape, mask is currently not supported."""
-        x_shape = x['shape']
-        if is_dim_unknown(x_shape):
-            return [-2]
-        x_rank = len(x_shape)
-        new_axis_pos = bin(self.new_axis_mask)[-1:1:-1]
-        shrink_axis_pos = bin(self.shrink_axis_mask)[-1:1:-1]
-        if self.ellipsis_mask:
-            raise ValueError("Ellipsis Mask is currently not supported in dynamic shape.")
-        ret_shape = []
-        i, j = 0, 0
-        slice_has_special_value = False
-        begin_value, end_value, strides_value = self._get_slice_value(
-            begin_v, end_v, strides_v)
-        is_dynamic_tuple = (self._is_none_in_tuple(begin_value)
-                            or self._is_none_in_tuple(end_value)
-                            or self._is_none_in_tuple(strides_value))
-        if None in (begin_v['value'], end_v['value'], strides_v['value']) or is_dynamic_tuple:
-            slice_has_special_value = True
-        while i < x_rank or j < slice_len:
-            slicing_length = -1
-            if j < slice_len:
-                if j < len(new_axis_pos) and new_axis_pos[j] == '1':
-                    ret_shape.append(1)
-                    j += 1
-                    continue
-                if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
-                    j += 1
-                    i += 1
-                    continue
-                if None in (begin_value, end_value, strides_value):
-                    slicing_length = -1
-                elif slice_has_special_value:
-                    slicing_length = self._compute_dynamic_slicing_length(
-                        begin_value[j], end_value[j], strides_value[j], x_shape[i])
-                else:
-                    slicing_length = \
-                        self._compute_slicing_length(begin_value[j], end_value[j], strides_value[j], x_shape[i])
-            else:
-                if i >= len(x_shape):
-                    raise ValueError(f"For 'StridedSlice', the index must be less than or equal to "
-                                     f"the dimension of 'input_x', but got the dimension of 'input_x': {len(x_shape)} "
-                                     f"and the index: {i}.")
-                begin, end, stride = 0, x_shape[i], 1
-                if end > 0:
-                    slicing_length = self._compute_slicing_length(begin, end, stride, x_shape[i])
-            ret_shape.append(slicing_length)
-            i += 1
-            j += 1
-        return ret_shape
-
-    def _check_and_get_value(self, slice_input, name):
-        """Check begin, end, strides. Get its length and value."""
-        slice_value = slice_input['value']
-        slice_min = None
-        slice_max = None
-        slice_special_value = None
-        if "min_value" in slice_input and "max_value" in slice_input:
-            slice_min = slice_input["min_value"]
-            slice_max = slice_input["max_value"]
-        elif "shape_value" in slice_input:
-            slice_special_value = slice_input["shape_value"]
-        if slice_value is None:
-            validator.check_tensor_dtype_valid(name, slice_input['dtype'], [mstype.int32, mstype.int64], self.name)
-            slice_shape = slice_input['shape']
-            if len(slice_shape) != 1:
-                raise ValueError(f"For '{self.name}', both the 'begins', 'ends', and 'strides' must be 1-D, "
-                                 f"but got '{name}' shape: {slice_shape}.")
-            # not support scalar
-            slices = {
-                'value': slice_value,
-                'shape_value': slice_special_value,
-                'min_value': slice_min,
-                'max_value': slice_max
-            }
-            return slices, slice_shape[0]
-
-        if isinstance(slice_value, (Tensor, Tensor_)):
-            validator.check_tensor_dtype_valid(name, slice_input['dtype'], [mstype.int64], self.name)
-            slice_value = slice_value.asnumpy().tolist()
-        elif not isinstance(slice_value, tuple):
-            raise TypeError(f"For '{self.name}', both the 'begin', 'end', and 'strides' must be a tuple or Tensor, "
-                            f"but got '{name}': {slice_value}.")
-
-        if tuple(filter(lambda x: x is not None and not isinstance(x, int), slice_value)):
-            raise TypeError(f"For '{self.name}', the elements of 'begin', 'end', and 'strides' must be int, "
-                            f"but got {name}: {slice_value}.")
-
-        if name == 'strides':
-            if slice_value is not None and tuple(filter(lambda x: x == 0, slice_value)):
-                raise ValueError(f"For '{self.name}', 'strides' cannot contain 0, but got 'strides': {slice_value}.")
-
-        slices = {
-            'value': slice_value,
-            'shape_value': slice_special_value,
-            'min_value': slice_min,
-            'max_value': slice_max
-        }
-        return slices, len(slice_value)
-
-
-class Diag(PrimitiveWithCheck):
-    r"""
-
-    Constructs a diagonal tensor with a given diagonal values.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Refer to :func:`mindspore.ops.diag` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor.
-
-    Outputs:
-        Tensor, has the same dtype as the `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor([1, 2, 3, 4]).astype('int32')
-        >>> diag = ops.Diag()
-        >>> output = diag(input_x)
-        >>> print(output)
-        [[1 0 0 0]
-         [0 2 0 0]
-         [0 0 3 0]
-         [0 0 0 4]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Diag"""
-
-    def infer_value(self, x):
-        if x is None:
-            return None
-        # do constant-folding only when x rank is 1
-        if len(x.shape) != 1:
-            return None
-        ret = np.diag(x.asnumpy())
-        return Tensor(ret)
-

 class DiagPart(PrimitiveWithCheck):
     r"""
@@ -4022,280 +2258,6 @@ class Mvlgamma(Primitive):
         validator.check_positive_int(p, 'p', self.name)


-class Eye(Primitive):
-    """
-    Creates a tensor with ones on the diagonal and zeros in the rest.
-
-    Refer to :func:`mindspore.ops.eye` for more details.
-
-    Inputs:
-        - **n** (int) - The number of rows of returned tensor. Constant value only.
-        - **m** (int) - The number of columns of returned tensor. Constant value only.
-        - **t** (mindspore.dtype) - MindSpore's dtype, the data type of the returned tensor.
-          Default: ``None`` , the data type of the returned tensor is mindspore.float32.
-
-    Outputs:
-        Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output` depends on
-        the user's Inputs `n` and `m`. And the data type depends on Inputs `t`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> eye = ops.Eye()
-        >>> output = eye(2, 2, mindspore.int32)
-        >>> print(output)
-        [[1 0]
-         [0 1]]
-        >>> print(output.dtype)
-        Int32
-        >>> output = eye(1, 2, mindspore.float64)
-        >>> print(output)
-        [[1. 0.]]
-        >>> print(output.dtype)
-        Float64
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Eye"""
-        self.init_prim_io_names(inputs=['n', 'm', 't'], outputs=['output'])
-
-
-class ScatterNd(Primitive):
-    r"""
-    Scatters a tensor into a new tensor depending on the specified indices.
-
-    Refer to :func:`mindspore.ops.scatter_nd` for more details.
-
-    Inputs:
-        - **indices** (Tensor) - The index of scattering in the new tensor with int32 or int64 data type.
-          The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.
-        - **updates** (Tensor) - The source Tensor to be scattered.
-          It has shape `indices_shape[:-1] + shape[indices_shape[-1]:]`.
-        - **shape** (tuple[int]) - Define the shape of the output tensor, has the same data type as indices.
-          The shape of `shape` is :math:`(x_1, x_2, ..., x_R)`, and the length of 'shape' is greater than or equal to 2.
-          In other words, the shape of `shape` is at least :math:`(x_1, x_2)`.
-          And the value of any element in `shape` must be greater than or equal to 1.
-          In other words, :math:`x_1` >= 1, :math:`x_2` >= 1.
-
-    Outputs:
-        Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> op = ops.ScatterNd()
-        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
-        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
-        ...                             [3, 3, 3, 3], [4, 4, 4, 4]],
-        ...                            [[1, 1, 1, 1], [2, 2, 2, 2],
-        ...                             [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
-        >>> shape = (4, 4, 4)
-        >>> output = op(indices, updates, shape)
-        >>> print(output)
-        [[[1. 1. 1. 1.]
-          [2. 2. 2. 2.]
-          [3. 3. 3. 3.]
-          [4. 4. 4. 4.]]
-         [[0. 0. 0. 0.]
-          [0. 0. 0. 0.]
-          [0. 0. 0. 0.]
-          [0. 0. 0. 0.]]
-         [[1. 1. 1. 1.]
-          [2. 2. 2. 2.]
-          [3. 3. 3. 3.]
-          [4. 4. 4. 4.]]
-         [[0. 0. 0. 0.]
-          [0. 0. 0. 0.]
-          [0. 0. 0. 0.]
-          [0. 0. 0. 0.]]]
-        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
-        >>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
-        >>> shape = (3, 3)
-        >>> output = op(indices, updates, shape)
-        >>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
-        >>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
-        >>> # [
-        >>> #     [0. 0. 0.]
-        >>> #     [0. 0. 0.]
-        >>> #     [0. 0. 0.]
-        >>> # ]
-        >>> # Step 2: Modify the data at the specified location according to the indicators
-        >>> # 0th row of indices is [0, 1], 0th row of updates is 3.2.
-        >>> # means that the empty tensor in the 0th row and 1st col set to 3.2
-        >>> # [
-        >>> #     [0. 3.2. 0.]
-        >>> #     [0. 0. 0.]
-        >>> #     [0. 0. 0.]
-        >>> # ]
-        >>> # 1th row of indices is [1, 1], 1th row of updates is 1.1.
-        >>> # means that the empty tensor in the 1th row and 1st col set to 1.1
-        >>> # [
-        >>> #     [0. 3.2. 0.]
-        >>> #     [0. 1.1 0.]
-        >>> #     [0. 0. 0.]
-        >>> # ]
-        >>> # The final result is as follows:
-        >>> print(output)
-        [[0. 3.2 0.]
-         [0. 1.1 0.]
-         [0. 0. 0.]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ScatterNd"""
-        self.init_prim_io_names(inputs=['indices', 'update', 'shape'], outputs=['output'])
-
-
-class ResizeNearestNeighbor(Primitive):
-    r"""
-    Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest
-    neighbor algorithm selects the value of the nearest point and does not consider the
-    values of neighboring points at all, yielding a piecewise-constant interpolant.
-
-    Args:
-        size (Union[tuple, list]): The target size. The dimension of size must be 2.
-        align_corners (bool): Whether the centers of the 4 corner pixels of the input
-            and output tensors are aligned. Default: ``False`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.
-
-    Outputs:
-        Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`.
-        The data type is the same as the `input_x`.
-
-    Raises:
-        TypeError: If `size` is neither tuple nor list.
-        TypeError: If `align_corners` is not a bool.
-        ValueError: If length of `size` is not equal to 2.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
-        >>> size = (2, 2)
-        >>> output = ops.ResizeNearestNeighbor(size=size)(input_tensor)
-        >>> print(output)
-        [[[[-0.1  0.3]
-           [ 0.4  0.5]]]]
-    """
-
-    @prim_attr_register
-    def __init__(self, size, align_corners=False):
-        """Initialize ResizeNearestNeighbor"""
-        validator.check_value_type("size", size, [tuple, list], self.name)
-        validator.check_value_type("align_corners", align_corners, [bool], self.name)
-        validator.check_equal_int(len(size), 2, "length of size", self.name)
-        for i, value in enumerate(size):
-            validator.check_non_negative_int(value, f'{i}th value of size', self.name)
-        self.init_prim_io_names(inputs=['image_in'], outputs=['image_out'])
-
-
-class ResizeNearestNeighborV2(Primitive):
-    r"""
-    Resizes the input tensor to specific size by using the nearest neighbor algorithm.
-
-    The nearest neighbor algorithm selects the value of the nearest point and does not consider the
-    values of neighboring points at all, yielding a piecewise-constant interpolant.
-
-    Args:
-        align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input and output
-            tensors are aligned, preserving the values at the corner pixels. Default: ``False`` .
-        half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` ,
-            `align_corners` should be False. Default: ``False`` .
-
-    Inputs:
-        - **x** (Tensor) - 4-D with shape :math:`(batch, channels, height, width)` .
-        - **size** (Tensor) - The new size for the images. A 1-D int32 Tensor
-          of 2 elements: [`new_height, new_width`].
-
-    Outputs:
-        - **y** (Tensor) - The resized images. A 4-D with shape
-          :math:`(batch, channels, new\_height, new\_width)`. It has the same dtype as `x`.
-
-    Raises:
-        TypeError: If `x` or `size` is not a Tensor.
-        TypeError: If the data type of `size` is not int32.
-        TypeError: If `align_corners` or `half_pixel_centers` is not bool.
-        ValueError: If any value of `size` is non positive.
-        ValueError: If the dimension of `x` is not 4.
-        ValueError: If the dimension of `size` is not 1.
-        ValueError: If the elements number of `size` is not 2.
-        ValueError: If attr `half_pixel_centers` and `align_corners` are True at the same time.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> from mindspore import dtype as mstype
-        >>> input_tensor = Tensor(np.ones((1, 1, 4, 4)), mstype.float32)
-        >>> size = Tensor([2, 2], mstype.int32)
-        >>> resize = ops.ResizeNearestNeighborV2()
-        >>> output = resize(input_tensor, size)
-        >>> print(output)
-        [[[[1. 1.]
-           [1. 1.]]]]
-        >>> print(output.shape)
-        (1, 1, 2, 2)
-    """
-
-    @prim_attr_register
-    def __init__(self, align_corners=False, half_pixel_centers=False):
-        """Initialize ResizeNearestNeighborV2"""
-        self.init_prim_io_names(inputs=['x', 'size'], outputs=['y'])
-        validator.check_bool(align_corners, 'align_corners', self.name)
-        validator.check_bool(half_pixel_centers, 'half_pixel_centers', self.name)
-
-
-class GatherNd(Primitive):
-    r"""
-    Gathers slices from a tensor by indices.
-
-    Refer to :func:`mindspore.ops.gather_nd` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The target tensor to gather values.
-        - **indices** (Tensor) - The index tensor, with int32 or int64 data type.
-
-    Outputs:
-        Tensor, has the same type as `input_x` and the shape is indices_shape[:-1] + x_shape[indices_shape[-1]:].
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> op = ops.GatherNd()
-        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
-        >>> output = op(input_x, indices)
-        >>> print(output)
-        [-0.1  0.5]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize GatherNd"""
-        self.init_prim_io_names(inputs=['input_x', 'indices'], outputs=['y'])
-
-
 class ScatterUpdate(Primitive):
     r"""
     Updates tensor values by using input indices and value.
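Five more primitives (`Eye`, `ScatterNd`, `ResizeNearestNeighbor`, `ResizeNearestNeighborV2`, `GatherNd`) are removed from this file in the hunk above. One representative example, copied from the removed `GatherNd` docstring and assuming the primitive is still exported:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)  # one (row, col) pair per output element
>>> print(ops.GatherNd()(x, indices))
[-0.1  0.5]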
@@ -4798,80 +2760,6 @@ class ScatterSub(Primitive):
         self.add_prim_attr('side_effect_mem', True)


-class Triu(Primitive):
-    """
-    Returns the upper triangular portion of the 2-D matrix or the set of matrices
-    in a batch. The remaining elements of the resulting Tensor are assigned a value of 0.
-    The upper triangular section of the matrix comprises of the
-    elements present on and above the main diagonal.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        diagonal (int, optional): The index of diagonal. Default: ``0`` , indicating the main diagonal.
-
-    Inputs:
-        - **x** (Tensor) - The input tensor with shape :math:`(M, N, *)`
-          where :math:`*` means any number of additional dimensions.
-
-    Outputs:
-        - **y** (Tensor) - A tensor has the same shape and data type as input.
-
-    Raises:
-        TypeError: If `x` is not an Tensor.
-        TypeError: If `diagonal` is not an int.
-        ValueError: If the dimension of `input` is less than 2.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = ops.Triu()
-        >>> result = triu(x)
-        >>> print(result)
-        [[ 1  2  3  4]
-         [ 0  6  7  8]
-         [ 0  0 12 13]
-         [ 0  0  0 17]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = ops.Triu(diagonal=1)
-        >>> result = triu(x)
-        >>> print(result)
-        [[ 0  2  3  4]
-         [ 0  0  7  8]
-         [ 0  0  0 13]
-         [ 0  0  0  0]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = ops.Triu(diagonal=-1)
-        >>> result = triu(x)
-        >>> print(result)
-        [[ 1  2  3  4]
-         [ 5  6  7  8]
-         [ 0 11 12 13]
-         [ 0  0 16 17]]
-    """
-
-    @prim_attr_register
-    def __init__(self, diagonal=0):
-        """Initialize Triu"""
-        validator.check_value_type("diagonal", diagonal, [int], self.name)
-        self.diagonal = diagonal
-        self.init_prim_io_names(inputs=['x'], outputs=['y'])
-
-
 class ScatterMul(_ScatterOpDynamic):
     r"""
     Updates the value of the input tensor through the multiply operation.
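`Triu` is the only removal in this hunk. Its documented behavior (zero out everything below the `diagonal`-th diagonal), taken from the removed docstring and assuming the op remains exported:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [10, 11, 12, 13], [14, 15, 16, 17]]))
>>> print(ops.Triu(diagonal=1)(x))  # diagonal=1 also zeroes the main diagonal
[[ 0  2  3  4]
 [ 0  0  7  8]
 [ 0  0  0 13]
 [ 0  0  0  0]]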
@@ -5837,6 +3725,9 @@ class SpaceToBatchND(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
+        >>> import numpy as np
         >>> block_shape = [2, 2]
         >>> paddings = [[0, 0], [0, 0]]
         >>> space_to_batch_nd = ops.SpaceToBatchND(block_shape, paddings)
@@ -5896,84 +3787,31 @@ class BatchToSpaceNDV2(Primitive):
          same, equal to `block_shape`. In this case of Ascend, M must be 2.
        - **crops** (Union[list(int), tuple(int)]) - The crops values for spatial dimensions, containing
          M subtraction list. Each contains 2 integer values. All values must be >= 0. crops[i] specifies
-         the crops values for spatial dimension i, which corresponds to input dimension i + offset,
-         where offset = N-M, and N is the number of input dimensions. It is required that
-         :math:`input\_shape[i+offset]*block\_shape[i] > crops[i][0]+crops[i][1]`
-
-    Outputs:
-        Tensor, contains the result of batch division and rearrangement of the original Tensor.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> block_shape = Tensor(np.array([2, 2]), mindspore.int32)
-        >>> crops = [[0, 0], [0, 0]]
-        >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
-        >>> output = ops.BatchToSpaceNDV2(input_x, block_shape, crops)
-        >>> print(output)
-        [[[[1. 2.]
-           [3. 4.]]]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize BatchToSpaceNDV2"""
-        self.init_prim_io_names(inputs=['input_x', 'block_shape', 'crops'], outputs=['y'])
-        self.add_prim_attr('origin_format', 'NHWC')
-
-
-class BroadcastTo(PrimitiveWithCheck):
-    """
-    Broadcasts input tensor to a given shape.
-
-    Refer to :func:`mindspore.ops.broadcast_to` for more details.
-
-    Args:
-        shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
-            where it will be substituted by the input tensor's shape in that position, see example.
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor of any dimension.
+         the crops values for spatial dimension i, which corresponds to input dimension i + offset,
+         where offset = N-M, and N is the number of input dimensions. It is required that
+         :math:`input\_shape[i+offset]*block\_shape[i] > crops[i][0]+crops[i][1]`

     Outputs:
-        Tensor, with the given `shape` and the same data type as `input_x`.
+        Tensor, contains the result of batch division and rearrangement of the original Tensor.

     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        ``Ascend``

     Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> shape = (2, 3)
-        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
-        >>> output = ops.BroadcastTo(shape=shape)(x)
-        >>> print(output)
-        [[1. 2. 3.]
-         [1. 2. 3.]]
-        >>>
-        >>> shape = (-1, 2)
-        >>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
-        >>> output = ops.BroadcastTo(shape=shape)(x)
+        >>> block_shape = Tensor(np.array([2, 2]), mindspore.int32)
+        >>> crops = [[0, 0], [0, 0]]
+        >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
+        >>> output = ops.BatchToSpaceNDV2(input_x, block_shape, crops)
         >>> print(output)
-        [[1. 1.]
-         [2. 2.]]
+        [[[[1. 2.]
+           [3. 4.]]]]
     """

     @prim_attr_register
-    def __init__(self, shape):
-        """Initialize BroadcastTo"""
-        validator.check_value_type("shape", shape, (tuple), self.name)
-        validator.check("dimension of x", len(shape), "", 0, validator.GT, self.name)
-        for ix, i in enumerate(shape):
-            validator.check_value_type('target shape index -> ' + str(ix), i, [int], self.name)
-            validator.check("shape element", i, "shape element min limit", -1, validator.GE, self.name)
-        self.shape = shape
-
-    def infer_value(self, x):
-        if x is None:
-            return None
-        return Tensor(np.broadcast_to(x.asnumpy(), self.shape))
+    def __init__(self):
+        """Initialize BatchToSpaceNDV2"""
+        self.init_prim_io_names(inputs=['input_x', 'block_shape', 'crops'], outputs=['y'])
+        self.add_prim_attr('origin_format', 'NHWC')


 class Meshgrid(PrimitiveWithInfer):
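Inside the hunk above, `BatchToSpaceNDV2`'s docstring is re-indented and `BroadcastTo` is removed. The removed `BroadcastTo` docstring documents the `-1` placeholder in the target shape, which is worth keeping as an example (assuming the primitive is still exported):

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
>>> print(ops.BroadcastTo(shape=(-1, 2))(x))  # -1 keeps the input's size in that position
[[1. 1.]
 [2. 2.]]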
@@ -5983,13 +3821,13 @@ class Meshgrid(PrimitiveWithInfer):
     Refer to :func:`mindspore.ops.meshgrid` for more details.

     Args:
-        indexing (str, optional): Cartesian ('xy') or
-            matrix ('ij') indexing of output. Valid options: xy' or 'ij'. In the 2-D case with
+        indexing (str, optional): Cartesian ``'xy'`` or
+            matrix ``'ij'`` indexing of output. In the 2-D case with
             inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
-            for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
+            for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
             case with inputs of length `M`, `N` and `P`, outputs are of shape
-            :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing.
-            Default: 'xy'.
+            :math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
+            Default: ``'xy'``.

     Inputs:
         - **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects.
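The `indexing` rewording above only changes markup ('xy' → ``'xy'``) and drops a garbled "Valid options" clause; semantics are unchanged. For the 2-D case described, with inputs of length M=3 and N=2 (an illustrative sketch, not from the package):

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3]).astype(np.int32))
>>> y = Tensor(np.array([4, 5]).astype(np.int32))
>>> grid_x, grid_y = ops.Meshgrid(indexing='xy')((x, y))
>>> print(grid_x.shape)  # (N, M) for 'xy' indexing
(2, 3)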
@@ -6216,7 +4054,7 @@ class EditDistance(Primitive):
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> import mindspore.nn as nn
-        >>> import mindspore.ops as ops
+        >>> from mindspore import ops
        >>> class EditDistance(nn.Cell):
        ...     def __init__(self, hypothesis_shape, truth_shape, normalize=True):
        ...         super(EditDistance, self).__init__()
@@ -6279,8 +4117,8 @@ class Sort(Primitive):
     Sorts the elements of the input tensor along the given dimension in the specified order.

     .. warning::
-        Currently, the data types of Float16 is well supported.
-        Using Float32 might cause loss of accuracy.
+        Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
+        If use float32, it may cause loss of accuracy.

     Args:
         axis (int, optional): The dimension to sort along. Default: ``-1``, means the last dimension.
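The updated warning widens the well-supported dtypes for `Sort` from float16 alone to float16 plus the integer types. A sketch exercising one of the newly listed dtypes, assuming 2.3.0 behaves as the new warning states:

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([[8, 2], [640, 101]], mindspore.int32)  # int32 is now listed as well supported
>>> values, indices = ops.Sort()(x)                    # sorts along the last axis by default
>>> print(values)
[[  2   8]
 [101 640]]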
@@ -6380,56 +4218,6 @@ class EmbeddingLookup(Primitive):
         self.add_prim_attr('bprop_return_sparse', True)
 
 
-class GatherD(Primitive):
-    """
-    Gathers elements along an axis specified by dim.
-
-    Refer to :func:`mindspore.ops.gather_elements` for more details.
-
-    Inputs:
-        - **x** (Tensor) - The input tensor.
-        - **dim** (int) - The axis along which to index. It must be int32 or int64.
-        - **index** (Tensor) - The indices of elements to gather. It can be one of the following data types:
-          int32, int64. The value range of each index element is [-x_rank[dim], x_rank[dim]).
-
-    Outputs:
-        Tensor, has the same data type with `x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
-        >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
-        >>> dim = 1
-        >>> output = ops.GatherD()(x, dim, index)
-        >>> print(output)
-        [[1 1]
-         [4 3]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize GatherD"""
-        self.init_prim_io_names(inputs=['x', 'dim', 'index'], outputs=['output'])
-
-
-class Identity(Primitive):
-    """
-    The `mindspore.ops.Identity` interface is deprecated, please use the :func:`mindspore.ops.deepcopy` instead.
-
-    Supported Platforms:
-        Deprecated
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        pass
-
-
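
Note: code that constructed the removed `GatherD` primitive can switch to the functional
:func:`mindspore.ops.gather_elements` it referred to; likewise, the removed `Identity`
primitive's docstring already pointed callers at :func:`mindspore.ops.deepcopy`. A minimal
sketch, reusing the values from the removed example and assuming 2.3 keeps the documented
semantics:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
    >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
    >>> output = ops.gather_elements(x, 1, index)
    >>> print(output)
    [[1 1]
     [4 3]]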
 class IdentityN(Primitive):
     """
     Return a tuple of tensors with the same shapes and contents as the input.
@@ -6468,72 +4256,6 @@ class IdentityN(Primitive):
         self.init_prim_io_names(inputs=['x'], outputs=['y'])
 
 
-class Range(PrimitiveWithCheck):
-    r"""
-    Creates a sequence of numbers that begins at `start` and extlimits by increments of
-    `delta` up to but not including `limit`.
-
-    Refer to :func:`mindspore.ops.range` for more details.
-
-    Args:
-        maxlen (int, optional): Memory that can fit `maxlen` many elements
-            will be allocated for the output. Optional, must be positive. Default: 1000000.
-            If the output has more than `maxlen` elements, a runtime error
-            will occur.
-
-    Inputs:
-        - **start** (Tensor) - A scalar Tensor. The first number in the sequence.
-        - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive.
-        - **delta** (Tensor) - A scalar Tensor. Number that increments `start`.
-
-    Outputs:
-        A 1-D Tensor, with the same type as the inputs.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-
-    Examples:
-        >>> from mindspore import Tensor, ops
-        >>> from mindspore import dtype as mstype
-        >>> start = Tensor(0, mstype.int32)
-        >>> limit = Tensor(10, mstype.int32)
-        >>> delta = Tensor(4, mstype.int32)
-        >>> output = ops.Range()(start, limit, delta)
-        >>> print(output)
-        [0 4 8]
-    """
-
-    @prim_attr_register
-    def __init__(self, maxlen=1000000):
-        self.init_prim_io_names(inputs=['start', 'limit', 'delta'], outputs=['output'])
-        validator.check_value_type("maxlen", maxlen, [int], self.name)
-        validator.check_positive_int(maxlen, "maxlen", self.name)
-        self.maxlen = maxlen
-        self.add_prim_attr('maxlen', maxlen)
-
-    def check_shape(self, start_shape, limit_shape, delta_shape):
-        if not is_shape_unknown(start_shape):
-            validator.check("start_shape", len(start_shape), "", 0, validator.EQ, self.name)
-        if not is_shape_unknown(limit_shape):
-            validator.check("limit_shape", len(limit_shape), "", 0, validator.EQ, self.name)
-        if not is_shape_unknown(delta_shape):
-            validator.check("delta_shape", len(delta_shape), "", 0, validator.EQ, self.name)
-
-    def check_dtype(self, start_dtype, limit_dtype, delta_dtype):
-        valid_dtypes = [mstype.int32, mstype.float32, mstype.int64, mstype.float64]
-        inputs = {"start": start_dtype, "limit": limit_dtype, "delta": delta_dtype}
-        validator.check_tensors_dtypes_same_and_valid(inputs, valid_dtypes, self.name)
-
-    def infer_value(self, start_value, limit_value, delat_value):
-        """Infer the value of input for Range."""
-        if start_value is not None and limit_value is not None and delat_value is not None:
-            start = start_value.asnumpy()
-            limit = limit_value.asnumpy()
-            delat = delat_value.asnumpy()
-            return Tensor(np.arange(start, limit, delat), dtype=start_value.dtype)
-        return None
-
-
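
Note: the removed primitive's behaviour is covered by the functional :func:`mindspore.ops.range`
it referred to. A minimal sketch, reusing the removed example's inputs (assuming the functional
form accepts the same scalar Tensors):

    >>> from mindspore import Tensor, ops
    >>> from mindspore import dtype as mstype
    >>> start = Tensor(0, mstype.int32)
    >>> limit = Tensor(10, mstype.int32)
    >>> delta = Tensor(4, mstype.int32)
    >>> print(ops.range(start, limit, delta))
    [0 4 8]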
 class RangeV2(Primitive):
     """
     Creates a sequence of numbers that begins at `start`, ends at `limit` but not including `limit`
@@ -6588,46 +4310,6 @@ class RangeV2(Primitive):
         validator.check_positive_int(maxlen, "maxlen", self.name)
 
 
-class MaskedFill(Primitive):
-    """
-    Fills elements with value where mask is True.
-
-    Note:
-        If `value` is a floating-point number of Python, it will be converted to float32 later by default.
-        In this case, if `input_x` is a float16 Tensor, it will be converted to float32 for calculation,
-        and the result type will be converted back to float16 on the CPU and Ascend platforms, which may
-        cause the performance penalty. A TypeError may be raised on the GPU platform. Therefore,
-        it is recommended that 'value' should use a Tensor with the same dtype as `input_x`.
-
-    Refer to :func:`mindspore.ops.masked_fill` for more details.
-
-    Inputs:
-        - **input** (Tensor) - The input Tensor.
-        - **mask** (Tensor[bool]) - The boolean mask.
-        - **value** (Union[float, Tensor]) - The value to fill in with, which dtype is the same as `input`.
-
-    Outputs:
-        Tensor, has the same type and shape as `input`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
-        >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
-        >>> output = ops.MaskedFill()(input, mask, 0.5)
-        >>> print(output)
-        [0.5 0.5 3. 0.5]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['input', 'mask', 'value'], outputs=['output'])
-
-
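
Note: :func:`mindspore.ops.masked_fill`, referenced above, remains available as the functional
replacement. A minimal sketch, reusing the removed example's values:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
    >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
    >>> print(ops.masked_fill(input_x, mask, 0.5))
    [0.5 0.5 3. 0.5]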
 class MaskedScatter(Primitive):
     """
     Updates the value in the input with value in `updates` according to the `mask`.
@@ -6721,60 +4403,6 @@ class MaskedSelect(PrimitiveWithCheck):
         validator.check_tensor_dtype_valid('x', x_dtype, (mstype.bool_,) + mstype.number_type, self.name)
 
 
-class SearchSorted(Primitive):
-    """
-    Returns the indices correspond to the positions where the given numbers in `values` should be inserted
-    into `sorted_sequence` so that the order of the sequence is maintained.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Refer to :func:`mindspore.ops.searchsorted` for more details.
-
-    Args:
-        dtype (:class:`mindspore.dtype`, optional): Output data type. An optional data type of
-            ``mstype.int32`` and ``mstype.int64``. Default: ``mstype.int64``.
-        right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
-            if ``False`` , return the first such index. Default: ``False`` .
-
-    Inputs:
-        - **sorted_sequence** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_R)` or `(x_1)`.
-          It must contain a monotonically increasing sequence on the innermost dimension.
-        - **values** (Tensor) - The value that should be inserted.
-          The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_S)`.
-
-    Outputs:
-        Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
-        if insert the corresponding value in the `values` tensor, the order of `sorted_sequence` would be preserved,
-        whose datatype is int32 if out_int32 is True, otherwise int64, and shape is the same as the shape of `values`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
-        >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
-        >>> output = ops.SearchSorted()(sorted_sequence, values)
-        >>> print(output)
-        [[2 4 5]
-         [1 2 4]]
-    """
-
-    @prim_attr_register
-    def __init__(self, dtype=mstype.int64, right=False):
-        """Initialize SearchSorted"""
-        validator.check_value_type("dtype", dtype, [mstype.Type], self.name)
-        valid_values = (mstype.int64, mstype.int32)
-        self.dtype = validator.check_type_name(
-            "dtype", dtype, valid_values, self.name)
-        validator.check_value_type('right', right, [bool], self.name)
-        self.init_prim_io_names(
-            inputs=['sorted_sequence', 'values'], outputs=['output'])
-
-
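
Note: :func:`mindspore.ops.searchsorted`, referenced above, covers the removed primitive. A
minimal sketch with the removed example's values (assuming the functional form's default
int64 output):

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
    >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
    >>> print(ops.searchsorted(sorted_sequence, values))
    [[2 4 5]
     [1 2 4]]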
 class _TensorScatterOp(PrimitiveWithInfer):
     """
     Defines TensorScatter Base Operators
@@ -6879,43 +4507,15 @@ class TensorScatterUpdate(_TensorScatterOp):
     def __init__(self):
         self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
 
-    def _infer_specified_value(self, input_x_value, indices_value, updates_value):
-        """Calculate min/max value for output of TensorScatterUpdate op"""
-        if isinstance(input_x_value, tuple):
-            input_x_value = list(input_x_value)
-        if isinstance(input_x_value, (Tensor, Tensor_)):
-            input_x_value = input_x_value.asnumpy()
-        if indices_value is None or updates_value is None:
-            return None
-        if isinstance(indices_value, (Tensor, Tensor_)):
-            indices_value = indices_value.asnumpy()
-        if isinstance(updates_value, (Tensor, Tensor_)):
-            updates_value = updates_value.asnumpy()
-        input_x = np.array(input_x_value)
-        updates = np.array(updates_value)
-        for i, indice in enumerate(indices_value):
-            input_x[indice] = updates[i]
-        output = tuple(input_x.tolist())
-        return output
-
-    def _infer_min_value(self, input_x_value, indices_value, updates_value):
-        return self._infer_specified_value(input_x_value, indices_value, updates_value)
-
-    def _infer_max_value(self, input_x_value, indices_value, updates_value):
-        return self._infer_specified_value(input_x_value, indices_value, updates_value)
-
     def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):
         validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)
         args = {"input_x": input_x_dtype, "updates": updates_dtype}
         validator.check_tensors_dtypes_same_and_valid(args, (mstype.bool_,) + mstype.number_type, self.name)
         return input_x_dtype
 
-    def _infer_shape_value(self, input_x_value, indices_value, updates_value):
-        return self._infer_specified_value(input_x_value, indices_value, updates_value)
-
 
 class TensorScatterMax(Primitive):
-    """
+    r"""
     By comparing the value at the position indicated by `indices` in `x` with the value in the `updates`,
     the value at the index will eventually be equal to the largest one to create a new tensor.
 
@@ -6926,7 +4526,7 @@ class TensorScatterMax(Primitive):
         - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
           The rank must be at least 2.
         - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
-          and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
+          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
 
     Outputs:
         Tensor, has the same shape and type as `input_x`.
@@ -6963,7 +4563,7 @@ class TensorScatterMax(Primitive):
 
 
 class TensorScatterMin(Primitive):
-    """
+    r"""
     By comparing the value at the position indicated by `indices` in `input_x` with the value in the `updates`,
     the value at the index will eventually be equal to the smallest one to create a new tensor.
 
@@ -6974,7 +4574,7 @@ class TensorScatterMin(Primitive):
         - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
          The rank must be at least 2.
        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
-          and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
+          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
 
     Outputs:
         Tensor, has the same shape and type as `input_x`.
@@ -7019,7 +4619,7 @@ class TensorScatterSub(Primitive):
     instead of input `Parameter`.
 
     .. math::
-        output[indices] = input\_x - update
+        output\left [indices \right ] = input\_x- update
 
     Refer to :func:`mindspore.ops.tensor_scatter_sub` for more details.
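
Note: to make the formula concrete, a single-index sketch (values chosen here for illustration,
assuming :func:`mindspore.ops.tensor_scatter_sub` follows the definition above):

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> input_x = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), mindspore.float32)
    >>> indices = Tensor(np.array([[0, 0]]), mindspore.int32)
    >>> updates = Tensor(np.array([5.0]), mindspore.float32)
    >>> # output[0, 0] = input_x[0, 0] - 5.0 = -4.0
    >>> print(ops.tensor_scatter_sub(input_x, indices, updates))
    [[-4.  2.]
     [ 3.  4.]]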
 
@@ -7123,7 +4723,7 @@ class TensorScatterMul(_TensorScatterOp):
     The updates are applied on output `Tensor` instead of input `Parameter`.
 
     .. math::
-        output[indices] = input\_x \times update
+        output\left [indices \right ] = input\_x\times update
 
     Refer to :func:`mindspore.ops.tensor_scatter_mul` for more details.
 
@@ -7132,7 +4732,7 @@ class TensorScatterMul(_TensorScatterOp):
         - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
           The rank must be at least 2.
         - **updates** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`,
-          and the shape of `updates` should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
+          and the shape of `updates` should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
 
     Outputs:
         Tensor, has the same shape and type as `input_x`.
@@ -7169,7 +4769,7 @@ class TensorScatterMul(_TensorScatterOp):
 
 
 class TensorScatterDiv(_TensorScatterOp):
-    """
+    r"""
     Creates a new tensor by dividing the values from the positions in `input_x` indicated by
     `indices`, with values from `updates`. When divided values are provided for the same
     index, the result of the update will be to divided these values respectively. Except that
@@ -7182,7 +4782,7 @@ class TensorScatterDiv(_TensorScatterOp):
         - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
          The rank must be at least 2.
        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
-          and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
+          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
 
     Outputs:
         Tensor, has the same shape and type as `input_x`.
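
Note: the `updates.shape` constraint shared by this family can be checked mechanically. With
`indices.shape = (2, 2)` and `input_x.shape = (2, 3)`, the rule gives `(2,) + () = (2,)`;
a hypothetical helper for illustration:

    >>> def expected_updates_shape(indices_shape, input_x_shape):
    ...     # indices.shape[:-1] + input_x.shape[indices.shape[-1]:]
    ...     return indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
    >>> expected_updates_shape((2, 2), (2, 3))
    (2,)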
@@ -7386,8 +4986,6 @@ class TensorScatterElements(Primitive):
        - **indices** (Tensor) - The index of `input_x` to do scatter operation whose data type must be int32 or
          int64. It has the same rank as `data`. And accepted range is [-s, s) where s is the size along axis.
        - **updates** (Tensor) - The tensor doing the scatter operation with `data`,
-         it has the same shape and type as `data`.
-       - **update** (Tensor) - The tensor doing the scatter operation with `data`,
          it has the same type as `data` and the same shape as `indices`.
 
     Outputs:
@@ -7398,7 +4996,7 @@ class TensorScatterElements(Primitive):
 
     Examples:
         >>> import mindspore
-        >>> import mindspore.ops as ops
+        >>> from mindspore import ops
         >>> from mindspore import Tensor
         >>> op = ops.TensorScatterElements(0, "none")
         >>> data = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
@@ -7410,7 +5008,7 @@ class TensorScatterElements(Primitive):
         [ 0.0 5.0 0.0]
         [ 7.0 0.0 0.0]]
         >>> import mindspore as ms
-        >>> import mindspore.ops as ops
+        >>> from mindspore import ops
         >>> from mindspore import Tensor
         >>> op = ops.TensorScatterElements(1, "add")
         >>> data = Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.float32)
@@ -7436,64 +5034,13 @@ class TensorScatterElements(Primitive):
 
 
 class ExtractVolumePatches(Primitive):
-    r"""
-    Extract patches from input and put them in the "depth" output dimension.
-    "depth" dimension is the second dim of output.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        kernel_size (Union[int, tuple[int], list[int]]): A list of ints which's length is 3 or 5.
-            The size of the sliding window for each dimension of input. Must be: :math:`[1, 1, k_d, k_h, k_w]` or
-            :math:`[k_d, k_h, k_w]`. If :math:`k_d = k_h = k_w`, you can enter an integer.
-        strides (Union[int, tuple[int], list[int]]): A list of ints which's length is 3 or 5.
-            How far the centers of two consecutive patches are in input. Must be: :math:`[1, 1, s_d, s_h, s_w]` or
-            :math:`[s_d, s_h, s_w]`. If :math:`s_d = s_h = s_w`, you can enter an integer.
-        padding (str): A string from: ``"SAME"`` , ``"VALID"`` . The type of padding algorithm to use.
-
-    Inputs:
-        - **input_x** (Tensor) - A Tensor. 5-D Tensor with shape :math:`(x_n, x_c, x_d, x_h, x_w)`.
-
-    Outputs:
-        Tensor, has the same type as input.
-        If padding is "VALID", the shape is :math:`(x_n, k_d * k_h * k_w * x_c, 1 + (x_d - k_d) / s_d,
-        1 + (x_h - k_h) / s_h, 1 + (x_w - k_w) / s_w)`; if padding is "SAME", the shape is :math:`(
-        x_n, k_d * k_h * k_w * x_c, (x_d + s_d - 1) / s_d, (x_h + s_h - 1) / s_h, (x_w + s_w - 1) / s_w)`.
-
-    Raises:
-        TypeError: If kernel_size or strides is not a list, a tuple or an int.
-        TypeError: If input_x is not a tensor.
-        TypeError: If padding is not str.
-        ValueError: If the length of kernel_size is neither 3 nor 5 and kernel_size is not an integer.
-        ValueError: If the length of strides is neither 3 nor 5 and strides is not an integer.
-        ValueError: If padding is neither ``"VALID"`` nor ``"SAME"`` .
-        ValueError: If elements of kernel_size or strides are not positive integer.
-        ValueError: If input_x is not a tensor in dimension 5.
-        ValueError: If input_x's shape has zero.
-        ValueError: If one of kernel_size or strides' first two numbers is not 1.
-        ValueError: If padding = "VALID" and :math:`input\_x - kernel\_size` is less than 0 in d, h or w dimension.
-        ValueError: If padding = "SAME" and :math:`padding\_needed = ((input\_x + strides - 1) / strides - 1) *
-            strides + kernel\_size - input\_x` is less than 0 in d, h or w dimension.
-        ValueError: If x_h is not 1 or x_w is not 1 and :math:`x_w + padding\_needed - k_w - s_w` is less than 0.
-        ValueError: If :math:`x_d * x_h * x_w` is greater than 2048.
+    """
+    `ops.ExtractVolumePatches` is deprecated from version 2.3 and will be removed in a future version.
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> from mindspore import dtype as mstype
-        >>> kernel_size = (1, 1, 2, 2, 2)
-        >>> strides = (1, 1, 1, 1, 1)
-        >>> padding = "VALID"
-        >>> input_x = ops.Reshape()(Tensor(np.arange(1, 28), mstype.float16), (1, 1, 3, 3, 3))
-        >>> output_y = ops.ExtractVolumePatches(kernel_size, strides, padding)(input_x)
-        >>> print(output_y.shape)
-        (1, 8, 2, 2, 2)
+        Deprecated
     """
-
+    @deprecated("2.3", "ops.ExtractVolumePatches", False)
     @prim_attr_register
     def __init__(self, kernel_size, strides, padding):
         validator.check_value_type("kernel_size", kernel_size, (int, list, tuple), self.name)
@@ -7668,7 +5215,7 @@ class LowerBound(Primitive):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
+        >>> from mindspore import ops
         >>> lowerbound = ops.LowerBound(out_type = mindspore.int32)
         >>> sorted_x = Tensor(np.arange(12).reshape(3, 4).astype(np.int8))
         >>> values = Tensor(np.array([[3], [4], [8]]).astype(np.int8))
@@ -7721,7 +5268,7 @@ class UpperBound(Primitive):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
+        >>> from mindspore import ops
         >>> upperbound = ops.UpperBound(out_type = mindspore.int32)
         >>> sorted_x = Tensor(np.arange(12).reshape(3, 4).astype(np.int8))
         >>> values = Tensor(np.array([[3], [6], [9]]).astype(np.int8))
@@ -7740,100 +5287,6 @@ class UpperBound(Primitive):
         self.init_prim_io_names(inputs=['sorted_x', 'values'], outputs=['y'])
 
 
-class Cummax(Primitive):
-    """
-    Returns the cumulative maximum of elements and the index.
-
-    Refer to :func:`mindspore.ops.cummax` for more details.
-
-    Args:
-        axis (int): The axis to accumulate the tensor's value. Must be in the range [-rank(input), rank(input)).
-
-    Inputs:
-        - **input** (Tensor) - The input tensor.
-
-    Outputs:
-        A tuple of 2 Tensors(values, indices), containing the cumulative maximum of elements and the index,
-        The shape of each output tensor is the same as input `input`.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
-        >>> cummax = ops.Cummax(axis=0)
-        >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
-        >>> output = cummax(x)
-        >>> print(output[0])
-        [[ 3.  4.  6. 10.]
-         [ 3.  6.  7. 10.]
-         [ 4.  6.  8. 10.]
-         [ 4.  6.  8. 10.]]
-        >>> print(output[1])
-        [[0 0 0 0]
-         [0 1 1 0]
-         [2 1 2 0]
-         [2 1 2 0]]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis):
-        """Initialize Cummax"""
-        validator.check_value_type("axis", axis, [int], self.name)
-        self.init_prim_io_names(inputs=['x'], outputs=['y', 'indices'])
-        self.add_prim_attr("dim", axis)
-
-
-class RightShift(Primitive):
-    r"""
-    Shift the value of each position of Tensor `input_x` to the right by corresponding bits in Tensor `input_y`.
-    The inputs are two tensors, dtypes of them must be consistent, and the
-    shapes of them could be broadcast.
-
-    .. math::
-
-        \begin{aligned}
-        &out_{i} =x_{i} >> y_{i}
-        \end{aligned}
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Inputs:
-        - **input_x** (Tensor) - The target tensor, will be shifted to the right
-          by `input_y` bits element-wise. Support all int and uint types.
-        - **input_y** (Tensor) - Number of bits shifted, the tensor must have the same type as `input_x`.
-
-    Outputs:
-        - **output** (Tensor) - The output tensor, has the same type as `input_x`.
-
-    Raises:
-        TypeError: If `input_x` or `input_y` is not tensor.
-        TypeError: If `input_x` and `input_y` could not be broadcast.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> rightshift = ops.RightShift()
-        >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.uint8))
-        >>> input_y = Tensor(np.array([1, 1, 1]).astype(np.uint8))
-        >>> output = rightshift(input_x, input_y)
-        >>> print(output)
-        [0 1 1]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize RightShift."""
-        self.init_prim_io_names(inputs=['input_x', 'input_y'], outputs=['output'])
-
-
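
Note: both removed primitives keep functional counterparts. A minimal sketch reusing the values
from the removed docstrings, assuming `ops.cummax` and `ops.right_shift` in 2.3 behave as
documented above:

    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
    >>> values, indices = ops.cummax(x, axis=0)
    >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.uint8))
    >>> input_y = Tensor(np.array([1, 1, 1]).astype(np.uint8))
    >>> print(ops.right_shift(input_x, input_y))
    [0 1 1]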
 class LogSpace(Primitive):
     r"""
     Generates a 1-D Tensor with a length of steps. The tensor's
@@ -7901,46 +5354,6 @@ class LogSpace(Primitive):
         self.init_prim_io_names(inputs=['start', 'end'], outputs=['y'])
 
 
-class NonZero(Primitive):
-    """
-    Return a tensor of the positions of all non-zero values.
-
-    Refer to :func:`mindspore.ops.nonzero` for more details.
-
-    Inputs:
-        - **x** (Tensor) - The input Tensor, its rank should be greater than or eaqual to 1.
-
-    Outputs:
-        - **y** (Tensor), 2-D Tensor of data type int64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> from mindspore.ops import NonZero
-        >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
-        >>> nonzero = NonZero()
-        >>> output = nonzero(x)
-        >>> print(output)
-        [[0 0 0]
-         [0 1 0]]
-        >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
-        >>> nonzero = NonZero()
-        >>> output = nonzero(x)
-        >>> print(output)
-        [[0]
-         [2]
-         [4]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['x'], outputs=['y'])
-
-
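
Note: :func:`mindspore.ops.nonzero`, referenced above, remains the functional replacement. A
minimal sketch with the removed example's 1-D input:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
    >>> print(ops.nonzero(x))
    [[0]
     [2]
     [4]]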
7944
5357
  class Tril(Primitive):
7945
5358
  """
7946
5359
  Returns the lower triangular portion of the 2-D matrix or the set of matrices
@@ -7953,7 +5366,7 @@ class Tril(Primitive):
7953
5366
 
7954
5367
  Args:
7955
5368
  diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: ``0`` ,
7956
- indicating the main didiagonal.
5369
+ indicating the main diagonal.
7957
5370
 
7958
5371
  Inputs:
7959
5372
  - **x** (Tensor) - The input tensor with shape :math:`(M, N, *)`
@@ -8703,7 +6116,7 @@ class TopK(Primitive):
8703
6116
 
8704
6117
  .. math::
8705
6118
 
8706
- values.shape = indices.shape = input.shape[:-1] + [k].
6119
+ values.shape = indices.shape = input.shape[:-1] + [k]
8707
6120
 
8708
6121
  If the two compared elements are the same, the one with the smaller index value is returned first.
8709
6122
 
@@ -8719,7 +6132,8 @@ class TopK(Primitive):
8719
6132
  - GPU: float16, float32.
8720
6133
  - CPU: all numeric types.
8721
6134
 
8722
- - **k** (int) - The number of top elements to be computed along the last dimension, constant input is needed.
6135
+ - **k** (Union(Tensor, int)) - The number of top elements to be computed along the last dimension.
6136
+ If `k` is a Tensor, the supported dtype is int32 and it should be 0-D or 1-D with shape :math:`(1, )` .
8723
6137
 
8724
6138
  Outputs:
8725
6139
  A tuple consisting of `values` and `indexes`.
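
Note: with the relaxed `k`, a 0-D int32 Tensor is accepted as well as a Python int. A minimal
sketch (descending selection per the definition above; the Tensor form assumes the new 2.3
behaviour just described):

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float16)
    >>> k = Tensor(3, mindspore.int32)
    >>> values, indices = ops.TopK(sorted=True)(input_x, k)
    >>> print(values)
    [5. 4. 3.]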