mindspore-2.2.14-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (1154)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -4
  3. mindspore/_akg/akg/composite/build_module.py +155 -11
  4. mindspore/_akg/akg/config/repository.json +38 -0
  5. mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
  7. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
  8. mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
  9. mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
  10. mindspore/_akg/akg/utils/gen_random.py +14 -8
  11. mindspore/_akg/akg/utils/op_dsl.py +11 -0
  12. mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
  13. mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
  14. mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
  15. mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
  16. mindspore/_checkparam.py +58 -0
  17. mindspore/_extends/builtin_operations.py +2 -1
  18. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  19. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  20. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  21. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  22. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  23. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  24. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  25. mindspore/_extends/parse/__init__.py +18 -14
  26. mindspore/_extends/parse/compile_config.py +229 -0
  27. mindspore/_extends/parse/parser.py +155 -59
  28. mindspore/_extends/parse/resources.py +40 -7
  29. mindspore/_extends/parse/standard_method.py +124 -204
  30. mindspore/_extends/remote/kernel_build_server.py +2 -0
  31. mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
  32. mindspore/_profiler.py +30 -0
  33. mindspore/amp.py +24 -18
  34. mindspore/bin/cache_admin +0 -0
  35. mindspore/bin/cache_server +0 -0
  36. mindspore/boost/boost_cell_wrapper.py +1 -1
  37. mindspore/boost/group_loss_scale_manager.py +1 -1
  38. mindspore/common/__init__.py +3 -1
  39. mindspore/common/_jit_fallback_utils.py +2 -3
  40. mindspore/common/_register_for_adapter.py +7 -0
  41. mindspore/common/_stub_tensor.py +6 -1
  42. mindspore/common/_utils.py +5 -17
  43. mindspore/common/api.py +91 -48
  44. mindspore/common/auto_dynamic_shape.py +27 -14
  45. mindspore/common/dtype.py +5 -4
  46. mindspore/common/dump.py +5 -4
  47. mindspore/common/initializer.py +1 -1
  48. mindspore/common/jit_config.py +20 -11
  49. mindspore/common/lazy_inline.py +58 -17
  50. mindspore/common/mindir_util.py +12 -2
  51. mindspore/common/mutable.py +79 -14
  52. mindspore/common/parameter.py +19 -4
  53. mindspore/common/seed.py +9 -9
  54. mindspore/common/sparse_tensor.py +251 -18
  55. mindspore/common/symbol.py +122 -0
  56. mindspore/common/tensor.py +321 -433
  57. mindspore/communication/__init__.py +3 -3
  58. mindspore/communication/_comm_helper.py +5 -0
  59. mindspore/communication/management.py +53 -38
  60. mindspore/config/op_info.config +22 -54
  61. mindspore/context.py +167 -59
  62. mindspore/dataset/__init__.py +5 -5
  63. mindspore/dataset/audio/__init__.py +6 -6
  64. mindspore/dataset/audio/transforms.py +711 -158
  65. mindspore/dataset/callback/ds_callback.py +2 -2
  66. mindspore/dataset/engine/cache_client.py +2 -2
  67. mindspore/dataset/engine/datasets.py +72 -38
  68. mindspore/dataset/engine/datasets_audio.py +14 -14
  69. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  70. mindspore/dataset/engine/datasets_text.py +38 -38
  71. mindspore/dataset/engine/datasets_user_defined.py +7 -7
  72. mindspore/dataset/engine/datasets_vision.py +75 -71
  73. mindspore/dataset/engine/offload.py +5 -7
  74. mindspore/dataset/text/__init__.py +3 -3
  75. mindspore/dataset/text/transforms.py +408 -121
  76. mindspore/dataset/text/utils.py +9 -9
  77. mindspore/dataset/transforms/__init__.py +1 -1
  78. mindspore/dataset/transforms/transforms.py +261 -76
  79. mindspore/dataset/utils/browse_dataset.py +9 -9
  80. mindspore/dataset/vision/__init__.py +3 -3
  81. mindspore/dataset/vision/c_transforms.py +5 -5
  82. mindspore/dataset/vision/transforms.py +2264 -514
  83. mindspore/dataset/vision/utils.py +40 -9
  84. mindspore/dataset/vision/validators.py +7 -1
  85. mindspore/experimental/optim/__init__.py +12 -2
  86. mindspore/experimental/optim/adadelta.py +161 -0
  87. mindspore/experimental/optim/adagrad.py +168 -0
  88. mindspore/experimental/optim/adam.py +35 -34
  89. mindspore/experimental/optim/adamax.py +170 -0
  90. mindspore/experimental/optim/adamw.py +40 -16
  91. mindspore/experimental/optim/asgd.py +153 -0
  92. mindspore/experimental/optim/lr_scheduler.py +60 -119
  93. mindspore/experimental/optim/nadam.py +157 -0
  94. mindspore/experimental/optim/optimizer.py +15 -8
  95. mindspore/experimental/optim/radam.py +194 -0
  96. mindspore/experimental/optim/rmsprop.py +154 -0
  97. mindspore/experimental/optim/rprop.py +164 -0
  98. mindspore/experimental/optim/sgd.py +28 -19
  99. mindspore/hal/__init__.py +34 -0
  100. mindspore/hal/_ascend.py +57 -0
  101. mindspore/hal/_base.py +57 -0
  102. mindspore/hal/_cpu.py +56 -0
  103. mindspore/hal/_gpu.py +57 -0
  104. mindspore/hal/device.py +356 -0
  105. mindspore/hal/event.py +179 -0
  106. mindspore/hal/stream.py +337 -0
  107. mindspore/include/api/data_type.h +2 -2
  108. mindspore/include/api/dual_abi_helper.h +16 -3
  109. mindspore/include/api/model.h +1 -3
  110. mindspore/include/api/status.h +14 -0
  111. mindspore/include/c_api/model_c.h +173 -0
  112. mindspore/include/c_api/ms/base/types.h +1 -0
  113. mindspore/include/c_api/types_c.h +19 -0
  114. mindspore/include/dataset/execute.h +1 -3
  115. mindspore/include/mindapi/base/format.h +125 -23
  116. mindspore/include/mindapi/base/types.h +7 -0
  117. mindspore/lib/libdnnl.so.2 +0 -0
  118. mindspore/lib/libmindspore.so +0 -0
  119. mindspore/lib/libmindspore_backend.so +0 -0
  120. mindspore/lib/libmindspore_common.so +0 -0
  121. mindspore/lib/libmindspore_core.so +0 -0
  122. mindspore/lib/libmindspore_glog.so.0 +0 -0
  123. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  124. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  125. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  126. mindspore/lib/libmindspore_shared_lib.so +0 -0
  127. mindspore/lib/libmpi_adapter.so +0 -0
  128. mindspore/lib/libmpi_collective.so +0 -0
  129. mindspore/lib/libnnacl.so +0 -0
  130. mindspore/lib/libopencv_core.so.4.5 +0 -0
  131. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  132. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  133. mindspore/lib/libps_cache.so +0 -0
  134. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
  135. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
  136. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
  137. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
  138. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
  139. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
  140. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  141. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
  142. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  143. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  144. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6325 -1767
  145. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  146. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
  147. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
  148. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
  162. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  163. mindspore/lib/plugin/ascend/libakg.so +0 -0
  164. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  165. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  166. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  167. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  168. mindspore/lib/plugin/cpu/libakg.so +0 -0
  169. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  170. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  171. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  172. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  173. mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
  174. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  175. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  176. mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
  177. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  178. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  179. mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
  180. mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
  181. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  182. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  183. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  184. mindspore/mindrecord/__init__.py +5 -1
  185. mindspore/mindrecord/config.py +809 -0
  186. mindspore/mindrecord/filereader.py +25 -0
  187. mindspore/mindrecord/filewriter.py +74 -56
  188. mindspore/mindrecord/mindpage.py +40 -6
  189. mindspore/mindrecord/shardutils.py +3 -2
  190. mindspore/mindrecord/shardwriter.py +7 -0
  191. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  192. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  193. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  194. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  195. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  196. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  197. mindspore/multiprocessing/__init__.py +68 -0
  198. mindspore/nn/cell.py +86 -133
  199. mindspore/nn/dynamic_lr.py +2 -2
  200. mindspore/nn/layer/activation.py +79 -90
  201. mindspore/nn/layer/basic.py +4 -80
  202. mindspore/nn/layer/channel_shuffle.py +3 -16
  203. mindspore/nn/layer/container.py +3 -3
  204. mindspore/nn/layer/conv.py +71 -71
  205. mindspore/nn/layer/embedding.py +105 -44
  206. mindspore/nn/layer/image.py +4 -7
  207. mindspore/nn/layer/normalization.py +46 -38
  208. mindspore/nn/layer/padding.py +26 -39
  209. mindspore/nn/layer/pooling.py +13 -9
  210. mindspore/nn/layer/rnn_cells.py +5 -15
  211. mindspore/nn/layer/rnns.py +6 -5
  212. mindspore/nn/layer/thor_layer.py +1 -2
  213. mindspore/nn/layer/timedistributed.py +1 -1
  214. mindspore/nn/layer/transformer.py +52 -50
  215. mindspore/nn/learning_rate_schedule.py +6 -5
  216. mindspore/nn/loss/loss.py +43 -64
  217. mindspore/nn/optim/ada_grad.py +4 -2
  218. mindspore/nn/optim/adadelta.py +3 -1
  219. mindspore/nn/optim/adafactor.py +1 -1
  220. mindspore/nn/optim/adam.py +102 -181
  221. mindspore/nn/optim/adamax.py +4 -2
  222. mindspore/nn/optim/adasum.py +2 -2
  223. mindspore/nn/optim/asgd.py +4 -2
  224. mindspore/nn/optim/ftrl.py +31 -61
  225. mindspore/nn/optim/lamb.py +5 -3
  226. mindspore/nn/optim/lars.py +2 -2
  227. mindspore/nn/optim/lazyadam.py +6 -4
  228. mindspore/nn/optim/momentum.py +13 -25
  229. mindspore/nn/optim/optimizer.py +6 -3
  230. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  231. mindspore/nn/optim/rmsprop.py +9 -3
  232. mindspore/nn/optim/rprop.py +4 -2
  233. mindspore/nn/optim/sgd.py +6 -5
  234. mindspore/nn/optim/thor.py +2 -2
  235. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  236. mindspore/nn/probability/distribution/beta.py +2 -2
  237. mindspore/nn/probability/distribution/categorical.py +4 -6
  238. mindspore/nn/probability/distribution/cauchy.py +2 -2
  239. mindspore/nn/probability/distribution/exponential.py +1 -1
  240. mindspore/nn/probability/distribution/gumbel.py +2 -2
  241. mindspore/nn/probability/distribution/poisson.py +2 -2
  242. mindspore/nn/probability/distribution/uniform.py +2 -2
  243. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  244. mindspore/nn/wrap/__init__.py +2 -1
  245. mindspore/nn/wrap/cell_wrapper.py +33 -12
  246. mindspore/nn/wrap/grad_reducer.py +148 -8
  247. mindspore/nn/wrap/loss_scale.py +7 -7
  248. mindspore/numpy/__init__.py +2 -0
  249. mindspore/numpy/array_creations.py +2 -0
  250. mindspore/numpy/array_ops.py +1 -5
  251. mindspore/numpy/fft.py +431 -0
  252. mindspore/numpy/math_ops.py +54 -60
  253. mindspore/numpy/utils.py +3 -0
  254. mindspore/ops/__init__.py +5 -4
  255. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  256. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -22
  257. mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
  258. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  259. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  260. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  261. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  262. mindspore/ops/_op_impl/__init__.py +0 -1
  263. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  264. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  265. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  266. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  267. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  268. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  269. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  270. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  271. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  272. mindspore/ops/_vmap/vmap_array_ops.py +137 -101
  273. mindspore/ops/_vmap/vmap_base.py +8 -1
  274. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  275. mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
  276. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  277. mindspore/ops/_vmap/vmap_math_ops.py +74 -49
  278. mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
  279. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  280. mindspore/ops/auto_generate/__init__.py +31 -0
  281. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
  282. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
  283. mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
  284. mindspore/ops/auto_generate/gen_extend_func.py +130 -0
  285. mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
  286. mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
  287. mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
  288. mindspore/ops/composite/__init__.py +5 -2
  289. mindspore/ops/composite/base.py +118 -17
  290. mindspore/ops/composite/math_ops.py +9 -48
  291. mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
  292. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
  293. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  294. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  296. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  297. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  298. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  299. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  300. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  301. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  302. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  303. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  304. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  305. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  306. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  307. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  308. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  309. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  310. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  311. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  312. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  313. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  314. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  315. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  316. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  317. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  318. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  319. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  320. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  321. mindspore/ops/deprecated.py +14 -3
  322. mindspore/ops/extend/__init__.py +46 -0
  323. mindspore/ops/extend/array_func.py +152 -0
  324. mindspore/ops/extend/math_func.py +76 -0
  325. mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
  326. mindspore/ops/function/__init__.py +19 -11
  327. mindspore/ops/function/array_func.py +251 -1440
  328. mindspore/ops/function/clip_func.py +12 -13
  329. mindspore/ops/function/debug_func.py +1 -4
  330. mindspore/ops/function/fft_func.py +31 -0
  331. mindspore/ops/function/grad/grad_func.py +24 -17
  332. mindspore/ops/function/image_func.py +27 -21
  333. mindspore/ops/function/linalg_func.py +35 -68
  334. mindspore/ops/function/math_func.py +451 -2360
  335. mindspore/ops/function/nn_func.py +459 -780
  336. mindspore/ops/function/other_func.py +4 -5
  337. mindspore/ops/function/parameter_func.py +5 -93
  338. mindspore/ops/function/random_func.py +24 -80
  339. mindspore/ops/function/sparse_unary_func.py +9 -16
  340. mindspore/ops/function/spectral_func.py +1 -1
  341. mindspore/ops/function/vmap_func.py +14 -14
  342. mindspore/ops/functional.py +56 -62
  343. mindspore/ops/op_info_register.py +22 -19
  344. mindspore/ops/operations/__init__.py +19 -19
  345. mindspore/ops/operations/_grad_ops.py +20 -723
  346. mindspore/ops/operations/_inner_ops.py +178 -286
  347. mindspore/ops/operations/_scalar_ops.py +5 -480
  348. mindspore/ops/operations/_sequence_ops.py +4 -34
  349. mindspore/ops/operations/array_ops.py +99 -2491
  350. mindspore/ops/operations/comm_ops.py +38 -46
  351. mindspore/ops/operations/custom_ops.py +8 -8
  352. mindspore/ops/operations/debug_ops.py +100 -31
  353. mindspore/ops/operations/image_ops.py +1 -217
  354. mindspore/ops/operations/inner_ops.py +3 -38
  355. mindspore/ops/operations/linalg_ops.py +1 -49
  356. mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
  357. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  358. mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
  359. mindspore/ops/operations/math_ops.py +703 -4601
  360. mindspore/ops/operations/nn_ops.py +374 -1748
  361. mindspore/ops/operations/other_ops.py +50 -42
  362. mindspore/ops/operations/random_ops.py +3 -52
  363. mindspore/ops/primitive.py +196 -96
  364. mindspore/ops_generate/__init__.py +27 -0
  365. mindspore/ops_generate/arg_dtype_cast.py +248 -0
  366. mindspore/ops_generate/arg_handler.py +147 -0
  367. mindspore/ops_generate/gen_aclnn_implement.py +266 -0
  368. mindspore/ops_generate/gen_ops.py +1062 -0
  369. mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
  370. mindspore/ops_generate/gen_pyboost_func.py +932 -0
  371. mindspore/ops_generate/gen_utils.py +188 -0
  372. mindspore/ops_generate/op_proto.py +138 -0
  373. mindspore/ops_generate/pyboost_utils.py +364 -0
  374. mindspore/ops_generate/template.py +238 -0
  375. mindspore/parallel/__init__.py +5 -4
  376. mindspore/parallel/_auto_parallel_context.py +21 -76
  377. mindspore/parallel/_cell_wrapper.py +16 -9
  378. mindspore/parallel/_cost_model_context.py +1 -1
  379. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  380. mindspore/parallel/_parallel_serialization.py +30 -46
  381. mindspore/parallel/_ps_context.py +1 -1
  382. mindspore/parallel/_recovery_context.py +1 -1
  383. mindspore/parallel/_tensor.py +19 -7
  384. mindspore/parallel/_transformer/__init__.py +1 -1
  385. mindspore/parallel/_transformer/layers.py +1 -1
  386. mindspore/parallel/_transformer/loss.py +1 -1
  387. mindspore/parallel/_transformer/moe.py +1 -1
  388. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  389. mindspore/parallel/_transformer/transformer.py +1 -1
  390. mindspore/parallel/_utils.py +131 -6
  391. mindspore/parallel/algo_parameter_config.py +6 -6
  392. mindspore/parallel/checkpoint_transform.py +180 -196
  393. mindspore/parallel/cluster/__init__.py +15 -0
  394. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  395. mindspore/parallel/cluster/process_entity/_api.py +345 -0
  396. mindspore/parallel/cluster/process_entity/_utils.py +116 -0
  397. mindspore/parallel/cluster/run.py +139 -0
  398. mindspore/parallel/mpi/__init__.py +1 -1
  399. mindspore/parallel/mpi/_mpi_config.py +1 -1
  400. mindspore/parallel/parameter_broadcast.py +152 -0
  401. mindspore/parallel/shard.py +99 -2
  402. mindspore/profiler/common/util.py +20 -0
  403. mindspore/profiler/envprofiling.py +1 -1
  404. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  405. mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
  406. mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
  407. mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
  408. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
  409. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
  410. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
  411. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
  412. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  413. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
  414. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  415. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  416. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  417. mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
  418. mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
  419. mindspore/profiler/parser/ascend_msprof_generator.py +68 -285
  420. mindspore/profiler/parser/ascend_op_generator.py +75 -42
  421. mindspore/profiler/parser/ascend_timeline_generator.py +293 -135
  422. mindspore/profiler/parser/base_timeline_generator.py +6 -0
  423. mindspore/profiler/parser/framework_parser.py +3 -2
  424. mindspore/profiler/parser/integrator.py +3 -1
  425. mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
  426. mindspore/profiler/parser/msadvisor_parser.py +1 -1
  427. mindspore/profiler/parser/profiler_info.py +5 -0
  428. mindspore/profiler/profiling.py +296 -166
  429. mindspore/rewrite/__init__.py +2 -13
  430. mindspore/rewrite/api/node.py +121 -35
  431. mindspore/rewrite/api/pattern_engine.py +2 -3
  432. mindspore/rewrite/api/scoped_value.py +16 -15
  433. mindspore/rewrite/api/symbol_tree.py +45 -29
  434. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  435. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  436. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  437. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  438. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  439. mindspore/rewrite/common/__init__.py +1 -2
  440. mindspore/rewrite/common/config.py +24 -0
  441. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  442. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  443. mindspore/rewrite/common/namespace.py +118 -0
  444. mindspore/rewrite/node/__init__.py +5 -5
  445. mindspore/rewrite/node/call_function.py +23 -7
  446. mindspore/rewrite/node/cell_container.py +7 -3
  447. mindspore/rewrite/node/control_flow.py +53 -28
  448. mindspore/rewrite/node/node.py +212 -196
  449. mindspore/rewrite/node/node_manager.py +51 -22
  450. mindspore/rewrite/node/node_topological_manager.py +3 -23
  451. mindspore/rewrite/parsers/__init__.py +12 -0
  452. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  453. mindspore/rewrite/parsers/assign_parser.py +635 -413
  454. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  455. mindspore/rewrite/parsers/class_def_parser.py +107 -144
  456. mindspore/rewrite/parsers/constant_parser.py +5 -5
  457. mindspore/rewrite/parsers/container_parser.py +4 -6
  458. mindspore/rewrite/parsers/expr_parser.py +55 -0
  459. mindspore/rewrite/parsers/for_parser.py +31 -98
  460. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  461. mindspore/rewrite/parsers/if_parser.py +28 -10
  462. mindspore/rewrite/parsers/module_parser.py +8 -182
  463. mindspore/rewrite/parsers/parser.py +1 -5
  464. mindspore/rewrite/parsers/parser_register.py +1 -1
  465. mindspore/rewrite/parsers/return_parser.py +5 -10
  466. mindspore/rewrite/parsers/while_parser.py +59 -0
  467. mindspore/rewrite/sparsify/utils.py +1 -1
  468. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  469. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  470. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  471. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  472. mindspore/run_check/_check_version.py +6 -14
  473. mindspore/run_check/run_check.py +1 -1
  474. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  475. mindspore/scipy/__init__.py +2 -1
  476. mindspore/scipy/fft.py +133 -0
  477. mindspore/scipy/linalg.py +140 -55
  478. mindspore/scipy/ops.py +15 -71
  479. mindspore/scipy/ops_grad.py +5 -34
  480. mindspore/scipy/optimize/line_search.py +2 -2
  481. mindspore/scipy/optimize/minimize.py +1 -1
  482. mindspore/train/__init__.py +3 -2
  483. mindspore/train/_utils.py +178 -4
  484. mindspore/train/amp.py +167 -245
  485. mindspore/train/callback/_backup_and_restore.py +4 -4
  486. mindspore/train/callback/_callback.py +4 -4
  487. mindspore/train/callback/_checkpoint.py +39 -13
  488. mindspore/train/callback/_early_stop.py +2 -2
  489. mindspore/train/callback/_landscape.py +14 -8
  490. mindspore/train/callback/_loss_monitor.py +2 -2
  491. mindspore/train/callback/_on_request_exit.py +2 -2
  492. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  493. mindspore/train/callback/_summary_collector.py +7 -7
  494. mindspore/train/callback/_time_monitor.py +2 -2
  495. mindspore/train/data_sink.py +1 -1
  496. mindspore/train/dataset_helper.py +13 -4
  497. mindspore/train/loss_scale_manager.py +2 -2
  498. mindspore/train/metrics/accuracy.py +7 -7
  499. mindspore/train/metrics/confusion_matrix.py +8 -6
  500. mindspore/train/metrics/cosine_similarity.py +6 -4
  501. mindspore/train/metrics/error.py +2 -2
  502. mindspore/train/metrics/metric.py +3 -3
  503. mindspore/train/metrics/perplexity.py +2 -1
  504. mindspore/train/metrics/topk.py +2 -2
  505. mindspore/train/mind_ir_pb2.py +75 -6
  506. mindspore/train/model.py +24 -22
  507. mindspore/train/serialization.py +256 -132
  508. mindspore/train/summary/summary_record.py +51 -28
  509. mindspore/train/train_thor/convert_utils.py +3 -3
  510. mindspore/version.py +1 -1
  511. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
  512. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +515 -1061
  513. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
  514. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  515. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  516. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  517. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  518. mindspore/config/super_bar_config.json +0 -544
  519. mindspore/gen_ops.py +0 -273
  520. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  521. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  522. mindspore/nn/layer/flash_attention.py +0 -189
  523. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  524. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  525. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  526. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  527. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  528. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  529. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  530. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  531. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  532. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  533. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  534. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  535. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  536. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  537. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  538. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  539. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  540. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  541. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  542. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  543. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  544. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  545. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  546. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  547. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  548. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  549. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  550. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  551. mindspore/ops/_op_impl/tbe/add.py +0 -42
  552. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  553. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  554. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  555. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  556. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  557. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  558. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  559. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  560. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  561. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  562. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  563. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  564. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  565. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  566. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  567. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  568. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  569. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  570. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  571. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  572. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  573. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  574. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  575. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  576. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  577. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  578. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  579. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  580. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  581. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  582. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  583. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  584. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  585. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  586. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  587. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  588. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  589. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  590. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  591. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  592. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  593. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  594. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  595. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  596. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  597. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  598. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  599. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  600. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  601. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  602. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  603. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  604. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  605. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  606. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  607. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  608. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  609. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  610. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  611. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  612. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  613. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  614. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  615. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  616. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  617. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  618. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  619. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  620. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  621. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  622. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  623. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  624. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  625. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  626. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  627. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  628. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  629. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  630. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  631. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  632. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  633. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  634. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  635. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  636. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  637. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  638. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  639. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  640. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  641. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  642. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  643. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  644. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  645. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  646. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  647. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  648. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  649. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  650. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  651. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  652. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  653. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  654. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  655. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  656. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  657. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  658. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  659. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  660. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  661. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  662. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  663. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  664. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  665. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  666. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  667. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  668. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  669. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  670. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  671. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  672. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  673. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  674. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  675. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  676. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  677. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  678. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  679. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  680. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  681. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  682. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  683. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  684. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  685. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  686. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  687. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  688. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  689. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  690. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  691. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  692. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  693. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  694. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  695. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  696. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  697. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  698. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  699. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  700. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  701. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  702. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  703. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  704. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  705. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  706. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  707. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  708. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  709. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  710. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  711. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  712. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  713. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  714. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  715. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  716. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  717. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  718. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  719. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  720. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  721. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  722. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  723. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  724. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  725. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  726. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  727. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  728. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  729. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  730. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  731. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  732. mindspore/ops/_op_impl/tbe/div.py +0 -41
  733. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  734. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  735. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  736. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  737. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  738. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  739. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  740. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  741. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  742. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  743. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  744. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  745. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  746. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  747. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  748. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  749. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  750. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  751. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  752. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  753. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  754. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  755. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  756. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  757. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  758. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  759. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  760. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  761. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  762. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  763. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  764. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  765. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  766. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  767. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  768. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  769. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  770. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  771. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  772. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  773. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  774. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  775. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  776. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  777. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  778. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  779. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  780. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  781. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  782. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  783. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  784. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  785. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  786. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  787. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  788. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  789. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  790. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  791. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  792. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  793. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  794. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  795. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  796. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  797. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  798. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  799. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  800. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  801. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  802. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  803. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  804. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  805. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  806. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  807. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  808. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  809. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  810. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  811. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  812. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  813. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  814. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  815. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  816. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  817. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  818. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  819. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  820. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  821. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  822. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  823. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  824. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  825. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  826. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  827. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  828. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  829. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  830. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  831. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  832. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  833. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  834. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  835. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  836. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  837. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  838. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  839. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  840. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  841. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  842. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  843. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  844. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  845. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  846. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  847. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  848. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  849. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  850. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  851. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  852. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  853. mindspore/ops/_op_impl/tbe/less.py +0 -41
  854. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  855. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  856. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  857. mindspore/ops/_op_impl/tbe/log.py +0 -40
  858. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  859. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  860. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  861. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  862. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  863. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  864. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  865. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  866. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  867. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  868. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  869. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  870. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  871. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  872. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  873. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  874. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  875. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  876. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  877. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  878. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  879. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  880. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  881. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  882. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  883. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  884. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  885. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  886. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  887. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  888. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  889. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  890. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  891. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  892. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  893. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  894. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  895. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  896. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  897. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  898. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  899. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  900. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  901. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  902. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  903. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  904. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  905. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  906. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  907. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  908. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  909. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  910. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  911. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  912. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  913. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  914. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  915. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  916. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  917. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  918. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  919. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  920. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  921. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  922. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  923. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  924. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  925. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  926. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  927. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  928. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  929. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  930. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  931. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  932. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  933. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  934. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  935. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  936. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  937. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  938. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  939. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  940. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  941. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  942. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  943. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  944. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  945. mindspore/ops/_op_impl/tbe/range.py +0 -39
  946. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  947. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  948. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  949. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  950. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  951. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  952. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  953. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  954. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  955. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  956. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  957. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  958. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  959. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  960. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  961. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  962. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  963. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  964. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  965. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  966. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  967. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  968. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  969. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  970. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  971. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  972. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  973. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  974. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  975. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  976. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  977. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  978. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  979. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  980. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  981. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  982. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  983. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  984. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  985. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  986. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  987. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  988. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  989. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  990. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  991. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  992. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  993. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  994. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  995. mindspore/ops/_op_impl/tbe/round.py +0 -38
  996. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  997. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  998. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  999. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  1000. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1001. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1002. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1003. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1004. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1005. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1006. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1007. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1008. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1009. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1010. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1011. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1012. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1013. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1014. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1015. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1016. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1017. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1018. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1019. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1020. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1021. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1022. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1023. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1024. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1025. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1026. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1027. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1028. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1029. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1030. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1031. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1032. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1033. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1034. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1035. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1036. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1037. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1038. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1039. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1040. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1041. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1042. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1043. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1044. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1045. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1046. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1047. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1048. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1049. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1050. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1051. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1052. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1053. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1054. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1055. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1057. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1058. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1060. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1061. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1062. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1063. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1064. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1065. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1066. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1067. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1068. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1069. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1070. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1071. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1072. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1073. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1074. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1075. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1076. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1077. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1078. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1079. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1080. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1081. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1082. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1083. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1084. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1085. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1086. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1087. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1088. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1089. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1090. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1091. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1092. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1093. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1094. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1095. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1096. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1097. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1098. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1099. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1100. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1101. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1102. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1103. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1104. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1105. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1106. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1107. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1108. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1109. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1110. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1111. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1112. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1113. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1114. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1115. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1116. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1117. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1118. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1119. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1120. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1121. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1122. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1123. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1124. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1125. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1126. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1127. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1128. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1129. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1130. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1131. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1132. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1133. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1134. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1135. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1136. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1137. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1138. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1139. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1140. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1141. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1142. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1143. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1144. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1145. mindspore/ops/_tracefunc.py +0 -241
  1146. mindspore/ops/arg_dtype_cast.py +0 -54
  1147. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1148. mindspore/rewrite/ast_creator_register.py +0 -37
  1149. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1150. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1151. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1152. mindspore/rewrite/namespace.py +0 -53
  1153. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
  1154. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
@@ -208,24 +208,11 @@ class _ConstantPadNd(Cell):
  raise TypeError(msg)

  self.value = value
- self.padding = _swap_to_ms_padding_order(padding)
- self._name = name
+ self.padding = padding

  def construct(self, x):
  """Construct the pad net."""
- input_shape = x.shape
- padding = _check(input_shape, self.padding, self._name)
- new_padding, start, end = _get_new_padding(padding)
- mask = ops.OnesLike()(x)
- output = ops.Pad(new_padding)(x)
- mask = ops.Pad(new_padding)(mask)
- ones = ops.OnesLike()(output)
- value = ops.fill(output.dtype, output.shape, self.value)
- output = ops.Add()(ops.Mul()(mask, output), ops.Mul()(ops.Sub()(ones, mask), value))
- slice_op = ops.Slice()
- begin, size = _get_begin_size(output.shape, start, end)
- output = slice_op(output, begin, size)
- return output
+ return ops.pad(x, padding=self.padding, mode='constant', value=self.value)


  class ConstantPad1d(_ConstantPadNd):
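As the hunk shows, `_ConstantPadNd.construct` in 2.3.0rc1 delegates directly to the functional `ops.pad` interface instead of composing `OnesLike`/`Pad`/`Slice` primitives. A minimal sketch of the equivalent functional call, with illustrative values only:

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    # Pad the last dimension with one element on each side, filled with 0.5.
    x = ms.Tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
    y = ops.pad(x, padding=(1, 1), mode='constant', value=0.5)
    print(y.shape)  # (2, 5)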
@@ -238,8 +225,13 @@ class ConstantPad1d(_ConstantPadNd):
  If a 2-tuple, uses (padding_0, padding_1) to pad. If the input is `x`, the size of last
  dimension of output is :math:`padding\_0 + x.shape[-1] + padding\_1`. The remaining dimensions
  of the output are consistent with those of the input.
+ Only support non-negative value while running in Ascend.
  value (Union[int, float]): Padding value.

+ Inputs:
+ - **x** (Tensor) - shape is :math:`(N, *)`, where :math:`*` means, any number of additional dimensions.
+ It is not supported that the size of dimensions is greater than 5 while running on Ascend.
+
  Returns:
  Tensor, the tensor after padding.

@@ -248,6 +240,8 @@ class ConstantPad1d(_ConstantPadNd):
  TypeError: If `value` is not int or float.
  ValueError: If the length of `padding` with tuple type is not equal to 2.
  ValueError: If the output shape after padding is not positive.
+ ValueError: If the rank of 'x' is more than 5 while running in Ascend.
+ ValueError: If `padding` contains negative value while running in Ascend.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
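For reference, a short usage sketch of the interface documented above (a 2-tuple `padding` plus a scalar `value`); the shapes and numbers are illustrative, not taken from the package:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    x = ms.Tensor(np.ones((1, 2, 3), dtype=np.float32))
    pad = nn.ConstantPad1d(padding=(1, 2), value=0.5)  # 1 element on the left, 2 on the right
    out = pad(x)
    print(out.shape)  # (1, 2, 6): 1 + 3 + 2 along the last dimension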
@@ -316,8 +310,13 @@ class ConstantPad2d(_ConstantPadNd):
  If the input is `x`, the size of last dimension of output is :math:`padding\_0 + x.shape[-1] + padding\_1`.
  The size of penultimate dimension of output is :math:`padding\_2 + x.shape[-2] + padding\_3`.
  The remaining dimensions of the output are consistent with those of the input.
+ Only support non-negative value while running in Ascend.
  value (Union[int, float]): Padding value.

+ Inputs:
+ - **x** (Tensor) - shape is :math:`(N, *)`, where :math:`*` means, any number of additional dimensions.
+ It is not supported that the size of dimensions is greater than 5 while running on Ascend.
+
  Returns:
  Tensor, the tensor after padding.

@@ -326,6 +325,8 @@ class ConstantPad2d(_ConstantPadNd):
  TypeError: If `value` is not int or float.
  ValueError: If the length of `padding` is more than 4 or not a multiple of 2.
  ValueError: If the output shape after padding is not positive.
+ ValueError: If the rank of 'x' is more than 5 while running in Ascend.
+ ValueError: If `padding` contains negative value while running in Ascend.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -369,8 +370,13 @@ class ConstantPad3d(_ConstantPadNd):
  The size of penultimate dimension of output is :math:`padding\_2 + x.shape[-2] + padding\_3`.
  The size of 3rd to last dimension of output is :math:`padding\_4 + x.shape[-3] + padding\_5`.
  The remaining dimensions of the output are consistent with those of the input.
+ Only support non-negative value while running in Ascend.
  value (Union[int, float]): Padding value.

+ Inputs:
+ - **x** (Tensor) - shape is :math:`(N, *)`, where :math:`*` means, any number of additional dimensions.
+ It is not supported that the size of dimensions is greater than 5 while running on Ascend.
+
  Returns:
  Tensor, the tensor after padding.

@@ -379,6 +385,8 @@ class ConstantPad3d(_ConstantPadNd):
  TypeError: If `value` is not int or float.
  ValueError: If the length of `padding` is more than 6 or not a multiple of 2.
  ValueError: If the output shape after padding is not positive.
+ ValueError: If the rank of 'x' is more than 5 while running in Ascend.
+ ValueError: If `padding` contains negative value while running in Ascend.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -620,6 +628,7 @@ class ZeroPad2d(_ConstantPadNd):
  If the input is `x`, the size of last dimension of output is :math:`padding\_0 + x.shape[-1] + padding\_1`.
  The size of penultimate dimension of output is :math:`padding\_2 + x.shape[-2] + padding\_3`.
  The remaining dimensions of the output are consistent with those of the input.
+ Only support non-negative value while running in Ascend.

  Returns:
  Tensor, the tensor after padding.
@@ -628,6 +637,8 @@ class ZeroPad2d(_ConstantPadNd):
  TypeError: If `padding` is not a tuple or int.
  ValueError: If the length of `padding` is more than 4 or not a multiple of 2.
  ValueError: If the output shape after padding is not positive.
+ ValueError: If the rank of 'x' is more than 5 while running in Ascend.
+ ValueError: If `padding` contains negative value while running in Ascend.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -679,18 +690,12 @@ class _ReplicationPadNd(Cell):
  self.padding = padding
  self.padv3 = nn_ops.PadV3(mode="edge")

- @staticmethod
- @_primexpr
- def _check_input_dim(shape, cls_name):
- raise NotImplementedError
-
  @staticmethod
  @constexpr
  def _need_expend_dim(x):
  raise NotImplementedError

  def construct(self, x):
- self._check_input_dim(x.shape, self.name)
  need_expend_dims = self._need_expend_dim(x)
  if need_expend_dims:
  x = x.expand_dims(0)
@@ -751,12 +756,6 @@ class ReplicationPad1d(_ReplicationPadNd):
  padding = (padding, padding)
  super(ReplicationPad1d, self).__init__(padding, name="ReplicationPad1d")

- @staticmethod
- @_primexpr
- def _check_input_dim(shape, cls_name):
- dim = len(shape)
- _check_dim(dim, 2, 3, cls_name)
-
  def _need_expend_dim(self, x):
  input_shape = x.shape
  return 1 if len(input_shape) == 2 else 0
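With the `_check_input_dim` helpers removed, an `int` padding is still expanded to a tuple in `__init__` and the edge values are replicated through `PadV3`. A small usage sketch of the documented behaviour (values illustrative):

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    x = ms.Tensor(np.array([[[1., 2., 3.]]], dtype=np.float32))  # shape (1, 1, 3)
    pad = nn.ReplicationPad1d(padding=(2, 1))                    # repeat the edge values
    print(pad(x))  # [[[1. 1. 1. 2. 3. 3.]]]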
@@ -821,12 +820,6 @@ class ReplicationPad2d(_ReplicationPadNd):
  padding = (padding, padding, padding, padding)
  super(ReplicationPad2d, self).__init__(padding, name="ReplicationPad2d")

- @staticmethod
- @_primexpr
- def _check_input_dim(shape, cls_name):
- dim = len(shape)
- _check_dim(dim, 3, 4, cls_name)
-
  def _need_expend_dim(self, x):
  input_shape = x.shape
  return 1 if len(input_shape) == 3 else 0
@@ -892,12 +885,6 @@ class ReplicationPad3d(_ReplicationPadNd):
  padding = (padding, padding, padding, padding, padding, padding)
  super(ReplicationPad3d, self).__init__(padding, name="ReplicationPad3d")

- @staticmethod
- @_primexpr
- def _check_input_dim(shape, cls_name):
- dim = len(shape)
- _check_dim(dim, 4, 5, cls_name)
-
  def _need_expend_dim(self, x):
  input_shape = x.shape
  return 1 if len(input_shape) == 4 else 0
@@ -105,7 +105,8 @@ class LPPool1d(Cell):
  kernel_size (int): The size of kernel window.
  stride (int): The distance of kernel moving, an int number that represents the width of movement is stride,
  if the value is None, the default value `kernel_size` is used. Default: ``None`` .
- ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: ``False`` .
+ ceil_mode (bool): If ``True``, use ceil to calculate output shape.
+ If ``False``, use floor to calculate output shape. Default: ``False`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
@@ -119,7 +120,7 @@


  Raises:
- TypeError: If `x` is not an Tensor.
+ TypeError: If `x` is not a Tensor.
  TypeError: If `kernel_size` or `stride` is not an int.
  TypeError: If `ceil_mode` is not a bool.
  TypeError: If `norm_type` is neither float nor int.
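A brief sketch of `LPPool1d` as documented above (norm type :math:`p`, `kernel_size`, optional `stride`); the sizes are illustrative:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    x = ms.Tensor(np.arange(12, dtype=np.float32).reshape(1, 2, 6))
    pool = nn.LPPool1d(norm_type=2, kernel_size=3, stride=1)
    print(pool(x).shape)  # (1, 2, 4): floor((6 - 3) / 1) + 1 windows per channel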
@@ -168,7 +169,7 @@ class LPPool2d(Cell):
  f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}

  Args:
- norm_type(Union[int, float]) - Type of normalization, represents :math:`p` in the formula, can not be 0.
+ norm_type(Union[int, float]): Type of normalization, represents :math:`p` in the formula, can not be 0.

  - if p = 1, the result is the sum of the elements within the pooling kernel(proportional to average
  pooling).
@@ -179,7 +180,7 @@ class LPPool2d(Cell):
  or a tuple of two int numbers that represent height and width respectively.
  stride(Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
  the height and width of movement are both stride, or a tuple of two int numbers that
- represent height and width of movement respectively, if the value is None,
+ represent height and width of movement respectively, if the value is ``None``,
  the default value `kernel_size` is used. Default: ``None`` .
  ceil_mode(bool): Whether to use ceil or floor to calculate output shape. Default: ``False`` .

@@ -197,7 +198,7 @@ class LPPool2d(Cell):
  W_{out} = \left\lfloor\frac{W_{in} - \text{kernel_size}[1]}{\text{stride}[1]} + 1\right\rfloor

  Raises:
- TypeError: If `x` is not an Tensor.
+ TypeError: If `x` is not a Tensor.
  TypeError: If `kernel_size` or `stride` is neither int nor tuple.
  TypeError: If `ceil_mode` is not a bool.
  TypeError: If `norm_type` is neither float nor int.
@@ -324,7 +325,8 @@ class MaxPool3d(_PoolNd):
  three integers. Default: ``1`` .
  return_indices (bool): If ``True`` , output is a Tuple of 2 Tensors, representing the maxpool result and where
  the max values are generated. Otherwise, only the maxpool result is returned. Default: ``False`` .
- ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: ``False`` .
+ ceil_mode (bool): If ``True``, use ceil to calculate output shape.
+ If ``False``, use floor to calculate output shape. Default: ``False`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})` or
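A short sketch of the documented `return_indices` behaviour for `MaxPool3d`; the input values, the `pad_mode` setting and all sizes here are illustrative assumptions, not taken from the hunk:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    x = ms.Tensor(np.random.randn(1, 2, 4, 4, 4).astype(np.float32))
    pool = nn.MaxPool3d(kernel_size=2, stride=2, pad_mode="pad", return_indices=True)
    out, indices = pool(x)            # maxpool result plus the argmax positions
    print(out.shape, indices.shape)   # (1, 2, 2, 2, 2) twice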
@@ -620,7 +622,7 @@ class MaxPool1d(_PoolNd):
  at the begin and end is determined by the `padding` parameter.
  If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int])): Padding value for the pooling. Default value is 0.
+ padding (Union(int, tuple[int], list[int])): Padding value for the pooling. Default value is ``0``.
  padding can only be an integer or a tuple/list containing a single integer, in which case padding times or
  padding[0] times are padded on both sides of the input.
  dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
@@ -1660,6 +1662,8 @@ class AdaptiveMaxPool3d(Cell):
  def __init__(self, output_size, return_indices=False):
  """Initialize AdaptiveMaxPool3d."""
  super(AdaptiveMaxPool3d, self).__init__()
+ if isinstance(output_size, int):
+ output_size = (output_size, output_size, output_size)
  self.output_size = Tensor(output_size, dtype=mstype.int32)
  self.return_indices = return_indices
  self.adaptive_max_pool3d = AdaptiveMaxPool3D()
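The added branch expands an `int` `output_size` to a 3-tuple before it is wrapped in a Tensor, so both spellings below should be accepted; a hedged sketch with illustrative shapes:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    x = ms.Tensor(np.random.randn(1, 3, 8, 8, 8).astype(np.float32))
    pool_a = nn.AdaptiveMaxPool3d(output_size=4)          # now equivalent to (4, 4, 4)
    pool_b = nn.AdaptiveMaxPool3d(output_size=(4, 4, 4))
    print(pool_a(x).shape, pool_b(x).shape)               # (1, 3, 4, 4, 4) in both cases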
@@ -1975,7 +1979,7 @@ class MaxUnpool2d(Cell):
  stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
  the height and width of movement are both stride, or a tuple of two int numbers that
  represent height and width of movement respectively.
- If stride is None, then stride equal to kernel_size. Default: ``None`` .
+ If stride is ``None``, then stride equal to kernel_size. Default: ``None`` .
  padding (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` . If `padding` is an integer,
  the paddings of height and width are the same, equal to padding. If `padding` is a tuple of two
  integers, the padding of height and width equal to padding[0] and padding[1] correspondingly.
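`MaxUnpool2d` consumes the flattened argmax indices recorded during max pooling; a minimal sketch, assuming a 4x4 map pooled with a 2x2 kernel and stride 2 (both tensors below are hand-made for illustration):

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    # Pooled values and the flattened positions they came from in the 4x4 map.
    pooled = ms.Tensor(np.array([[[[6., 8.], [14., 16.]]]], dtype=np.float32))
    indices = ms.Tensor(np.array([[[[5, 7], [13, 15]]]], dtype=np.int64))

    unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
    restored = unpool(pooled, indices)   # zeros everywhere except the recorded positions
    print(restored.shape)                # (1, 1, 4, 4)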
@@ -2068,7 +2072,7 @@ class MaxUnpool3d(Cell):
  stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
  the depth, height and width of movement are both stride, or a tuple of three int numbers that
  represent depth, height and width of movement respectively.
- If stride is None, then stride equal to kernel_size. Default: ``None`` .
+ If stride is ``None``, then stride equal to kernel_size. Default: ``None`` .
  padding (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` . If `padding` is an integer,
  the paddings of depth, height and width are the same, equal to padding. If `padding` is a tuple of three
  integers, the padding of depth, height and width equal to padding[0], padding[1] and padding[2]
@@ -25,7 +25,7 @@ from mindspore import log as logger
  from mindspore.common.tensor import Tensor
  from mindspore.common.parameter import Parameter
  from mindspore.common.initializer import initializer, Uniform
- from mindspore.ops.primitive import constexpr, _primexpr
+ from mindspore.ops.primitive import constexpr
  from mindspore.nn.cell import Cell
  from mindspore import _checkparam as validator

@@ -61,13 +61,6 @@ def _check_tuple_length(param_name, input_data, length, cls_name):
  f"but got '{len(input_data)}'")


- @_primexpr
- def _check_batch_size_equal(batch_size_x, batch_size_hx, cls_name):
- if batch_size_x != batch_size_hx:
- raise ValueError(f"For '{cls_name}' batch size of x and hx must be equal, but got {batch_size_x} of x "
- f"and {batch_size_hx} of hx.")
-
-
  def _check_lstmcell_init(func):
  """Internal function, used to check init args."""
  @wraps(func)
@@ -185,7 +178,7 @@ class RNNCell(RNNCellBase):
  Args:
  input_size (int): Number of features of input.
  hidden_size (int): Number of features of hidden layer.
- has_bias (bool): Whether the cell has bias `b_ih` and `b_hh`. Default: ``True`` .
+ has_bias (bool): Whether the cell has bias :math:`b_ih` and :math:`b_hh`. Default: ``True`` .
  nonlinearity (str): The non-linearity to use. Can be either ``"tanh"`` or ``"relu"`` .
  Default: ``"tanh"`` .
  dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
@@ -232,7 +225,6 @@
  _check_is_tensor('hx', hx, self.cls_name)
  _check_input_dtype(x.dtype, "x", [mstype.float32, mstype.float16], self.cls_name)
  _check_input_dtype(hx.dtype, "hx", [mstype.float32, mstype.float16], self.cls_name)
- _check_batch_size_equal(x.shape[0], hx.shape[0], self.cls_name)

  if self.nonlinearity == "tanh":
  ret = _rnn_tanh_cell(x, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh)
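With the graph-time batch-size check removed, `x` and `hx` are only validated for type and dtype before the tanh/relu cell kernel runs. A usage sketch of the cell over a short sequence, following the documented shapes (values illustrative):

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    cell = nn.RNNCell(input_size=10, hidden_size=16)
    x = ms.Tensor(np.random.randn(5, 3, 10).astype(np.float32))  # (seq_len, batch, input_size)
    hx = ms.Tensor(np.zeros((3, 16), dtype=np.float32))          # (batch, hidden_size)

    outputs = []
    for t in range(x.shape[0]):
        hx = cell(x[t], hx)          # each step returns the new hidden state
        outputs.append(hx)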
@@ -318,8 +310,6 @@ class LSTMCell(RNNCellBase):
  _check_input_dtype(x.dtype, "x", [mstype.float32, mstype.float16], self.cls_name)
  _check_input_dtype(hx[0].dtype, "hx[0]", [mstype.float32, mstype.float16], self.cls_name)
  _check_input_dtype(hx[1].dtype, "hx[1]", [mstype.float32, mstype.float16], self.cls_name)
- _check_batch_size_equal(x.shape[0], hx[0].shape[0], self.cls_name)
- _check_batch_size_equal(x.shape[0], hx[1].shape[0], self.cls_name)
  return _lstm_cell(x, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh)

  def _check_construct_args(self, *inputs, **kwargs):
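The same pattern applies to `LSTMCell`, whose `hx` is an `(h, c)` tuple; a hedged sketch with illustrative shapes:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    cell = nn.LSTMCell(input_size=10, hidden_size=16)
    x = ms.Tensor(np.random.randn(5, 3, 10).astype(np.float32))
    h = ms.Tensor(np.zeros((3, 16), dtype=np.float32))
    c = ms.Tensor(np.zeros((3, 16), dtype=np.float32))

    for t in range(x.shape[0]):
        h, c = cell(x[t], (h, c))    # returns the updated hidden and cell states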
@@ -346,7 +336,8 @@ class GRUCell(RNNCellBase):
  \end{array}

  Here :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`
- are learnable weights between the output and the input in the formula. For instance,
+ are learnable weights between the output and the input in the formula. :math:`h` is hidden state.
+ :math:`r` is reset gate. :math:`z` is update gate. :math:`n` is n-th layer. For instance,
  :math:`W_{ir}, b_{ir}` are the weight and bias used to transform from input :math:`x` to :math:`r`.
  Details can be found in paper
  `Learning Phrase Representations using RNN Encoder–Decoder for Statistical Machine Translation
@@ -355,7 +346,7 @@
  Args:
  input_size (int): Number of features of input.
  hidden_size (int): Number of features of hidden layer.
- has_bias (bool): Whether the cell has bias `b_in` and `b_hn`. Default: ``True`` .
+ has_bias (bool): Whether the cell has bias :math:`b_{in}` and :math:`b_{hn}`. Default: ``True`` .
  dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
@@ -394,5 +385,4 @@
  _check_is_tensor('hx', hx, self.cls_name)
  _check_input_dtype(x.dtype, "x", [mstype.float32, mstype.float16], self.cls_name)
  _check_input_dtype(hx.dtype, "hx", [mstype.float32, mstype.float16], self.cls_name)
- _check_batch_size_equal(x.shape[0], hx.shape[0], self.cls_name)
  return _gru_cell(x, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh)
@@ -588,13 +588,14 @@ class RNN(_RNNBase):
  Here :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
  the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
  previous layer at time :math:`t-1` or the initial hidden state at time `0`.
- If :attr:`nonlinearity` is ``'relu'``, then :math:`\text{ReLU}` is used instead of :math:`\tanh`.
+ :math:`W_{ih}` is the learnable input-hidden weights, and :math:`b_{ih}` is the learnable input-hidden bias.
+ :math:`W_{hh}` is the learnable hidden-hidden weights, and :math:`b_{hh}` is the learnable hidden-hidden bias.

  Args:
  input_size (int): Number of features of input.
  hidden_size (int): Number of features of hidden layer.
  num_layers (int): Number of layers of stacked RNN. Default: ``1`` .
- nonlinearity (str): The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
+ nonlinearity (str): The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``.
  has_bias (bool): Whether the cell has bias :math:`b_{ih}` and :math:`b_{hh}`. Default: ``True`` .
  batch_first (bool): Specifies whether the first dimension of input `x` is batch_size. Default: ``False`` .
  dropout (float): If not 0.0, append `Dropout` layer on the outputs of each
@@ -696,7 +697,7 @@ class GRU(_RNNBase):
  input_size (int): Number of features of input.
  hidden_size (int): Number of features of hidden layer.
  num_layers (int): Number of layers of stacked GRU. Default: ``1`` .
- has_bias (bool): Whether the cell has bias `b_in` and `b_hn`. Default: ``True`` .
+ has_bias (bool): Whether the cell has bias :math:`b_{in}` and :math:`b_{hn}`. Default: ``True`` .
  batch_first (bool): Specifies whether the first dimension of input `x` is batch_size. Default: ``False`` .
  dropout (float): If not 0.0, append `Dropout` layer on the outputs of each
  GRU layer except the last layer. Default ``0.0`` . The range of dropout is [0.0, 1.0).
@@ -706,7 +707,7 @@

  Inputs:
  - **x** (Tensor) - Tensor of data type mindspore.float32 or mindspore.float16 and
- shape (seq\_len, batch\_size, `input\_size`) or :math:`(batch\_size, seq\_len, input\_size)`.
+ shape :math:`(seq\_len, batch\_size, input\_size)` or :math:`(batch\_size, seq\_len, input\_size)`.
  - **hx** (Tensor) - Tensor of data type mindspore.float32 or mindspore.float16 and
  shape :math:`(num\_directions * num\_layers, batch\_size, hidden\_size)`.
  - **seq_length** (Tensor) - The length of each sequence in an input batch.
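A usage sketch matching the corrected shape notation, with `batch_first=False` so `x` is `(seq_len, batch_size, input_size)`; all sizes are illustrative:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    net = nn.GRU(input_size=10, hidden_size=16, num_layers=2, batch_first=False)
    x = ms.Tensor(np.random.randn(5, 3, 10).astype(np.float32))   # (seq_len, batch, input_size)
    h0 = ms.Tensor(np.zeros((2, 3, 16), dtype=np.float32))        # (num_directions * num_layers, batch, hidden_size)
    output, hn = net(x, h0)
    print(output.shape, hn.shape)   # (5, 3, 16) (2, 3, 16)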
@@ -796,7 +797,7 @@ class LSTM(_RNNBase):
  input_size (int): Number of features of input.
  hidden_size (int): Number of features of hidden layer.
  num_layers (int): Number of layers of stacked LSTM . Default: ``1`` .
- has_bias (bool): Whether the cell has bias :math:`b_{ih}` and :math:`b_{hh}`. Default: ``True`` .
+ has_bias (bool): Whether the cell has bias :math:`b_{ih}` and :math:`b_{fh}`. Default: ``True`` .
  batch_first (bool): Specifies whether the first dimension of input `x` is batch_size. Default: ``False`` .
  dropout (float, int): If not 0, append `Dropout` layer on the outputs of each
  LSTM layer except the last layer. Default ``0`` . The range of dropout is [0.0, 1.0).
@@ -449,7 +449,7 @@ class Conv2dThor(_ConvThor):
  weight_shape = [1, self.in_channels, *self.kernel_size]
  self.weight_init = weight_init
  if isinstance(weight_init, Tensor):
- self.weight_init = Tensor(weight_init.asnumpy().swapaxes(0, 1), weight_init.dtype)
+ self.weight_init = weight_init.swapaxes(0, 1)
  if isinstance(weight_init, Initializer):
  self.weight_init.shape = weight_shape
  self.weight = Parameter(initializer(self.weight_init, weight_shape), name='weight')
@@ -596,7 +596,6 @@ class EmbeddingThor(Cell):
  if padding_idx is not None:
  self.padding_idx = Validator.check_int_range(padding_idx, 0, vocab_size, Validator.INC_BOTH,
  "padding_idx", self.cls_name)
- self.init_tensor = self.init_tensor.init_data().asnumpy()
  self.init_tensor[self.padding_idx] = 0
  self.embedding_table = Parameter(self.init_tensor, name='embedding_table')
  self.expand = P.ExpandDims()
@@ -85,7 +85,7 @@ class TimeDistributed(Cell):
  where :math:`*` means any number of additional dimensions.

  Outputs:
- Tensor of shape :math:`(N, T, *)`
+ Tensor of shape :math:`(N, T, *)`.

  Raises:
  TypeError: If layer is not a Cell or Primitive.
@@ -66,35 +66,35 @@ class MultiheadAttention(Cell):
  dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``mstype.float32`` .

  Inputs:
- - **query** (Tensor): The query embeddings. If `query` is unbatched, the shape is :math:`(L, E_q)`,
+ - **query** (Tensor) - The query embeddings. If `query` is unbatched, the shape is :math:`(L, E_q)`,
  otherwise the shape is :math:`(L, N, E_q)` when `batch_first=False` or :math:`(N, L, E_q)` when
  `batch_first=True` , where :math:`L` is the target sequence length, :math:`N` is the batch size,
  and :math:`E_q` is the query embedding dimension `embed_dim`. Supported types: float16, float32,
  float64. Queries are compared against key-value pairs to produce the output.
- - **key** (Tensor): The key embeddings. If `key` is unbatched, the shape is :math:`(S, E_k)`, otherwise
+ - **key** (Tensor) - The key embeddings. If `key` is unbatched, the shape is :math:`(S, E_k)`, otherwise
  the shape is :math:`(S, N, E_k)` when `batch_first=False` or :math:`(N, S, E_k)` when
  `batch_first=True` , where :math:`S` is the source sequence length, :math:`N` is the batch size,
  and :math:`E_k` is the key embedding dimension `kdim`. Supported types: float16, float32, float64.
- - **value** (Tensor): The value embeddings. If `value` is unbatched, the shape is :math:`(S, E_v)`,
+ - **value** (Tensor) - The value embeddings. If `value` is unbatched, the shape is :math:`(S, E_v)`,
  otherwise the shape is :math:`(S, N, E_v)` when `batch_first=False` or :math:`(N, S, E_v)` when
  `batch_first=True` , where :math:`S` is the source sequence length, :math:`N` is the batch size,
  and :math:`E_v` is the value embedding dimension `vdim`. Supported types: float16, float32, float64.
- - **key_padding_mask** (Tensor, optional): If specified, a mask of shape :math:`(N, S)` indicating which
+ - **key_padding_mask** (Tensor, optional) - If specified, a mask of shape :math:`(N, S)` indicating which
  elements within `key` to ignore for the purpose of attention (i.e. treat as "padding").
  For unbatched `query`, shape should be :math:`(S)`. Binary and float masks are supported.
  For a binary mask, a ``True`` value indicates that the corresponding `key` value will be ignored for
  the purpose of attention. For a float mask, it will be directly added to the corresponding `key` value.
  Supported float types: float16, float32, float64. Default: ``None``.
- - **need_weights** (bool): Whether returns `attn_output_weights` in addition to `attn_outputs`.
+ - **need_weights** (bool) - Whether returns `attn_output_weights` in addition to `attn_outputs`.
  Default: ``True``.
- - **attn_mask** (Tensor, optional): If specified, a 2D or 3D mask preventing attention to certain positions.
+ - **attn_mask** (Tensor, optional) - If specified, a 2D or 3D mask preventing attention to certain positions.
  Must be of shape :math:`(L, S)` or :math:`(N\cdot\text{num_heads}, L, S)`, where :math:`N` is the
  batch size, :math:`L` is the target sequence length, and :math:`S` is the source sequence length.
  A 2D mask will be broadcasted across the batch while a 3D mask allows for a different mask for each entry
  in the batch. For a binary mask, a ``True`` value indicates that the corresponding position is not allowed
  to attend. For a float mask, the mask values will be added to the attention weight.
  Supported float types: float16, float32, float64. Default: ``None``.
- - **average_attn_weights** (bool): If true, indicates that the returned `attn_weights` should be averaged
+ - **average_attn_weights** (bool) - If true, indicates that the returned `attn_weights` should be averaged
  across heads. Otherwise, `attn_weights` are provided separately per head. Note that this flag only
  has an effect when `need_weights=True`. Default: ``True`` (i.e. average weights across heads)
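A compact usage sketch of the documented input shapes with the default `batch_first=False`; the layer sizes here are illustrative assumptions:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    embed_dim, num_heads = 32, 4
    mha = nn.MultiheadAttention(embed_dim, num_heads)

    L, S, N = 6, 8, 2                                    # target length, source length, batch
    query = ms.Tensor(np.random.randn(L, N, embed_dim).astype(np.float32))
    key = ms.Tensor(np.random.randn(S, N, embed_dim).astype(np.float32))
    value = ms.Tensor(np.random.randn(S, N, embed_dim).astype(np.float32))

    attn_output, attn_weights = mha(query, key, value)   # need_weights defaults to True
    print(attn_output.shape)                             # (6, 2, 32)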

@@ -265,14 +265,14 @@ class TransformerEncoderLayer(Cell):
  dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``mstype.float32`` .

  Inputs:
- - **src** (Tensor): the sequence to the encoder layer. For unbatched input, the shape is
+ - **src** (Tensor) - the sequence to the encoder layer. For unbatched input, the shape is
  :math:`(S, E)` ; otherwise if `batch_first=False` , the shape is :math:`(S, N, E)` and if
- `batch_first=True` , the shape is :math:`(S, N, E)`, where :math:`(S)` is the source sequence
+ `batch_first=True` , the shape is :math:`(N, S, E)`, where :math:`(S)` is the source sequence
  length, :math:`(N)` is the batch number and :math:`(E)` is the feature number.
  Supported types: float16, float32, float64.
- - **src_mask** (Tensor, optional): the mask for the src sequence. The shape is :math:`(S, S)`
+ - **src_mask** (Tensor, optional) - the mask for the src sequence. The shape is :math:`(S, S)`
  or :math:`(N*nhead, S, S)`. Supported types: float16, float32, float64, bool. Default: ``None``.
- - **src_key_padding_mask** (Tensor, optional): the mask for the src keys per batch. The shape is
+ - **src_key_padding_mask** (Tensor, optional) - the mask for the src keys per batch. The shape is
  :math:`(S)` for unbatched input, otherwise :math:`(N, S)` . Supported types: float16, float32,
  float64, bool. Default: ``None``.

@@ -406,20 +406,20 @@ class TransformerDecoderLayer(Cell):
  dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``mstype.float32`` .

  Inputs:
- - **tgt** (Tensor): The sequence to the decoder layer. For unbatched input, the shape is
+ - **tgt** (Tensor) - The sequence to the decoder layer. For unbatched input, the shape is
  :math:`(T, E)` ; otherwise if `batch_first=False` , the shape is :math:`(T, N, E)` and if
- `batch_first=True` , the shape is :math:`(T, N, E)`, where :math:`(T)` is the target sequence
+ `batch_first=True` , the shape is :math:`(N, T, E)`, where :math:`(T)` is the target sequence
  length. Supported types: float16, float32, float64.
- - **memory** (Tensor): The sequence from the last layer of the encoder. Supported types: float16,
+ - **memory** (Tensor) - The sequence from the last layer of the encoder. Supported types: float16,
  float32, float64.
- - **tgt_mask** (Tensor, optional): The mask of the tgt sequence. The shape is :math:`(T, T)`
+ - **tgt_mask** (Tensor, optional) - The mask of the tgt sequence. The shape is :math:`(T, T)`
  or :math:`(N*nhead, T, T)`. Supported types: float16, float32, float64, bool. Default: ``None``.
- - **memory_mask** (Tensor, optional): The mask of the memory sequence. The shape is
+ - **memory_mask** (Tensor, optional) - The mask of the memory sequence. The shape is
  :math:`(T, S)` . Supported types: float16, float32, float64, bool. Default: ``None``.
  - **tgt_key_padding_mask** (Tensor, optional): The mask of the tgt keys per batch. The shape is
- :math:`(T)` for unbatched input, otherwise :math:`(N, S)` . Supported types: float16, float32,
+ :math:`(T)` for unbatched input, otherwise :math:`(N, T)` . Supported types: float16, float32,
  float64, bool. Default: ``None``.
- - **memory_key_padding_mask** (Tensor, optional): The mask of the memory keys per batch. The shape
+ - **memory_key_padding_mask** (Tensor, optional) - The mask of the memory keys per batch. The shape
  is :math:`(S)` for unbatched input, otherwise :math:`(N, S)` . Supported types: float16, float32,
  float64, bool. Default: ``None``.

@@ -540,8 +540,8 @@ class TransformerDecoderLayer(Cell):

  class TransformerEncoder(Cell):
  r"""
- Transformer Encoder module with multi-layer stacked of `TransformerEncoderLayer`, including multihead
- attention and feedforward layer. Users can build the
+ Transformer Encoder module with multi-layer stacked of :class:`mindspore.nn.TransformerEncoderLayer`,
+ including multihead attention and feedforward layer. Users can build the
  BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.

  Args:
@@ -550,15 +550,15 @@ class TransformerEncoder(Cell):
  norm (Cell, optional): The layer normalization module. Default: ``None``.

  Inputs:
- - **src** (Tensor): The sequence to the encoder. For unbatched input, the shape is
- :math:`(S, E)` ; otherwise if `batch_first=False` in TransformerEncoderLayer, the shape is
- :math:`(S, N, E)` and if `batch_first=True` , the shape is :math:`(S, N, E)`, where :math:`(S)` is the
- source sequence length, :math:`(N)` is the batch number and :math:`(E)` is the feature number.
- Supported types: float16, float32, float64.
- - **src_mask** (Tensor, optional): The mask of the src sequence. The shape is :math:`(S, S)`
- or :math:`(N*nhead, S, S)` , where `nhead` is the arguent in TransformerDecoderLayer.
+ - **src** (Tensor) - The sequence to the encoder. For unbatched input, the shape is
+ :math:`(S, E)` ; otherwise if `batch_first=False` in :class:`mindspore.nn.TransformerEncoderLayer`,
+ the shape is :math:`(S, N, E)` and if `batch_first=True` , the shape is :math:`(N, S, E)`,
+ where :math:`(S)` is the source sequence length, :math:`(N)` is the batch number and :math:`(E)` is
+ the feature number. Supported types: float16, float32, float64.
+ - **src_mask** (Tensor, optional) - The mask of the src sequence. The shape is :math:`(S, S)`
+ or :math:`(N*nhead, S, S)` , where `nhead` is the argument in :class:`mindspore.nn.TransformerEncoderLayer`.
  Supported types: float16, float32, float64, bool. Default: ``None``.
- - **src_key_padding_mask** (Tensor, optional): the mask of the src keys per batch. The shape is
+ - **src_key_padding_mask** (Tensor, optional) - the mask of the src keys per batch. The shape is
  :math:`(S)` for unbatched input, otherwise :math:`(N, S)` . Supported types: float16, float32,
  float64, bool. Default: ``None``.
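A usage sketch of stacking encoder layers as documented, using the `batch_first=False` shape convention; the sizes are illustrative:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    layer = nn.TransformerEncoderLayer(d_model=32, nhead=4, dim_feedforward=64)
    encoder = nn.TransformerEncoder(layer, num_layers=3)

    src = ms.Tensor(np.random.randn(10, 2, 32).astype(np.float32))   # (S, N, E)
    out = encoder(src)
    print(out.shape)   # (10, 2, 32)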

@@ -611,8 +611,8 @@ class TransformerEncoder(Cell):

  class TransformerDecoder(Cell):
  r"""
- Transformer Decoder module with multi-layer stacked of `TransformerDecoderLayer`, including multihead self
- attention, cross attention and feedforward layer.
+ Transformer Decoder module with multi-layer stacked of :class:`mindspore.nn.TransformerDecoderLayer`,
+ including multihead self attention, cross attention and feedforward layer.

  Args:
  decoder_layer (Cell): An instance of the :class:`mindspore.nn.TransformerDecoderLayer` class.
@@ -620,20 +620,22 @@ class TransformerDecoder(Cell):
  norm (Cell, optional): The layer normalization module. Default: ``None``.

  Inputs:
- - **tgt** (Tensor): The sequence to the decoder. For unbatched input, the shape is
- :math:`(T, E)` ; otherwise if `batch_first=False` in TransformerDecoderLayer, the shape is
- :math:`(T, N, E)` and if `batch_first=True` , the shape is :math:`(T, N, E)`, where :math:`(T)` is the
- target sequence length. Supported types: float16, float32, float64.
- - **memory** (Tensor): The sequence from the last layer of the encoder. Supported types: float16,
+ - **tgt** (Tensor) - The sequence to the decoder. For unbatched input, the shape is
+ :math:`(T, E)` ; otherwise if `batch_first=False` in :class:`mindspore.nn.TransformerDecoderLayer`,
+ the shape is :math:`(T, N, E)` and if `batch_first=True` , the shape is :math:`(N, T, E)`,
+ where :math:`(T)` is the target sequence length, :math:`(N)` is the number of batches,
+ and :math:`(E)` is the number of features. Supported types: float16, float32, float64.
+ - **memory** (Tensor) - The sequence from the last layer of the encoder. Supported types: float16,
  float32, float64.
- - **tgt_mask** (Tensor, optional): the mask of the tgt sequence. The shape is :math:`(T, T)`
- or :math:`(N*nhead, T, T)` , where `nhead` is the arguent in TransformerDecoderLayer.
+ - **tgt_mask** (Tensor, optional) - the mask of the tgt sequence. The shape is :math:`(T, T)`
+ or :math:`(N*nhead, T, T)` , where `nhead` is the argument in :class:`mindspore.nn.TransformerDecoderLayer`.
  Supported types: float16, float32, float64, bool. Default: ``None``.
- - **memory_mask** (Tensor, optional): the mask of the memory sequence. The shape is
+ - **memory_mask** (Tensor, optional) - the mask of the memory sequence. The shape is
  :math:`(T, S)` . Supported types: float16, float32, float64, bool. Default: ``None``.
- - **tgt_key_padding_mask** (Tensor, optional): the mask of the tgt keys per batch. Supported
+ - **tgt_key_padding_mask** (Tensor, optional) - the mask of the tgt keys per batch.
+ Shape is :math:`(T)`. Supported
  types: float16, float32, float64, bool. Default: ``None``.
- - **memory_key_padding_mask** (Tensor, optional): the mask of the memory keys per batch. The shape
+ - **memory_key_padding_mask** (Tensor, optional) - the mask of the memory keys per batch. The shape
  is :math:`(S)` for unbatched input, otherwise :math:`(N, S)` . Supported types: float16, float32,
  float64, bool. Default: ``None``.

@@ -710,28 +712,28 @@ class Transformer(Cell):
  dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``mstype.float32`` .

  Inputs:
- - **src** (Tensor): The source sequence to the encoder. For unbatched input, the shape is
+ - **src** (Tensor) - The source sequence to the encoder. For unbatched input, the shape is
  :math:`(S, E)` ; otherwise if `batch_first=False` , the shape is :math:`(S, N, E)` and if
- `batch_first=True` , the shape is :math:`(S, N, E)`, where :math:`(S)` is the source sequence
+ `batch_first=True` , the shape is :math:`(N, S, E)`, where :math:`(S)` is the source sequence
  length, :math:`(N)` is the batch number and :math:`(E)` is the feature number. Supported
  types: float16, float32, float64.
- - **tgt** (Tensor): The target sequence to the decoder. For unbatched input, the shape is
+ - **tgt** (Tensor) - The target sequence to the decoder. For unbatched input, the shape is
  :math:`(T, E)` ; otherwise if `batch_first=False` , the shape is :math:`(T, N, E)` and if
- `batch_first=True` , the shape is :math:`(T, N, E)`, where :math:`(T)` is the target sequence
+ `batch_first=True` , the shape is :math:`(N, T, E)`, where :math:`(T)` is the target sequence
  length. Supported types: float16, float32, float64.
- - **src_mask** (Tensor, optional): The mask of the src sequence. The shape is :math:`(S, S)`
+ - **src_mask** (Tensor, optional) - The mask of the src sequence. The shape is :math:`(S, S)`
  or :math:`(N*nhead, S, S)`. Supported types: float16, float32, float64, bool. Default: ``None``.
- - **tgt_mask** (Tensor, optional): The mask of the tgt sequence. The shape is :math:`(T, T)`
+ - **tgt_mask** (Tensor, optional) - The mask of the tgt sequence. The shape is :math:`(T, T)`
  or :math:`(N*nhead, T, T)`. Supported types: float16, float32, float64, bool. Default: ``None``.
- - **memory_mask** (Tensor, optional): The additive mask of the encoder output. The shape is
+ - **memory_mask** (Tensor, optional) - The additive mask of the encoder output. The shape is
  :math:`(T, S)` . Supported types: float16, float32, float64, bool. Default: ``None``.
- - **src_key_padding_mask** (Tensor, optional): The mask of src keys per batch. The shape is
+ - **src_key_padding_mask** (Tensor, optional) - The mask of src keys per batch. The shape is
  :math:`(S)` for unbatched input, otherwise :math:`(N, S)` . Supported types: float16, float32,
  float64, bool. Default: ``None``.
- - **tgt_key_padding_mask** (Tensor, optional): The mask of tgt keys per batch. The shape is
+ - **tgt_key_padding_mask** (Tensor, optional) - The mask of tgt keys per batch. The shape is
  :math:`(T)` for unbatched input, otherwise :math:`(N, S)` . Supported types: float16, float32,
  float64, bool. Default: ``None``.
- - **memory_key_padding_mask** (Tensor, optional): The mask of memory keys per batch. The shape
+ - **memory_key_padding_mask** (Tensor, optional) - The mask of memory keys per batch. The shape
  is :math:`(S)` for unbatched input, otherwise :math:`(N, S)` . Supported types: float16,
  float32, float64, bool. Default: ``None``.
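Finally, a hedged end-to-end sketch of the full `Transformer` with the corrected `batch_first=False` shapes (`src` is `(S, N, E)`, `tgt` is `(T, N, E)`); the hyperparameters are illustrative assumptions, not values from the package:

    import numpy as np
    import mindspore as ms
    from mindspore import nn

    net = nn.Transformer(d_model=32, nhead=4, num_encoder_layers=2,
                         num_decoder_layers=2, dim_feedforward=64)

    S, T, N, E = 10, 6, 2, 32
    src = ms.Tensor(np.random.randn(S, N, E).astype(np.float32))
    tgt = ms.Tensor(np.random.randn(T, N, E).astype(np.float32))
    out = net(src, tgt)
    print(out.shape)   # (6, 2, 32): target length, batch number, feature number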