mindspore 2.4.10-cp39-cp39-win_amd64.whl → 2.6.0-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (579)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +13 -6
  3. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  6. mindspore/_check_jit_forbidden_api.py +3 -0
  7. mindspore/_checkparam.py +3 -38
  8. mindspore/_deprecated/__init__.py +17 -0
  9. mindspore/_deprecated/jit.py +198 -0
  10. mindspore/_extends/builtin_operations.py +1 -1
  11. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  12. mindspore/_extends/parse/__init__.py +6 -7
  13. mindspore/_extends/parse/compile_config.py +83 -0
  14. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +47 -198
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +229 -99
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +11 -5
  27. mindspore/avcodec-59.dll +0 -0
  28. mindspore/avdevice-59.dll +0 -0
  29. mindspore/avfilter-8.dll +0 -0
  30. mindspore/avformat-59.dll +0 -0
  31. mindspore/avutil-57.dll +0 -0
  32. mindspore/boost/__init__.py +2 -2
  33. mindspore/boost/base.py +3 -7
  34. mindspore/boost/boost_cell_wrapper.py +138 -43
  35. mindspore/common/__init__.py +6 -3
  36. mindspore/common/_grad_function.py +56 -0
  37. mindspore/common/_pijit_context.py +14 -5
  38. mindspore/common/_register_for_tensor.py +1 -2
  39. mindspore/common/_stub_tensor.py +30 -14
  40. mindspore/common/_tensor_cpp_method.py +17 -0
  41. mindspore/common/_tensor_docs.py +4760 -0
  42. mindspore/common/api.py +480 -372
  43. mindspore/common/auto_dynamic_shape.py +41 -44
  44. mindspore/common/dtype.py +39 -36
  45. mindspore/common/dump.py +9 -6
  46. mindspore/common/file_system.py +9 -1
  47. mindspore/common/generator.py +5 -0
  48. mindspore/common/hook_handle.py +6 -2
  49. mindspore/common/initializer.py +13 -10
  50. mindspore/common/jit_begin_end.py +94 -0
  51. mindspore/common/jit_config.py +6 -1
  52. mindspore/common/jit_context.py +76 -0
  53. mindspore/common/jit_trace.py +378 -0
  54. mindspore/common/lazy_inline.py +9 -3
  55. mindspore/common/mindir_util.py +10 -2
  56. mindspore/common/mutable.py +5 -4
  57. mindspore/common/parameter.py +135 -52
  58. mindspore/common/seed.py +2 -2
  59. mindspore/common/sparse_tensor.py +23 -17
  60. mindspore/common/tensor.py +975 -1981
  61. mindspore/communication/__init__.py +7 -5
  62. mindspore/communication/_comm_helper.py +52 -2
  63. mindspore/communication/comm_func.py +240 -181
  64. mindspore/communication/management.py +95 -26
  65. mindspore/context.py +324 -573
  66. mindspore/dataset/__init__.py +65 -37
  67. mindspore/dataset/audio/__init__.py +2 -8
  68. mindspore/dataset/audio/transforms.py +3 -17
  69. mindspore/dataset/callback/ds_callback.py +2 -1
  70. mindspore/dataset/core/config.py +87 -6
  71. mindspore/dataset/engine/cache_admin.py +3 -3
  72. mindspore/dataset/engine/cache_client.py +6 -5
  73. mindspore/dataset/engine/datasets.py +292 -267
  74. mindspore/dataset/engine/datasets_audio.py +22 -8
  75. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  76. mindspore/dataset/engine/datasets_text.py +78 -48
  77. mindspore/dataset/engine/datasets_user_defined.py +183 -117
  78. mindspore/dataset/engine/datasets_vision.py +120 -44
  79. mindspore/dataset/engine/iterators.py +283 -63
  80. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  81. mindspore/dataset/engine/obs/util.py +8 -0
  82. mindspore/dataset/engine/queue.py +40 -0
  83. mindspore/dataset/engine/samplers.py +289 -43
  84. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  85. mindspore/dataset/engine/validators.py +53 -11
  86. mindspore/dataset/text/__init__.py +7 -6
  87. mindspore/dataset/text/transforms.py +6 -5
  88. mindspore/dataset/text/utils.py +3 -3
  89. mindspore/dataset/transforms/__init__.py +0 -9
  90. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  91. mindspore/dataset/transforms/transforms.py +31 -14
  92. mindspore/dataset/utils/browse_dataset.py +1 -1
  93. mindspore/dataset/vision/__init__.py +2 -9
  94. mindspore/dataset/vision/transforms.py +202 -158
  95. mindspore/dataset/vision/utils.py +7 -5
  96. mindspore/dataset/vision/validators.py +1 -2
  97. mindspore/device_context/__init__.py +21 -0
  98. mindspore/device_context/ascend/__init__.py +25 -0
  99. mindspore/device_context/ascend/device.py +72 -0
  100. mindspore/device_context/ascend/op_debug.py +153 -0
  101. mindspore/device_context/ascend/op_precision.py +193 -0
  102. mindspore/device_context/ascend/op_tuning.py +123 -0
  103. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  104. mindspore/device_context/cpu/device.py +62 -0
  105. mindspore/device_context/cpu/op_tuning.py +43 -0
  106. mindspore/device_context/gpu/__init__.py +21 -0
  107. mindspore/device_context/gpu/device.py +70 -0
  108. mindspore/device_context/gpu/op_precision.py +67 -0
  109. mindspore/device_context/gpu/op_tuning.py +175 -0
  110. mindspore/device_manager.py +170 -0
  111. mindspore/dnnl.dll +0 -0
  112. mindspore/experimental/es/embedding_service.py +35 -27
  113. mindspore/experimental/llm_boost/__init__.py +1 -0
  114. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  115. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +209 -0
  116. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  117. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  118. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  119. mindspore/experimental/llm_boost/register.py +1 -0
  120. mindspore/experimental/map_parameter.py +4 -4
  121. mindspore/experimental/optim/adadelta.py +6 -6
  122. mindspore/experimental/optim/adagrad.py +4 -4
  123. mindspore/experimental/optim/adam.py +7 -0
  124. mindspore/experimental/optim/adamax.py +4 -4
  125. mindspore/experimental/optim/adamw.py +4 -0
  126. mindspore/experimental/optim/asgd.py +1 -1
  127. mindspore/experimental/optim/lr_scheduler.py +73 -46
  128. mindspore/experimental/optim/radam.py +34 -31
  129. mindspore/experimental/optim/rprop.py +1 -1
  130. mindspore/experimental/optim/sgd.py +1 -1
  131. mindspore/hal/contiguous_tensors_handle.py +6 -10
  132. mindspore/hal/device.py +55 -53
  133. mindspore/hal/event.py +52 -52
  134. mindspore/hal/memory.py +179 -120
  135. mindspore/hal/stream.py +150 -109
  136. mindspore/include/api/context.h +0 -1
  137. mindspore/include/dataset/constants.h +7 -4
  138. mindspore/include/dataset/execute.h +2 -2
  139. mindspore/jpeg62.dll +0 -0
  140. mindspore/log.py +50 -0
  141. mindspore/mindrecord/__init__.py +21 -8
  142. mindspore/mindrecord/config.py +17 -316
  143. mindspore/mindrecord/filereader.py +1 -9
  144. mindspore/mindrecord/filewriter.py +5 -15
  145. mindspore/mindrecord/mindpage.py +1 -9
  146. mindspore/mindspore_backend_common.dll +0 -0
  147. mindspore/mindspore_backend_manager.dll +0 -0
  148. mindspore/mindspore_common.dll +0 -0
  149. mindspore/mindspore_core.dll +0 -0
  150. mindspore/mindspore_dump.dll +0 -0
  151. mindspore/mindspore_frontend.dll +0 -0
  152. mindspore/mindspore_glog.dll +0 -0
  153. mindspore/mindspore_memory_pool.dll +0 -0
  154. mindspore/mindspore_ms_backend.dll +0 -0
  155. mindspore/mindspore_ops.dll +0 -0
  156. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  157. mindspore/mindspore_ops_kernel_common.dll +0 -0
  158. mindspore/mindspore_profiler.dll +0 -0
  159. mindspore/mindspore_pyboost.dll +0 -0
  160. mindspore/mindspore_pynative.dll +0 -0
  161. mindspore/mindspore_res_manager.dll +0 -0
  162. mindspore/mindspore_runtime_pipeline.dll +0 -0
  163. mindspore/mint/__init__.py +798 -761
  164. mindspore/mint/distributed/__init__.py +70 -4
  165. mindspore/mint/distributed/distributed.py +2679 -44
  166. mindspore/mint/linalg/__init__.py +8 -0
  167. mindspore/mint/nn/__init__.py +743 -22
  168. mindspore/mint/nn/functional.py +716 -23
  169. mindspore/mint/nn/layer/__init__.py +21 -4
  170. mindspore/mint/nn/layer/_functions.py +334 -0
  171. mindspore/mint/nn/layer/activation.py +276 -1
  172. mindspore/mint/nn/layer/basic.py +123 -0
  173. mindspore/mint/nn/layer/conv.py +933 -0
  174. mindspore/mint/nn/layer/normalization.py +223 -28
  175. mindspore/mint/nn/layer/padding.py +797 -0
  176. mindspore/mint/nn/layer/pooling.py +235 -0
  177. mindspore/mint/optim/__init__.py +3 -1
  178. mindspore/mint/optim/adam.py +223 -0
  179. mindspore/mint/optim/adamw.py +26 -19
  180. mindspore/mint/optim/sgd.py +171 -0
  181. mindspore/mint/special/__init__.py +2 -1
  182. mindspore/multiprocessing/__init__.py +5 -0
  183. mindspore/nn/__init__.py +4 -1
  184. mindspore/nn/cell.py +1373 -192
  185. mindspore/nn/dynamic_lr.py +2 -1
  186. mindspore/nn/layer/activation.py +29 -27
  187. mindspore/nn/layer/basic.py +51 -35
  188. mindspore/nn/layer/channel_shuffle.py +3 -3
  189. mindspore/nn/layer/container.py +1 -1
  190. mindspore/nn/layer/conv.py +53 -42
  191. mindspore/nn/layer/embedding.py +12 -11
  192. mindspore/nn/layer/normalization.py +56 -49
  193. mindspore/nn/layer/padding.py +4 -3
  194. mindspore/nn/layer/pooling.py +120 -42
  195. mindspore/nn/layer/rnn_cells.py +1 -1
  196. mindspore/nn/layer/rnns.py +2 -1
  197. mindspore/nn/layer/timedistributed.py +5 -5
  198. mindspore/nn/layer/transformer.py +59 -36
  199. mindspore/nn/learning_rate_schedule.py +8 -4
  200. mindspore/nn/loss/loss.py +58 -55
  201. mindspore/nn/optim/ada_grad.py +7 -5
  202. mindspore/nn/optim/adadelta.py +11 -9
  203. mindspore/nn/optim/adafactor.py +1 -1
  204. mindspore/nn/optim/adam.py +19 -15
  205. mindspore/nn/optim/adamax.py +8 -7
  206. mindspore/nn/optim/adasum.py +5 -5
  207. mindspore/nn/optim/asgd.py +3 -1
  208. mindspore/nn/optim/ftrl.py +11 -9
  209. mindspore/nn/optim/lamb.py +1 -1
  210. mindspore/nn/optim/lars.py +1 -4
  211. mindspore/nn/optim/lazyadam.py +12 -10
  212. mindspore/nn/optim/momentum.py +7 -6
  213. mindspore/nn/optim/optimizer.py +3 -3
  214. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  215. mindspore/nn/optim/rmsprop.py +13 -12
  216. mindspore/nn/optim/rprop.py +11 -9
  217. mindspore/nn/optim/sgd.py +9 -6
  218. mindspore/nn/optim/tft_wrapper.py +5 -2
  219. mindspore/nn/optim/thor.py +2 -1
  220. mindspore/nn/probability/bijector/bijector.py +17 -11
  221. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  222. mindspore/nn/probability/bijector/invert.py +2 -2
  223. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  224. mindspore/nn/probability/bijector/softplus.py +3 -2
  225. mindspore/nn/probability/distribution/beta.py +3 -3
  226. mindspore/nn/probability/distribution/categorical.py +1 -1
  227. mindspore/nn/probability/distribution/cauchy.py +4 -2
  228. mindspore/nn/probability/distribution/exponential.py +6 -7
  229. mindspore/nn/probability/distribution/gamma.py +2 -2
  230. mindspore/nn/probability/distribution/gumbel.py +2 -2
  231. mindspore/nn/probability/distribution/half_normal.py +5 -3
  232. mindspore/nn/probability/distribution/logistic.py +5 -3
  233. mindspore/nn/probability/distribution/poisson.py +1 -1
  234. mindspore/nn/probability/distribution/uniform.py +5 -3
  235. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  236. mindspore/nn/reinforcement/tensor_array.py +1 -1
  237. mindspore/nn/utils/init.py +13 -11
  238. mindspore/nn/wrap/__init__.py +6 -6
  239. mindspore/nn/wrap/cell_wrapper.py +181 -122
  240. mindspore/nn/wrap/grad_reducer.py +45 -36
  241. mindspore/nn/wrap/loss_scale.py +6 -7
  242. mindspore/numpy/array_creations.py +63 -65
  243. mindspore/numpy/array_ops.py +149 -144
  244. mindspore/numpy/logic_ops.py +41 -42
  245. mindspore/numpy/math_ops.py +361 -359
  246. mindspore/numpy/utils.py +17 -18
  247. mindspore/numpy/utils_const.py +5 -6
  248. mindspore/opencv_core452.dll +0 -0
  249. mindspore/opencv_imgcodecs452.dll +0 -0
  250. mindspore/opencv_imgproc452.dll +0 -0
  251. mindspore/ops/__init__.py +5 -3
  252. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  253. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  254. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  255. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  256. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  257. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  258. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  259. mindspore/ops/_register_for_op.py +0 -11
  260. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  261. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  262. mindspore/ops/_vmap/vmap_array_ops.py +52 -25
  263. mindspore/ops/_vmap/vmap_base.py +0 -2
  264. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  265. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  266. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  267. mindspore/ops/auto_generate/__init__.py +4 -3
  268. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +258 -46
  269. mindspore/ops/auto_generate/gen_extend_func.py +757 -185
  270. mindspore/ops/auto_generate/gen_ops_def.py +4197 -2243
  271. mindspore/ops/auto_generate/gen_ops_prim.py +16976 -6055
  272. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  273. mindspore/ops/composite/__init__.py +2 -1
  274. mindspore/ops/composite/base.py +20 -25
  275. mindspore/ops/composite/math_ops.py +6 -16
  276. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  277. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  278. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  279. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  280. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  281. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  282. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  283. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  284. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  285. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  286. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  287. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  288. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  289. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  290. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  291. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  292. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  293. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  294. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  295. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  296. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  299. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  301. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  302. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  303. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  304. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  305. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  306. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  307. mindspore/ops/function/__init__.py +40 -2
  308. mindspore/ops/function/_add_attr_func.py +58 -0
  309. mindspore/ops/function/array_func.py +2089 -2403
  310. mindspore/ops/function/clip_func.py +80 -23
  311. mindspore/ops/function/debug_func.py +57 -57
  312. mindspore/ops/function/grad/__init__.py +1 -0
  313. mindspore/ops/function/grad/grad_func.py +104 -71
  314. mindspore/ops/function/image_func.py +2 -2
  315. mindspore/ops/function/linalg_func.py +47 -78
  316. mindspore/ops/function/math_func.py +4351 -3813
  317. mindspore/ops/function/nn_func.py +1712 -637
  318. mindspore/ops/function/other_func.py +159 -1
  319. mindspore/ops/function/parameter_func.py +18 -84
  320. mindspore/ops/function/random_func.py +452 -387
  321. mindspore/ops/function/reshard_func.py +4 -70
  322. mindspore/ops/function/sparse_func.py +3 -3
  323. mindspore/ops/function/sparse_unary_func.py +6 -6
  324. mindspore/ops/function/spectral_func.py +25 -58
  325. mindspore/ops/function/vmap_func.py +26 -18
  326. mindspore/ops/functional.py +23 -7
  327. mindspore/ops/functional_overload.py +1548 -0
  328. mindspore/ops/op_info_register.py +32 -244
  329. mindspore/ops/operations/__init__.py +23 -15
  330. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  331. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  332. mindspore/ops/operations/_grad_ops.py +2 -43
  333. mindspore/ops/operations/_infer_ops.py +2 -1
  334. mindspore/ops/operations/_inner_ops.py +43 -84
  335. mindspore/ops/operations/_ms_kernel.py +4 -10
  336. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  337. mindspore/ops/operations/_scalar_ops.py +3 -2
  338. mindspore/ops/operations/_sequence_ops.py +1 -1
  339. mindspore/ops/operations/_tensor_array.py +1 -1
  340. mindspore/ops/operations/array_ops.py +81 -324
  341. mindspore/ops/operations/comm_ops.py +154 -108
  342. mindspore/ops/operations/custom_ops.py +298 -87
  343. mindspore/ops/operations/debug_ops.py +157 -59
  344. mindspore/ops/operations/inner_ops.py +7 -5
  345. mindspore/ops/operations/linalg_ops.py +1 -57
  346. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  347. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  348. mindspore/ops/operations/math_ops.py +32 -234
  349. mindspore/ops/operations/nn_ops.py +212 -531
  350. mindspore/ops/operations/other_ops.py +62 -9
  351. mindspore/ops/operations/random_ops.py +13 -7
  352. mindspore/ops/operations/reshard_ops.py +1 -1
  353. mindspore/ops/operations/sparse_ops.py +2 -2
  354. mindspore/ops/primitive.py +66 -53
  355. mindspore/ops/tensor_method.py +1895 -0
  356. mindspore/ops_generate/__init__.py +0 -5
  357. mindspore/ops_generate/aclnn/__init__.py +0 -0
  358. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  359. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  360. mindspore/ops_generate/api/__init__.py +0 -0
  361. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  362. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  363. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  364. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  365. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  366. mindspore/ops_generate/api/gen_api.py +103 -0
  367. mindspore/ops_generate/api/op_api_proto.py +235 -0
  368. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  369. mindspore/ops_generate/common/__init__.py +0 -0
  370. mindspore/ops_generate/common/base_generator.py +11 -0
  371. mindspore/ops_generate/common/gen_constants.py +91 -0
  372. mindspore/ops_generate/common/gen_utils.py +348 -0
  373. mindspore/ops_generate/common/op_proto.py +473 -0
  374. mindspore/ops_generate/common/template.py +523 -0
  375. mindspore/ops_generate/gen_ops.py +22 -1069
  376. mindspore/ops_generate/op_def/__init__.py +0 -0
  377. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  378. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  379. mindspore/ops_generate/op_def/ops_def_cc_generator.py +296 -0
  380. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  381. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  382. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  383. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  384. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  385. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  386. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  387. mindspore/ops_generate/pyboost/__init__.py +0 -0
  388. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  389. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  390. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  391. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  392. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  393. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  394. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  395. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  396. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  397. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  398. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  399. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  400. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  401. mindspore/ops_generate/resources/__init__.py +0 -0
  402. mindspore/ops_generate/resources/resource_list.py +30 -0
  403. mindspore/ops_generate/resources/resource_loader.py +36 -0
  404. mindspore/ops_generate/resources/resource_manager.py +64 -0
  405. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  406. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  407. mindspore/parallel/__init__.py +7 -3
  408. mindspore/parallel/_auto_parallel_context.py +159 -40
  409. mindspore/parallel/_cell_wrapper.py +132 -15
  410. mindspore/parallel/_parallel_serialization.py +107 -5
  411. mindspore/parallel/_ps_context.py +1 -1
  412. mindspore/parallel/_recovery_context.py +7 -2
  413. mindspore/parallel/_tensor.py +142 -18
  414. mindspore/parallel/_utils.py +199 -23
  415. mindspore/parallel/algo_parameter_config.py +4 -4
  416. mindspore/parallel/auto_parallel.py +732 -0
  417. mindspore/parallel/checkpoint_convert.py +159 -0
  418. mindspore/parallel/checkpoint_transform.py +700 -35
  419. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  420. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  421. mindspore/parallel/cluster/run.py +21 -4
  422. mindspore/parallel/function/__init__.py +24 -0
  423. mindspore/parallel/function/reshard_func.py +258 -0
  424. mindspore/parallel/nn/__init__.py +25 -0
  425. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  426. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  427. mindspore/parallel/parameter_broadcast.py +25 -14
  428. mindspore/parallel/shard.py +137 -59
  429. mindspore/parallel/transform_safetensors.py +364 -305
  430. mindspore/profiler/__init__.py +22 -5
  431. mindspore/profiler/analysis/__init__.py +0 -0
  432. mindspore/profiler/analysis/parser/__init__.py +0 -0
  433. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  434. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  435. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  436. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  437. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  438. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  439. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  440. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  441. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +109 -0
  442. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  443. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  444. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  445. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  446. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  447. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  448. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  449. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  450. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  451. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  452. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  453. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  454. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  455. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  456. mindspore/profiler/analysis/task_manager.py +131 -0
  457. mindspore/profiler/analysis/time_converter.py +84 -0
  458. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  459. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  460. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  461. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  462. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  463. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  464. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  465. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  466. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  467. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  468. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  469. mindspore/profiler/analysis/work_flow.py +73 -0
  470. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  471. mindspore/profiler/common/command_executor.py +90 -0
  472. mindspore/profiler/common/constant.py +186 -3
  473. mindspore/profiler/common/file_manager.py +208 -0
  474. mindspore/profiler/common/log.py +130 -0
  475. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  476. mindspore/profiler/common/path_manager.py +395 -0
  477. mindspore/profiler/common/process_bar.py +168 -0
  478. mindspore/profiler/common/process_pool.py +9 -3
  479. mindspore/profiler/common/profiler_context.py +500 -0
  480. mindspore/profiler/common/profiler_info.py +304 -0
  481. mindspore/profiler/common/profiler_meta_data.py +74 -0
  482. mindspore/profiler/common/profiler_output_path.py +284 -0
  483. mindspore/profiler/common/profiler_parameters.py +251 -0
  484. mindspore/profiler/common/profiler_path_manager.py +179 -0
  485. mindspore/profiler/common/record_function.py +76 -0
  486. mindspore/profiler/common/tlv_decoder.py +76 -0
  487. mindspore/profiler/common/util.py +75 -2
  488. mindspore/profiler/dynamic_profiler.py +341 -75
  489. mindspore/profiler/envprofiler.py +163 -0
  490. mindspore/profiler/experimental_config.py +197 -0
  491. mindspore/profiler/mstx.py +242 -0
  492. mindspore/profiler/platform/__init__.py +21 -0
  493. mindspore/profiler/platform/base_profiler.py +40 -0
  494. mindspore/profiler/platform/cpu_profiler.py +124 -0
  495. mindspore/profiler/platform/gpu_profiler.py +74 -0
  496. mindspore/profiler/platform/npu_profiler.py +335 -0
  497. mindspore/profiler/profiler.py +1073 -90
  498. mindspore/profiler/profiler_action_controller.py +187 -0
  499. mindspore/profiler/profiler_interface.py +118 -0
  500. mindspore/profiler/schedule.py +243 -0
  501. mindspore/rewrite/api/node.py +15 -13
  502. mindspore/rewrite/api/symbol_tree.py +2 -3
  503. mindspore/run_check/_check_version.py +27 -20
  504. mindspore/run_check/run_check.py +1 -1
  505. mindspore/runtime/__init__.py +37 -0
  506. mindspore/runtime/device.py +27 -0
  507. mindspore/runtime/event.py +209 -0
  508. mindspore/runtime/executor.py +177 -0
  509. mindspore/runtime/memory.py +416 -0
  510. mindspore/runtime/stream.py +460 -0
  511. mindspore/runtime/thread_bind_core.py +401 -0
  512. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  513. mindspore/swresample-4.dll +0 -0
  514. mindspore/swscale-6.dll +0 -0
  515. mindspore/tinyxml2.dll +0 -0
  516. mindspore/train/__init__.py +8 -8
  517. mindspore/train/_utils.py +96 -27
  518. mindspore/train/amp.py +9 -5
  519. mindspore/train/callback/__init__.py +2 -2
  520. mindspore/train/callback/_callback.py +2 -16
  521. mindspore/train/callback/_checkpoint.py +53 -55
  522. mindspore/train/callback/_cluster_monitor.py +14 -18
  523. mindspore/train/callback/_early_stop.py +1 -1
  524. mindspore/train/callback/_flops_collector.py +103 -68
  525. mindspore/train/callback/_history.py +8 -5
  526. mindspore/train/callback/_lambda_callback.py +2 -2
  527. mindspore/train/callback/_landscape.py +0 -3
  528. mindspore/train/callback/_loss_monitor.py +2 -1
  529. mindspore/train/callback/_on_request_exit.py +6 -5
  530. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  531. mindspore/train/callback/_summary_collector.py +52 -19
  532. mindspore/train/callback/_time_monitor.py +2 -1
  533. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +228 -108
  534. mindspore/train/data_sink.py +25 -2
  535. mindspore/train/dataset_helper.py +15 -16
  536. mindspore/train/loss_scale_manager.py +8 -7
  537. mindspore/train/metrics/accuracy.py +3 -3
  538. mindspore/train/metrics/confusion_matrix.py +9 -9
  539. mindspore/train/metrics/error.py +3 -3
  540. mindspore/train/metrics/hausdorff_distance.py +4 -4
  541. mindspore/train/metrics/mean_surface_distance.py +3 -3
  542. mindspore/train/metrics/metric.py +0 -12
  543. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  544. mindspore/train/metrics/precision.py +11 -10
  545. mindspore/train/metrics/recall.py +9 -9
  546. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  547. mindspore/train/mind_ir_pb2.py +174 -46
  548. mindspore/train/model.py +269 -136
  549. mindspore/train/serialization.py +622 -978
  550. mindspore/train/summary/_summary_adapter.py +2 -2
  551. mindspore/train/summary/summary_record.py +2 -3
  552. mindspore/train/train_thor/model_thor.py +1 -1
  553. mindspore/turbojpeg.dll +0 -0
  554. mindspore/utils/__init__.py +6 -3
  555. mindspore/utils/dryrun.py +140 -0
  556. mindspore/utils/hooks.py +81 -0
  557. mindspore/utils/runtime_execution_order_check.py +552 -0
  558. mindspore/utils/utils.py +138 -4
  559. mindspore/version.py +1 -1
  560. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/METADATA +3 -3
  561. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/RECORD +564 -395
  562. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +1 -1
  563. mindspore/_install_custom.py +0 -43
  564. mindspore/common/_register_for_adapter.py +0 -74
  565. mindspore/common/_tensor_overload.py +0 -139
  566. mindspore/mindspore_np_dtype.dll +0 -0
  567. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  568. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  569. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  570. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  571. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  572. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  573. mindspore/ops_generate/gen_utils.py +0 -209
  574. mindspore/ops_generate/op_proto.py +0 -145
  575. mindspore/ops_generate/template.py +0 -261
  576. mindspore/profiler/envprofiling.py +0 -254
  577. mindspore/profiler/profiling.py +0 -1926
  578. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  579. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
@@ -224,7 +224,8 @@ def inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, deca
  total_step (int): The total number of steps.
  step_per_epoch (int): The number of steps in per epoch.
  decay_epoch (int): Number of epochs to decay over.
- is_stair (bool): If true, learning rate is decayed once every `decay_epoch` times. Default: ``False`` .
+ is_stair (bool): If true, learning rate is decayed once every `decay_epoch` times. If False, the learning rate
+ decays for every epoch. Default: ``False`` .

  Returns:
  list[float]. The size of list is `total_step`.
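For context, a minimal sketch of what the clarified `is_stair` wording means in practice, using the documented formula lr / (1 + decay_rate * current_epoch / decay_epoch):

```python
# is_stair=False decays every epoch; is_stair=True steps down only once per
# decay_epoch (the epoch ratio is floored before the division).
from mindspore import nn

smooth = nn.inverse_decay_lr(0.1, 0.5, total_step=6, step_per_epoch=1, decay_epoch=2)
stair = nn.inverse_decay_lr(0.1, 0.5, total_step=6, step_per_epoch=1, decay_epoch=2,
                            is_stair=True)
print(smooth)  # shrinks a little every epoch: [0.1, 0.08, 0.0667, 0.0571, 0.05, 0.0444]
print(stair)   # holds for 2 epochs, then steps: [0.1, 0.1, 0.0667, 0.0667, 0.05, 0.05]
```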
@@ -82,7 +82,7 @@ class CELU(Cell):
  :align: center

  Args:
- alpha (float): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .
+ alpha (float, optional): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .

  Inputs:
  - **x** (Tensor) - The input of CELU. The required dtype is float16 or float32.
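A minimal usage sketch for the now-optional `alpha` argument (CELU computes max(0, x) + min(0, alpha * (exp(x / alpha) - 1)), so negative inputs saturate smoothly toward -alpha):

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.array([-2.0, -1.0, 0.0, 2.0]), mindspore.float32)
celu = nn.CELU(alpha=1.0)
print(celu(x))  # ~[-0.8647, -0.6321, 0., 2.]
```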
@@ -136,20 +136,22 @@ class Softmin(Cell):
  where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

  Args:
- axis (Union[int, tuple[int]]): The axis to apply Softmin operation, if the dimension of input `x` is x.ndim,
- the range of axis is `[-x.ndim, x.ndim)`. -1 means the last dimension. Default: ``-1`` .
+ axis (Union[int, tuple[int]], optional): The axis to apply Softmin operation,
+ if the dimension of input `x` is x.ndim,
+ the range of axis is :math:`[-x.ndim, x.ndim)`. -1 means the last dimension.
+ Default: ``-1`` . In CPU environment, `axis` only supports int type.

  Inputs:
  - **x** (Tensor) - Tensor for computing Softmin functions with data type of float16 or float32.

  Outputs:
- Tensor, which has the same type and shape as `x` with values in the range [0,1].
+ Tensor, which has the same type and shape as `x` with values in the range :math:`[0, 1]`.

  Raises:
  TypeError: If `axis` is neither an int nor a tuple.
  TypeError: If dtype of `x` is neither float16 nor float32.
  ValueError: If `axis` is a tuple whose length is less than 1.
- ValueError: If `axis` is a tuple whose elements are not all in the range [-x.ndim, x.ndim).
+ ValueError: If `axis` is a tuple whose elements are not all in the range :math:`[-x.ndim, x.ndim)`.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
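To make the `axis` semantics concrete, a short sketch (Softmin is softmax applied to -x, so the smallest entry along `axis` receives the largest weight):

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.array([[1.0, 2.0, 3.0]]), mindspore.float32)
softmin = nn.Softmin(axis=-1)
out = softmin(x)
print(out)        # largest probability on the smallest input value
print(out.sum())  # each slice along axis sums to 1
```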
@@ -179,7 +181,7 @@ class Softmax2d(Cell):
  r"""
  Softmax function applied to 2D features data.

- Applies `Softmax` to each location :math:`(c, h, w)` with an input Tensor of shape :math:`(C, H, W)` .
+ Applies `Softmax` to each location with an input Tensor of shape :math:`(C, H, W)` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
@@ -957,7 +959,7 @@ class GELU(Cell):
  :align: center

  Args:
- approximate (bool): Whether to enable approximation. Default: ``True`` .
+ approximate (bool, optional): Whether to enable approximation. Default: ``True`` .

  If `approximate` is ``True``, The gaussian error linear activation is:
@@ -965,7 +967,14 @@ class GELU(Cell):

  else, it is:

- :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt(2)))`, where P(X) ~ N(0, 1).
+ :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt(2)))`, where :math:`P(X) ~ N(0, 1)`.
+
+ Note:
+ - when calculating the input gradient of GELU with an input value of infinity, there are differences
+ in the output of the backward between ``Ascend`` and ``GPU``.
+ - when x is -inf, the computation result of ``Ascend`` is 0, and the computation result of ``GPU`` is Nan.
+ - when x is inf, the computation result of ``Ascend`` is dy, and the computation result of ``GPU`` is Nan.
+ - In mathematical terms, the result of Ascend has higher precision.

  Inputs:
  - **x** (Tensor) - The input of GELU with data type of float16, float32, or float64.
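A small sketch of the `approximate` flag the hunks above document: the default tanh approximation versus the exact erf form (the two differ slightly in the tails):

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
gelu_tanh = nn.GELU()                  # approximate=True (default)
gelu_erf = nn.GELU(approximate=False)  # 0.5 * x * (1 + erf(x / sqrt(2)))
print(gelu_tanh(x))
print(gelu_erf(x))
```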
@@ -974,13 +983,6 @@ class GELU(Cell):
  Outputs:
  Tensor, with the same type and shape as the `x`.

- Note:
- when calculating the input gradient of GELU with an input value of infinity, there are differences
- in the output of the backward between ``Ascend`` and ``GPU``.
- when x is -inf, the computation result of ``Ascend`` is 0, and the computation result of ``GPU`` is Nan.
- when x is inf, the computation result of ``Ascend`` is dy, and the computation result of ``GPU`` is Nan.
- In mathematical terms, the result of Ascend has higher precision.
-
  Raises:
  TypeError: If dtype of `x` is not one of float16, float32, or float64.

@@ -1165,7 +1167,7 @@ class PReLU(Cell):

  where :math:`x_i` is an element of an channel of the input.

- Here :math:`w` is a learnable parameter with a default initial value 0.25.
+ Here :math:`w` is a learnable parameter with a default initial value ``0.25``.
  Parameter :math:`w` has dimensionality of the argument channel. If called without argument
  channel, a single parameter :math:`w` will be shared across all channels.
@@ -1175,9 +1177,9 @@ class PReLU(Cell):
  :align: center

  Args:
- channel (int): The elements number of parameter :math:`w`.
- It could be an int, and the value is 1 or the channels number of input tensor `x`. Default: ``1`` .
- w (Union[float, list, Tensor]): The initial value of parameter. It could be a float, a float list or
+ channel (int, optional): The elements number of parameter :math:`w`.
+ It could be an int, and the value is ``1`` or the channels number of input tensor `x`. Default: ``1`` .
+ w (Union[float, list, Tensor], optional): The initial value of parameter. It could be a float, a float list or
  a tensor has the same dtype as the input tensor `x`. Default: ``0.25`` .

  Inputs:
@@ -1189,7 +1191,7 @@ class PReLU(Cell):

  Raises:
  TypeError: If `channel` is not an int.
- TypeError: If `w` is not one of a float, a float list, a float Tensor.
+ TypeError: If `w` is not one of a float, a list[float], a Tensor[float].
  TypeError: If dtype of `x` is neither float16 nor float32.
  ValueError: If the `x` is a 0-D or 1-D Tensor on Ascend.
  ValueError: If `channel` is less than 1.
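A usage sketch for the `channel`/`w` arguments above: one learnable slope per channel, initialized to 0.25 and applied only to negative inputs:

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.array([[[[-1.0, 2.0], [-3.0, 4.0]]]]), mindspore.float32)  # (N, C=1, H, W)
prelu = nn.PReLU(channel=1, w=0.25)
print(prelu(x))  # negatives scaled by w: [[-0.25, 2.], [-0.75, 4.]]
```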
@@ -1273,9 +1275,9 @@ class PReLUExt(Cell):
  no channel dim and the number of channels = 1.

  Args:
- num_parameters (int): number of `w` to learn. Although it takes an int as input,
+ num_parameters (int, optional): number of `w` to learn. Although it takes an int as input,
  there is only two legitimate values: 1, or the number of channels at Tensor `input`. Default: ``1`` .
- init (float): the initial value of `w`. Default: ``0.25`` .
+ init (float, optional): the initial value of `w`. Default: ``0.25`` .
  dtype (mindspore.dtype, optional): the type of `w`. Default: ``None`` . Supported data type
  is {float16, float32, bfloat16}.
@@ -1320,7 +1322,7 @@ class HSwish(Cell):
  Hard swish is defined as:

  .. math::
- \text{Hardswish}(input) =
+ \text{HSwish}(input) =
  \begin{cases}
  0, & \text{ if } input \leq -3, \\
  input, & \text{ if } input \geq +3, \\
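A numeric spot-check of the piecewise definition above (clamped to 0 below -3, identity above +3, x * (x + 3) / 6 in between):

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.array([-4.0, -1.0, 0.0, 1.0, 4.0]), mindspore.float32)
hswish = nn.HSwish()
print(hswish(x))  # [0., -0.3333, 0., 0.6667, 4.]
```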
@@ -1372,7 +1374,7 @@ class HSigmoid(Cell):
  Hard Sigmoid is defined as:

  .. math::
- \text{Hardsigmoid}(input) =
+ \text{HSigmoid}(input) =
  \begin{cases}
  0, & \text{ if } input \leq -3, \\
  1, & \text{ if } input \geq +3, \\
@@ -1578,7 +1580,7 @@ class HShrink(Cell):
  The formula is defined as follows:

  .. math::
- \text{HardShrink}(x) =
+ \text{HShrink}(x) =
  \begin{cases}
  x, & \text{ if } x > \lambda \\
  x, & \text{ if } x < -\lambda \\
@@ -1728,7 +1730,7 @@ class GLU(Cell):
  Here :math:`\sigma` is the sigmoid function, and :math:`\otimes` is the Hadamard product.

  Args:
- axis (int): the axis to split the input. Default: ``-1`` , the last axis in `x`.
+ axis (int, optional): the axis to split the input. Default: ``-1`` , the last axis in `x`.

  Inputs:
  - **x** (Tensor) - :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional dimensions.
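A short sketch of the `axis` argument: GLU splits that axis in half and computes a * sigmoid(b) from the two halves, so the split axis must have even length:

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.array([[1.0, 2.0, 3.0, 4.0]]), mindspore.float32)
glu = nn.GLU(axis=-1)
print(glu(x).shape)  # (1, 2): [1, 2] gated by sigmoid([3, 4])
```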
@@ -1811,7 +1813,7 @@ def get_activation(name, prim_name=None):
  >>> import mindspore.nn as nn
  >>> sigmoid = nn.get_activation('sigmoid')
  >>> print(sigmoid)
- Sigmoid<>
+ Sigmoid()
  """
  msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
  if name is None:
@@ -25,10 +25,9 @@ from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_util
  from mindspore.common.seed import _get_graph_seed
  from mindspore.common.tensor import Tensor
  from mindspore.common.initializer import initializer, HeUniform, Uniform
+ from mindspore import ops
  from mindspore.ops import operations as P
  from mindspore.ops import functional as F
- from mindspore.ops.function.nn_func import interpolate_ext
- from mindspore.ops.auto_generate import unfold_ext
  from mindspore.ops.operations import _inner_ops as inner
  from mindspore.ops.primitive import constexpr, Primitive, _primexpr
  from mindspore.common.parameter import Parameter
@@ -37,7 +36,6 @@ from mindspore import _checkparam as Validator
  from mindspore.nn.cell import Cell
  from mindspore.nn.layer.activation import get_activation
  from mindspore.common._decorator import deprecated
- from mindspore.ops.auto_generate import dropout_ext_op, fold_ext
  from mindspore.common.generator import default_generator

  __all__ = ['Dropout', 'Flatten', 'Dense', 'Linear', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold', 'Tril', 'Triu',
@@ -140,6 +138,7 @@ class Dropout(Cell):

  Inputs:
  - **x** (Tensor) - The input of Dropout with data type of float16 or float32.
+ The shape of `x` cannot be less than 1.

  Outputs:
  Tensor, output tensor with the same shape as the `x`.
@@ -225,8 +224,10 @@ class DropoutExt(Cell):
  - Parameter `p` means the probability of the element of the input tensor to be zeroed.

  Args:
- p (float): The dropout rate of input neurons, E.g. `p` =0.9, dropping out 90% of input neurons.
+ p (float, optional): The dropout rate of input neurons, E.g. `p` =0.9, dropping out 90% of input neurons.
  Default: ``0.5`` .
+ inplace (bool, optional): Whether to enable the operation in-place.
+ If set to ``True`` , will do this operation in-place. Default: ``False`` .

  Inputs:
  - **x** (Tensor) - The input of Dropout.
@@ -253,18 +254,23 @@ class DropoutExt(Cell):
  (2, 2, 3)
  """

- def __init__(self, p=0.5):
+ def __init__(self, p=0.5, inplace=False):
  """Initialize DropoutExt."""
  super(DropoutExt, self).__init__()
  self.p = p
- self.generator_step = Tensor(1, mstype.int64)
+ self.inplace = inplace
+ self.generator_step = Tensor(12, mstype.int64)

  def construct(self, x):
  if not self.training or self.p == 0:
  return x

  seed, offset = default_generator._step(self.generator_step) # pylint: disable=protected-access
- out, _ = dropout_ext_op(x, self.p, seed, offset)
+ out, _ = ops.auto_generate.dropout_ext_op(x, self.p, seed, offset)
+
+ if self.inplace:
+ x.copy_(out)
+ return x
  return out

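A sketch of the new `inplace` path shown in the hunk above, under the assumption (not confirmed by this diff) that `DropoutExt` is what `mindspore.mint.nn.Dropout` exposes in 2.6:

```python
# Assumption: mint.nn.Dropout forwards to DropoutExt and accepts the new
# inplace flag; treat this as illustrative, not a confirmed 2.6 API surface.
import mindspore
from mindspore import Tensor, mint
import numpy as np

x = Tensor(np.ones((2, 3)), mindspore.float32)
drop = mint.nn.Dropout(p=0.5, inplace=True)
drop.set_train()  # dropout is a no-op unless the cell is in training mode
out = drop(x)
print(out is x)   # True: construct() copies the result back into x via copy_()
```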
@@ -342,8 +348,8 @@ class Dropout2d(Cell):

  For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
  `2D` tensor input[i,j].
- Each channel will be zeroed out independently on every forward call with probability `p` using samples
- from a Bernoulli distribution.
+ At each forward propagation,
+ each channel will be independently determined to be set to zero with probability `p`.

  `Dropout2d` can improve the independence between channel feature maps.
@@ -479,6 +485,9 @@ class UpsampleExt(Cell):
  r"""
  For details, please refer to :func:`mindspore.mint.nn.functional.interpolate`.

+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
  Supported Platforms:
  ``Ascend``
@@ -511,8 +520,8 @@ class UpsampleExt(Cell):
  self.recompute_scale_factor = recompute_scale_factor

  def construct(self, input):
- out = interpolate_ext(input, self.size, self.scale_factor, self.mode,
- self.align_corners, self.recompute_scale_factor)
+ out = ops.function.nn_func.interpolate_ext(input, self.size, self.scale_factor, self.mode,
+ self.align_corners, self.recompute_scale_factor)
  return out

@@ -623,25 +632,27 @@ class Dense(Cell):
  where :math:`X` is the input tensors, :math:`\text{activation}` is the activation function passed as the activation
  argument (if passed in), :math:`\text{kernel}` is a weight matrix with the same
  data type as the :math:`X` created by the layer, and :math:`\text{bias}` is a bias vector
- with the same data type as the :math:`X` created by the layer (only if has_bias is True).
+ with the same data type as the :math:`X` created by the layer (only if `has_bias` is ``True``).

  .. warning::
- In PYNATIVE mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.
+ On the Ascend platform, if `bias` is ``False`` , the `x` cannot be greater than 6D in PYNATIVE or KBK mode.

  Args:
  in_channels (int): The number of channels in the input space.
  out_channels (int): The number of channels in the output space.
- weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype
- is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): The trainable weight_init parameter.
+ The dtype is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
  weight will be initialized using HeUniform.
- bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
- same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): The trainable bias_init parameter.
+ The dtype is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
  bias will be initialized using Uniform.
- has_bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
- activation (Union[str, Cell, Primitive, None]): activate function applied to the output of the fully connected
+ has_bias (bool, optional): Specifies whether the layer uses a bias vector :math:`\text{bias}`.
+ Default: ``True``.
+ activation (Union[str, Cell, Primitive, None], optional): activate function applied to
+ the output of the fully connected
  layer. Both activation name, e.g. 'relu', and mindspore activation function, e.g. mindspore.ops.ReLU(),
  are supported. Default: ``None`` .
- dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``mstype.float32`` .
+ dtype (:class:`mindspore.dtype`, optional): Data type of Parameter. Default: ``mstype.float32`` .
  When `weight_init` is Tensor, Parameter has the same data type as `weight_init` ,
  in other cases, Parameter has the same data type as `dtype`, the same goes for `bias_init`.
@@ -660,7 +671,7 @@ class Dense(Cell):
  is not equal to `out_channels` or shape[1] of `weight_init` is not equal to `in_channels`.
  ValueError: If length of shape of `bias_init` is not equal to 1
  or shape[0] of `bias_init` is not equal to `out_channels`.
- RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PYNATIVE mode.
+ RuntimeError: On the Ascend platform, if `bias` is ``False`` and `x` is greater than 6D in PYNATIVE or KBK mode.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
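A short usage sketch tying the Dense arguments documented above together:

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.ones((2, 3)), mindspore.float32)
net = nn.Dense(3, 4, has_bias=True, activation='relu')
print(net(x).shape)  # (2, 4): outputs = activation(X @ kernel.T + bias)
```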
@@ -763,23 +774,28 @@ class Linear(Cell):
  \text{outputs} = X * kernel + bias

  .. warning::
- In PYNATIVE mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.
+ On the Ascend platform, if `bias` is ``False`` , the `x` cannot be greater than 6D in PYNATIVE or KBK mode.

  where :math:`X` is the input tensors, :math:`\text{kernel}` is a weight matrix with the same
  data type as the :math:`X` created by the layer, and :math:`\text{bias}` is a bias vector
- with the same data type as the :math:`X` created by the layer (only if has_bias is True).
+ with the same data type as the :math:`X` created by the layer (only if the parameter `bias` is True).
+
+ .. warning::
+ In PyNative mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.

  Args:
  in_features (int): The number of features in the input space.
  out_features (int): The number of features in the output space.
- bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
- weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype
+ bias (bool, optional): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+ The trainable weight_init parameter. The dtype
  is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
  weight will be initialized using HeUniform.
- bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+ The trainable bias_init parameter. The dtype is
  same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
  bias will be initialized using Uniform.
- dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``None`` .
+ dtype (:class:`mindspore.dtype`, optional): Data type of Parameter. Default: ``None`` .
  If `dtype` is ``None`` , `dtype` is set to ``mstype.float32`` when initializing the method.
  When `weight_init` is Tensor, Parameter has the same data type as `weight_init` ,
  in other cases, Parameter has the same data type as `dtype`, the same goes for `bias_init`.
@@ -798,7 +814,7 @@ class Linear(Cell):
  is not equal to `out_features` or shape[1] of `weight_init` is not equal to `in_features`.
  ValueError: If length of shape of `bias_init` is not equal to 1
  or shape[0] of `bias_init` is not equal to `out_features`.
- RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PYNATIVE mode.
+ RuntimeError: On the Ascend platform, if `bias` is ``False`` and `x` is greater than 6D in PYNATIVE or KBK mode.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -806,10 +822,10 @@ class Linear(Cell):
  Examples:
  >>> import mindspore
  >>> from mindspore import Tensor
- >>> from mindspore import nn
+ >>> from mindspore import mint
  >>> import numpy as np
  >>> x = Tensor(np.array([[180, 234, 154], [244, 48, 247]]), mindspore.float32)
- >>> net = nn.mint.nn.Linear(3, 4)
+ >>> net = mint.nn.Linear(3, 4)
  >>> output = net(x)
  >>> print(output.shape)
  (2, 4)
@@ -1285,7 +1301,7 @@ class UnfoldExt(Cell):
  self.stride = stride

  def construct(self, input):
- return unfold_ext(input, self.kernel_size, self.dilation, self.padding, self.stride)
+ return ops.auto_generate.unfold_ext(input, self.kernel_size, self.dilation, self.padding, self.stride)


  class Fold(Cell):
@@ -1316,8 +1332,8 @@ class Fold(Cell):
  self.stride = stride

  def construct(self, input):
- return fold_ext(input, self.output_size, self.kernel_size,
- self.dilation, self.padding, self.stride)
+ return ops.auto_generate.fold_ext(input, self.output_size, self.kernel_size,
+ self.dilation, self.padding, self.stride)


  @_primexpr
@@ -1555,7 +1571,7 @@ class Roll(Cell):
  else:
  if not isinstance(self.axis, (list, tuple)):
  self.op_list.append(
- (P.Roll(shift=self.shift, axis=0), self.axis))
+ (P.Roll(shifts=self.shift, dims=0), self.axis))
  else:
  if len(self.shift) != len(self.axis):
  raise ValueError(f"For '{self.cls_name}', the shape of 'shift' and the shape of 'axis' must be "
@@ -1563,7 +1579,7 @@ class Roll(Cell):
  f"and the length of 'axis' {len(self.axis)}.")
  for idx, _ in enumerate(self.axis):
  self.op_list.append(
- (P.Roll(shift=self.shift[idx], axis=0), self.axis[idx]))
+ (P.Roll(shifts=self.shift[idx], dims=0), self.axis[idx]))

  def construct(self, input_x):
  dim = len(self.shape_op(input_x))
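The two Roll hunks above only rename the keywords passed to the internal P.Roll primitive (shift/axis become shifts/dims); the wrapper's own shift/axis arguments are unchanged. A usage sketch, assuming nn.Roll stays exported from this module as before:

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.array([0, 1, 2, 3, 4]), mindspore.float32)
net = nn.Roll(shift=2, axis=0)
print(net(x))  # [3. 4. 0. 1. 2.]: elements wrap forward by two positions
```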
@@ -21,9 +21,9 @@ __all__ = ['ChannelShuffle']

  class ChannelShuffle(Cell):
  r"""
- Divide the channels of Tensor whose shape is :math:`(*, C, H, W)` into :math:`g` groups to obtain a Tensor with
- shape :math:`(*, C \frac g, g, H, W)`, and transpose along the corresponding axis of :math:`C`,
- :math:`\frac{g}{}` and :math:`g` to restore Tensor to the original shape.
+ Divide the channels in a tensor of shape :math:`(*, C, H, W)` into :math:`g` group and
+ rearrange them as :math:`(*, \frac{C}{g}, g, H*W)`, while retaining the original tensor
+ shape in the final output.

  Args:
  groups (int): Number of groups to divide channels in, must be greater than 0.
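A sketch of the rearrangement the rewritten docstring describes (shape preserved, channels interleaved across groups):

```python
import mindspore
from mindspore import Tensor, nn
import numpy as np

x = Tensor(np.arange(16).reshape(1, 4, 2, 2), mindspore.int32)
shuffle = nn.ChannelShuffle(2)  # channel order [0, 1, 2, 3] -> [0, 2, 1, 3]
out = shuffle(x)
print(out.shape)  # (1, 4, 2, 2): same shape, channels regrouped
```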
@@ -648,7 +648,7 @@ class CellDict(_CellDictBase, Cell):
648
648
  Remove key from the CellDict and return its cell.
649
649
 
650
650
  Args:
651
- key (string): key to pop from the CellDict.
651
+ key (str): key to pop from the CellDict.
652
652
 
653
653
  Raises:
654
654
  KeyError: If `key` not exist in CellDict when attempt to access cell.
@@ -272,20 +272,20 @@ class Conv2d(_Conv):
 
     .. math::
         \begin{array}{ll} \\
-            H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
-            {\text{stride[0]}}} \right \rceil \\
-            W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
-            {\text{stride[1]}}} \right \rceil \\
+            H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}
+            {\text{stride[0]}}} \right \rfloor + 1 \\
+            W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}
+            {\text{stride[1]}}} \right \rfloor + 1 \\
         \end{array}
 
     pad_mode is ``'pad'``:
 
     .. math::
         \begin{array}{ll} \\
-            H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - (\text{kernel_size[0]} - 1) \times
-            \text{dilation[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
-            W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - (\text{kernel_size[1]} - 1) \times
-            \text{dilation[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
+            H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
+            (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
+            W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
+            (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
         \end{array}
 
     Raises:
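The corrected ``'valid'`` rule is the standard floor form. A quick check, with hypothetical sizes, that the formula matches what the layer actually produces:

    import math
    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    h_in, k, s, d = 10, 3, 2, 1
    h_out = math.floor((h_in - d * (k - 1) - 1) / s) + 1  # 4
    net = nn.Conv2d(3, 8, kernel_size=k, stride=s, dilation=d, pad_mode='valid')
    x = Tensor(np.ones((1, 3, h_in, h_in)), mindspore.float32)
    print(net(x).shape, h_out)  # (1, 8, 4, 4) 4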
@@ -476,19 +476,25 @@ class Conv1d(_Conv):
     pad_mode is ``'same'``:
 
     .. math::
-        L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil
+        \begin{array}{ll} \\
+            L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil \\
+        \end{array}
 
     pad_mode is ``'valid'``:
 
     .. math::
-        L_{out} = \left \lceil{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
-        {\text{stride}}} \right \rceil
+        \begin{array}{ll} \\
+            L_{out} = \left \lfloor{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) - 1}
+            {\text{stride}}} \right \rfloor + 1 \\
+        \end{array}
 
     pad_mode is ``'pad'``:
 
     .. math::
-        L_{out} = \left \lfloor{\frac{L_{in} + 2 \times padding - (\text{kernel_size} - 1) \times
-        \text{dilation} - 1 }{\text{stride}} + 1} \right \rfloor
+        \begin{array}{ll} \\
+            L_{out} = \left \lfloor{\frac{L_{in} + 2 \times {padding} - \text{dilation} \times
+            (\text{kernel_size} - 1) - 1}{\text{stride}}} \right \rfloor + 1 \\
+        \end{array}
 
     Raises:
         TypeError: If `in_channels`, `out_channels`, `kernel_size`, `stride`, `padding` or `dilation` is not an int.
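The same floor rule in one dimension; for ``'pad'`` mode, a quick arithmetic check with hypothetical sizes:

    import math

    l_in, k, s, pad, d = 16, 3, 2, 1, 1
    l_out = math.floor((l_in + 2 * pad - d * (k - 1) - 1) / s) + 1
    print(l_out)  # 8: padded length 18, windows of 3 taken every 2 steps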
@@ -727,24 +733,24 @@ class Conv3d(_Conv):
 
     .. math::
         \begin{array}{ll} \\
-            D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
-            {\text{stride[0]}} + 1} \right \rfloor \\
-            H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
-            {\text{stride[1]}} + 1} \right \rfloor \\
-            W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
-            {\text{stride[2]}} + 1} \right \rfloor \\
+            D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}
+            {\text{stride[0]}}} \right \rfloor + 1 \\
+            H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}
+            {\text{stride[1]}}} \right \rfloor + 1 \\
+            W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) - 1}
+            {\text{stride[2]}}} \right \rfloor + 1 \\
         \end{array}
 
     pad_mode is ``'pad'`` :
 
     .. math::
         \begin{array}{ll} \\
-            D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - (\text{dilation[0]} - 1) \times
-            \text{kernel_size[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
-            H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - (\text{dilation[1]} - 1) \times
-            \text{kernel_size[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
-            W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - (\text{dilation[2]} - 1) \times
-            \text{kernel_size[2]} - 1 }{\text{stride[2]}} + 1} \right \rfloor \\
+            D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
+            (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
+            H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
+            (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
+            W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - \text{dilation[2]} \times
+            (\text{kernel_size[2]} - 1) - 1}{\text{stride[2]}}} \right \rfloor + 1 \\
         \end{array}
 
     Raises:
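The old ``'pad'`` expression swapped the roles of dilation and kernel size, using :math:`(\text{dilation} - 1) \times \text{kernel\_size}` where :math:`\text{dilation} \times (\text{kernel\_size} - 1)` belongs; with dilation > 1 the two disagree:

    import math

    d_in, k, s, pad, dil = 16, 3, 1, 0, 2
    old = math.floor((d_in + 2 * pad - (dil - 1) * k - 1) / s + 1)  # 13 (wrong)
    new = math.floor((d_in + 2 * pad - dil * (k - 1) - 1) / s) + 1  # 12
    print(old, new)  # effective kernel = dil*(k-1) + 1 = 5, so 16 - 5 + 1 = 12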
@@ -856,11 +862,12 @@ class Conv3dTranspose(_Conv):
     where :math:`N` is batch size, :math:`C_{in}` is a number of
     channels, :math:`D_{in}, H_{in}, W_{in}` are the depth, height and width of the feature layer respectively.
 
-    When Conv3d and Conv3dTranspose are initialized with the same parameters, and `pad_mode` is set to 'pad',
+    When Conv3d and Conv3dTranspose are initialized with the same parameters, and `pad_mode` is set to ``'pad'``,
     :math:`dilation * (kernel\_size - 1) - padding` amount of zero will be paded to the depth, height and width
     directions of the input, they are inverses of each other in regard to the input and output shapes in this case.
-    However, when `stride` > 1, Conv2d maps multiple input shapes to the same output shape. Deconvolutional network
-    can refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.
+    However, when `stride` > 1, Conv2d maps multiple input shapes to the same output shape.
+    For the detailed information of Deconvolutional network,
+    refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.
 
     Note:
         For Atlas A2 training series products, `output_padding` is currently not supported.
@@ -872,7 +879,7 @@ class Conv3dTranspose(_Conv):
             The data type is an integer or a tuple of three integers. An integer represents the depth, height
             and width of the convolution kernel. A tuple of three integers represents the depth, height
             and width of the convolution kernel respectively.
-        stride (Union[int, tuple[int]]): The movement stride of the 3D convolution kernel.
+        stride (Union[int, tuple[int]], optional): The movement stride of the 3D convolution kernel.
             The data type is an integer or a tuple of three integers. An integer represents the movement step size
             in depth, height and width directions. A tuple of three integers represents the movement step size
             in the depth, height and width directions respectively. Default: ``1`` .
@@ -892,13 +899,15 @@ class Conv3dTranspose(_Conv):
             in the depth, height and width dimension is determined by the `padding` parameter.
             If this mode is set, `padding` must be greater than or equal to 0.
 
-        padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of the input.
+        padding (Union(int, tuple[int]), optional): The number of padding on the depth, height and
+            width directions of the input.
             The data type is an integer or a tuple of six integers. If `padding` is an integer,
             then the head, tail, top, bottom, left, and right padding are all equal to `padding`.
             If `padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
             is equal to `padding[0]`, `padding[1]`, `padding[2]`, `padding[3]`, `padding[4]` and `padding[5]`
             respectively. The value should be greater than or equal to 0. Default: ``0`` .
-        dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. The data type
+        dilation (Union[int, tuple[int]], optional): Specifies the dilation rate to use for dilated convolution.
+            The data type
             can be a single int or a tuple of 3 integers. A single int means the dilation size is the same in the
             depth, height and width directions. A tuple of 3 ints represents the dilation size in the depth, height
             and width directions, respectively.
@@ -908,33 +917,35 @@ class Conv3dTranspose(_Conv):
             The values in the depth, height and width dimensions are in
             the ranges [1, D], [1, H] and [1, W], respectively.
             Default: ``1`` .
-        group (int): Splits filter into groups, `in_channels` and `out_channels` must be
+        group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
             divisible by `group`. Default: ``1`` .
-        output_padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of
+        output_padding (Union(int, tuple[int]), optional): The number of padding on the depth,
+            height and width directions of
             the output. The data type is an integer or a tuple of three integers. If `output_padding` is an integer,
             then the depth, height, and width dimension padding are all equal to `output_padding`.
             If `output_padding` is a tuple of three integers, then the depth, height, and width padding is equal to
             `output_padding[0]`, `output_padding[1]` and `output_padding[2]` respectively.
             The value should be greater than or equal to 0.
             Default: ``0`` .
-        has_bias (bool): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
-        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
+        has_bias (bool, optional): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
+        weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of
+            weight parameter.
             It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
             values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
             distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
             ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
             lowercase are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
             weight will be initialized using HeUniform.
-        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
+        bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
             Available initialization methods are the same as 'weight_init'. Refer to the values of
             Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
-        data_format (str): The optional value for data format. Currently only support ``'NCDHW'`` .
+        data_format (str, optional): The optional value for data format. Currently only support ``'NCDHW'`` .
             Default: ``'NCDHW'`` .
-        dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+        dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``mstype.float32`` .
 
     Inputs:
         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
-          Currently input data dtype only support float16 and float32.
+          Currently input data dtype only supports float16 and float32.
 
     Outputs:
         Tensor, the shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
@@ -980,10 +991,10 @@ class Conv3dTranspose(_Conv):
         TypeError: If input data type is not float16 or float32.
         ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
         ValueError: If `padding` is less than 0.
-        ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
+        ValueError: If `pad_mode` is not one of ``'same'``, ``'valid'``, ``'pad'``.
         ValueError: If `padding` is a tuple whose length is not equal to 6.
-        ValueError: If `pad_mode` is not equal to 'pad' and `padding` is not equal to (0, 0, 0, 0, 0, 0).
-        ValueError: If `data_format` is not 'NCDHW'.
+        ValueError: If `pad_mode` is not equal to ``'pad'`` and `padding` is not equal to (0, 0, 0, 0, 0, 0).
+        ValueError: If `data_format` is not ``'NCDHW'``.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
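A minimal usage sketch for the updated signature; the expected shape follows the transpose-convolution rule (for ``pad_mode='pad'``, padding 0, stride 1: :math:`D_{out} = D_{in} + \text{dilation} \times (\text{kernel\_size} - 1)`), and the exact numbers here are illustrative:

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    x = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float32)
    net = nn.Conv3dTranspose(in_channels=16, out_channels=3,
                             kernel_size=(4, 6, 2), pad_mode='pad')
    print(net(x).shape)  # (32, 3, 13, 37, 33)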