mindspore 2.4.10__cp311-cp311-win_amd64.whl → 2.6.0rc1__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (602)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +13 -6
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -38
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +6 -7
  15. mindspore/_extends/parse/compile_config.py +83 -0
  16. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  17. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  18. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  19. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  20. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  21. mindspore/_extends/parse/parser.py +46 -197
  22. mindspore/_extends/parse/resources.py +1 -5
  23. mindspore/_extends/parse/standard_method.py +217 -98
  24. mindspore/_extends/pijit/__init__.py +2 -2
  25. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  26. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  27. mindspore/_extends/utils.py +1 -1
  28. mindspore/amp.py +11 -5
  29. mindspore/atlprov.dll +0 -0
  30. mindspore/avcodec-59.dll +0 -0
  31. mindspore/avdevice-59.dll +0 -0
  32. mindspore/avfilter-8.dll +0 -0
  33. mindspore/avformat-59.dll +0 -0
  34. mindspore/avutil-57.dll +0 -0
  35. mindspore/boost/__init__.py +2 -2
  36. mindspore/boost/base.py +3 -7
  37. mindspore/boost/boost_cell_wrapper.py +138 -43
  38. mindspore/c1.dll +0 -0
  39. mindspore/c1xx.dll +0 -0
  40. mindspore/c2.dll +0 -0
  41. mindspore/common/__init__.py +6 -3
  42. mindspore/common/_grad_function.py +56 -0
  43. mindspore/common/_pijit_context.py +14 -5
  44. mindspore/common/_register_for_tensor.py +1 -2
  45. mindspore/common/_stub_tensor.py +30 -14
  46. mindspore/common/_tensor_cpp_method.py +17 -0
  47. mindspore/common/_tensor_docs.py +4760 -0
  48. mindspore/common/api.py +435 -371
  49. mindspore/common/auto_dynamic_shape.py +41 -44
  50. mindspore/common/dtype.py +39 -36
  51. mindspore/common/dump.py +9 -6
  52. mindspore/common/file_system.py +9 -1
  53. mindspore/common/generator.py +2 -0
  54. mindspore/common/hook_handle.py +6 -2
  55. mindspore/common/initializer.py +13 -10
  56. mindspore/common/jit_begin_end.py +94 -0
  57. mindspore/common/jit_config.py +6 -1
  58. mindspore/common/jit_context.py +76 -0
  59. mindspore/common/jit_trace.py +378 -0
  60. mindspore/common/lazy_inline.py +9 -3
  61. mindspore/common/mindir_util.py +10 -2
  62. mindspore/common/mutable.py +5 -4
  63. mindspore/common/parameter.py +135 -52
  64. mindspore/common/seed.py +2 -2
  65. mindspore/common/sparse_tensor.py +23 -17
  66. mindspore/common/tensor.py +951 -1992
  67. mindspore/communication/__init__.py +7 -5
  68. mindspore/communication/_comm_helper.py +52 -2
  69. mindspore/communication/comm_func.py +240 -181
  70. mindspore/communication/management.py +95 -26
  71. mindspore/context.py +314 -566
  72. mindspore/dataset/__init__.py +65 -37
  73. mindspore/dataset/audio/__init__.py +2 -8
  74. mindspore/dataset/audio/transforms.py +3 -17
  75. mindspore/dataset/callback/ds_callback.py +2 -1
  76. mindspore/dataset/core/config.py +87 -6
  77. mindspore/dataset/engine/cache_admin.py +3 -3
  78. mindspore/dataset/engine/cache_client.py +6 -5
  79. mindspore/dataset/engine/datasets.py +292 -267
  80. mindspore/dataset/engine/datasets_audio.py +22 -8
  81. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  82. mindspore/dataset/engine/datasets_text.py +78 -48
  83. mindspore/dataset/engine/datasets_user_defined.py +182 -116
  84. mindspore/dataset/engine/datasets_vision.py +120 -44
  85. mindspore/dataset/engine/iterators.py +283 -63
  86. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  87. mindspore/dataset/engine/obs/util.py +8 -0
  88. mindspore/dataset/engine/queue.py +40 -0
  89. mindspore/dataset/engine/samplers.py +289 -43
  90. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  91. mindspore/dataset/engine/validators.py +53 -11
  92. mindspore/dataset/text/__init__.py +7 -6
  93. mindspore/dataset/text/transforms.py +6 -5
  94. mindspore/dataset/text/utils.py +3 -3
  95. mindspore/dataset/transforms/__init__.py +0 -9
  96. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  97. mindspore/dataset/transforms/transforms.py +31 -14
  98. mindspore/dataset/utils/browse_dataset.py +1 -1
  99. mindspore/dataset/vision/__init__.py +2 -9
  100. mindspore/dataset/vision/transforms.py +202 -158
  101. mindspore/dataset/vision/utils.py +7 -5
  102. mindspore/dataset/vision/validators.py +1 -2
  103. mindspore/device_context/__init__.py +21 -0
  104. mindspore/device_context/ascend/__init__.py +25 -0
  105. mindspore/device_context/ascend/device.py +72 -0
  106. mindspore/device_context/ascend/op_debug.py +153 -0
  107. mindspore/device_context/ascend/op_precision.py +193 -0
  108. mindspore/device_context/ascend/op_tuning.py +123 -0
  109. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  110. mindspore/device_context/cpu/device.py +62 -0
  111. mindspore/device_context/cpu/op_tuning.py +43 -0
  112. mindspore/device_context/gpu/__init__.py +21 -0
  113. mindspore/device_context/gpu/device.py +70 -0
  114. mindspore/device_context/gpu/op_precision.py +67 -0
  115. mindspore/device_context/gpu/op_tuning.py +175 -0
  116. mindspore/device_manager.py +170 -0
  117. mindspore/dnnl.dll +0 -0
  118. mindspore/dpcmi.dll +0 -0
  119. mindspore/experimental/es/embedding_service.py +35 -27
  120. mindspore/experimental/llm_boost/__init__.py +1 -0
  121. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  122. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  123. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  124. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  125. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  126. mindspore/experimental/llm_boost/register.py +1 -0
  127. mindspore/experimental/map_parameter.py +4 -4
  128. mindspore/experimental/optim/adadelta.py +6 -6
  129. mindspore/experimental/optim/adagrad.py +4 -4
  130. mindspore/experimental/optim/adam.py +7 -0
  131. mindspore/experimental/optim/adamax.py +4 -4
  132. mindspore/experimental/optim/adamw.py +4 -0
  133. mindspore/experimental/optim/asgd.py +1 -1
  134. mindspore/experimental/optim/lr_scheduler.py +73 -46
  135. mindspore/experimental/optim/radam.py +34 -31
  136. mindspore/experimental/optim/rprop.py +1 -1
  137. mindspore/experimental/optim/sgd.py +1 -1
  138. mindspore/hal/contiguous_tensors_handle.py +6 -10
  139. mindspore/hal/device.py +55 -53
  140. mindspore/hal/event.py +52 -52
  141. mindspore/hal/memory.py +157 -117
  142. mindspore/hal/stream.py +150 -109
  143. mindspore/include/api/context.h +0 -1
  144. mindspore/include/dataset/constants.h +7 -4
  145. mindspore/include/dataset/execute.h +2 -2
  146. mindspore/jpeg62.dll +0 -0
  147. mindspore/log.py +50 -0
  148. mindspore/mindrecord/__init__.py +21 -8
  149. mindspore/mindrecord/config.py +17 -316
  150. mindspore/mindrecord/filereader.py +1 -9
  151. mindspore/mindrecord/filewriter.py +5 -15
  152. mindspore/mindrecord/mindpage.py +1 -9
  153. mindspore/mindspore_backend_common.dll +0 -0
  154. mindspore/mindspore_backend_manager.dll +0 -0
  155. mindspore/mindspore_common.dll +0 -0
  156. mindspore/mindspore_core.dll +0 -0
  157. mindspore/mindspore_dump.dll +0 -0
  158. mindspore/mindspore_frontend.dll +0 -0
  159. mindspore/mindspore_glog.dll +0 -0
  160. mindspore/mindspore_memory_pool.dll +0 -0
  161. mindspore/mindspore_ms_backend.dll +0 -0
  162. mindspore/mindspore_ops.dll +0 -0
  163. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  164. mindspore/mindspore_ops_kernel_common.dll +0 -0
  165. mindspore/mindspore_profiler.dll +0 -0
  166. mindspore/mindspore_pyboost.dll +0 -0
  167. mindspore/mindspore_pynative.dll +0 -0
  168. mindspore/mindspore_res_manager.dll +0 -0
  169. mindspore/mindspore_runtime_pipeline.dll +0 -0
  170. mindspore/mint/__init__.py +796 -759
  171. mindspore/mint/distributed/__init__.py +70 -4
  172. mindspore/mint/distributed/distributed.py +2679 -44
  173. mindspore/mint/linalg/__init__.py +8 -0
  174. mindspore/mint/nn/__init__.py +743 -22
  175. mindspore/mint/nn/functional.py +716 -23
  176. mindspore/mint/nn/layer/__init__.py +21 -4
  177. mindspore/mint/nn/layer/_functions.py +334 -0
  178. mindspore/mint/nn/layer/activation.py +276 -1
  179. mindspore/mint/nn/layer/basic.py +123 -0
  180. mindspore/mint/nn/layer/conv.py +921 -0
  181. mindspore/mint/nn/layer/normalization.py +223 -28
  182. mindspore/mint/nn/layer/padding.py +797 -0
  183. mindspore/mint/nn/layer/pooling.py +235 -0
  184. mindspore/mint/optim/__init__.py +3 -1
  185. mindspore/mint/optim/adam.py +223 -0
  186. mindspore/mint/optim/adamw.py +26 -19
  187. mindspore/mint/optim/sgd.py +171 -0
  188. mindspore/mint/special/__init__.py +2 -1
  189. mindspore/msobj140.dll +0 -0
  190. mindspore/mspdb140.dll +0 -0
  191. mindspore/mspdbcore.dll +0 -0
  192. mindspore/mspdbst.dll +0 -0
  193. mindspore/mspft140.dll +0 -0
  194. mindspore/msvcdis140.dll +0 -0
  195. mindspore/msvcp140_1.dll +0 -0
  196. mindspore/msvcp140_2.dll +0 -0
  197. mindspore/msvcp140_atomic_wait.dll +0 -0
  198. mindspore/msvcp140_codecvt_ids.dll +0 -0
  199. mindspore/multiprocessing/__init__.py +5 -0
  200. mindspore/nn/__init__.py +4 -1
  201. mindspore/nn/cell.py +1370 -189
  202. mindspore/nn/dynamic_lr.py +2 -1
  203. mindspore/nn/layer/activation.py +29 -27
  204. mindspore/nn/layer/basic.py +51 -35
  205. mindspore/nn/layer/channel_shuffle.py +3 -3
  206. mindspore/nn/layer/container.py +1 -1
  207. mindspore/nn/layer/conv.py +22 -17
  208. mindspore/nn/layer/embedding.py +12 -11
  209. mindspore/nn/layer/normalization.py +56 -49
  210. mindspore/nn/layer/padding.py +4 -3
  211. mindspore/nn/layer/pooling.py +120 -42
  212. mindspore/nn/layer/rnn_cells.py +1 -1
  213. mindspore/nn/layer/rnns.py +2 -1
  214. mindspore/nn/layer/timedistributed.py +5 -5
  215. mindspore/nn/layer/transformer.py +59 -36
  216. mindspore/nn/learning_rate_schedule.py +8 -4
  217. mindspore/nn/loss/loss.py +58 -55
  218. mindspore/nn/optim/ada_grad.py +7 -5
  219. mindspore/nn/optim/adadelta.py +11 -9
  220. mindspore/nn/optim/adafactor.py +1 -1
  221. mindspore/nn/optim/adam.py +17 -13
  222. mindspore/nn/optim/adamax.py +8 -7
  223. mindspore/nn/optim/adasum.py +5 -5
  224. mindspore/nn/optim/asgd.py +1 -1
  225. mindspore/nn/optim/ftrl.py +11 -9
  226. mindspore/nn/optim/lamb.py +1 -1
  227. mindspore/nn/optim/lars.py +1 -4
  228. mindspore/nn/optim/lazyadam.py +12 -10
  229. mindspore/nn/optim/momentum.py +7 -6
  230. mindspore/nn/optim/optimizer.py +3 -3
  231. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  232. mindspore/nn/optim/rmsprop.py +13 -12
  233. mindspore/nn/optim/rprop.py +11 -9
  234. mindspore/nn/optim/sgd.py +9 -6
  235. mindspore/nn/optim/tft_wrapper.py +5 -2
  236. mindspore/nn/optim/thor.py +2 -1
  237. mindspore/nn/probability/bijector/bijector.py +17 -11
  238. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  239. mindspore/nn/probability/bijector/invert.py +2 -2
  240. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  241. mindspore/nn/probability/bijector/softplus.py +3 -2
  242. mindspore/nn/probability/distribution/beta.py +3 -3
  243. mindspore/nn/probability/distribution/categorical.py +1 -1
  244. mindspore/nn/probability/distribution/cauchy.py +4 -2
  245. mindspore/nn/probability/distribution/exponential.py +6 -7
  246. mindspore/nn/probability/distribution/gamma.py +2 -2
  247. mindspore/nn/probability/distribution/gumbel.py +2 -2
  248. mindspore/nn/probability/distribution/half_normal.py +5 -3
  249. mindspore/nn/probability/distribution/logistic.py +5 -3
  250. mindspore/nn/probability/distribution/poisson.py +1 -1
  251. mindspore/nn/probability/distribution/uniform.py +5 -3
  252. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  253. mindspore/nn/reinforcement/tensor_array.py +1 -1
  254. mindspore/nn/utils/init.py +13 -11
  255. mindspore/nn/wrap/__init__.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +181 -122
  257. mindspore/nn/wrap/grad_reducer.py +45 -36
  258. mindspore/nn/wrap/loss_scale.py +6 -7
  259. mindspore/numpy/array_creations.py +63 -65
  260. mindspore/numpy/array_ops.py +149 -144
  261. mindspore/numpy/logic_ops.py +41 -42
  262. mindspore/numpy/math_ops.py +365 -363
  263. mindspore/numpy/utils.py +17 -18
  264. mindspore/numpy/utils_const.py +5 -6
  265. mindspore/opencv_core452.dll +0 -0
  266. mindspore/opencv_imgcodecs452.dll +0 -0
  267. mindspore/opencv_imgproc452.dll +0 -0
  268. mindspore/ops/__init__.py +5 -3
  269. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  270. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  271. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  272. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  273. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  274. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  275. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  276. mindspore/ops/_register_for_op.py +0 -11
  277. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  278. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  279. mindspore/ops/_vmap/vmap_array_ops.py +27 -25
  280. mindspore/ops/_vmap/vmap_base.py +0 -2
  281. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  282. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  283. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  284. mindspore/ops/auto_generate/__init__.py +4 -3
  285. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +236 -46
  286. mindspore/ops/auto_generate/gen_extend_func.py +764 -124
  287. mindspore/ops/auto_generate/gen_ops_def.py +4018 -2264
  288. mindspore/ops/auto_generate/gen_ops_prim.py +15463 -5037
  289. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  290. mindspore/ops/composite/__init__.py +2 -1
  291. mindspore/ops/composite/base.py +20 -25
  292. mindspore/ops/composite/math_ops.py +6 -16
  293. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  294. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  295. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  296. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  299. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  301. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  302. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  303. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  304. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  305. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  306. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  307. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  308. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  309. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  310. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  311. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  312. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  313. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  314. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  315. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  316. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  317. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  318. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  319. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  320. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  321. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  324. mindspore/ops/function/__init__.py +40 -2
  325. mindspore/ops/function/_add_attr_func.py +58 -0
  326. mindspore/ops/function/array_func.py +2089 -2403
  327. mindspore/ops/function/clip_func.py +80 -23
  328. mindspore/ops/function/debug_func.py +57 -57
  329. mindspore/ops/function/grad/__init__.py +1 -0
  330. mindspore/ops/function/grad/grad_func.py +104 -71
  331. mindspore/ops/function/image_func.py +2 -2
  332. mindspore/ops/function/linalg_func.py +47 -78
  333. mindspore/ops/function/math_func.py +4501 -3802
  334. mindspore/ops/function/nn_func.py +1726 -620
  335. mindspore/ops/function/other_func.py +159 -1
  336. mindspore/ops/function/parameter_func.py +18 -84
  337. mindspore/ops/function/random_func.py +440 -387
  338. mindspore/ops/function/reshard_func.py +4 -70
  339. mindspore/ops/function/sparse_func.py +3 -3
  340. mindspore/ops/function/sparse_unary_func.py +6 -6
  341. mindspore/ops/function/spectral_func.py +25 -58
  342. mindspore/ops/function/vmap_func.py +24 -17
  343. mindspore/ops/functional.py +22 -7
  344. mindspore/ops/functional_overload.py +1440 -0
  345. mindspore/ops/op_info_register.py +32 -244
  346. mindspore/ops/operations/__init__.py +13 -7
  347. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  348. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  349. mindspore/ops/operations/_grad_ops.py +2 -43
  350. mindspore/ops/operations/_infer_ops.py +2 -1
  351. mindspore/ops/operations/_inner_ops.py +43 -84
  352. mindspore/ops/operations/_ms_kernel.py +4 -10
  353. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  354. mindspore/ops/operations/_scalar_ops.py +3 -2
  355. mindspore/ops/operations/_sequence_ops.py +1 -1
  356. mindspore/ops/operations/_tensor_array.py +1 -1
  357. mindspore/ops/operations/array_ops.py +81 -324
  358. mindspore/ops/operations/comm_ops.py +154 -108
  359. mindspore/ops/operations/custom_ops.py +232 -78
  360. mindspore/ops/operations/debug_ops.py +153 -59
  361. mindspore/ops/operations/inner_ops.py +7 -5
  362. mindspore/ops/operations/linalg_ops.py +1 -57
  363. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  364. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  365. mindspore/ops/operations/math_ops.py +32 -234
  366. mindspore/ops/operations/nn_ops.py +210 -498
  367. mindspore/ops/operations/other_ops.py +62 -9
  368. mindspore/ops/operations/random_ops.py +13 -7
  369. mindspore/ops/operations/reshard_ops.py +1 -1
  370. mindspore/ops/operations/sparse_ops.py +2 -2
  371. mindspore/ops/primitive.py +66 -53
  372. mindspore/ops/tensor_method.py +1888 -0
  373. mindspore/ops_generate/__init__.py +0 -5
  374. mindspore/ops_generate/aclnn/__init__.py +0 -0
  375. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  376. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  377. mindspore/ops_generate/api/__init__.py +0 -0
  378. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  379. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  380. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  381. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  382. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  383. mindspore/ops_generate/api/gen_api.py +103 -0
  384. mindspore/ops_generate/api/op_api_proto.py +235 -0
  385. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  386. mindspore/ops_generate/common/__init__.py +0 -0
  387. mindspore/ops_generate/common/base_generator.py +11 -0
  388. mindspore/ops_generate/common/gen_constants.py +91 -0
  389. mindspore/ops_generate/common/gen_utils.py +348 -0
  390. mindspore/ops_generate/common/op_proto.py +473 -0
  391. mindspore/ops_generate/common/template.py +523 -0
  392. mindspore/ops_generate/gen_ops.py +22 -1069
  393. mindspore/ops_generate/op_def/__init__.py +0 -0
  394. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  395. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  396. mindspore/ops_generate/op_def/ops_def_cc_generator.py +299 -0
  397. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  398. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  399. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  400. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  401. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  402. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  403. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  404. mindspore/ops_generate/pyboost/__init__.py +0 -0
  405. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  406. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  407. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  408. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  409. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  410. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  411. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  412. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  413. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  414. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  415. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  416. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  417. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  418. mindspore/ops_generate/resources/__init__.py +0 -0
  419. mindspore/ops_generate/resources/resource_list.py +30 -0
  420. mindspore/ops_generate/resources/resource_loader.py +36 -0
  421. mindspore/ops_generate/resources/resource_manager.py +64 -0
  422. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  423. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  424. mindspore/parallel/__init__.py +7 -3
  425. mindspore/parallel/_auto_parallel_context.py +152 -34
  426. mindspore/parallel/_cell_wrapper.py +130 -15
  427. mindspore/parallel/_parallel_serialization.py +107 -5
  428. mindspore/parallel/_ps_context.py +1 -1
  429. mindspore/parallel/_recovery_context.py +7 -2
  430. mindspore/parallel/_tensor.py +142 -18
  431. mindspore/parallel/_utils.py +199 -23
  432. mindspore/parallel/algo_parameter_config.py +4 -4
  433. mindspore/parallel/auto_parallel.py +732 -0
  434. mindspore/parallel/checkpoint_convert.py +159 -0
  435. mindspore/parallel/checkpoint_transform.py +698 -35
  436. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  437. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  438. mindspore/parallel/cluster/run.py +21 -4
  439. mindspore/parallel/function/__init__.py +24 -0
  440. mindspore/parallel/function/reshard_func.py +259 -0
  441. mindspore/parallel/nn/__init__.py +25 -0
  442. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  443. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  444. mindspore/parallel/parameter_broadcast.py +25 -14
  445. mindspore/parallel/shard.py +137 -58
  446. mindspore/parallel/transform_safetensors.py +363 -305
  447. mindspore/pgodb140.dll +0 -0
  448. mindspore/pgort140.dll +0 -0
  449. mindspore/profiler/__init__.py +22 -5
  450. mindspore/profiler/analysis/__init__.py +0 -0
  451. mindspore/profiler/analysis/parser/__init__.py +0 -0
  452. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  453. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  454. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  455. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  456. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  457. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  458. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  459. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  460. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +106 -0
  461. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  462. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  463. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  464. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  465. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  466. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  467. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  468. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  469. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  470. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  471. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  472. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  473. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  474. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  475. mindspore/profiler/analysis/task_manager.py +131 -0
  476. mindspore/profiler/analysis/time_converter.py +84 -0
  477. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  478. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  479. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  480. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  481. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  482. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  483. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  484. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  485. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  486. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  487. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  488. mindspore/profiler/analysis/work_flow.py +73 -0
  489. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  490. mindspore/profiler/common/command_executor.py +90 -0
  491. mindspore/profiler/common/constant.py +186 -3
  492. mindspore/profiler/common/file_manager.py +208 -0
  493. mindspore/profiler/common/log.py +130 -0
  494. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  495. mindspore/profiler/common/path_manager.py +395 -0
  496. mindspore/profiler/common/process_bar.py +168 -0
  497. mindspore/profiler/common/process_pool.py +9 -3
  498. mindspore/profiler/common/profiler_context.py +500 -0
  499. mindspore/profiler/common/profiler_info.py +304 -0
  500. mindspore/profiler/common/profiler_meta_data.py +74 -0
  501. mindspore/profiler/common/profiler_output_path.py +284 -0
  502. mindspore/profiler/common/profiler_parameters.py +251 -0
  503. mindspore/profiler/common/profiler_path_manager.py +179 -0
  504. mindspore/profiler/common/record_function.py +76 -0
  505. mindspore/profiler/common/tlv_decoder.py +76 -0
  506. mindspore/profiler/common/util.py +75 -2
  507. mindspore/profiler/dynamic_profiler.py +341 -75
  508. mindspore/profiler/envprofiler.py +163 -0
  509. mindspore/profiler/experimental_config.py +197 -0
  510. mindspore/profiler/mstx.py +242 -0
  511. mindspore/profiler/platform/__init__.py +21 -0
  512. mindspore/profiler/platform/base_profiler.py +40 -0
  513. mindspore/profiler/platform/cpu_profiler.py +124 -0
  514. mindspore/profiler/platform/gpu_profiler.py +74 -0
  515. mindspore/profiler/platform/npu_profiler.py +335 -0
  516. mindspore/profiler/profiler.py +1073 -90
  517. mindspore/profiler/profiler_action_controller.py +187 -0
  518. mindspore/profiler/profiler_interface.py +118 -0
  519. mindspore/profiler/schedule.py +243 -0
  520. mindspore/rewrite/api/node.py +15 -13
  521. mindspore/rewrite/api/symbol_tree.py +2 -3
  522. mindspore/run_check/_check_version.py +27 -20
  523. mindspore/run_check/run_check.py +1 -1
  524. mindspore/runtime/__init__.py +37 -0
  525. mindspore/runtime/device.py +27 -0
  526. mindspore/runtime/event.py +209 -0
  527. mindspore/runtime/executor.py +177 -0
  528. mindspore/runtime/memory.py +409 -0
  529. mindspore/runtime/stream.py +460 -0
  530. mindspore/runtime/thread_bind_core.py +401 -0
  531. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  532. mindspore/swresample-4.dll +0 -0
  533. mindspore/swscale-6.dll +0 -0
  534. mindspore/tbbmalloc.dll +0 -0
  535. mindspore/tinyxml2.dll +0 -0
  536. mindspore/train/__init__.py +8 -8
  537. mindspore/train/_utils.py +88 -25
  538. mindspore/train/amp.py +9 -5
  539. mindspore/train/callback/__init__.py +2 -2
  540. mindspore/train/callback/_callback.py +2 -16
  541. mindspore/train/callback/_checkpoint.py +53 -55
  542. mindspore/train/callback/_cluster_monitor.py +14 -18
  543. mindspore/train/callback/_early_stop.py +1 -1
  544. mindspore/train/callback/_flops_collector.py +103 -68
  545. mindspore/train/callback/_history.py +8 -5
  546. mindspore/train/callback/_lambda_callback.py +2 -2
  547. mindspore/train/callback/_landscape.py +0 -3
  548. mindspore/train/callback/_loss_monitor.py +2 -1
  549. mindspore/train/callback/_on_request_exit.py +6 -5
  550. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  551. mindspore/train/callback/_summary_collector.py +52 -19
  552. mindspore/train/callback/_time_monitor.py +2 -1
  553. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -107
  554. mindspore/train/data_sink.py +25 -2
  555. mindspore/train/dataset_helper.py +15 -16
  556. mindspore/train/loss_scale_manager.py +8 -7
  557. mindspore/train/metrics/accuracy.py +3 -3
  558. mindspore/train/metrics/confusion_matrix.py +9 -9
  559. mindspore/train/metrics/error.py +3 -3
  560. mindspore/train/metrics/hausdorff_distance.py +4 -4
  561. mindspore/train/metrics/mean_surface_distance.py +3 -3
  562. mindspore/train/metrics/metric.py +0 -12
  563. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  564. mindspore/train/metrics/precision.py +11 -10
  565. mindspore/train/metrics/recall.py +9 -9
  566. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  567. mindspore/train/mind_ir_pb2.py +174 -46
  568. mindspore/train/model.py +184 -113
  569. mindspore/train/serialization.py +622 -978
  570. mindspore/train/summary/_summary_adapter.py +2 -2
  571. mindspore/train/summary/summary_record.py +2 -3
  572. mindspore/train/train_thor/model_thor.py +1 -1
  573. mindspore/turbojpeg.dll +0 -0
  574. mindspore/utils/__init__.py +6 -3
  575. mindspore/utils/dryrun.py +140 -0
  576. mindspore/utils/hooks.py +81 -0
  577. mindspore/utils/runtime_execution_order_check.py +550 -0
  578. mindspore/utils/utils.py +138 -4
  579. mindspore/vcmeta.dll +0 -0
  580. mindspore/vcruntime140.dll +0 -0
  581. mindspore/vcruntime140_1.dll +0 -0
  582. mindspore/version.py +1 -1
  583. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +3 -3
  584. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +587 -418
  585. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +1 -1
  586. mindspore/_install_custom.py +0 -43
  587. mindspore/common/_register_for_adapter.py +0 -74
  588. mindspore/common/_tensor_overload.py +0 -139
  589. mindspore/mindspore_np_dtype.dll +0 -0
  590. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  591. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  592. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  593. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  594. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  595. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  596. mindspore/ops_generate/gen_utils.py +0 -209
  597. mindspore/ops_generate/op_proto.py +0 -145
  598. mindspore/ops_generate/template.py +0 -261
  599. mindspore/profiler/envprofiling.py +0 -254
  600. mindspore/profiler/profiling.py +0 -1926
  601. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  602. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
@@ -224,7 +224,8 @@ def inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, deca
         total_step (int): The total number of steps.
         step_per_epoch (int): The number of steps in per epoch.
         decay_epoch (int): Number of epochs to decay over.
-        is_stair (bool): If true, learning rate is decayed once every `decay_epoch` times. Default: ``False`` .
+        is_stair (bool): If true, learning rate is decayed once every `decay_epoch` times. If False, the learning rate
+            decays for every epoch. Default: ``False`` .

     Returns:
         list[float]. The size of list is `total_step`.
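As context for the `is_stair` clarification, a minimal pure-Python sketch of an inverse-time decay schedule (the helper name and formula are our illustration, not code from the package):

    import math

    def inverse_decay_lr_sketch(learning_rate, decay_rate, total_step,
                                step_per_epoch, decay_epoch, is_stair=False):
        # Assumed schedule: lr_i = lr / (1 + decay_rate * p), with
        # p = current_epoch / decay_epoch, floored when is_stair is True.
        lrs = []
        for i in range(total_step):
            p = (i // step_per_epoch) / decay_epoch
            if is_stair:
                p = math.floor(p)  # decay once every `decay_epoch` epochs
            lrs.append(learning_rate / (1 + decay_rate * p))
        return lrs

    # is_stair=True plateaus inside each decay_epoch window; is_stair=False
    # lowers the rate at every epoch boundary, as the new wording says.
    print(inverse_decay_lr_sketch(0.1, 0.5, 6, 1, 2, is_stair=True))
    print(inverse_decay_lr_sketch(0.1, 0.5, 6, 1, 2, is_stair=False))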
@@ -82,7 +82,7 @@ class CELU(Cell):
         :align: center

     Args:
-        alpha (float): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .
+        alpha (float, optional): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .

     Inputs:
         - **x** (Tensor) - The input of CELU. The required dtype is float16 or float32.
@@ -136,20 +136,22 @@ class Softmin(Cell):
     where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

     Args:
-        axis (Union[int, tuple[int]]): The axis to apply Softmin operation, if the dimension of input `x` is x.ndim,
-            the range of axis is `[-x.ndim, x.ndim)`. -1 means the last dimension. Default: ``-1`` .
+        axis (Union[int, tuple[int]], optional): The axis to apply Softmin operation,
+            if the dimension of input `x` is x.ndim,
+            the range of axis is :math:`[-x.ndim, x.ndim)`. -1 means the last dimension.
+            Default: ``-1`` . In CPU environment, `axis` only supports int type.

     Inputs:
         - **x** (Tensor) - Tensor for computing Softmin functions with data type of float16 or float32.

     Outputs:
-        Tensor, which has the same type and shape as `x` with values in the range [0,1].
+        Tensor, which has the same type and shape as `x` with values in the range :math:`[0, 1]`.

     Raises:
         TypeError: If `axis` is neither an int nor a tuple.
         TypeError: If dtype of `x` is neither float16 nor float32.
         ValueError: If `axis` is a tuple whose length is less than 1.
-        ValueError: If `axis` is a tuple whose elements are not all in the range [-x.ndim, x.ndim).
+        ValueError: If `axis` is a tuple whose elements are not all in the range :math:`[-x.ndim, x.ndim)`.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
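A quick numpy cross-check of the Softmin definition above (the helper is ours): Softmin is Softmax applied to the negated input.

    import numpy as np

    def softmin_sketch(x, axis=-1):
        # Softmin(x_i) = exp(-x_i) / sum_j exp(-x_j); shifting by the minimum
        # keeps every exponent <= 0 for numerical stability.
        e = np.exp(x.min(axis=axis, keepdims=True) - x)
        return e / e.sum(axis=axis, keepdims=True)

    x = np.array([[1.0, 2.0, 3.0]])
    print(softmin_sketch(x))  # smaller inputs get larger weights; rows sum to 1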
@@ -179,7 +181,7 @@ class Softmax2d(Cell):
     r"""
     Softmax function applied to 2D features data.

-    Applies `Softmax` to each location :math:`(c, h, w)` with an input Tensor of shape :math:`(C, H, W)` .
+    Applies `Softmax` to each location with an input Tensor of shape :math:`(C, H, W)` .

     Inputs:
         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
@@ -957,7 +959,7 @@ class GELU(Cell):
         :align: center

     Args:
-        approximate (bool): Whether to enable approximation. Default: ``True`` .
+        approximate (bool, optional): Whether to enable approximation. Default: ``True`` .

     If `approximate` is ``True``, The gaussian error linear activation is:

@@ -965,7 +967,14 @@

     else, it is:

-    :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt(2)))`, where P(X) ~ N(0, 1).
+    :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt(2)))`, where :math:`P(X) ~ N(0, 1)`.
+
+    Note:
+        - when calculating the input gradient of GELU with an input value of infinity, there are differences
+          in the output of the backward between ``Ascend`` and ``GPU``.
+        - when x is -inf, the computation result of ``Ascend`` is 0, and the computation result of ``GPU`` is Nan.
+        - when x is inf, the computation result of ``Ascend`` is dy, and the computation result of ``GPU`` is Nan.
+        - In mathematical terms, the result of Ascend has higher precision.

     Inputs:
         - **x** (Tensor) - The input of GELU with data type of float16, float32, or float64.
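For reference, the exact form above and the `approximate=True` tanh variant can be compared numerically with a standalone sketch (helper names are ours; the 0.044715 constant is the usual tanh approximation, an assumption rather than something stated in this diff):

    import math

    def gelu_exact(x):
        # x * P(X <= x) with X ~ N(0, 1): 0.5 * x * (1 + erf(x / sqrt(2)))
        return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

    def gelu_tanh(x):
        # Common tanh approximation of GELU.
        return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi)
                                          * (x + 0.044715 * x ** 3)))

    for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
        print(f"{v:+.1f}  exact={gelu_exact(v):+.6f}  tanh={gelu_tanh(v):+.6f}")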
@@ -974,13 +983,6 @@
     Outputs:
         Tensor, with the same type and shape as the `x`.

-    Note:
-        when calculating the input gradient of GELU with an input value of infinity, there are differences
-        in the output of the backward between ``Ascend`` and ``GPU``.
-        when x is -inf, the computation result of ``Ascend`` is 0, and the computation result of ``GPU`` is Nan.
-        when x is inf, the computation result of ``Ascend`` is dy, and the computation result of ``GPU`` is Nan.
-        In mathematical terms, the result of Ascend has higher precision.
-
     Raises:
         TypeError: If dtype of `x` is not one of float16, float32, or float64.

@@ -1165,7 +1167,7 @@ class PReLU(Cell):

     where :math:`x_i` is an element of an channel of the input.

-    Here :math:`w` is a learnable parameter with a default initial value 0.25.
+    Here :math:`w` is a learnable parameter with a default initial value ``0.25``.
     Parameter :math:`w` has dimensionality of the argument channel. If called without argument
     channel, a single parameter :math:`w` will be shared across all channels.

@@ -1175,9 +1177,9 @@ class PReLU(Cell):
         :align: center

     Args:
-        channel (int): The elements number of parameter :math:`w`.
-            It could be an int, and the value is 1 or the channels number of input tensor `x`. Default: ``1`` .
-        w (Union[float, list, Tensor]): The initial value of parameter. It could be a float, a float list or
+        channel (int, optional): The elements number of parameter :math:`w`.
+            It could be an int, and the value is ``1`` or the channels number of input tensor `x`. Default: ``1`` .
+        w (Union[float, list, Tensor], optional): The initial value of parameter. It could be a float, a float list or
             a tensor has the same dtype as the input tensor `x`. Default: ``0.25`` .

     Inputs:
@@ -1189,7 +1191,7 @@ class PReLU(Cell):

     Raises:
         TypeError: If `channel` is not an int.
-        TypeError: If `w` is not one of a float, a float list, a float Tensor.
+        TypeError: If `w` is not one of a float, a list[float], a Tensor[float].
         TypeError: If dtype of `x` is neither float16 nor float32.
         ValueError: If the `x` is a 0-D or 1-D Tensor on Ascend.
         ValueError: If `channel` is less than 1.
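The element-wise rule the PReLU docstring describes reduces to one line; a scalar sketch with the default `w` (illustrative only):

    def prelu_sketch(x, w=0.25):
        # PReLU(x_i) = x_i for x_i >= 0, otherwise w * x_i; w is learnable,
        # shared across channels or one value per channel.
        return x if x >= 0 else w * x

    print([prelu_sketch(v) for v in (-2.0, -0.4, 0.0, 1.5)])  # [-0.5, -0.1, 0.0, 1.5]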
@@ -1273,9 +1275,9 @@ class PReLUExt(Cell):
         no channel dim and the number of channels = 1.

     Args:
-        num_parameters (int): number of `w` to learn. Although it takes an int as input,
+        num_parameters (int, optional): number of `w` to learn. Although it takes an int as input,
             there is only two legitimate values: 1, or the number of channels at Tensor `input`. Default: ``1`` .
-        init (float): the initial value of `w`. Default: ``0.25`` .
+        init (float, optional): the initial value of `w`. Default: ``0.25`` .
         dtype (mindspore.dtype, optional): the type of `w`. Default: ``None`` . Supported data type
             is {float16, float32, bfloat16}.

@@ -1320,7 +1322,7 @@ class HSwish(Cell):
     Hard swish is defined as:

     .. math::
-        \text{Hardswish}(input) =
+        \text{HSwish}(input) =
         \begin{cases}
         0, & \text{ if } input \leq -3, \\
         input, & \text{ if } input \geq +3, \\
@@ -1372,7 +1374,7 @@ class HSigmoid(Cell):
     Hard Sigmoid is defined as:

     .. math::
-        \text{Hardsigmoid}(input) =
+        \text{HSigmoid}(input) =
         \begin{cases}
         0, & \text{ if } input \leq -3, \\
         1, & \text{ if } input \geq +3, \\
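Both renamed formulas are cheap piecewise-linear gates; a scalar sketch of the pair (helper names are ours):

    def hsigmoid(x):
        # 0 for x <= -3, 1 for x >= +3, linear (x + 3) / 6 in between.
        return min(max((x + 3.0) / 6.0, 0.0), 1.0)

    def hswish(x):
        # HSwish(x) = x * HSigmoid(x): 0 below -3, identity above +3.
        return x * hsigmoid(x)

    for v in (-4.0, -1.5, 0.0, 1.5, 4.0):
        print(v, hsigmoid(v), hswish(v))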
@@ -1578,7 +1580,7 @@ class HShrink(Cell):
     The formula is defined as follows:

     .. math::
-        \text{HardShrink}(x) =
+        \text{HShrink}(x) =
         \begin{cases}
         x, & \text{ if } x > \lambda \\
         x, & \text{ if } x < -\lambda \\
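HShrink keeps entries whose magnitude exceeds the threshold and zeroes the rest; a scalar sketch of the piecewise definition (a default lambda of 0.5 is assumed here, the usual value):

    def hshrink_sketch(x, lambd=0.5):
        # x for x > lambd or x < -lambd, 0 otherwise.
        return x if abs(x) > lambd else 0.0

    print([hshrink_sketch(v) for v in (-1.0, -0.3, 0.2, 0.8)])  # [-1.0, 0.0, 0.0, 0.8]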
@@ -1728,7 +1730,7 @@ class GLU(Cell):
     Here :math:`\sigma` is the sigmoid function, and :math:`\otimes` is the Hadamard product.

     Args:
-        axis (int): the axis to split the input. Default: ``-1`` , the last axis in `x`.
+        axis (int, optional): the axis to split the input. Default: ``-1`` , the last axis in `x`.

     Inputs:
         - **x** (Tensor) - :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional dimensions.
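The split-and-gate the GLU docstring describes, as a numpy sketch (the helper name is ours; `axis` matches the argument being documented):

    import numpy as np

    def glu_sketch(x, axis=-1):
        # Split x into halves a, b along `axis`; output is a * sigmoid(b).
        a, b = np.split(x, 2, axis=axis)
        return a * (1.0 / (1.0 + np.exp(-b)))

    x = np.array([[1.0, 2.0, 3.0, 4.0]])
    print(glu_sketch(x).shape)  # (1, 2): the split axis is halved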
@@ -1811,7 +1813,7 @@ def get_activation(name, prim_name=None):
         >>> import mindspore.nn as nn
         >>> sigmoid = nn.get_activation('sigmoid')
         >>> print(sigmoid)
-        Sigmoid<>
+        Sigmoid()
     """
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
     if name is None:
@@ -25,10 +25,9 @@ from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_util
 from mindspore.common.seed import _get_graph_seed
 from mindspore.common.tensor import Tensor
 from mindspore.common.initializer import initializer, HeUniform, Uniform
+from mindspore import ops
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
-from mindspore.ops.function.nn_func import interpolate_ext
-from mindspore.ops.auto_generate import unfold_ext
 from mindspore.ops.operations import _inner_ops as inner
 from mindspore.ops.primitive import constexpr, Primitive, _primexpr
 from mindspore.common.parameter import Parameter
@@ -37,7 +36,6 @@ from mindspore import _checkparam as Validator
 from mindspore.nn.cell import Cell
 from mindspore.nn.layer.activation import get_activation
 from mindspore.common._decorator import deprecated
-from mindspore.ops.auto_generate import dropout_ext_op, fold_ext
 from mindspore.common.generator import default_generator

 __all__ = ['Dropout', 'Flatten', 'Dense', 'Linear', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold', 'Tril', 'Triu',
@@ -140,6 +138,7 @@ class Dropout(Cell):

     Inputs:
         - **x** (Tensor) - The input of Dropout with data type of float16 or float32.
+          The shape of `x` cannot be less than 1.

     Outputs:
         Tensor, output tensor with the same shape as the `x`.
@@ -225,8 +224,10 @@ class DropoutExt(Cell):
     - Parameter `p` means the probability of the element of the input tensor to be zeroed.

     Args:
-        p (float): The dropout rate of input neurons, E.g. `p` =0.9, dropping out 90% of input neurons.
+        p (float, optional): The dropout rate of input neurons, E.g. `p` =0.9, dropping out 90% of input neurons.
             Default: ``0.5`` .
+        inplace (bool, optional): Whether to enable the operation in-place.
+            If set to ``True`` , will do this operation in-place. Default: ``False`` .

     Inputs:
         - **x** (Tensor) - The input of Dropout.
@@ -253,18 +254,23 @@
         (2, 2, 3)
     """

-    def __init__(self, p=0.5):
+    def __init__(self, p=0.5, inplace=False):
         """Initialize DropoutExt."""
         super(DropoutExt, self).__init__()
         self.p = p
-        self.generator_step = Tensor(1, mstype.int64)
+        self.inplace = inplace
+        self.generator_step = Tensor(12, mstype.int64)

     def construct(self, x):
         if not self.training or self.p == 0:
             return x

         seed, offset = default_generator._step(self.generator_step)  # pylint: disable=protected-access
-        out, _ = dropout_ext_op(x, self.p, seed, offset)
+        out, _ = ops.auto_generate.dropout_ext_op(x, self.p, seed, offset)
+
+        if self.inplace:
+            x.copy_(out)
+            return x
         return out

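The new `inplace` branch above writes the masked result back into the input buffer before returning it. A numpy stand-in for the inverted-dropout math (the real dropout_ext_op also threads generator seed/offset state, omitted here):

    import numpy as np

    def dropout_ext_sketch(x, p=0.5, inplace=False, training=True):
        if not training or p == 0:
            return x
        mask = (np.random.rand(*x.shape) >= p).astype(x.dtype)
        out = x * mask / (1.0 - p)  # rescale kept units to preserve the mean
        if inplace:
            x[...] = out  # mirrors x.copy_(out): the input buffer is overwritten
            return x
        return out

    x = np.ones((2, 3), dtype=np.float32)
    y = dropout_ext_sketch(x, p=0.5, inplace=True)
    print(y is x)  # True: with inplace=True the input itself is returned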
@@ -342,8 +348,8 @@ class Dropout2d(Cell):

     For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
     `2D` tensor input[i,j].
-    Each channel will be zeroed out independently on every forward call with probability `p` using samples
-    from a Bernoulli distribution.
+    At each forward propagation,
+    each channel will be independently determined to be set to zero with probability `p`.

     `Dropout2d` can improve the independence between channel feature maps.

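Unlike element-wise dropout, the channel-wise zeroing described above draws one Bernoulli sample per (sample, channel) pair and broadcasts it over the spatial dimensions; a numpy sketch (our helper name):

    import numpy as np

    def dropout2d_sketch(x, p=0.5):
        # x has shape (N, C, H, W); whole channels are kept or zeroed together.
        n, c = x.shape[:2]
        keep = (np.random.rand(n, c, 1, 1) >= p).astype(x.dtype)
        return x * keep / (1.0 - p)

    print(dropout2d_sketch(np.ones((1, 4, 2, 2), dtype=np.float32), p=0.5))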
@@ -479,6 +485,9 @@ class UpsampleExt(Cell):
     r"""
     For details, please refer to :func:`mindspore.mint.nn.functional.interpolate`.

+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
     Supported Platforms:
         ``Ascend``

@@ -511,8 +520,8 @@
         self.recompute_scale_factor = recompute_scale_factor

     def construct(self, input):
-        out = interpolate_ext(input, self.size, self.scale_factor, self.mode,
-                              self.align_corners, self.recompute_scale_factor)
+        out = ops.function.nn_func.interpolate_ext(input, self.size, self.scale_factor, self.mode,
+                                                   self.align_corners, self.recompute_scale_factor)
         return out

@@ -623,25 +632,27 @@ class Dense(Cell):
     where :math:`X` is the input tensors, :math:`\text{activation}` is the activation function passed as the activation
     argument (if passed in), :math:`\text{kernel}` is a weight matrix with the same
     data type as the :math:`X` created by the layer, and :math:`\text{bias}` is a bias vector
-    with the same data type as the :math:`X` created by the layer (only if has_bias is True).
+    with the same data type as the :math:`X` created by the layer (only if `has_bias` is ``True``).

     .. warning::
-        In PYNATIVE mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.
+        On the Ascend platform, if `bias` is ``False`` , the `x` cannot be greater than 6D in PYNATIVE or KBK mode.

     Args:
         in_channels (int): The number of channels in the input space.
         out_channels (int): The number of channels in the output space.
-        weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype
-            is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
+        weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): The trainable weight_init parameter.
+            The dtype is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
             weight will be initialized using HeUniform.
-        bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
-            same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
+        bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): The trainable bias_init parameter.
+            The dtype is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
             bias will be initialized using Uniform.
-        has_bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
-        activation (Union[str, Cell, Primitive, None]): activate function applied to the output of the fully connected
+        has_bias (bool, optional): Specifies whether the layer uses a bias vector :math:`\text{bias}`.
+            Default: ``True``.
+        activation (Union[str, Cell, Primitive, None], optional): activate function applied to
+            the output of the fully connected
             layer. Both activation name, e.g. 'relu', and mindspore activation function, e.g. mindspore.ops.ReLU(),
             are supported. Default: ``None`` .
-        dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``mstype.float32`` .
+        dtype (:class:`mindspore.dtype`, optional): Data type of Parameter. Default: ``mstype.float32`` .
             When `weight_init` is Tensor, Parameter has the same data type as `weight_init` ,
             in other cases, Parameter has the same data type as `dtype`, the same goes for `bias_init`.

@@ -660,7 +671,7 @@ class Dense(Cell):
             is not equal to `out_channels` or shape[1] of `weight_init` is not equal to `in_channels`.
         ValueError: If length of shape of `bias_init` is not equal to 1
             or shape[0] of `bias_init` is not equal to `out_channels`.
-        RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PYNATIVE mode.
+        RuntimeError: On the Ascend platform, if `bias` is ``False`` and `x` is greater than 6D in PYNATIVE or KBK mode.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -763,23 +774,28 @@ class Linear(Cell):
         \text{outputs} = X * kernel + bias

     .. warning::
-        In PYNATIVE mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.
+        On the Ascend platform, if `bias` is ``False`` , the `x` cannot be greater than 6D in PYNATIVE or KBK mode.

     where :math:`X` is the input tensors, :math:`\text{kernel}` is a weight matrix with the same
     data type as the :math:`X` created by the layer, and :math:`\text{bias}` is a bias vector
-    with the same data type as the :math:`X` created by the layer (only if has_bias is True).
+    with the same data type as the :math:`X` created by the layer (only if the parameter `bias` is True).
+
+    .. warning::
+        In PyNative mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.

     Args:
         in_features (int): The number of features in the input space.
         out_features (int): The number of features in the output space.
-        bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
-        weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype
+        bias (bool, optional): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
+        weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+            The trainable weight_init parameter. The dtype
             is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
             weight will be initialized using HeUniform.
-        bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
+        bias_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+            The trainable bias_init parameter. The dtype is
             same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
             bias will be initialized using Uniform.
-        dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``None`` .
+        dtype (:class:`mindspore.dtype`, optional): Data type of Parameter. Default: ``None`` .
             If `dtype` is ``None`` , `dtype` is set to ``mstype.float32`` when initializing the method.
             When `weight_init` is Tensor, Parameter has the same data type as `weight_init` ,
             in other cases, Parameter has the same data type as `dtype`, the same goes for `bias_init`.
@@ -798,7 +814,7 @@ class Linear(Cell):
             is not equal to `out_features` or shape[1] of `weight_init` is not equal to `in_features`.
         ValueError: If length of shape of `bias_init` is not equal to 1
             or shape[0] of `bias_init` is not equal to `out_features`.
-        RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PYNATIVE mode.
+        RuntimeError: On the Ascend platform, if `bias` is ``False`` and `x` is greater than 6D in PYNATIVE or KBK mode.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -806,10 +822,10 @@ class Linear(Cell):
     Examples:
         >>> import mindspore
        >>> from mindspore import Tensor
-        >>> from mindspore import nn
+        >>> from mindspore import mint
        >>> import numpy as np
        >>> x = Tensor(np.array([[180, 234, 154], [244, 48, 247]]), mindspore.float32)
-        >>> net = nn.mint.nn.Linear(3, 4)
+        >>> net = mint.nn.Linear(3, 4)
        >>> output = net(x)
        >>> print(output.shape)
        (2, 4)
@@ -1285,7 +1301,7 @@ class UnfoldExt(Cell):
         self.stride = stride

     def construct(self, input):
-        return unfold_ext(input, self.kernel_size, self.dilation, self.padding, self.stride)
+        return ops.auto_generate.unfold_ext(input, self.kernel_size, self.dilation, self.padding, self.stride)


 class Fold(Cell):
@@ -1316,8 +1332,8 @@ class Fold(Cell):
         self.stride = stride

     def construct(self, input):
-        return fold_ext(input, self.output_size, self.kernel_size,
-                        self.dilation, self.padding, self.stride)
+        return ops.auto_generate.fold_ext(input, self.output_size, self.kernel_size,
+                                          self.dilation, self.padding, self.stride)


 @_primexpr
@@ -1555,7 +1571,7 @@ class Roll(Cell):
         else:
             if not isinstance(self.axis, (list, tuple)):
                 self.op_list.append(
-                    (P.Roll(shift=self.shift, axis=0), self.axis))
+                    (P.Roll(shifts=self.shift, dims=0), self.axis))
             else:
                 if len(self.shift) != len(self.axis):
                     raise ValueError(f"For '{self.cls_name}', the shape of 'shift' and the shape of 'axis' must be "
@@ -1563,7 +1579,7 @@ class Roll(Cell):
                                      f"and the length of 'axis' {len(self.axis)}.")
                 for idx, _ in enumerate(self.axis):
                     self.op_list.append(
-                        (P.Roll(shift=self.shift[idx], axis=0), self.axis[idx]))
+                        (P.Roll(shifts=self.shift[idx], dims=0), self.axis[idx]))

     def construct(self, input_x):
         dim = len(self.shape_op(input_x))
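The only change in these two hunks is the keyword rename on P.Roll (shift/axis to shifts/dims); the circular-shift semantics are unchanged and, for a given shift and axis, behave like numpy.roll:

    import numpy as np

    x = np.arange(6).reshape(2, 3)
    print(np.roll(x, shift=1, axis=1))  # each row rotates right by one
    # [[2 0 1]
    #  [5 3 4]]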
@@ -21,9 +21,9 @@ __all__ = ['ChannelShuffle']

  class ChannelShuffle(Cell):
  r"""
- Divide the channels of Tensor whose shape is :math:`(*, C, H, W)` into :math:`g` groups to obtain a Tensor with
- shape :math:`(*, C \frac g, g, H, W)`, and transpose along the corresponding axis of :math:`C`,
- :math:`\frac{g}{}` and :math:`g` to restore Tensor to the original shape.
+ Divide the channels in a tensor of shape :math:`(*, C, H, W)` into :math:`g` groups and
+ rearrange them as :math:`(*, \frac{C}{g}, g, H*W)`, while retaining the original tensor
+ shape in the final output.

  Args:
  groups (int): Number of groups to divide channels in, must be greater than 0.
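The rewritten summary describes the usual reshape-transpose-reshape recipe. A minimal sketch with
plain tensor methods (illustrative only, not the cell's actual implementation):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.arange(16).reshape(1, 4, 2, 2), mindspore.float32)
>>> g = 2
>>> b, c, h, w = x.shape
>>> # split channels into g groups, swap the group axes, then flatten back
>>> y = x.reshape(b, g, c // g, h, w).transpose(0, 2, 1, 3, 4).reshape(b, c, h, w)
>>> print(y.shape)
(1, 4, 2, 2)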
@@ -648,7 +648,7 @@ class CellDict(_CellDictBase, Cell):
  Remove key from the CellDict and return its cell.

  Args:
- key (string): key to pop from the CellDict.
+ key (str): key to pop from the CellDict.

  Raises:
  KeyError: If `key` does not exist in the CellDict when attempting to access the cell.
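Usage stays dict-like; a short sketch (the cell name 'proj' here is arbitrary):
>>> from mindspore import nn
>>> cells = nn.CellDict({'proj': nn.Dense(3, 4)})
>>> proj = cells.pop('proj')   # removes the key and returns its cell
>>> print(len(cells))
0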
@@ -856,11 +856,12 @@ class Conv3dTranspose(_Conv):
  where :math:`N` is batch size, :math:`C_{in}` is the number of
  channels, :math:`D_{in}, H_{in}, W_{in}` are the depth, height and width of the feature layer respectively.

- When Conv3d and Conv3dTranspose are initialized with the same parameters, and `pad_mode` is set to 'pad',
+ When Conv3d and Conv3dTranspose are initialized with the same parameters, and `pad_mode` is set to ``'pad'``,
  :math:`dilation * (kernel\_size - 1) - padding` amount of zeros will be padded to the depth, height and width
  directions of the input, and the two are inverses of each other with regard to the input and output shapes in this case.
- However, when `stride` > 1, Conv2d maps multiple input shapes to the same output shape. Deconvolutional network
- can refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.
+ However, when `stride` > 1, Conv3d maps multiple input shapes to the same output shape.
+ For detailed information on deconvolutional networks,
+ refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.

  Note:
  For Atlas A2 training series products, `output_padding` is currently not supported.
@@ -872,7 +873,7 @@ class Conv3dTranspose(_Conv):
  The data type is an integer or a tuple of three integers. An integer represents the depth, height
  and width of the convolution kernel. A tuple of three integers represents the depth, height
  and width of the convolution kernel respectively.
- stride (Union[int, tuple[int]]): The movement stride of the 3D convolution kernel.
+ stride (Union[int, tuple[int]], optional): The movement stride of the 3D convolution kernel.
  The data type is an integer or a tuple of three integers. An integer represents the movement step size
  in depth, height and width directions. A tuple of three integers represents the movement step size
  in the depth, height and width directions respectively. Default: ``1`` .
@@ -892,13 +893,15 @@ class Conv3dTranspose(_Conv):
  in the depth, height and width dimension is determined by the `padding` parameter.
  If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of the input.
+ padding (Union(int, tuple[int]), optional): The number of padding on the depth, height and
+ width directions of the input.
  The data type is an integer or a tuple of six integers. If `padding` is an integer,
  then the head, tail, top, bottom, left, and right padding are all equal to `padding`.
  If `padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
  is equal to `padding[0]`, `padding[1]`, `padding[2]`, `padding[3]`, `padding[4]` and `padding[5]`
  respectively. The value should be greater than or equal to 0. Default: ``0`` .
- dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. The data type
+ dilation (Union[int, tuple[int]], optional): Specifies the dilation rate to use for dilated convolution.
+ The data type
  can be a single int or a tuple of 3 integers. A single int means the dilation size is the same in the
  depth, height and width directions. A tuple of 3 ints represents the dilation size in the depth, height
  and width directions, respectively.
@@ -908,33 +911,35 @@ class Conv3dTranspose(_Conv):
  The values in the depth, height and width dimensions are in
  the ranges [1, D], [1, H] and [1, W], respectively.
  Default: ``1`` .
- group (int): Splits filter into groups, `in_channels` and `out_channels` must be
+ group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
  divisible by `group`. Default: ``1`` .
- output_padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of
+ output_padding (Union(int, tuple[int]), optional): The number of padding on the depth,
+ height and width directions of
  the output. The data type is an integer or a tuple of three integers. If `output_padding` is an integer,
  then the depth, height, and width dimension padding are all equal to `output_padding`.
  If `output_padding` is a tuple of three integers, then the depth, height, and width padding is equal to
  `output_padding[0]`, `output_padding[1]` and `output_padding[2]` respectively.
  The value should be greater than or equal to 0.
  Default: ``0`` .
- has_bias (bool): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
- weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
+ has_bias (bool, optional): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of
+ weight parameter.
  It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
  values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
  distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
  ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
  lowercase are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
  weight will be initialized using HeUniform.
- bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
  Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
- data_format (str): The optional value for data format. Currently only support ``'NCDHW'`` .
+ data_format (str, optional): The optional value for data format. Currently only supports ``'NCDHW'`` .
  Default: ``'NCDHW'`` .
- dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
- Currently input data dtype only support float16 and float32.
+ Currently input data dtype only supports float16 and float32.

  Outputs:
  Tensor, the shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
@@ -980,10 +985,10 @@ class Conv3dTranspose(_Conv):
  TypeError: If input data type is not float16 or float32.
  ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
  ValueError: If `padding` is less than 0.
- ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
+ ValueError: If `pad_mode` is not one of ``'same'``, ``'valid'``, ``'pad'``.
  ValueError: If `padding` is a tuple whose length is not equal to 6.
- ValueError: If `pad_mode` is not equal to 'pad' and `padding` is not equal to (0, 0, 0, 0, 0, 0).
- ValueError: If `data_format` is not 'NCDHW'.
+ ValueError: If `pad_mode` is not equal to ``'pad'`` and `padding` is not equal to (0, 0, 0, 0, 0, 0).
+ ValueError: If `data_format` is not ``'NCDHW'``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
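A minimal construction sketch for the documented behavior; the shape follows the usual transposed
convolution formula for `pad_mode` set to ``'pad'`` with zero padding, and the keyword names are
taken from the Args above:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, nn
>>> x = Tensor(np.ones((1, 3, 4, 8, 8)), mindspore.float32)
>>> net = nn.Conv3dTranspose(in_channels=3, out_channels=8, kernel_size=2, stride=2, pad_mode='pad')
>>> print(net(x).shape)
(1, 8, 8, 16, 16)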
@@ -179,7 +179,7 @@ class EmbeddingExt(Cell):
  `[-num_embeddings, num_embeddings)` if it's not ``None``. Default ``None``.
  max_norm (float, optional): If the value is not None, firstly get the p-norm result of the embedding
  vector specified by `input` where p is specified by `norm_type`; if the result is larger than `max_norm`,
- update the embedding vector` with :math:`\frac{max\_norm}{result+1e^{-7}}`. Default ``None``.
+ update the embedding vector with :math:`\frac{max\_norm}{result+1e^{-7}}`. Default ``None``.
  norm_type (float, optional): Indicates the value of p in the p-norm. Default ``2.0``.
  scale_grad_by_freq (bool, optional): If ``True``, the gradients will be scaled by the inverse of frequency
  of the index in `input`. Default ``False``.
@@ -193,8 +193,8 @@ class EmbeddingExt(Cell):
  not None. Default: ``None``.

  Variables:
- weight (Parameter): The learnable weights of this module of shape (num_embeddings, embedding_dim), which
- initialized from :math:`{N}(\text{sigma=1.0}, \text{mean=0.0})` or `_weight` .
+ - **weight** (Parameter) - The learnable weights of this module of shape (num_embeddings, embedding_dim),
+ which is initialized from :math:`{N}(\text{sigma=1.0}, \text{mean=0.0})` or `_weight` .

  Inputs:
  - **input** (Tensor) - The indices used to look up in the embedding vector. The data type must be
@@ -220,18 +220,19 @@ class EmbeddingExt(Cell):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, nn
+ >>> mindspore.set_seed(0)
  >>> input = Tensor([[1, 0, 1, 1], [0, 0, 1, 0]])
  >>> embedding = nn.EmbeddingExt(num_embeddings=10, embedding_dim=3)
  >>> output = embedding(input)
  >>> print(output)
- [[[-0.0024154 -0.01203444 0.00811537]
- [ 0.00233847 -0.00596091 0.00536799]
- [-0.0024154 -0.01203444 0.00811537]
- [-0.0024154 -0.01203444 0.00811537]]
- [[ 0.00233847 -0.00596091 0.00536799]
- [ 0.00233847 -0.00596091 0.00536799]
- [-0.0024154 -0.01203444 0.00811537]
- [ 0.00233847 -0.00596091 0.00536799]]]
+ [[[ 0.6712398 0.5407775 1.0317237]
+ [-0.49091062 -0.42302188 -1.4807187]
+ [ 0.6712398 0.5407775 1.0317237]
+ [ 0.6712398 0.5407775 1.0317237]]
+ [[-0.49091062 -0.42302188 -1.4807187]
+ [-0.49091062 -0.42302188 -1.4807187]
+ [ 0.6712398 0.5407775 1.0317237]
+ [-0.49091062 -0.42302188 -1.4807187]]]
  """

  def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0,
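The `max_norm` rule documented in the Args above amounts to renormalizing, at lookup time, any
selected row whose p-norm exceeds the bound. A hedged sketch continuing the example context, with
keyword names taken directly from the `__init__` signature:
>>> emb = nn.EmbeddingExt(num_embeddings=10, embedding_dim=3, max_norm=1.0, norm_type=2.0)
>>> out = emb(Tensor([[1, 0, 1]]))  # rows whose L2 norm exceeded 1.0 are rescaled by max_norm / (norm + 1e-7)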