mindspore-2.5.0-cp311-cp311-win_amd64.whl → mindspore-2.6.0-cp311-cp311-win_amd64.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (493)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +25 -194
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +109 -75
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +2014 -3386
  46. mindspore/common/api.py +386 -355
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/generator.py +3 -0
  52. mindspore/common/hook_handle.py +5 -3
  53. mindspore/common/initializer.py +10 -6
  54. mindspore/common/jit_begin_end.py +94 -0
  55. mindspore/common/jit_config.py +6 -1
  56. mindspore/common/jit_context.py +76 -0
  57. mindspore/common/jit_trace.py +378 -0
  58. mindspore/common/lazy_inline.py +2 -2
  59. mindspore/common/mutable.py +5 -4
  60. mindspore/common/parameter.py +106 -39
  61. mindspore/common/seed.py +2 -2
  62. mindspore/common/sparse_tensor.py +23 -17
  63. mindspore/common/tensor.py +332 -714
  64. mindspore/communication/__init__.py +7 -5
  65. mindspore/communication/_comm_helper.py +47 -2
  66. mindspore/communication/comm_func.py +70 -53
  67. mindspore/communication/management.py +83 -17
  68. mindspore/context.py +228 -571
  69. mindspore/dataset/__init__.py +44 -20
  70. mindspore/dataset/audio/__init__.py +2 -8
  71. mindspore/dataset/audio/transforms.py +3 -17
  72. mindspore/dataset/core/config.py +3 -3
  73. mindspore/dataset/engine/cache_client.py +1 -1
  74. mindspore/dataset/engine/datasets.py +102 -120
  75. mindspore/dataset/engine/datasets_audio.py +22 -22
  76. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  77. mindspore/dataset/engine/datasets_text.py +78 -85
  78. mindspore/dataset/engine/datasets_user_defined.py +109 -77
  79. mindspore/dataset/engine/datasets_vision.py +111 -108
  80. mindspore/dataset/engine/iterators.py +5 -3
  81. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  82. mindspore/dataset/engine/samplers.py +279 -57
  83. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  84. mindspore/dataset/engine/validators.py +10 -0
  85. mindspore/dataset/text/__init__.py +7 -6
  86. mindspore/dataset/text/transforms.py +6 -5
  87. mindspore/dataset/text/utils.py +3 -3
  88. mindspore/dataset/transforms/__init__.py +0 -9
  89. mindspore/dataset/transforms/transforms.py +3 -3
  90. mindspore/dataset/utils/browse_dataset.py +1 -1
  91. mindspore/dataset/vision/__init__.py +2 -9
  92. mindspore/dataset/vision/transforms.py +202 -158
  93. mindspore/dataset/vision/utils.py +7 -5
  94. mindspore/device_context/ascend/op_debug.py +60 -1
  95. mindspore/device_context/ascend/op_tuning.py +0 -4
  96. mindspore/device_manager.py +39 -3
  97. mindspore/dnnl.dll +0 -0
  98. mindspore/dpcmi.dll +0 -0
  99. mindspore/experimental/es/embedding_service.py +35 -27
  100. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
  101. mindspore/experimental/map_parameter.py +4 -4
  102. mindspore/experimental/optim/adadelta.py +22 -26
  103. mindspore/experimental/optim/adagrad.py +4 -4
  104. mindspore/experimental/optim/adam.py +4 -0
  105. mindspore/experimental/optim/adamax.py +4 -4
  106. mindspore/experimental/optim/adamw.py +4 -0
  107. mindspore/experimental/optim/asgd.py +1 -1
  108. mindspore/experimental/optim/lr_scheduler.py +40 -22
  109. mindspore/experimental/optim/radam.py +5 -5
  110. mindspore/experimental/optim/rprop.py +1 -1
  111. mindspore/experimental/optim/sgd.py +1 -1
  112. mindspore/hal/contiguous_tensors_handle.py +6 -10
  113. mindspore/hal/device.py +55 -81
  114. mindspore/hal/event.py +38 -55
  115. mindspore/hal/memory.py +115 -147
  116. mindspore/hal/stream.py +81 -125
  117. mindspore/include/dataset/constants.h +7 -4
  118. mindspore/include/dataset/execute.h +2 -2
  119. mindspore/jpeg62.dll +0 -0
  120. mindspore/log.py +40 -2
  121. mindspore/mindrecord/__init__.py +20 -7
  122. mindspore/mindspore_backend_common.dll +0 -0
  123. mindspore/mindspore_backend_manager.dll +0 -0
  124. mindspore/mindspore_common.dll +0 -0
  125. mindspore/mindspore_core.dll +0 -0
  126. mindspore/mindspore_dump.dll +0 -0
  127. mindspore/mindspore_frontend.dll +0 -0
  128. mindspore/mindspore_glog.dll +0 -0
  129. mindspore/mindspore_memory_pool.dll +0 -0
  130. mindspore/mindspore_ms_backend.dll +0 -0
  131. mindspore/mindspore_ops.dll +0 -0
  132. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  133. mindspore/mindspore_ops_kernel_common.dll +0 -0
  134. mindspore/mindspore_profiler.dll +0 -0
  135. mindspore/mindspore_pyboost.dll +0 -0
  136. mindspore/mindspore_pynative.dll +0 -0
  137. mindspore/mindspore_res_manager.dll +0 -0
  138. mindspore/mindspore_runtime_pipeline.dll +0 -0
  139. mindspore/mint/__init__.py +133 -702
  140. mindspore/mint/distributed/__init__.py +5 -1
  141. mindspore/mint/distributed/distributed.py +198 -113
  142. mindspore/mint/linalg/__init__.py +2 -0
  143. mindspore/mint/nn/__init__.py +280 -18
  144. mindspore/mint/nn/functional.py +282 -64
  145. mindspore/mint/nn/layer/__init__.py +4 -0
  146. mindspore/mint/nn/layer/_functions.py +7 -3
  147. mindspore/mint/nn/layer/activation.py +120 -13
  148. mindspore/mint/nn/layer/conv.py +234 -28
  149. mindspore/mint/nn/layer/normalization.py +15 -16
  150. mindspore/mint/nn/layer/padding.py +1 -1
  151. mindspore/mint/nn/layer/pooling.py +66 -1
  152. mindspore/mint/optim/__init__.py +2 -1
  153. mindspore/mint/optim/sgd.py +171 -0
  154. mindspore/msobj140.dll +0 -0
  155. mindspore/mspdb140.dll +0 -0
  156. mindspore/mspdbcore.dll +0 -0
  157. mindspore/mspdbst.dll +0 -0
  158. mindspore/mspft140.dll +0 -0
  159. mindspore/msvcdis140.dll +0 -0
  160. mindspore/msvcp140_1.dll +0 -0
  161. mindspore/msvcp140_2.dll +0 -0
  162. mindspore/msvcp140_atomic_wait.dll +0 -0
  163. mindspore/msvcp140_codecvt_ids.dll +0 -0
  164. mindspore/nn/__init__.py +4 -1
  165. mindspore/nn/cell.py +1253 -179
  166. mindspore/nn/layer/activation.py +23 -21
  167. mindspore/nn/layer/basic.py +22 -16
  168. mindspore/nn/layer/container.py +1 -1
  169. mindspore/nn/layer/conv.py +53 -42
  170. mindspore/nn/layer/embedding.py +9 -8
  171. mindspore/nn/layer/normalization.py +48 -42
  172. mindspore/nn/layer/pooling.py +75 -31
  173. mindspore/nn/layer/transformer.py +11 -10
  174. mindspore/nn/learning_rate_schedule.py +4 -2
  175. mindspore/nn/loss/loss.py +27 -19
  176. mindspore/nn/optim/ada_grad.py +6 -5
  177. mindspore/nn/optim/adadelta.py +9 -7
  178. mindspore/nn/optim/adafactor.py +1 -1
  179. mindspore/nn/optim/adam.py +18 -14
  180. mindspore/nn/optim/adamax.py +8 -7
  181. mindspore/nn/optim/adasum.py +5 -5
  182. mindspore/nn/optim/asgd.py +3 -1
  183. mindspore/nn/optim/ftrl.py +11 -9
  184. mindspore/nn/optim/lamb.py +1 -1
  185. mindspore/nn/optim/lazyadam.py +12 -10
  186. mindspore/nn/optim/momentum.py +7 -6
  187. mindspore/nn/optim/optimizer.py +2 -2
  188. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  189. mindspore/nn/optim/rmsprop.py +13 -12
  190. mindspore/nn/optim/rprop.py +9 -7
  191. mindspore/nn/optim/sgd.py +9 -6
  192. mindspore/nn/optim/tft_wrapper.py +5 -2
  193. mindspore/nn/probability/bijector/bijector.py +17 -11
  194. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  195. mindspore/nn/probability/bijector/invert.py +2 -2
  196. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  197. mindspore/nn/probability/bijector/softplus.py +3 -2
  198. mindspore/nn/probability/distribution/beta.py +3 -3
  199. mindspore/nn/probability/distribution/categorical.py +1 -1
  200. mindspore/nn/probability/distribution/cauchy.py +4 -2
  201. mindspore/nn/probability/distribution/exponential.py +6 -7
  202. mindspore/nn/probability/distribution/gamma.py +2 -2
  203. mindspore/nn/probability/distribution/gumbel.py +2 -2
  204. mindspore/nn/probability/distribution/half_normal.py +5 -3
  205. mindspore/nn/probability/distribution/logistic.py +5 -3
  206. mindspore/nn/probability/distribution/poisson.py +1 -1
  207. mindspore/nn/probability/distribution/uniform.py +5 -3
  208. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  209. mindspore/nn/reinforcement/tensor_array.py +1 -1
  210. mindspore/nn/wrap/__init__.py +6 -6
  211. mindspore/nn/wrap/cell_wrapper.py +178 -117
  212. mindspore/nn/wrap/grad_reducer.py +45 -36
  213. mindspore/nn/wrap/loss_scale.py +3 -3
  214. mindspore/numpy/array_creations.py +3 -3
  215. mindspore/numpy/array_ops.py +1 -1
  216. mindspore/numpy/utils.py +1 -2
  217. mindspore/numpy/utils_const.py +1 -2
  218. mindspore/opencv_core452.dll +0 -0
  219. mindspore/opencv_imgcodecs452.dll +0 -0
  220. mindspore/opencv_imgproc452.dll +0 -0
  221. mindspore/ops/__init__.py +3 -2
  222. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  223. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  224. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  225. mindspore/ops/_register_for_op.py +0 -11
  226. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  227. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  228. mindspore/ops/_vmap/vmap_array_ops.py +32 -6
  229. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  230. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  231. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  232. mindspore/ops/auto_generate/__init__.py +4 -3
  233. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
  234. mindspore/ops/auto_generate/gen_extend_func.py +286 -208
  235. mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
  236. mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
  237. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  238. mindspore/ops/composite/__init__.py +2 -1
  239. mindspore/ops/composite/base.py +19 -24
  240. mindspore/ops/composite/math_ops.py +6 -16
  241. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  242. mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
  243. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  244. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  248. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  249. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  250. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  251. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  252. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  254. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  255. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  256. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  257. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  259. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  260. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  263. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  264. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  267. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  268. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  271. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  272. mindspore/ops/function/__init__.py +28 -2
  273. mindspore/ops/function/_add_attr_func.py +58 -0
  274. mindspore/ops/function/array_func.py +1631 -2347
  275. mindspore/ops/function/clip_func.py +38 -45
  276. mindspore/ops/function/debug_func.py +36 -44
  277. mindspore/ops/function/grad/__init__.py +1 -0
  278. mindspore/ops/function/grad/grad_func.py +104 -71
  279. mindspore/ops/function/image_func.py +1 -1
  280. mindspore/ops/function/linalg_func.py +46 -78
  281. mindspore/ops/function/math_func.py +3024 -3855
  282. mindspore/ops/function/nn_func.py +678 -274
  283. mindspore/ops/function/other_func.py +159 -1
  284. mindspore/ops/function/parameter_func.py +17 -30
  285. mindspore/ops/function/random_func.py +216 -361
  286. mindspore/ops/function/reshard_func.py +4 -70
  287. mindspore/ops/function/sparse_func.py +3 -3
  288. mindspore/ops/function/sparse_unary_func.py +5 -5
  289. mindspore/ops/function/spectral_func.py +25 -58
  290. mindspore/ops/function/vmap_func.py +26 -18
  291. mindspore/ops/functional.py +8 -5
  292. mindspore/ops/functional_overload.py +655 -4
  293. mindspore/ops/op_info_register.py +32 -244
  294. mindspore/ops/operations/__init__.py +21 -14
  295. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  296. mindspore/ops/operations/_grad_ops.py +1 -10
  297. mindspore/ops/operations/_inner_ops.py +5 -76
  298. mindspore/ops/operations/_ms_kernel.py +4 -10
  299. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  300. mindspore/ops/operations/_scalar_ops.py +3 -2
  301. mindspore/ops/operations/_sequence_ops.py +1 -1
  302. mindspore/ops/operations/_tensor_array.py +1 -1
  303. mindspore/ops/operations/array_ops.py +39 -24
  304. mindspore/ops/operations/comm_ops.py +150 -107
  305. mindspore/ops/operations/custom_ops.py +287 -32
  306. mindspore/ops/operations/debug_ops.py +119 -16
  307. mindspore/ops/operations/inner_ops.py +1 -1
  308. mindspore/ops/operations/linalg_ops.py +1 -58
  309. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  310. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  311. mindspore/ops/operations/math_ops.py +21 -18
  312. mindspore/ops/operations/nn_ops.py +67 -224
  313. mindspore/ops/operations/other_ops.py +62 -9
  314. mindspore/ops/operations/random_ops.py +13 -7
  315. mindspore/ops/operations/reshard_ops.py +1 -1
  316. mindspore/ops/operations/sparse_ops.py +2 -2
  317. mindspore/ops/primitive.py +43 -32
  318. mindspore/ops/tensor_method.py +243 -17
  319. mindspore/ops_generate/__init__.py +0 -5
  320. mindspore/ops_generate/aclnn/__init__.py +0 -0
  321. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  322. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  323. mindspore/ops_generate/api/__init__.py +0 -0
  324. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  325. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  326. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  327. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  328. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  329. mindspore/ops_generate/api/gen_api.py +103 -0
  330. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  331. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  332. mindspore/ops_generate/common/__init__.py +0 -0
  333. mindspore/ops_generate/common/gen_constants.py +91 -0
  334. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  335. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  336. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  337. mindspore/ops_generate/gen_ops.py +23 -325
  338. mindspore/ops_generate/op_def/__init__.py +0 -0
  339. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  340. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  341. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
  342. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  343. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  344. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  345. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  346. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  347. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  348. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  349. mindspore/ops_generate/pyboost/__init__.py +0 -0
  350. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  351. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  352. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  353. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  354. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  355. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  356. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  357. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  358. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  359. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  360. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  361. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  362. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  363. mindspore/ops_generate/resources/__init__.py +0 -0
  364. mindspore/ops_generate/resources/resource_list.py +30 -0
  365. mindspore/ops_generate/resources/resource_loader.py +36 -0
  366. mindspore/ops_generate/resources/resource_manager.py +64 -0
  367. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  368. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  369. mindspore/parallel/__init__.py +6 -2
  370. mindspore/parallel/_auto_parallel_context.py +140 -12
  371. mindspore/parallel/_cell_wrapper.py +132 -15
  372. mindspore/parallel/_parallel_serialization.py +95 -4
  373. mindspore/parallel/_ps_context.py +1 -1
  374. mindspore/parallel/_recovery_context.py +7 -2
  375. mindspore/parallel/_tensor.py +142 -18
  376. mindspore/parallel/_utils.py +198 -25
  377. mindspore/parallel/algo_parameter_config.py +3 -3
  378. mindspore/parallel/auto_parallel.py +732 -0
  379. mindspore/parallel/checkpoint_convert.py +159 -0
  380. mindspore/parallel/checkpoint_transform.py +658 -37
  381. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  382. mindspore/parallel/cluster/run.py +1 -1
  383. mindspore/parallel/function/__init__.py +24 -0
  384. mindspore/parallel/function/reshard_func.py +258 -0
  385. mindspore/parallel/nn/__init__.py +25 -0
  386. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  387. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  388. mindspore/parallel/parameter_broadcast.py +24 -13
  389. mindspore/parallel/shard.py +137 -62
  390. mindspore/parallel/transform_safetensors.py +288 -95
  391. mindspore/pgodb140.dll +0 -0
  392. mindspore/pgort140.dll +0 -0
  393. mindspore/profiler/__init__.py +9 -5
  394. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  395. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  397. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
  398. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  399. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  400. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  401. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  402. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  403. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  404. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  405. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  406. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  407. mindspore/profiler/common/constant.py +12 -0
  408. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  409. mindspore/profiler/common/path_manager.py +24 -0
  410. mindspore/profiler/common/profiler_context.py +26 -2
  411. mindspore/profiler/common/profiler_meta_data.py +74 -0
  412. mindspore/profiler/common/profiler_parameters.py +59 -18
  413. mindspore/profiler/common/profiler_path_manager.py +66 -7
  414. mindspore/profiler/dynamic_profiler.py +112 -79
  415. mindspore/profiler/envprofiler.py +26 -1
  416. mindspore/profiler/experimental_config.py +197 -0
  417. mindspore/profiler/mstx.py +57 -14
  418. mindspore/profiler/platform/npu_profiler.py +33 -7
  419. mindspore/profiler/profiler.py +541 -45
  420. mindspore/profiler/profiler_action_controller.py +1 -1
  421. mindspore/profiler/profiler_interface.py +4 -0
  422. mindspore/profiler/schedule.py +57 -22
  423. mindspore/rewrite/api/node.py +15 -13
  424. mindspore/rewrite/api/symbol_tree.py +1 -1
  425. mindspore/run_check/_check_version.py +25 -14
  426. mindspore/run_check/run_check.py +1 -1
  427. mindspore/runtime/__init__.py +2 -2
  428. mindspore/runtime/executor.py +40 -11
  429. mindspore/runtime/memory.py +37 -13
  430. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  431. mindspore/swresample-4.dll +0 -0
  432. mindspore/swscale-6.dll +0 -0
  433. mindspore/tbbmalloc.dll +0 -0
  434. mindspore/tinyxml2.dll +0 -0
  435. mindspore/train/__init__.py +8 -8
  436. mindspore/train/_utils.py +43 -9
  437. mindspore/train/amp.py +1 -1
  438. mindspore/train/callback/__init__.py +2 -2
  439. mindspore/train/callback/_callback.py +2 -16
  440. mindspore/train/callback/_checkpoint.py +24 -40
  441. mindspore/train/callback/_cluster_monitor.py +14 -18
  442. mindspore/train/callback/_flops_collector.py +2 -3
  443. mindspore/train/callback/_history.py +7 -4
  444. mindspore/train/callback/_lambda_callback.py +2 -2
  445. mindspore/train/callback/_landscape.py +0 -3
  446. mindspore/train/callback/_loss_monitor.py +2 -1
  447. mindspore/train/callback/_on_request_exit.py +6 -5
  448. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  449. mindspore/train/callback/_summary_collector.py +8 -13
  450. mindspore/train/callback/_time_monitor.py +2 -1
  451. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
  452. mindspore/train/data_sink.py +25 -2
  453. mindspore/train/dataset_helper.py +4 -5
  454. mindspore/train/loss_scale_manager.py +8 -7
  455. mindspore/train/metrics/accuracy.py +3 -3
  456. mindspore/train/metrics/confusion_matrix.py +9 -9
  457. mindspore/train/metrics/error.py +3 -3
  458. mindspore/train/metrics/hausdorff_distance.py +4 -4
  459. mindspore/train/metrics/mean_surface_distance.py +3 -3
  460. mindspore/train/metrics/metric.py +0 -12
  461. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  462. mindspore/train/metrics/precision.py +8 -6
  463. mindspore/train/metrics/recall.py +9 -9
  464. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  465. mindspore/train/mind_ir_pb2.py +19 -12
  466. mindspore/train/model.py +262 -127
  467. mindspore/train/serialization.py +246 -988
  468. mindspore/train/summary/_summary_adapter.py +2 -2
  469. mindspore/train/summary/summary_record.py +1 -1
  470. mindspore/turbojpeg.dll +0 -0
  471. mindspore/utils/__init__.py +3 -2
  472. mindspore/utils/dryrun.py +4 -2
  473. mindspore/utils/hooks.py +81 -0
  474. mindspore/utils/runtime_execution_order_check.py +2 -0
  475. mindspore/utils/utils.py +138 -4
  476. mindspore/vcmeta.dll +0 -0
  477. mindspore/vcruntime140.dll +0 -0
  478. mindspore/vcruntime140_1.dll +0 -0
  479. mindspore/version.py +1 -1
  480. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
  481. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
  482. mindspore/_install_custom.py +0 -43
  483. mindspore/common/_register_for_adapter.py +0 -74
  484. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  485. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  486. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  487. mindspore/ops_generate/gen_constants.py +0 -190
  488. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  489. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  490. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  492. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
  493. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
@@ -82,7 +82,7 @@ class CELU(Cell):
  :align: center

  Args:
- alpha (float): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .
+ alpha (float, optional): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .

  Inputs:
  - **x** (Tensor) - The input of CELU. The required dtype is float16 or float32.
@@ -136,20 +136,22 @@ class Softmin(Cell):
  where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

  Args:
- axis (Union[int, tuple[int]]): The axis to apply Softmin operation, if the dimension of input `x` is x.ndim,
- the range of axis is `[-x.ndim, x.ndim)`. -1 means the last dimension. Default: ``-1`` .
+ axis (Union[int, tuple[int]], optional): The axis to apply Softmin operation,
+ if the dimension of input `x` is x.ndim,
+ the range of axis is :math:`[-x.ndim, x.ndim)`. -1 means the last dimension.
+ Default: ``-1`` . In CPU environment, `axis` only supports int type.

  Inputs:
  - **x** (Tensor) - Tensor for computing Softmin functions with data type of float16 or float32.

  Outputs:
- Tensor, which has the same type and shape as `x` with values in the range [0,1].
+ Tensor, which has the same type and shape as `x` with values in the range :math:`[0, 1]`.

  Raises:
  TypeError: If `axis` is neither an int nor a tuple.
  TypeError: If dtype of `x` is neither float16 nor float32.
  ValueError: If `axis` is a tuple whose length is less than 1.
- ValueError: If `axis` is a tuple whose elements are not all in the range [-x.ndim, x.ndim).
+ ValueError: If `axis` is a tuple whose elements are not all in the range :math:`[-x.ndim, x.ndim)`.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
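
The tightened `axis` contract is easiest to see in a minimal usage sketch (standard MindSpore imports; input values are illustrative, and per the updated docstring an int `axis` is the only form supported on CPU):

```python
import numpy as np
import mindspore as ms
import mindspore.nn as nn

# Softmin over the last axis; an int axis works on all listed platforms.
softmin = nn.Softmin(axis=-1)
x = ms.Tensor(np.array([[1.0, 2.0, 4.0]]), ms.float32)
out = softmin(x)
print(out)  # each row sums to 1, with larger weight on smaller inputs
```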
@@ -957,7 +959,7 @@ class GELU(Cell):
  :align: center

  Args:
- approximate (bool): Whether to enable approximation. Default: ``True`` .
+ approximate (bool, optional): Whether to enable approximation. Default: ``True`` .

  If `approximate` is ``True``, The gaussian error linear activation is:

@@ -965,7 +967,14 @@ class GELU(Cell):

  else, it is:

- :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt(2)))`, where P(X) ~ N(0, 1).
+ :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt(2)))`, where :math:`P(X) ~ N(0, 1)`.
+
+ Note:
+ - when calculating the input gradient of GELU with an input value of infinity, there are differences
+ in the output of the backward between ``Ascend`` and ``GPU``.
+ - when x is -inf, the computation result of ``Ascend`` is 0, and the computation result of ``GPU`` is Nan.
+ - when x is inf, the computation result of ``Ascend`` is dy, and the computation result of ``GPU`` is Nan.
+ - In mathematical terms, the result of Ascend has higher precision.

  Inputs:
  - **x** (Tensor) - The input of GELU with data type of float16, float32, or float64.
@@ -974,13 +983,6 @@ class GELU(Cell):
  Outputs:
  Tensor, with the same type and shape as the `x`.

- Note:
- when calculating the input gradient of GELU with an input value of infinity, there are differences
- in the output of the backward between ``Ascend`` and ``GPU``.
- when x is -inf, the computation result of ``Ascend`` is 0, and the computation result of ``GPU`` is Nan.
- when x is inf, the computation result of ``Ascend`` is dy, and the computation result of ``GPU`` is Nan.
- In mathematical terms, the result of Ascend has higher precision.
-
  Raises:
  TypeError: If dtype of `x` is not one of float16, float32, or float64.

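These two GELU hunks move the Ascend/GPU backward-precision note ahead of the Inputs section without changing its content. A minimal sketch of the two documented formulations, assuming only the `approximate` flag described above (`True` selects the tanh approximation, `False` the exact erf form):

```python
import numpy as np
import mindspore as ms
import mindspore.nn as nn

x = ms.Tensor(np.array([-1.0, 0.0, 1.0]), ms.float32)

gelu_tanh = nn.GELU()                  # approximate=True, tanh-based form
gelu_erf = nn.GELU(approximate=False)  # exact 0.5 * x * (1 + erf(x / sqrt(2)))

print(gelu_tanh(x))
print(gelu_erf(x))  # agrees with the approximation to several decimal places
```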
@@ -1165,7 +1167,7 @@ class PReLU(Cell):

  where :math:`x_i` is an element of an channel of the input.

- Here :math:`w` is a learnable parameter with a default initial value 0.25.
+ Here :math:`w` is a learnable parameter with a default initial value ``0.25``.
  Parameter :math:`w` has dimensionality of the argument channel. If called without argument
  channel, a single parameter :math:`w` will be shared across all channels.

@@ -1175,9 +1177,9 @@ class PReLU(Cell):
  :align: center

  Args:
- channel (int): The elements number of parameter :math:`w`.
- It could be an int, and the value is 1 or the channels number of input tensor `x`. Default: ``1`` .
- w (Union[float, list, Tensor]): The initial value of parameter. It could be a float, a float list or
+ channel (int, optional): The elements number of parameter :math:`w`.
+ It could be an int, and the value is ``1`` or the channels number of input tensor `x`. Default: ``1`` .
+ w (Union[float, list, Tensor], optional): The initial value of parameter. It could be a float, a float list or
  a tensor has the same dtype as the input tensor `x`. Default: ``0.25`` .

  Inputs:
@@ -1189,7 +1191,7 @@ class PReLU(Cell):

  Raises:
  TypeError: If `channel` is not an int.
- TypeError: If `w` is not one of a float, a float list, a float Tensor.
+ TypeError: If `w` is not one of a float, a list[float], a Tensor[float].
  TypeError: If dtype of `x` is neither float16 nor float32.
  ValueError: If the `x` is a 0-D or 1-D Tensor on Ascend.
  ValueError: If `channel` is less than 1.
@@ -1728,7 +1730,7 @@ class GLU(Cell):
  Here :math:`\sigma` is the sigmoid function, and :math:`\otimes` is the Hadamard product.

  Args:
- axis (int): the axis to split the input. Default: ``-1`` , the last axis in `x`.
+ axis (int, optional): the axis to split the input. Default: ``-1`` , the last axis in `x`.

  Inputs:
  - **x** (Tensor) - :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional dimensions.
@@ -1811,7 +1813,7 @@ def get_activation(name, prim_name=None):
  >>> import mindspore.nn as nn
  >>> sigmoid = nn.get_activation('sigmoid')
  >>> print(sigmoid)
- Sigmoid<>
+ Sigmoid()
  """
  msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
  if name is None:
@@ -226,7 +226,8 @@ class DropoutExt(Cell):
  Args:
  p (float, optional): The dropout rate of input neurons, E.g. `p` =0.9, dropping out 90% of input neurons.
  Default: ``0.5`` .
- inplace (bool, optional): If set to ``True`` , will do this operation in-place. Default: ``False`` .
+ inplace (bool, optional): Whether to enable the operation in-place.
+ If set to ``True`` , will do this operation in-place. Default: ``False`` .

  Inputs:
  - **x** (Tensor) - The input of Dropout.
@@ -347,8 +348,8 @@ class Dropout2d(Cell):

  For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
  `2D` tensor input[i,j].
- Each channel will be zeroed out independently on every forward call with probability `p` using samples
- from a Bernoulli distribution.
+ At each forward propagation,
+ each channel will be independently determined to be set to zero with probability `p`.

  `Dropout2d` can improve the independence between channel feature maps.

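A minimal sketch of the channel-wise behavior the reworded Dropout2d docstring describes; the shapes are illustrative and which channels get zeroed varies run to run:

```python
import numpy as np
import mindspore as ms
import mindspore.nn as nn

dropout2d = nn.Dropout2d(p=0.5)
dropout2d.set_train()  # channel dropout only applies in training mode

x = ms.Tensor(np.ones((1, 4, 2, 2)), ms.float32)  # (N, C, H, W)
out = dropout2d(x)
# Each of the 4 channels is either all zeros or rescaled by 1 / (1 - p) = 2.
print(out)
```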
@@ -631,25 +632,27 @@ class Dense(Cell):
  where :math:`X` is the input tensors, :math:`\text{activation}` is the activation function passed as the activation
  argument (if passed in), :math:`\text{kernel}` is a weight matrix with the same
  data type as the :math:`X` created by the layer, and :math:`\text{bias}` is a bias vector
- with the same data type as the :math:`X` created by the layer (only if has_bias is True).
+ with the same data type as the :math:`X` created by the layer (only if `has_bias` is ``True``).

  .. warning::
- In PyNative mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.
+ On the Ascend platform, if `bias` is ``False`` , the `x` cannot be greater than 6D in PYNATIVE or KBK mode.

  Args:
  in_channels (int): The number of channels in the input space.
  out_channels (int): The number of channels in the output space.
- weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype
- is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): The trainable weight_init parameter.
+ The dtype is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
  weight will be initialized using HeUniform.
- bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
- same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): The trainable bias_init parameter.
+ The dtype is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
  bias will be initialized using Uniform.
- has_bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
- activation (Union[str, Cell, Primitive, None]): activate function applied to the output of the fully connected
+ has_bias (bool, optional): Specifies whether the layer uses a bias vector :math:`\text{bias}`.
+ Default: ``True``.
+ activation (Union[str, Cell, Primitive, None], optional): activate function applied to
+ the output of the fully connected
  layer. Both activation name, e.g. 'relu', and mindspore activation function, e.g. mindspore.ops.ReLU(),
  are supported. Default: ``None`` .
- dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``mstype.float32`` .
+ dtype (:class:`mindspore.dtype`, optional): Data type of Parameter. Default: ``mstype.float32`` .
  When `weight_init` is Tensor, Parameter has the same data type as `weight_init` ,
  in other cases, Parameter has the same data type as `dtype`, the same goes for `bias_init`.

@@ -668,7 +671,7 @@ class Dense(Cell):
  is not equal to `out_channels` or shape[1] of `weight_init` is not equal to `in_channels`.
  ValueError: If length of shape of `bias_init` is not equal to 1
  or shape[0] of `bias_init` is not equal to `out_channels`.
- RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PyNative mode.
+ RuntimeError: On the Ascend platform, if `bias` is ``False`` and `x` is greater than 6D in PYNATIVE or KBK mode.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
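
A short sketch of the Dense defaults these hunks document (HeUniform weights and Uniform bias when the initializers are left as ``None``); the shapes and values are illustrative:

```python
import numpy as np
import mindspore as ms
import mindspore.nn as nn

# weight_init/bias_init left as None: HeUniform weights, Uniform bias.
dense = nn.Dense(in_channels=3, out_channels=4, activation='relu')
x = ms.Tensor(np.random.randn(2, 3).astype(np.float32))
print(dense(x).shape)  # (2, 4)

# Per the updated warning, with has_bias=False an input above 6D can raise
# RuntimeError on Ascend in PYNATIVE or KBK mode.
```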
@@ -770,6 +773,9 @@ class Linear(Cell):
  .. math::
  \text{outputs} = X * kernel + bias

+ .. warning::
+ On the Ascend platform, if `bias` is ``False`` , the `x` cannot be greater than 6D in PYNATIVE or KBK mode.
+
  where :math:`X` is the input tensors, :math:`\text{kernel}` is a weight matrix with the same
  data type as the :math:`X` created by the layer, and :math:`\text{bias}` is a bias vector
  with the same data type as the :math:`X` created by the layer (only if the parameter `bias` is True).
@@ -808,7 +814,7 @@ class Linear(Cell):
  is not equal to `out_features` or shape[1] of `weight_init` is not equal to `in_features`.
  ValueError: If length of shape of `bias_init` is not equal to 1
  or shape[0] of `bias_init` is not equal to `out_features`.
- RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PyNative mode.
+ RuntimeError: On the Ascend platform, if `bias` is ``False`` and `x` is greater than 6D in PYNATIVE or KBK mode.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1565,7 +1571,7 @@ class Roll(Cell):
         else:
             if not isinstance(self.axis, (list, tuple)):
                 self.op_list.append(
-                    (P.Roll(shift=self.shift, axis=0), self.axis))
+                    (P.Roll(shifts=self.shift, dims=0), self.axis))
             else:
                 if len(self.shift) != len(self.axis):
                     raise ValueError(f"For '{self.cls_name}', the shape of 'shift' and the shape of 'axis' must be "
@@ -1573,7 +1579,7 @@ class Roll(Cell):
                                      f"and the length of 'axis' {len(self.axis)}.")
                 for idx, _ in enumerate(self.axis):
                     self.op_list.append(
-                        (P.Roll(shifts=self.shift[idx], dims=0), self.axis[idx]))

     def construct(self, input_x):
         dim = len(self.shape_op(input_x))
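
These two Roll hunks track a keyword rename on the underlying primitive, from `P.Roll(shift=..., axis=...)` to `P.Roll(shifts=..., dims=...)`. A hedged migration sketch for code that constructs the primitive directly, under the 2.6.0 keywords shown in the added lines:

```python
import numpy as np
import mindspore as ms
from mindspore.ops import operations as P

x = ms.Tensor(np.arange(5).astype(np.float32))

# 2.5.0-era keywords (removed lines): P.Roll(shift=2, axis=0)
# 2.6.0-era keywords (added lines):
roll = P.Roll(shifts=2, dims=0)
print(roll(x))  # [3. 4. 0. 1. 2.]
```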
@@ -648,7 +648,7 @@ class CellDict(_CellDictBase, Cell):
  Remove key from the CellDict and return its cell.

  Args:
- key (string): key to pop from the CellDict.
+ key (str): key to pop from the CellDict.

  Raises:
  KeyError: If `key` not exist in CellDict when attempt to access cell.
@@ -272,20 +272,20 @@ class Conv2d(_Conv):

  .. math::
  \begin{array}{ll} \\
- H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
- {\text{stride[0]}}} \right \rceil \\
- W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
- {\text{stride[1]}}} \right \rceil \\
+ H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}
+ {\text{stride[0]}}} \right \rfloor + 1 \\
+ W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}
+ {\text{stride[1]}}} \right \rfloor + 1 \\
  \end{array}

  pad_mode is ``'pad'``:

  .. math::
  \begin{array}{ll} \\
- H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - (\text{kernel_size[0]} - 1) \times
- \text{dilation[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
- W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - (\text{kernel_size[1]} - 1) \times
- \text{dilation[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
+ H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
+ (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
+ W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
+ (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
  \end{array}

  Raises:
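
The corrected ``'valid'`` formula replaces the ceiling form with the equivalent floor-plus-one form, and the ``'pad'`` formula fixes the dilation/kernel grouping. A small sketch checking the new ``'valid'`` expression against an actual layer (the shape values here are illustrative):

```python
import numpy as np
import mindspore as ms
import mindspore.nn as nn

H_in, k, s, d = 32, 3, 2, 1

# New 'valid' formula: floor((H_in - d*(k - 1) - 1) / s) + 1
h_out = (H_in - d * (k - 1) - 1) // s + 1
print(h_out)  # 15

conv = nn.Conv2d(3, 8, kernel_size=k, stride=s, dilation=d, pad_mode='valid')
x = ms.Tensor(np.ones((1, 3, H_in, H_in)), ms.float32)
print(conv(x).shape)  # (1, 8, 15, 15)
```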
@@ -476,19 +476,25 @@ class Conv1d(_Conv):
  pad_mode is ``'same'``:

  .. math::
- L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil
+ \begin{array}{ll} \\
+ L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil \\
+ \end{array}

  pad_mode is ``'valid'``:

  .. math::
- L_{out} = \left \lceil{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
- {\text{stride}}} \right \rceil
+ \begin{array}{ll} \\
+ L_{out} = \left \lfloor{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) - 1}
+ {\text{stride}}} \right \rfloor + 1 \\
+ \end{array}

  pad_mode is ``'pad'``:

  .. math::
- L_{out} = \left \lfloor{\frac{L_{in} + 2 \times padding - (\text{kernel_size} - 1) \times
- \text{dilation} - 1 }{\text{stride}} + 1} \right \rfloor
+ \begin{array}{ll} \\
+ L_{out} = \left \lfloor{\frac{L_{in} + 2 \times {padding} - \text{dilation} \times
+ (\text{kernel_size} - 1) - 1}{\text{stride}}} \right \rfloor + 1 \\
+ \end{array}

  Raises:
  TypeError: If `in_channels`, `out_channels`, `kernel_size`, `stride`, `padding` or `dilation` is not an int.
@@ -727,24 +733,24 @@ class Conv3d(_Conv):

  .. math::
  \begin{array}{ll} \\
- D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
- {\text{stride[0]}} + 1} \right \rfloor \\
- H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
- {\text{stride[1]}} + 1} \right \rfloor \\
- W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
- {\text{stride[2]}} + 1} \right \rfloor \\
+ D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}
+ {\text{stride[0]}}} \right \rfloor + 1 \\
+ H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}
+ {\text{stride[1]}}} \right \rfloor + 1 \\
+ W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) - 1}
+ {\text{stride[2]}}} \right \rfloor + 1 \\
  \end{array}

  pad_mode is ``'pad'`` :

  .. math::
  \begin{array}{ll} \\
- D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - (\text{dilation[0]} - 1) \times
- \text{kernel_size[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
- H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - (\text{dilation[1]} - 1) \times
- \text{kernel_size[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
- W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - (\text{dilation[2]} - 1) \times
- \text{kernel_size[2]} - 1 }{\text{stride[2]}} + 1} \right \rfloor \\
+ D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
+ (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
+ H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
+ (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
+ W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - \text{dilation[2]} \times
+ (\text{kernel_size[2]} - 1) - 1}{\text{stride[2]}}} \right \rfloor + 1 \\
  \end{array}

  Raises:
@@ -856,11 +862,12 @@ class Conv3dTranspose(_Conv):
  where :math:`N` is batch size, :math:`C_{in}` is a number of
  channels, :math:`D_{in}, H_{in}, W_{in}` are the depth, height and width of the feature layer respectively.

- When Conv3d and Conv3dTranspose are initialized with the same parameters, and `pad_mode` is set to 'pad',
+ When Conv3d and Conv3dTranspose are initialized with the same parameters, and `pad_mode` is set to ``'pad'``,
  :math:`dilation * (kernel\_size - 1) - padding` amount of zero will be paded to the depth, height and width
  directions of the input, they are inverses of each other in regard to the input and output shapes in this case.
- However, when `stride` > 1, Conv2d maps multiple input shapes to the same output shape. Deconvolutional network
- can refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.
+ However, when `stride` > 1, Conv2d maps multiple input shapes to the same output shape.
+ For the detailed information of Deconvolutional network,
+ refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.

  Note:
  For Atlas A2 training series products, `output_padding` is currently not supported.
@@ -872,7 +879,7 @@ class Conv3dTranspose(_Conv):
  The data type is an integer or a tuple of three integers. An integer represents the depth, height
  and width of the convolution kernel. A tuple of three integers represents the depth, height
  and width of the convolution kernel respectively.
- stride (Union[int, tuple[int]]): The movement stride of the 3D convolution kernel.
+ stride (Union[int, tuple[int]], optional): The movement stride of the 3D convolution kernel.
  The data type is an integer or a tuple of three integers. An integer represents the movement step size
  in depth, height and width directions. A tuple of three integers represents the movement step size
  in the depth, height and width directions respectively. Default: ``1`` .
@@ -892,13 +899,15 @@ class Conv3dTranspose(_Conv):
  in the depth, height and width dimension is determined by the `padding` parameter.
  If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of the input.
+ padding (Union(int, tuple[int]), optional): The number of padding on the depth, height and
+ width directions of the input.
  The data type is an integer or a tuple of six integers. If `padding` is an integer,
  then the head, tail, top, bottom, left, and right padding are all equal to `padding`.
  If `padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
  is equal to `padding[0]`, `padding[1]`, `padding[2]`, `padding[3]`, `padding[4]` and `padding[5]`
  respectively. The value should be greater than or equal to 0. Default: ``0`` .
- dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. The data type
+ dilation (Union[int, tuple[int]], optional): Specifies the dilation rate to use for dilated convolution.
+ The data type
  can be a single int or a tuple of 3 integers. A single int means the dilation size is the same in the
  depth, height and width directions. A tuple of 3 ints represents the dilation size in the depth, height
  and width directions, respectively.
@@ -908,33 +917,35 @@ class Conv3dTranspose(_Conv):
  The values in the depth, height and width dimensions are in
  the ranges [1, D], [1, H] and [1, W], respectively.
  Default: ``1`` .
- group (int): Splits filter into groups, `in_channels` and `out_channels` must be
+ group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
  divisible by `group`. Default: ``1`` .
- output_padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of
+ output_padding (Union(int, tuple[int]), optional): The number of padding on the depth,
+ height and width directions of
  the output. The data type is an integer or a tuple of three integers. If `output_padding` is an integer,
  then the depth, height, and width dimension padding are all equal to `output_padding`.
  If `output_padding` is a tuple of three integers, then the depth, height, and width padding is equal to
  `output_padding[0]`, `output_padding[1]` and `output_padding[2]` respectively.
  The value should be greater than or equal to 0.
  Default: ``0`` .
- has_bias (bool): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
- weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
+ has_bias (bool, optional): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of
+ weight parameter.
  It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
  values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
  distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
  ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
  lowercase are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
  weight will be initialized using HeUniform.
- bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
  Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
- data_format (str): The optional value for data format. Currently only support ``'NCDHW'`` .
+ data_format (str, optional): The optional value for data format. Currently only support ``'NCDHW'`` .
  Default: ``'NCDHW'`` .
- dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
- Currently input data dtype only support float16 and float32.
+ Currently input data dtype only supports float16 and float32.

  Outputs:
  Tensor, the shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
@@ -980,10 +991,10 @@ class Conv3dTranspose(_Conv):
  TypeError: If input data type is not float16 or float32.
  ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
  ValueError: If `padding` is less than 0.
- ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
+ ValueError: If `pad_mode` is not one of ``'same'``, ``'valid'``, ``'pad'``.
  ValueError: If `padding` is a tuple whose length is not equal to 6.
- ValueError: If `pad_mode` is not equal to 'pad' and `padding` is not equal to (0, 0, 0, 0, 0, 0).
- ValueError: If `data_format` is not 'NCDHW'.
+ ValueError: If `pad_mode` is not equal to ``'pad'`` and `padding` is not equal to (0, 0, 0, 0, 0, 0).
+ ValueError: If `data_format` is not ``'NCDHW'``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -220,18 +220,19 @@ class EmbeddingExt(Cell):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, nn
+ >>> mindspore.set_seed(0)
  >>> input = Tensor([[1, 0, 1, 1], [0, 0, 1, 0]])
  >>> embedding = nn.EmbeddingExt(num_embeddings=10, embedding_dim=3)
  >>> output = embedding(input)
  >>> print(output)
- [[[-0.0024154 -0.01203444 0.00811537]
- [ 0.00233847 -0.00596091 0.00536799]
- [-0.0024154 -0.01203444 0.00811537]
- [-0.0024154 -0.01203444 0.00811537]]
- [[ 0.00233847 -0.00596091 0.00536799]
- [ 0.00233847 -0.00596091 0.00536799]
- [-0.0024154 -0.01203444 0.00811537]
- [ 0.00233847 -0.00596091 0.00536799]]]
+ [[[ 0.6712398 0.5407775 1.0317237]
+ [-0.49091062 -0.42302188 -1.4807187]
+ [ 0.6712398 0.5407775 1.0317237]
+ [ 0.0024154 0.5407775 1.0317237]]
+ [[-0.49091062 -0.42302188 -1.4807187]
+ [-0.49091062 -0.42302188 -1.4807187]
+ [ 0.6712398 0.5407775 1.0317237]
+ [-0.49091062 -0.42302188 -1.4807187]]]
  """

  def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0,
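
The updated doctest seeds the global RNG before building the layer, which is what makes the printed values stable across runs. The same pattern as a minimal sketch (the exact output values depend on the installed version's initializer and are not reproduced here):

```python
import mindspore
from mindspore import Tensor, nn

mindspore.set_seed(0)  # fix the RNG so the random embedding table is reproducible
embedding = nn.EmbeddingExt(num_embeddings=10, embedding_dim=3)
output = embedding(Tensor([[1, 0, 1, 1], [0, 0, 1, 0]]))
print(output.shape)  # (2, 4, 3): one embedding vector per input index
```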