mindspore 2.4.10-cp39-cp39-win_amd64.whl → 2.5.0-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (389)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +8 -3
  5. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +0 -5
  9. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  10. mindspore/_extends/parse/compile_config.py +64 -0
  11. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  12. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
  13. mindspore/_extends/parse/parser.py +23 -5
  14. mindspore/_extends/parse/standard_method.py +123 -27
  15. mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
  16. mindspore/amp.py +7 -1
  17. mindspore/atlprov.dll +0 -0
  18. mindspore/avcodec-59.dll +0 -0
  19. mindspore/avdevice-59.dll +0 -0
  20. mindspore/avfilter-8.dll +0 -0
  21. mindspore/avformat-59.dll +0 -0
  22. mindspore/avutil-57.dll +0 -0
  23. mindspore/boost/boost_cell_wrapper.py +136 -41
  24. mindspore/c1.dll +0 -0
  25. mindspore/c1xx.dll +0 -0
  26. mindspore/c2.dll +0 -0
  27. mindspore/common/__init__.py +3 -1
  28. mindspore/common/_register_for_tensor.py +0 -1
  29. mindspore/common/_stub_tensor.py +25 -4
  30. mindspore/common/_tensor_cpp_method.py +17 -0
  31. mindspore/common/_tensor_docs.py +6132 -0
  32. mindspore/common/api.py +98 -21
  33. mindspore/common/dtype.py +34 -34
  34. mindspore/common/dump.py +2 -1
  35. mindspore/common/file_system.py +8 -3
  36. mindspore/common/generator.py +2 -0
  37. mindspore/common/hook_handle.py +3 -1
  38. mindspore/common/initializer.py +3 -4
  39. mindspore/common/lazy_inline.py +8 -2
  40. mindspore/common/mindir_util.py +10 -2
  41. mindspore/common/parameter.py +31 -15
  42. mindspore/common/tensor.py +713 -1337
  43. mindspore/communication/__init__.py +1 -1
  44. mindspore/communication/_comm_helper.py +5 -0
  45. mindspore/communication/comm_func.py +215 -173
  46. mindspore/communication/management.py +23 -20
  47. mindspore/context.py +285 -191
  48. mindspore/dataset/__init__.py +23 -19
  49. mindspore/dataset/callback/ds_callback.py +2 -1
  50. mindspore/dataset/core/config.py +84 -3
  51. mindspore/dataset/engine/cache_admin.py +3 -3
  52. mindspore/dataset/engine/cache_client.py +5 -4
  53. mindspore/dataset/engine/datasets.py +192 -149
  54. mindspore/dataset/engine/datasets_audio.py +14 -0
  55. mindspore/dataset/engine/datasets_standard_format.py +11 -11
  56. mindspore/dataset/engine/datasets_text.py +38 -1
  57. mindspore/dataset/engine/datasets_user_defined.py +100 -66
  58. mindspore/dataset/engine/datasets_vision.py +81 -8
  59. mindspore/dataset/engine/iterators.py +281 -63
  60. mindspore/dataset/engine/obs/util.py +8 -0
  61. mindspore/dataset/engine/queue.py +40 -0
  62. mindspore/dataset/engine/samplers.py +26 -2
  63. mindspore/dataset/engine/serializer_deserializer.py +1 -1
  64. mindspore/dataset/engine/validators.py +43 -11
  65. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  66. mindspore/dataset/transforms/transforms.py +29 -12
  67. mindspore/dataset/vision/validators.py +1 -2
  68. mindspore/device_context/__init__.py +21 -0
  69. mindspore/device_context/ascend/__init__.py +25 -0
  70. mindspore/device_context/ascend/device.py +72 -0
  71. mindspore/device_context/ascend/op_debug.py +94 -0
  72. mindspore/device_context/ascend/op_precision.py +193 -0
  73. mindspore/device_context/ascend/op_tuning.py +127 -0
  74. mindspore/device_context/cpu/__init__.py +25 -0
  75. mindspore/device_context/cpu/device.py +62 -0
  76. mindspore/device_context/cpu/op_tuning.py +43 -0
  77. mindspore/device_context/gpu/__init__.py +21 -0
  78. mindspore/device_context/gpu/device.py +70 -0
  79. mindspore/device_context/gpu/op_precision.py +67 -0
  80. mindspore/device_context/gpu/op_tuning.py +175 -0
  81. mindspore/device_manager.py +134 -0
  82. mindspore/dnnl.dll +0 -0
  83. mindspore/dpcmi.dll +0 -0
  84. mindspore/experimental/llm_boost/__init__.py +1 -0
  85. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  86. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  87. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  88. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  89. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  90. mindspore/experimental/llm_boost/register.py +1 -0
  91. mindspore/experimental/optim/adadelta.py +26 -22
  92. mindspore/experimental/optim/adam.py +3 -0
  93. mindspore/experimental/optim/lr_scheduler.py +33 -24
  94. mindspore/experimental/optim/radam.py +33 -30
  95. mindspore/hal/device.py +28 -0
  96. mindspore/hal/event.py +17 -0
  97. mindspore/hal/memory.py +94 -3
  98. mindspore/hal/stream.py +91 -6
  99. mindspore/include/api/context.h +0 -1
  100. mindspore/jpeg62.dll +0 -0
  101. mindspore/log.py +12 -0
  102. mindspore/mindrecord/__init__.py +1 -1
  103. mindspore/mindrecord/config.py +17 -316
  104. mindspore/mindrecord/filereader.py +1 -9
  105. mindspore/mindrecord/filewriter.py +5 -15
  106. mindspore/mindrecord/mindpage.py +1 -9
  107. mindspore/mindspore_backend.dll +0 -0
  108. mindspore/mindspore_common.dll +0 -0
  109. mindspore/mindspore_core.dll +0 -0
  110. mindspore/mindspore_glog.dll +0 -0
  111. mindspore/mindspore_ops.dll +0 -0
  112. mindspore/mint/__init__.py +824 -218
  113. mindspore/mint/distributed/__init__.py +66 -4
  114. mindspore/mint/distributed/distributed.py +2594 -44
  115. mindspore/mint/linalg/__init__.py +6 -0
  116. mindspore/mint/nn/__init__.py +473 -14
  117. mindspore/mint/nn/functional.py +486 -11
  118. mindspore/mint/nn/layer/__init__.py +17 -4
  119. mindspore/mint/nn/layer/_functions.py +330 -0
  120. mindspore/mint/nn/layer/activation.py +169 -1
  121. mindspore/mint/nn/layer/basic.py +123 -0
  122. mindspore/mint/nn/layer/conv.py +727 -0
  123. mindspore/mint/nn/layer/normalization.py +215 -19
  124. mindspore/mint/nn/layer/padding.py +797 -0
  125. mindspore/mint/nn/layer/pooling.py +170 -0
  126. mindspore/mint/optim/__init__.py +2 -1
  127. mindspore/mint/optim/adam.py +223 -0
  128. mindspore/mint/optim/adamw.py +26 -19
  129. mindspore/mint/special/__init__.py +2 -1
  130. mindspore/msobj140.dll +0 -0
  131. mindspore/mspdb140.dll +0 -0
  132. mindspore/mspdbcore.dll +0 -0
  133. mindspore/mspdbst.dll +0 -0
  134. mindspore/mspft140.dll +0 -0
  135. mindspore/msvcdis140.dll +0 -0
  136. mindspore/msvcp140_1.dll +0 -0
  137. mindspore/msvcp140_2.dll +0 -0
  138. mindspore/msvcp140_atomic_wait.dll +0 -0
  139. mindspore/msvcp140_codecvt_ids.dll +0 -0
  140. mindspore/multiprocessing/__init__.py +5 -0
  141. mindspore/nn/cell.py +126 -19
  142. mindspore/nn/dynamic_lr.py +2 -1
  143. mindspore/nn/layer/activation.py +6 -6
  144. mindspore/nn/layer/basic.py +35 -25
  145. mindspore/nn/layer/channel_shuffle.py +3 -3
  146. mindspore/nn/layer/embedding.py +3 -3
  147. mindspore/nn/layer/normalization.py +8 -7
  148. mindspore/nn/layer/padding.py +4 -3
  149. mindspore/nn/layer/pooling.py +47 -13
  150. mindspore/nn/layer/rnn_cells.py +1 -1
  151. mindspore/nn/layer/rnns.py +2 -1
  152. mindspore/nn/layer/timedistributed.py +5 -5
  153. mindspore/nn/layer/transformer.py +48 -26
  154. mindspore/nn/learning_rate_schedule.py +5 -3
  155. mindspore/nn/loss/loss.py +31 -36
  156. mindspore/nn/optim/ada_grad.py +1 -0
  157. mindspore/nn/optim/adadelta.py +2 -2
  158. mindspore/nn/optim/adam.py +1 -1
  159. mindspore/nn/optim/lars.py +1 -4
  160. mindspore/nn/optim/optimizer.py +1 -1
  161. mindspore/nn/optim/rprop.py +2 -2
  162. mindspore/nn/optim/thor.py +2 -1
  163. mindspore/nn/utils/init.py +13 -11
  164. mindspore/nn/wrap/cell_wrapper.py +4 -6
  165. mindspore/nn/wrap/loss_scale.py +3 -4
  166. mindspore/numpy/array_creations.py +60 -62
  167. mindspore/numpy/array_ops.py +148 -143
  168. mindspore/numpy/logic_ops.py +41 -42
  169. mindspore/numpy/math_ops.py +361 -359
  170. mindspore/numpy/utils.py +16 -16
  171. mindspore/numpy/utils_const.py +4 -4
  172. mindspore/opencv_core452.dll +0 -0
  173. mindspore/opencv_imgcodecs452.dll +0 -0
  174. mindspore/opencv_imgproc452.dll +0 -0
  175. mindspore/ops/__init__.py +2 -1
  176. mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
  177. mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
  178. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  179. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  180. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  181. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  182. mindspore/ops/_vmap/vmap_array_ops.py +20 -19
  183. mindspore/ops/_vmap/vmap_base.py +0 -2
  184. mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
  185. mindspore/ops/_vmap/vmap_math_ops.py +11 -9
  186. mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
  187. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
  188. mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
  189. mindspore/ops/auto_generate/gen_extend_func.py +554 -60
  190. mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
  191. mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
  192. mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
  193. mindspore/ops/composite/base.py +1 -1
  194. mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
  195. mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
  196. mindspore/ops/function/__init__.py +12 -0
  197. mindspore/ops/function/array_func.py +561 -159
  198. mindspore/ops/function/clip_func.py +64 -0
  199. mindspore/ops/function/debug_func.py +28 -20
  200. mindspore/ops/function/image_func.py +1 -1
  201. mindspore/ops/function/linalg_func.py +5 -4
  202. mindspore/ops/function/math_func.py +1659 -290
  203. mindspore/ops/function/nn_func.py +988 -317
  204. mindspore/ops/function/parameter_func.py +3 -56
  205. mindspore/ops/function/random_func.py +243 -33
  206. mindspore/ops/function/sparse_unary_func.py +1 -1
  207. mindspore/ops/functional.py +18 -5
  208. mindspore/ops/functional_overload.py +897 -0
  209. mindspore/ops/operations/__init__.py +3 -2
  210. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  211. mindspore/ops/operations/_grad_ops.py +2 -34
  212. mindspore/ops/operations/_infer_ops.py +2 -1
  213. mindspore/ops/operations/_inner_ops.py +38 -8
  214. mindspore/ops/operations/array_ops.py +45 -303
  215. mindspore/ops/operations/comm_ops.py +19 -16
  216. mindspore/ops/operations/custom_ops.py +11 -55
  217. mindspore/ops/operations/debug_ops.py +42 -47
  218. mindspore/ops/operations/inner_ops.py +6 -4
  219. mindspore/ops/operations/linalg_ops.py +3 -2
  220. mindspore/ops/operations/manually_defined/ops_def.py +185 -104
  221. mindspore/ops/operations/math_ops.py +11 -216
  222. mindspore/ops/operations/nn_ops.py +146 -308
  223. mindspore/ops/primitive.py +23 -21
  224. mindspore/ops/tensor_method.py +1669 -0
  225. mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
  226. mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
  227. mindspore/ops_generate/arg_handler.py +0 -61
  228. mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
  229. mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
  230. mindspore/ops_generate/base_generator.py +11 -0
  231. mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
  232. mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
  233. mindspore/ops_generate/functional_overload_py_generator.py +110 -0
  234. mindspore/ops_generate/functions_cc_generator.py +233 -0
  235. mindspore/ops_generate/gen_aclnn_implement.py +110 -114
  236. mindspore/ops_generate/gen_constants.py +157 -3
  237. mindspore/ops_generate/gen_ops.py +245 -990
  238. mindspore/ops_generate/gen_pyboost_func.py +97 -998
  239. mindspore/ops_generate/gen_utils.py +119 -33
  240. mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
  241. mindspore/ops_generate/op_api_proto.py +206 -0
  242. mindspore/ops_generate/op_def_py_generator.py +131 -0
  243. mindspore/ops_generate/op_prim_py_generator.py +480 -0
  244. mindspore/ops_generate/op_proto.py +373 -108
  245. mindspore/ops_generate/op_template_parser.py +436 -0
  246. mindspore/ops_generate/ops_def_cc_generator.py +288 -0
  247. mindspore/ops_generate/ops_def_h_generator.py +74 -0
  248. mindspore/ops_generate/ops_name_h_generator.py +68 -0
  249. mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
  250. mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
  251. mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
  252. mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
  253. mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
  254. mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
  255. mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
  256. mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
  257. mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
  258. mindspore/ops_generate/pyboost_utils.py +92 -33
  259. mindspore/ops_generate/template.py +294 -44
  260. mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
  261. mindspore/parallel/__init__.py +3 -3
  262. mindspore/parallel/_auto_parallel_context.py +24 -33
  263. mindspore/parallel/_parallel_serialization.py +13 -2
  264. mindspore/parallel/_utils.py +4 -1
  265. mindspore/parallel/algo_parameter_config.py +1 -1
  266. mindspore/parallel/checkpoint_transform.py +44 -0
  267. mindspore/parallel/cluster/process_entity/_api.py +131 -37
  268. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  269. mindspore/parallel/cluster/run.py +20 -3
  270. mindspore/parallel/parameter_broadcast.py +1 -1
  271. mindspore/parallel/shard.py +3 -0
  272. mindspore/parallel/transform_safetensors.py +119 -253
  273. mindspore/pgodb140.dll +0 -0
  274. mindspore/pgort140.dll +0 -0
  275. mindspore/profiler/__init__.py +17 -4
  276. mindspore/profiler/analysis/__init__.py +0 -0
  277. mindspore/profiler/analysis/parser/__init__.py +0 -0
  278. mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
  279. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  280. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  281. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  282. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  283. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  284. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
  285. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  286. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
  287. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  288. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  289. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  290. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  291. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  292. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  293. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  294. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  295. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  296. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  297. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
  298. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  299. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  300. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  301. mindspore/profiler/analysis/task_manager.py +131 -0
  302. mindspore/profiler/analysis/time_converter.py +84 -0
  303. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  304. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
  305. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  306. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
  307. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
  308. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
  309. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
  310. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  311. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  312. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
  313. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  314. mindspore/profiler/analysis/work_flow.py +73 -0
  315. mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
  316. mindspore/profiler/common/command_executor.py +90 -0
  317. mindspore/profiler/common/constant.py +174 -3
  318. mindspore/profiler/common/file_manager.py +208 -0
  319. mindspore/profiler/common/log.py +130 -0
  320. mindspore/profiler/common/msprof_cmd_tool.py +202 -0
  321. mindspore/profiler/common/path_manager.py +371 -0
  322. mindspore/profiler/common/process_bar.py +168 -0
  323. mindspore/profiler/common/process_pool.py +9 -3
  324. mindspore/profiler/common/profiler_context.py +476 -0
  325. mindspore/profiler/common/profiler_info.py +304 -0
  326. mindspore/profiler/common/profiler_output_path.py +284 -0
  327. mindspore/profiler/common/profiler_parameters.py +210 -0
  328. mindspore/profiler/common/profiler_path_manager.py +120 -0
  329. mindspore/profiler/common/record_function.py +76 -0
  330. mindspore/profiler/common/tlv_decoder.py +76 -0
  331. mindspore/profiler/common/util.py +75 -2
  332. mindspore/profiler/dynamic_profiler.py +270 -37
  333. mindspore/profiler/envprofiler.py +138 -0
  334. mindspore/profiler/mstx.py +199 -0
  335. mindspore/profiler/platform/__init__.py +21 -0
  336. mindspore/profiler/platform/base_profiler.py +40 -0
  337. mindspore/profiler/platform/cpu_profiler.py +124 -0
  338. mindspore/profiler/platform/gpu_profiler.py +74 -0
  339. mindspore/profiler/platform/npu_profiler.py +309 -0
  340. mindspore/profiler/profiler.py +580 -93
  341. mindspore/profiler/profiler_action_controller.py +187 -0
  342. mindspore/profiler/profiler_interface.py +114 -0
  343. mindspore/profiler/schedule.py +208 -0
  344. mindspore/rewrite/api/symbol_tree.py +1 -2
  345. mindspore/run_check/_check_version.py +2 -6
  346. mindspore/runtime/__init__.py +37 -0
  347. mindspore/runtime/device.py +27 -0
  348. mindspore/runtime/event.py +209 -0
  349. mindspore/runtime/executor.py +148 -0
  350. mindspore/runtime/memory.py +392 -0
  351. mindspore/runtime/stream.py +460 -0
  352. mindspore/runtime/thread_bind_core.py +401 -0
  353. mindspore/swresample-4.dll +0 -0
  354. mindspore/swscale-6.dll +0 -0
  355. mindspore/tbbmalloc.dll +0 -0
  356. mindspore/tinyxml2.dll +0 -0
  357. mindspore/train/__init__.py +2 -2
  358. mindspore/train/_utils.py +53 -18
  359. mindspore/train/amp.py +8 -4
  360. mindspore/train/callback/_checkpoint.py +32 -18
  361. mindspore/train/callback/_early_stop.py +1 -1
  362. mindspore/train/callback/_flops_collector.py +105 -69
  363. mindspore/train/callback/_history.py +1 -1
  364. mindspore/train/callback/_summary_collector.py +44 -6
  365. mindspore/train/callback/_tft_register.py +31 -10
  366. mindspore/train/dataset_helper.py +11 -11
  367. mindspore/train/metrics/precision.py +4 -5
  368. mindspore/train/mind_ir_pb2.py +167 -46
  369. mindspore/train/model.py +13 -15
  370. mindspore/train/serialization.py +462 -76
  371. mindspore/train/summary/summary_record.py +1 -2
  372. mindspore/train/train_thor/model_thor.py +1 -1
  373. mindspore/turbojpeg.dll +0 -0
  374. mindspore/utils/__init__.py +4 -2
  375. mindspore/utils/dryrun.py +138 -0
  376. mindspore/utils/runtime_execution_order_check.py +550 -0
  377. mindspore/vcmeta.dll +0 -0
  378. mindspore/vcruntime140.dll +0 -0
  379. mindspore/vcruntime140_1.dll +0 -0
  380. mindspore/version.py +1 -1
  381. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
  382. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +385 -261
  383. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
  384. mindspore/common/_tensor_overload.py +0 -139
  385. mindspore/mindspore_np_dtype.dll +0 -0
  386. mindspore/profiler/envprofiling.py +0 -254
  387. mindspore/profiler/profiling.py +0 -1926
  388. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
  389. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
mindspore/nn/cell.py CHANGED
@@ -60,7 +60,7 @@ class Cell(Cell_):
  .. note::
  Cell is the inference mode by default. For a class that inherits a Cell,
  if the training and inference have different structures, the subclass performs the inference branch by default.
- To set the training mode, refer to `mindspore.nn.Cell.set_train` .
+ To set the training mode, refer to :func:`mindspore.nn.Cell.set_train` .

  .. warning::
  In the subclass of Cell, it's not allowed to define a method named 'cast' and not allowed to define an attribute
@@ -105,7 +105,8 @@ class Cell(Cell_):
  '_func_graph_flags', '_parameter_layout_dict', '_params_list', '_phase', '_bprop_debug',
  '_forward_pre_hook', '_forward_hook', '_backward_pre_hook', '_backward_hook',
  '_cell_backward_pre_hook', '_cell_backward_hook', '_is_run', '_param_prefix',
- '_attr_synced', 'pynative', 'requires_grad', 'cell_type']
+ '_attr_synced', 'pynative', 'requires_grad', 'cell_type',
+ '_parameters_forward_hook', '_parameters_backward_hook']
  total_instance_count = 0

  def __init__(self, auto_prefix=True, flags=None):
@@ -143,6 +144,8 @@ class Cell(Cell_):

  # call gc to release GE session resources used by non-used cell objects
  if os.getenv('GC_COLLECT_IN_CELL') == '1':
+ logger.warning("The convenient environment 'GC_COLLECT_IN_CELL' is deprecated from version 2.5 "
+ "and will be removed in a future version.")
  gc.collect()

  if flags:
@@ -158,6 +161,10 @@ class Cell(Cell_):
  self._cell_backward_hook = None
  self._is_recursion_hook = False

+ # parameters hook
+ self._parameters_forward_hook = None
+ self._parameters_backward_hook = None
+
  self.cell_type = None
  self.cast = Cast()
  self._has_config_recompute = False
@@ -492,14 +499,17 @@ class Cell(Cell_):
  if self._forward_pre_hook:
  inputs = self._run_forward_pre_hook(inputs)

- if self._backward_hook:
- output = self._backward_hook_construct(*inputs, **kwargs)
- elif self._shard_fn is not None:
+ if self._shard_fn is not None:
  output = self._shard_fn(*inputs, **kwargs)
- elif self._recompute_cell is not None:
- output = self._recompute_cell(*inputs, **kwargs)
- elif self.has_bprop and _pynative_executor.requires_grad():
- output = self._call_custom_bprop(*inputs, **kwargs)
+ elif _pynative_executor.requires_grad():
+ if self._backward_hook:
+ output = self._backward_hook_construct(*inputs, **kwargs)
+ elif self._recompute_cell is not None:
+ output = self._recompute_cell(*inputs, **kwargs)
+ elif self.has_bprop:
+ output = self._call_custom_bprop(*inputs, **kwargs)
+ else:
+ output = self.construct(*inputs, **kwargs)
  else:
  output = self.construct(*inputs, **kwargs)

@@ -598,7 +608,7 @@ class Cell(Cell_):
  strategy for others will be set by sharding propagation.
  in_strategy and out_strategy define the input and output layout respectively.
  in_strategy/out_strategy should be a tuple, each element of which corresponds to the desired layout of
- this input/output, which can refer to the description of `mindspore.ops.Primitive.shard`.
+ this input/output, which can refer to the description of :func:`mindspore.ops.Primitive.shard`.
  The parallel strategies of remaining operators are derived from the strategy specified by the input and output.

  Note:
@@ -1350,7 +1360,7 @@ class Cell(Cell_):
  def _updata(param):
  if param in replace:
  return replace.get(param)
- new_p = param.init_data(None, set_sliced=False)
+ new_p = param.init_data(None, set_sliced=param.sliced)
  replace[param] = new_p
  return new_p

@@ -1822,6 +1832,9 @@ class Cell(Cell_):
  if not hasattr(self, "_func_graph_flags"):
  self._func_graph_flags = {}
  self._func_graph_flags.update({**flags})
+ if context._get_mode() == context.PYNATIVE_MODE and self._func_graph_flags.get("output_no_recompute"):
+ raise TypeError("Recompute is not supported in PyNative mode currently, you can use "
+ "'context.set_context(mode=context.GRAPH_MODE)' or @jit to set graph mode.")
  self.__dict__.update({**flags})
  self._add_mixed_precision_flag(**flags)
  return self
@@ -1955,9 +1968,8 @@ class Cell(Cell_):

  def set_grad(self, requires_grad=True):
  """
- Sets the cell flag for gradient. In pynative mode, this parameter specifies whether the network requires
- gradients. If ``true`` , the backward network needed to compute the gradients will be generated when the forward
- network is executed.
+ Sets the cell flag for gradient.
+

  Args:
  requires_grad (bool): Specifies if the net need to grad, if it is
@@ -2217,6 +2229,8 @@ class Cell(Cell_):
  (Tensor(shape=[1], dtype=Float32, value= [ 2.00000000e+00]), Tensor(shape=[1], dtype=Float32,
  value= [ 2.00000000e+00]))
  """
+ if self.has_bprop:
+ return HookHandle()
  if context._get_mode() == context.GRAPH_MODE:
  return HookHandle()
  if not check_hook_fn("register_forward_hook", hook_fn):
@@ -2334,9 +2348,12 @@ class Cell(Cell_):
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
  """
- ret = self._cell_backward_pre_hook(outputs)
  if isinstance(outputs, tuple):
- if not isinstance(ret, tuple):
+ ret = self._cell_backward_pre_hook(*outputs)
+ else:
+ ret = self._cell_backward_pre_hook(outputs)
+ if isinstance(outputs, tuple):
+ if len(outputs) == 1:
  ret = (ret,)
  if len(ret) != len(outputs):
  raise TypeError(
@@ -2452,9 +2469,14 @@ class Cell(Cell_):
  outputs = self.construct(*outputs, **kwargs)
  else:
  outputs = self.construct(outputs, **kwargs)
-
- outputs = self._cell_backward_hook(outputs)
- return outputs
+ if isinstance(outputs, tuple):
+ new_outputs = self._cell_backward_hook(*outputs)
+ else:
+ new_outputs = self._cell_backward_hook(outputs)
+ # if outputs is (X,) and new_outpus is X
+ if isinstance(outputs, tuple) and len(outputs) == 1:
+ new_outputs = (new_outputs,)
+ return new_outputs

  def set_param_ps(self, recurse=True, init_in_server=False):
  """
@@ -2699,6 +2721,91 @@ class Cell(Cell_):
  for cell in self.cells():
  cell._add_recompute_flag()

+ def _register_parameters_hook(self, forward_hook=None, backward_hook=None, all=False):
+ """
+ Register the forward hook for parameters and register the backward hook for the corresponding gradient.
+
+ .. warning::
+ This is an experimental prototype that is subject to change and/or deletion.
+
+ Note:
+ - The `_register_parameters_hook(forward_hook, backward_hook)` only work in graph mode
+ - The `forward_hook` must be defined as the following code.
+ `parameters`: the tuple of the trainble parameters of the Cell, each element in the tuple shuould be
+ in the format of `(param_name, Parameter)`.
+ - The `forward_hook` should have the following signature:
+ forward_hook(parameters) -> None.
+ - The `backward_hook` must be defined as the following code.
+ `gradients`: the tuple of the gradients corresponding to the trainble parameters of the Cell, each
+ element in the tuple shuould be in the format of `(param_name, gradient)`.
+ - The `backward_hook` should have the following signature:
+ backward_hook(parameters) -> New gradients.
+
+ Args:
+ forward_hook (function, optional): Python function or ``None``, Forward hook function. Default: ``None``
+ backward_hook (function, optional): Python function or ``None``, Backward hook function. Default ``None``
+ all (bool, optional): bool, whether to set hooks for all sub cells recursively. Default: ``False``
+
+ Returns:
+ None
+
+ Raises:
+ RuntimeError: If the `forward_hook` or `backward_hook ` has unspoorted syntax under GRAPH MODE.
+ TypeError: If the `forward_hook` or `backward_hook` is not defined as required.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor, nn, ops, Parameter
+ >>>
+ >>> ms.set_context(mode=ms.GRAPH_MODE)
+ >>> def parameter_hook(parameters):
+ ...     print("--- enter parameter hook ---")
+ ...     for name, param in parameters:
+ ...         print (name, param)
+ ...     print("--- leave parameter hook ---")
+ ...
+ >>> def gradient_hook(gradients):
+ ...     print("--- enter gradient hook ---")
+ ...     outs = []
+ ...     for name, gradient in gradients:
+ ...         print(name, gradient)
+ ...         outs.append(gradient * 2) # double gradient
+ ...     print("--- leave gradient hook ---")
+ ...     return outs
+ ...
+ >>> class Net(nn.Cell):
+ ...     def __init__(self)
+ ...         super(Net, self).__init__()
+ ...         self.w = Parameter(Tensor(np.array([3.0], np.float32)), name='w')
+ ...     def construct(self, x):
+ ...         return self.w * x
+ ...
+ >>> grad = ops.GradOperation(get_by_list=True)
+ >>> net = Net()
+ >>> net._register_parameters_hook(forward_hook=parameter_hook, backward_hook=gradient_hook)
+ >>> x = Tensor(np.array([4.0]).astype(np.float32))
+ >>> output = grad(net, net.trainable_params())(x)
+ --- enter parameter hook ---
+ w
+ Tensor(shape=[1], dtype=Float32, value=[ 3.00000000e+00])
+ --- leave parameter hook ---
+ --- enter gradient hook ---
+ w
+ Tensor(shape=[1], dtype=Float32, value=[ 4.00000000e+00])
+ --- leave gradient hook ---
+ >>> print("doubled grad: ", output)
+ doubled grad: (Tensor(shape=[1], dtype=Float32, value=[ 8.00000000e+00]),)
+ """
+ if not all:
+ self._parameters_forward_hook = forward_hook
+ self._parameters_backward_hook = backward_hook
+ else:
+ for _, cell in self.cells_and_names():
+ cell._parameters_forward_hook = forward_hook
+ cell._parameters_backward_hook = backward_hook

  class GraphCell(Cell):
  """
@@ -224,7 +224,8 @@ def inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, deca
  total_step (int): The total number of steps.
  step_per_epoch (int): The number of steps in per epoch.
  decay_epoch (int): Number of epochs to decay over.
- is_stair (bool): If true, learning rate is decayed once every `decay_epoch` times. Default: ``False`` .
+ is_stair (bool): If true, learning rate is decayed once every `decay_epoch` times. If False, the learning rate
+ decays for every epoch. Default: ``False`` .

  Returns:
  list[float]. The size of list is `total_step`.
@@ -179,7 +179,7 @@ class Softmax2d(Cell):
  r"""
  Softmax function applied to 2D features data.

- Applies `Softmax` to each location :math:`(c, h, w)` with an input Tensor of shape :math:`(C, H, W)` .
+ Applies `Softmax` to each location with an input Tensor of shape :math:`(C, H, W)` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
@@ -1273,9 +1273,9 @@ class PReLUExt(Cell):
  no channel dim and the number of channels = 1.

  Args:
- num_parameters (int): number of `w` to learn. Although it takes an int as input,
+ num_parameters (int, optional): number of `w` to learn. Although it takes an int as input,
  there is only two legitimate values: 1, or the number of channels at Tensor `input`. Default: ``1`` .
- init (float): the initial value of `w`. Default: ``0.25`` .
+ init (float, optional): the initial value of `w`. Default: ``0.25`` .
  dtype (mindspore.dtype, optional): the type of `w`. Default: ``None`` . Supported data type
  is {float16, float32, bfloat16}.

@@ -1320,7 +1320,7 @@ class HSwish(Cell):
  Hard swish is defined as:

  .. math::
- \text{Hardswish}(input) =
+ \text{HSwish}(input) =
  \begin{cases}
  0, & \text{ if } input \leq -3, \\
  input, & \text{ if } input \geq +3, \\
@@ -1372,7 +1372,7 @@ class HSigmoid(Cell):
  Hard Sigmoid is defined as:

  .. math::
- \text{Hardsigmoid}(input) =
+ \text{HSigmoid}(input) =
  \begin{cases}
  0, & \text{ if } input \leq -3, \\
  1, & \text{ if } input \geq +3, \\
@@ -1578,7 +1578,7 @@ class HShrink(Cell):
  The formula is defined as follows:

  .. math::
- \text{HardShrink}(x) =
+ \text{HShrink}(x) =
  \begin{cases}
  x, & \text{ if } x > \lambda \\
  x, & \text{ if } x < -\lambda \\
@@ -25,10 +25,9 @@ from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_util
  from mindspore.common.seed import _get_graph_seed
  from mindspore.common.tensor import Tensor
  from mindspore.common.initializer import initializer, HeUniform, Uniform
+ from mindspore import ops
  from mindspore.ops import operations as P
  from mindspore.ops import functional as F
- from mindspore.ops.function.nn_func import interpolate_ext
- from mindspore.ops.auto_generate import unfold_ext
  from mindspore.ops.operations import _inner_ops as inner
  from mindspore.ops.primitive import constexpr, Primitive, _primexpr
  from mindspore.common.parameter import Parameter
@@ -37,7 +36,6 @@ from mindspore import _checkparam as Validator
  from mindspore.nn.cell import Cell
  from mindspore.nn.layer.activation import get_activation
  from mindspore.common._decorator import deprecated
- from mindspore.ops.auto_generate import dropout_ext_op, fold_ext
  from mindspore.common.generator import default_generator

  __all__ = ['Dropout', 'Flatten', 'Dense', 'Linear', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold', 'Tril', 'Triu',
@@ -140,6 +138,7 @@ class Dropout(Cell):

  Inputs:
  - **x** (Tensor) - The input of Dropout with data type of float16 or float32.
+ The shape of `x` cannot be less than 1.

  Outputs:
  Tensor, output tensor with the same shape as the `x`.
@@ -225,8 +224,9 @@ class DropoutExt(Cell):
  - Parameter `p` means the probability of the element of the input tensor to be zeroed.

  Args:
- p (float): The dropout rate of input neurons, E.g. `p` =0.9, dropping out 90% of input neurons.
+ p (float, optional): The dropout rate of input neurons, E.g. `p` =0.9, dropping out 90% of input neurons.
  Default: ``0.5`` .
+ inplace (bool, optional): If set to ``True`` , will do this operation in-place. Default: ``False`` .

  Inputs:
  - **x** (Tensor) - The input of Dropout.
@@ -253,18 +253,23 @@ class DropoutExt(Cell):
  (2, 2, 3)
  """

- def __init__(self, p=0.5):
+ def __init__(self, p=0.5, inplace=False):
  """Initialize DropoutExt."""
  super(DropoutExt, self).__init__()
  self.p = p
- self.generator_step = Tensor(1, mstype.int64)
+ self.inplace = inplace
+ self.generator_step = Tensor(12, mstype.int64)

  def construct(self, x):
  if not self.training or self.p == 0:
  return x

  seed, offset = default_generator._step(self.generator_step) # pylint: disable=protected-access
- out, _ = dropout_ext_op(x, self.p, seed, offset)
+ out, _ = ops.auto_generate.dropout_ext_op(x, self.p, seed, offset)
+
+ if self.inplace:
+ x.copy_(out)
+ return x
  return out

@@ -479,6 +484,9 @@ class UpsampleExt(Cell):
  r"""
  For details, please refer to :func:`mindspore.mint.nn.functional.interpolate`.

+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
  Supported Platforms:
  ``Ascend``

@@ -511,8 +519,8 @@ class UpsampleExt(Cell):
  self.recompute_scale_factor = recompute_scale_factor

  def construct(self, input):
- out = interpolate_ext(input, self.size, self.scale_factor, self.mode,
- self.align_corners, self.recompute_scale_factor)
+ out = ops.function.nn_func.interpolate_ext(input, self.size, self.scale_factor, self.mode,
+ self.align_corners, self.recompute_scale_factor)
  return out

@@ -626,7 +634,7 @@ class Dense(Cell):
  with the same data type as the :math:`X` created by the layer (only if has_bias is True).

  .. warning::
- In PYNATIVE mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.
+ In PyNative mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.

  Args:
  in_channels (int): The number of channels in the input space.
@@ -660,7 +668,7 @@ class Dense(Cell):
  is not equal to `out_channels` or shape[1] of `weight_init` is not equal to `in_channels`.
  ValueError: If length of shape of `bias_init` is not equal to 1
  or shape[0] of `bias_init` is not equal to `out_channels`.
- RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PYNATIVE mode.
+ RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PyNative mode.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -762,24 +770,26 @@ class Linear(Cell):
  .. math::
  \text{outputs} = X * kernel + bias

- .. warning::
- In PYNATIVE mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.
-
  where :math:`X` is the input tensors, :math:`\text{kernel}` is a weight matrix with the same
  data type as the :math:`X` created by the layer, and :math:`\text{bias}` is a bias vector
- with the same data type as the :math:`X` created by the layer (only if has_bias is True).
+ with the same data type as the :math:`X` created by the layer (only if the parameter `bias` is True).
+
+ .. warning::
+ In PyNative mode, if `bias` is ``False`` , the `x` cannot be greater than 6D.

  Args:
  in_features (int): The number of features in the input space.
  out_features (int): The number of features in the output space.
- bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
- weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype
+ bias (bool, optional): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: ``True``.
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+ The trainable weight_init parameter. The dtype
  is same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
  weight will be initialized using HeUniform.
- bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+ The trainable bias_init parameter. The dtype is
  same as `x`. The values of str refer to the function `initializer`. Default: ``None`` ,
  bias will be initialized using Uniform.
- dtype (:class:`mindspore.dtype`): Data type of Parameter. Default: ``None`` .
+ dtype (:class:`mindspore.dtype`, optional): Data type of Parameter. Default: ``None`` .
  If `dtype` is ``None`` , `dtype` is set to ``mstype.float32`` when initializing the method.
  When `weight_init` is Tensor, Parameter has the same data type as `weight_init` ,
  in other cases, Parameter has the same data type as `dtype`, the same goes for `bias_init`.
@@ -798,7 +808,7 @@ class Linear(Cell):
  is not equal to `out_features` or shape[1] of `weight_init` is not equal to `in_features`.
  ValueError: If length of shape of `bias_init` is not equal to 1
  or shape[0] of `bias_init` is not equal to `out_features`.
- RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PYNATIVE mode.
+ RuntimeError: If `bias` is ``False`` and `x` is greater than 6D in PyNative mode.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -806,10 +816,10 @@ class Linear(Cell):
  Examples:
  >>> import mindspore
  >>> from mindspore import Tensor
- >>> from mindspore import nn
+ >>> from mindspore import mint
  >>> import numpy as np
  >>> x = Tensor(np.array([[180, 234, 154], [244, 48, 247]]), mindspore.float32)
- >>> net = nn.mint.nn.Linear(3, 4)
+ >>> net = mint.nn.Linear(3, 4)
  >>> output = net(x)
  >>> print(output.shape)
  (2, 4)
@@ -1285,7 +1295,7 @@ class UnfoldExt(Cell):
  self.stride = stride

  def construct(self, input):
- return unfold_ext(input, self.kernel_size, self.dilation, self.padding, self.stride)
+ return ops.auto_generate.unfold_ext(input, self.kernel_size, self.dilation, self.padding, self.stride)


  class Fold(Cell):
@@ -1316,8 +1326,8 @@ class Fold(Cell):
  self.stride = stride

  def construct(self, input):
- return fold_ext(input, self.output_size, self.kernel_size,
- self.dilation, self.padding, self.stride)
+ return ops.auto_generate.fold_ext(input, self.output_size, self.kernel_size,
+ self.dilation, self.padding, self.stride)


  @_primexpr
@@ -21,9 +21,9 @@ __all__ = ['ChannelShuffle']

  class ChannelShuffle(Cell):
  r"""
- Divide the channels of Tensor whose shape is :math:`(*, C, H, W)` into :math:`g` groups to obtain a Tensor with
- shape :math:`(*, C \frac g, g, H, W)`, and transpose along the corresponding axis of :math:`C`,
- :math:`\frac{g}{}` and :math:`g` to restore Tensor to the original shape.
+ Divide the channels in a tensor of shape :math:`(*, C, H, W)` into :math:`g` group and
+ rearrange them as :math:`(*, \frac{C}{g}, g, H*W)`, while retaining the original tensor
+ shape in the final output.

  Args:
  groups (int): Number of groups to divide channels in, must be greater than 0.
@@ -179,7 +179,7 @@ class EmbeddingExt(Cell):
  `[-num_embeddings, num_embeddings)` if it's not ``None``. Default ``None``.
  max_norm (float, optional): If the value is not None, firstly get the p-norm result of the embedding
  vector specified by `input` where p is specified by `norm_type`; if the result is larger then `max_norm`,
- update the embedding vector` with :math:`\frac{max\_norm}{result+1e^{-7}}`. Default ``None``.
+ update the embedding vector with :math:`\frac{max\_norm}{result+1e^{-7}}`. Default ``None``.
  norm_type (float, optional): Indicated the value of p in p-norm. Default ``2.0``.
  scale_grad_by_freq (bool, optional): If ``True`` the gradients will be scaled by the inverse of frequency
  of the index in `input`. Default ``False``.
@@ -193,8 +193,8 @@ class EmbeddingExt(Cell):
  not None. Default: ``None``.

  Variables:
- weight (Parameter): The learnable weights of this module of shape (num_embeddings, embedding_dim), which
- initialized from :math:`{N}(\text{sigma=1.0}, \text{mean=0.0})` or `_weight` .
+ - **weight** (Parameter) - The learnable weights of this module of shape (num_embeddings, embedding_dim), which
+ initialized from :math:`{N}(\text{sigma=1.0}, \text{mean=0.0})` or `_weight` .

  Inputs:
  - **input** (Tensor) - The indices used to lookup in the embedding vector. The data type must be
@@ -36,7 +36,6 @@ from mindspore.common import dtype as mstype
  from mindspore.parallel._utils import _is_in_auto_parallel_mode
  from mindspore.nn.cell import Cell
  from mindspore import log as logger
- from mindspore.ops import group_norm

  __all__ = ['BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'LayerNorm', 'LayerNormExt', 'GroupNorm',
  'SyncBatchNorm', 'InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d']
@@ -795,13 +794,15 @@ class LayerNormExt(Cell):
  This is an experimental API that is subject to change or deletion.

  Args:
- normalized_shape (Union(tuple[int], list[int], int)): The normalized shape of `x` for LayerNorm
- eps (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-5`` .
- elementwise_affine (bool): Whether affine transformation is required. When this parameter is set to ``True``,
+ normalized_shape (Union(tuple[int], list[int], int)): The normalized shape of `x` for LayerNorm.
+ eps (float, optional): A value added to the denominator for numerical stability( :math:`\epsilon` ).
+ Default: ``1e-5`` .
+ elementwise_affine (bool, optional): Whether affine transformation is required.
+ When this parameter is set to ``True``,
  the weight parameter is initialized to 1 and the offset is initialized to 0. Default: ``True``.
- bias (bool): If set to ``False``, the layer will not learn an additive bias (only relevant if
+ bias (bool, optional): If set to ``False``, the layer will not learn an additive bias (only relevant if
  `elementwise_affine` is ``True``). Default: ``True``.
- dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``None`` .
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None`` .

  Inputs:
  - **x** (Tensor) - The shape is :math:`(N, *)`, where :math:`*` is equal to normalized_shape.
@@ -1248,7 +1249,7 @@ class GroupNorm(Cell):

  def _cal_output(self, x):
  """calculate groupnorm output"""
- return group_norm(x, self.num_groups, self.gamma.to(x.dtype), self.beta.to(x.dtype), self.eps)
+ return ops.group_norm(x, self.num_groups, self.gamma.to(x.dtype), self.beta.to(x.dtype), self.eps)

  @staticmethod
  @_primexpr
@@ -442,7 +442,7 @@ class _ReflectionPadNd(Cell):

  class ReflectionPad1d(_ReflectionPadNd):
  r"""
- Using a given padding to do reflection pad on the given tensor.
+ Using a given padding to do reflection pad on the given tensor. 1d means the dimension of padding is 1-dimension.

  Args:
  padding (union[int, tuple]): The padding size to pad the last dimension of input tensor.
@@ -490,7 +490,7 @@ class ReflectionPad1d(_ReflectionPadNd):

  class ReflectionPad2d(_ReflectionPadNd):
  r"""
- Using a given padding to do reflection pad the given tensor.
+ Using a given padding to do reflection pad the given tensor. 2d means the dimension of padding is 2-dimension.

  Args:
  padding (union[int, tuple]): The padding size to pad the input tensor.
@@ -542,7 +542,8 @@ class ReflectionPad2d(_ReflectionPadNd):

  class ReflectionPad3d(_ReflectionPadNd):
  r"""
- Pad the given tensor in a reflecting way using the input boundaries as the axis of symmetry.
+ Pad the given tensor in a reflecting way using the input boundaries as the axis of symmetry. 3d means the dimension
+ of padding is 3-dimension.

  Note:
  ReflectionPad3d has not supported 5D tensor yet.