mindspore 2.6.0__cp311-cp311-win_amd64.whl → 2.7.0__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (455)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +2 -2
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +42 -11
  9. mindspore/_extends/builtin_operations.py +3 -3
  10. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  11. mindspore/_extends/optimize/cell_utils.py +96 -0
  12. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +3 -3
  15. mindspore/_extends/parse/compile_config.py +44 -22
  16. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
  17. mindspore/_extends/parse/parser.py +64 -83
  18. mindspore/_extends/parse/resources.py +39 -0
  19. mindspore/_extends/parse/standard_method.py +47 -14
  20. mindspore/_extends/parse/trope.py +8 -1
  21. mindspore/_extends/pijit/__init__.py +1 -2
  22. mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
  23. mindspore/amp.py +4 -22
  24. mindspore/atlprov.dll +0 -0
  25. mindspore/avcodec-59.dll +0 -0
  26. mindspore/avdevice-59.dll +0 -0
  27. mindspore/avfilter-8.dll +0 -0
  28. mindspore/avformat-59.dll +0 -0
  29. mindspore/avutil-57.dll +0 -0
  30. mindspore/boost/adasum.py +1 -1
  31. mindspore/boost/boost_cell_wrapper.py +4 -4
  32. mindspore/c1.dll +0 -0
  33. mindspore/c1xx.dll +0 -0
  34. mindspore/c2.dll +0 -0
  35. mindspore/common/__init__.py +43 -12
  36. mindspore/common/_grad_function.py +2 -1
  37. mindspore/common/_pijit_context.py +28 -7
  38. mindspore/common/_stub_tensor.py +1 -209
  39. mindspore/common/_tensor_cpp_method.py +1 -1
  40. mindspore/common/_tensor_docs.py +177 -52
  41. mindspore/common/_utils.py +9 -1
  42. mindspore/common/api.py +338 -208
  43. mindspore/common/dtype.py +108 -57
  44. mindspore/common/dump.py +11 -16
  45. mindspore/common/dynamic_shape/__init__.py +0 -0
  46. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
  47. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  48. mindspore/common/file_system.py +59 -9
  49. mindspore/common/generator.py +2 -3
  50. mindspore/common/hook_handle.py +33 -5
  51. mindspore/common/jit_config.py +1 -1
  52. mindspore/common/jit_trace.py +84 -105
  53. mindspore/common/np_dtype.py +3 -3
  54. mindspore/common/parameter.py +27 -29
  55. mindspore/common/recompute.py +5 -7
  56. mindspore/common/sparse_tensor.py +0 -3
  57. mindspore/common/symbol.py +0 -1
  58. mindspore/common/tensor.py +84 -133
  59. mindspore/communication/_comm_helper.py +46 -4
  60. mindspore/communication/management.py +79 -7
  61. mindspore/context.py +47 -38
  62. mindspore/dataset/__init__.py +1 -1
  63. mindspore/dataset/audio/transforms.py +1 -1
  64. mindspore/dataset/core/config.py +38 -4
  65. mindspore/dataset/engine/datasets.py +350 -322
  66. mindspore/dataset/engine/datasets_user_defined.py +69 -23
  67. mindspore/dataset/engine/iterators.py +2 -2
  68. mindspore/dataset/engine/obs/config_loader.py +2 -2
  69. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  70. mindspore/dataset/transforms/c_transforms.py +2 -2
  71. mindspore/dataset/transforms/py_transforms.py +7 -3
  72. mindspore/dataset/transforms/transforms.py +10 -6
  73. mindspore/dataset/vision/__init__.py +1 -1
  74. mindspore/dataset/vision/py_transforms.py +8 -8
  75. mindspore/dataset/vision/transforms.py +17 -5
  76. mindspore/dataset/vision/utils.py +632 -21
  77. mindspore/dataset/vision/validators.py +1 -0
  78. mindspore/device_context/ascend/device.py +1 -1
  79. mindspore/device_context/ascend/op_tuning.py +35 -1
  80. mindspore/device_context/gpu/__init__.py +2 -2
  81. mindspore/device_context/gpu/device.py +1 -1
  82. mindspore/device_context/gpu/op_precision.py +4 -2
  83. mindspore/device_context/gpu/op_tuning.py +6 -3
  84. mindspore/device_manager.py +16 -9
  85. mindspore/dnnl.dll +0 -0
  86. mindspore/dpcmi.dll +0 -0
  87. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +5 -4
  88. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  89. mindspore/experimental/optim/adadelta.py +13 -20
  90. mindspore/experimental/optim/adagrad.py +15 -22
  91. mindspore/experimental/optim/adam.py +17 -24
  92. mindspore/experimental/optim/adamax.py +14 -22
  93. mindspore/experimental/optim/adamw.py +28 -34
  94. mindspore/experimental/optim/asgd.py +15 -25
  95. mindspore/experimental/optim/lr_scheduler.py +27 -45
  96. mindspore/experimental/optim/nadam.py +14 -24
  97. mindspore/experimental/optim/optimizer.py +13 -23
  98. mindspore/experimental/optim/radam.py +18 -24
  99. mindspore/experimental/optim/rmsprop.py +14 -25
  100. mindspore/experimental/optim/rprop.py +15 -26
  101. mindspore/experimental/optim/sgd.py +9 -19
  102. mindspore/hal/__init__.py +4 -4
  103. mindspore/hal/contiguous_tensors_handle.py +2 -2
  104. mindspore/hal/memory.py +1 -0
  105. mindspore/include/api/cell.h +65 -5
  106. mindspore/include/api/cfg.h +24 -7
  107. mindspore/include/api/context.h +1 -0
  108. mindspore/include/api/delegate.h +10 -2
  109. mindspore/include/api/dual_abi_helper.h +100 -19
  110. mindspore/include/api/graph.h +14 -1
  111. mindspore/include/api/kernel.h +16 -3
  112. mindspore/include/api/kernel_api.h +9 -1
  113. mindspore/include/api/metrics/accuracy.h +9 -0
  114. mindspore/include/api/model.h +8 -1
  115. mindspore/include/api/model_group.h +4 -0
  116. mindspore/include/api/model_parallel_runner.h +2 -0
  117. mindspore/include/api/status.h +48 -10
  118. mindspore/include/api/types.h +8 -3
  119. mindspore/include/c_api/model_c.h +0 -58
  120. mindspore/include/c_api/tensor_c.h +0 -26
  121. mindspore/include/dataset/constants.h +9 -0
  122. mindspore/include/dataset/vision_ascend.h +1 -1
  123. mindspore/jpeg62.dll +0 -0
  124. mindspore/mindrecord/tools/cifar10.py +61 -11
  125. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  126. mindspore/mindspore_backend_common.dll +0 -0
  127. mindspore/mindspore_backend_manager.dll +0 -0
  128. mindspore/mindspore_common.dll +0 -0
  129. mindspore/mindspore_core.dll +0 -0
  130. mindspore/mindspore_cpu_res_manager.dll +0 -0
  131. mindspore/mindspore_dump.dll +0 -0
  132. mindspore/mindspore_frontend.dll +0 -0
  133. mindspore/mindspore_glog.dll +0 -0
  134. mindspore/mindspore_memory_pool.dll +0 -0
  135. mindspore/mindspore_ms_backend.dll +0 -0
  136. mindspore/mindspore_ops.dll +0 -0
  137. mindspore/mindspore_ops_host.dll +0 -0
  138. mindspore/mindspore_ops_kernel_common.dll +0 -0
  139. mindspore/mindspore_profiler.dll +0 -0
  140. mindspore/mindspore_pyboost.dll +0 -0
  141. mindspore/mindspore_pynative.dll +0 -0
  142. mindspore/mindspore_res_manager.dll +0 -0
  143. mindspore/mindspore_runtime_pipeline.dll +0 -0
  144. mindspore/mint/__init__.py +4 -44
  145. mindspore/mint/distributed/__init__.py +5 -0
  146. mindspore/mint/distributed/distributed.py +425 -19
  147. mindspore/mint/nn/__init__.py +1 -1
  148. mindspore/mint/nn/functional.py +53 -6
  149. mindspore/mint/nn/layer/_functions.py +163 -294
  150. mindspore/mint/nn/layer/activation.py +8 -6
  151. mindspore/mint/nn/layer/conv.py +125 -101
  152. mindspore/mint/nn/layer/normalization.py +11 -25
  153. mindspore/mint/optim/adam.py +19 -18
  154. mindspore/mint/optim/adamw.py +14 -8
  155. mindspore/mint/optim/sgd.py +5 -5
  156. mindspore/msobj140.dll +0 -0
  157. mindspore/mspdb140.dll +0 -0
  158. mindspore/mspdbcore.dll +0 -0
  159. mindspore/mspdbst.dll +0 -0
  160. mindspore/mspft140.dll +0 -0
  161. mindspore/msvcdis140.dll +0 -0
  162. mindspore/msvcp140_1.dll +0 -0
  163. mindspore/msvcp140_2.dll +0 -0
  164. mindspore/msvcp140_atomic_wait.dll +0 -0
  165. mindspore/msvcp140_codecvt_ids.dll +0 -0
  166. mindspore/nn/cell.py +488 -620
  167. mindspore/nn/grad/cell_grad.py +11 -12
  168. mindspore/nn/layer/activation.py +36 -36
  169. mindspore/nn/layer/basic.py +74 -77
  170. mindspore/nn/layer/channel_shuffle.py +4 -4
  171. mindspore/nn/layer/combined.py +4 -2
  172. mindspore/nn/layer/conv.py +86 -85
  173. mindspore/nn/layer/dense.py +9 -7
  174. mindspore/nn/layer/embedding.py +50 -52
  175. mindspore/nn/layer/image.py +38 -40
  176. mindspore/nn/layer/math.py +111 -112
  177. mindspore/nn/layer/normalization.py +56 -44
  178. mindspore/nn/layer/pooling.py +58 -63
  179. mindspore/nn/layer/rnn_cells.py +33 -33
  180. mindspore/nn/layer/rnns.py +56 -56
  181. mindspore/nn/layer/thor_layer.py +74 -73
  182. mindspore/nn/layer/transformer.py +11 -1
  183. mindspore/nn/learning_rate_schedule.py +20 -20
  184. mindspore/nn/loss/loss.py +79 -81
  185. mindspore/nn/optim/adam.py +2 -4
  186. mindspore/nn/optim/adasum.py +2 -2
  187. mindspore/nn/optim/lamb.py +1 -3
  188. mindspore/nn/optim/optimizer.py +1 -1
  189. mindspore/nn/optim/tft_wrapper.py +2 -3
  190. mindspore/nn/optim/thor.py +2 -2
  191. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  192. mindspore/nn/probability/distribution/exponential.py +2 -1
  193. mindspore/nn/probability/distribution/poisson.py +2 -1
  194. mindspore/nn/sparse/sparse.py +3 -3
  195. mindspore/nn/wrap/cell_wrapper.py +73 -42
  196. mindspore/nn/wrap/grad_reducer.py +37 -52
  197. mindspore/nn/wrap/loss_scale.py +72 -74
  198. mindspore/numpy/array_creations.py +7 -7
  199. mindspore/numpy/fft.py +1 -1
  200. mindspore/numpy/math_ops.py +1 -1
  201. mindspore/numpy/utils_const.py +1 -1
  202. mindspore/opencv_core452.dll +0 -0
  203. mindspore/opencv_imgcodecs452.dll +0 -0
  204. mindspore/opencv_imgproc452.dll +0 -0
  205. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  206. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  207. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  208. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  209. mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
  210. mindspore/ops/_vmap/vmap_array_ops.py +6 -13
  211. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  212. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +29 -10
  213. mindspore/ops/auto_generate/gen_extend_func.py +5 -55
  214. mindspore/ops/auto_generate/gen_ops_def.py +753 -273
  215. mindspore/ops/auto_generate/gen_ops_prim.py +1687 -958
  216. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  217. mindspore/ops/composite/__init__.py +10 -0
  218. mindspore/ops/composite/base.py +9 -5
  219. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  220. mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
  221. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  222. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  223. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  224. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  225. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  226. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  227. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  228. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  229. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  230. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  231. mindspore/ops/function/__init__.py +4 -1
  232. mindspore/ops/function/_add_attr_func.py +11 -6
  233. mindspore/ops/function/array_func.py +17 -100
  234. mindspore/ops/function/debug_func.py +8 -5
  235. mindspore/ops/function/grad/grad_func.py +5 -13
  236. mindspore/ops/function/math_func.py +65 -399
  237. mindspore/ops/function/nn_func.py +44 -61
  238. mindspore/ops/function/other_func.py +4 -1
  239. mindspore/ops/function/random_func.py +31 -4
  240. mindspore/ops/functional.py +2 -3
  241. mindspore/ops/functional_overload.py +486 -18
  242. mindspore/ops/op_info_register.py +21 -0
  243. mindspore/ops/operations/__init__.py +5 -2
  244. mindspore/ops/operations/_custom_ops_utils.py +675 -8
  245. mindspore/ops/operations/_inner_ops.py +14 -18
  246. mindspore/ops/operations/_sequence_ops.py +1 -1
  247. mindspore/ops/operations/array_ops.py +4 -50
  248. mindspore/ops/operations/comm_ops.py +186 -41
  249. mindspore/ops/operations/custom_ops.py +244 -175
  250. mindspore/ops/operations/debug_ops.py +55 -4
  251. mindspore/ops/operations/image_ops.py +13 -13
  252. mindspore/ops/operations/manually_defined/ops_def.py +27 -28
  253. mindspore/ops/operations/math_ops.py +8 -9
  254. mindspore/ops/operations/nn_ops.py +6 -7
  255. mindspore/ops/primitive.py +9 -20
  256. mindspore/ops/tensor_method.py +52 -11
  257. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  258. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  259. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  260. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  261. mindspore/ops_generate/common/base_generator.py +14 -0
  262. mindspore/ops_generate/common/gen_constants.py +7 -2
  263. mindspore/ops_generate/common/gen_utils.py +0 -19
  264. mindspore/ops_generate/common/op_proto.py +11 -4
  265. mindspore/ops_generate/common/template.py +88 -11
  266. mindspore/ops_generate/gen_ops.py +1 -1
  267. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  268. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  269. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  270. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  271. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  272. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  273. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
  274. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  275. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  276. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  277. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  278. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  279. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  280. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  281. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  282. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  283. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  284. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  285. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  286. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  287. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  288. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  289. mindspore/parallel/_auto_parallel_context.py +9 -17
  290. mindspore/parallel/_cell_wrapper.py +106 -40
  291. mindspore/parallel/_parallel_serialization.py +4 -3
  292. mindspore/parallel/_ps_context.py +4 -6
  293. mindspore/parallel/_tensor.py +167 -12
  294. mindspore/parallel/_transformer/moe.py +1 -1
  295. mindspore/parallel/_transformer/transformer.py +17 -12
  296. mindspore/parallel/_utils.py +5 -11
  297. mindspore/parallel/auto_parallel.py +33 -12
  298. mindspore/parallel/checkpoint_convert.py +3 -3
  299. mindspore/parallel/checkpoint_transform.py +5 -1
  300. mindspore/parallel/cluster/process_entity/_api.py +88 -49
  301. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  302. mindspore/parallel/cluster/run.py +48 -7
  303. mindspore/parallel/function/__init__.py +8 -1
  304. mindspore/parallel/function/reshard_func.py +7 -6
  305. mindspore/parallel/nn/__init__.py +15 -2
  306. mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
  307. mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
  308. mindspore/parallel/shard.py +9 -23
  309. mindspore/parallel/transform_safetensors.py +468 -174
  310. mindspore/pgodb140.dll +0 -0
  311. mindspore/pgort140.dll +0 -0
  312. mindspore/profiler/__init__.py +2 -1
  313. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  314. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  315. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
  316. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  317. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  318. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  319. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  320. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  321. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  322. mindspore/profiler/analysis/task_manager.py +1 -1
  323. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  324. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  325. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
  326. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
  327. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  328. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  329. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  330. mindspore/profiler/common/constant.py +16 -0
  331. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  332. mindspore/profiler/common/path_manager.py +9 -0
  333. mindspore/profiler/common/profiler_context.py +50 -29
  334. mindspore/profiler/common/profiler_info.py +0 -16
  335. mindspore/profiler/common/profiler_meta_data.py +1 -0
  336. mindspore/profiler/common/profiler_op_analyse.py +239 -0
  337. mindspore/profiler/common/profiler_output_path.py +23 -8
  338. mindspore/profiler/common/profiler_parameters.py +128 -35
  339. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  340. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  341. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  342. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  343. mindspore/profiler/dynamic_profiler.py +374 -338
  344. mindspore/profiler/envprofiler.py +42 -12
  345. mindspore/profiler/experimental_config.py +112 -7
  346. mindspore/profiler/mstx.py +33 -12
  347. mindspore/profiler/platform/__init__.py +2 -3
  348. mindspore/profiler/platform/cpu_profiler.py +10 -4
  349. mindspore/profiler/platform/npu_profiler.py +30 -20
  350. mindspore/profiler/profiler.py +218 -154
  351. mindspore/profiler/profiler_action_controller.py +65 -77
  352. mindspore/profiler/profiler_interface.py +2 -2
  353. mindspore/profiler/schedule.py +10 -4
  354. mindspore/rewrite/common/config.py +1 -0
  355. mindspore/rewrite/common/namer.py +1 -0
  356. mindspore/rewrite/common/namespace.py +1 -0
  357. mindspore/rewrite/node/node.py +31 -11
  358. mindspore/rewrite/parsers/assign_parser.py +1 -1
  359. mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
  360. mindspore/run_check/_check_version.py +7 -10
  361. mindspore/runtime/__init__.py +8 -6
  362. mindspore/runtime/event.py +10 -4
  363. mindspore/runtime/executor.py +87 -45
  364. mindspore/runtime/memory.py +22 -30
  365. mindspore/runtime/thread_bind_core.py +299 -165
  366. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  367. mindspore/swresample-4.dll +0 -0
  368. mindspore/swscale-6.dll +0 -0
  369. mindspore/tbbmalloc.dll +0 -0
  370. mindspore/tinyxml2.dll +0 -0
  371. mindspore/train/_utils.py +9 -5
  372. mindspore/train/amp.py +43 -23
  373. mindspore/train/callback/__init__.py +5 -5
  374. mindspore/train/callback/_callback.py +2 -1
  375. mindspore/train/callback/_checkpoint.py +4 -14
  376. mindspore/train/callback/_flops_collector.py +11 -7
  377. mindspore/train/callback/_landscape.py +0 -1
  378. mindspore/train/callback/_train_fault_tolerance.py +72 -18
  379. mindspore/train/data_sink.py +15 -6
  380. mindspore/train/dataset_helper.py +14 -5
  381. mindspore/train/model.py +49 -47
  382. mindspore/train/serialization.py +168 -126
  383. mindspore/train/summary/summary_record.py +13 -2
  384. mindspore/train/train_thor/model_thor.py +2 -2
  385. mindspore/turbojpeg.dll +0 -0
  386. mindspore/utils/__init__.py +3 -2
  387. mindspore/utils/dryrun.py +0 -6
  388. mindspore/utils/runtime_execution_order_check.py +162 -78
  389. mindspore/utils/sdc_detect.py +68 -0
  390. mindspore/utils/utils.py +14 -17
  391. mindspore/vcmeta.dll +0 -0
  392. mindspore/vcruntime140.dll +0 -0
  393. mindspore/vcruntime140_1.dll +0 -0
  394. mindspore/version.py +1 -1
  395. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
  396. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/RECORD +400 -439
  397. mindspore/_deprecated/jit.py +0 -198
  398. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  399. mindspore/communication/_hccl_management.py +0 -297
  400. mindspore/experimental/es/embedding_service.py +0 -891
  401. mindspore/experimental/es/embedding_service_layer.py +0 -581
  402. mindspore/profiler/common/validator/__init__.py +0 -14
  403. mindspore/profiler/common/validator/validate_path.py +0 -84
  404. mindspore/profiler/parser/__init__.py +0 -14
  405. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  406. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  407. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  408. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  409. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  410. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  411. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  412. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  413. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  414. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  415. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  416. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  417. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  418. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  419. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  420. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  421. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  422. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  423. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  424. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  425. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  426. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  427. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  428. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  429. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  430. mindspore/profiler/parser/container.py +0 -229
  431. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  432. mindspore/profiler/parser/flops_parser.py +0 -531
  433. mindspore/profiler/parser/framework_enum.py +0 -111
  434. mindspore/profiler/parser/framework_parser.py +0 -464
  435. mindspore/profiler/parser/framework_struct.py +0 -61
  436. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  437. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  438. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  439. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  440. mindspore/profiler/parser/hccl_parser.py +0 -573
  441. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  442. mindspore/profiler/parser/integrator.py +0 -526
  443. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  444. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  445. mindspore/profiler/parser/minddata_parser.py +0 -186
  446. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  447. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  448. mindspore/profiler/parser/optime_parser.py +0 -250
  449. mindspore/profiler/parser/profiler_info.py +0 -213
  450. mindspore/profiler/parser/step_trace_parser.py +0 -666
  451. mindspore/utils/hooks.py +0 -81
  452. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  453. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
  454. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
  455. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
mindspore/nn/sparse/sparse.py

@@ -15,8 +15,8 @@
 """Sparse related tools."""
 from __future__ import absolute_import

+from mindspore import ops
 from mindspore import log as logger
-from mindspore.ops import operations as P
 from mindspore.nn.cell import Cell


@@ -76,7 +76,7 @@ class SparseToDense(Cell):
         logger.warning("'nn.SparseToDense' is deprecated from version 2.0 and will be removed in a future version. " +
                        "Please use 'COOTensor.to_dense()' instead.")
         super(SparseToDense, self).__init__()
-        self.sparse_to_dense = P.SparseToDense()
+        self.sparse_to_dense = ops.SparseToDense()

     def construct(self, sparse_tensor):
         return self.sparse_to_dense(sparse_tensor.indices,
@@ -141,7 +141,7 @@ class SparseTensorDenseMatmul(Cell):
         super(SparseTensorDenseMatmul, self).__init__()
         self.adj_st = adjoint_st
         self.adj_dt = adjoint_dt
-        self.sparse_dense_matmul = P.SparseTensorDenseMatmul(adjoint_st=self.adj_st, adjoint_dt=self.adj_dt)
+        self.sparse_dense_matmul = ops.SparseTensorDenseMatmul(adjoint_st=self.adj_st, adjoint_dt=self.adj_dt)

     def construct(self, indices, values, sparse_shape, dense):
         return self.sparse_dense_matmul(indices, values, sparse_shape, dense)
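The hunks above show the pattern that recurs through most of this release: internal code that imported the legacy operations alias (from mindspore.ops import operations as P) now goes through the unified mindspore.ops namespace. The deprecation warning in the first hunk points users at COOTensor.to_dense(); a minimal sketch of that replacement, assuming a standard MindSpore 2.x install:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    # COOTensor.to_dense() is the documented replacement for the
    # deprecated nn.SparseToDense cell warned about in the hunk above.
    indices = Tensor(np.array([[0, 1], [1, 2]], dtype=np.int64))
    values = Tensor(np.array([1.0, 2.0], dtype=np.float32))
    coo = ms.COOTensor(indices, values, (3, 4))
    dense = coo.to_dense()  # 3x4 dense tensor, nonzero at (0, 1) and (1, 2)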
mindspore/nn/wrap/cell_wrapper.py

@@ -23,7 +23,7 @@ from types import FunctionType, MethodType

 from mindspore import log as logger
 from mindspore.parallel._utils import _get_device_num, _get_gradients_mean,\
-    _get_parallel_mode, _get_enable_parallel_optimizer, _is_pynative_parallel
+    _get_parallel_mode, _get_enable_parallel_optimizer
 from mindspore.context import ParallelMode
 from mindspore import _checkparam as validator
 from mindspore import ops, nn
@@ -31,15 +31,12 @@ from mindspore.common import dtype as mstype
 from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.common.tensor import Tensor
 from mindspore.ops.primitive import _primexpr
-from mindspore.ops import composite as C
-from mindspore.ops import functional as F
-from mindspore.ops import operations as P
 from mindspore.ops.operations.comm_ops import _VirtualDataset
 from mindspore.nn.cell import Cell
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 from mindspore.utils import ExitByRequest

-_get_datatype = C.MultitypeFuncGraph("_get_datatype")
+_get_datatype = ops.MultitypeFuncGraph("_get_datatype")


 @_get_datatype.register("Tensor")
@@ -53,10 +50,10 @@ def _tensors_get_datatype(param):
     Returns:
         mstype, the datatype of parameter.
     """
-    return F.dtype(param)
+    return ops.dtype(param)


-_cast_datatype = C.MultitypeFuncGraph("_cast_datatype")
+_cast_datatype = ops.MultitypeFuncGraph("_cast_datatype")


 @_cast_datatype.register("TypeType", "Tensor")
@@ -71,7 +68,7 @@ def _tensors_cast_datatype(datatype, param):
     Returns:
         Tensor, the parameter after operation.
     """
-    return F.cast(param, datatype)
+    return ops.cast(param, datatype)


 class WithLossCell(Cell):
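_get_datatype and _cast_datatype above are MultitypeFuncGraph dispatchers, now built from the public ops namespace instead of ops.composite. A minimal sketch of the dispatch-and-map pattern they implement, assuming a standard MindSpore 2.x install (the "square" graph is illustrative):

    import numpy as np
    from mindspore import ops, Tensor

    # A MultitypeFuncGraph dispatches on the abstract type of its arguments,
    # the way _cast_datatype above dispatches on ("TypeType", "Tensor").
    square = ops.MultitypeFuncGraph("square")

    @square.register("Tensor")
    def _square_tensor(x):
        return x * x

    # HyperMap applies the dispatcher element-wise over a tuple of inputs,
    # mirroring how self.map_/self.hyper_map are used in these wrappers.
    hyper_map = ops.HyperMap()
    inputs = (Tensor(np.array([1.0, 2.0], np.float32)),
              Tensor(np.array([3.0], np.float32)))
    squared = hyper_map(square, inputs)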
@@ -195,7 +192,7 @@ class WithGradCell(Cell):
         self.network = network
         self.loss_fn = loss_fn
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation(get_by_list=True, sens_param=(sens is not None))
+        self.grad = ops.GradOperation(get_by_list=True, sens_param=(sens is not None))
         self.sens = sens
         if loss_fn is None:
             self.network_with_loss = network
@@ -303,7 +300,7 @@ class ForwardValueAndGrad(Cell):
         self.get_all = get_all
         self.get_by_list = get_by_list
         self.sens_param = sens_param
-        self.grad = C.GradOperation(get_all=self.get_all, get_by_list=self.get_by_list, sens_param=self.sens_param)
+        self.grad = ops.GradOperation(get_all=self.get_all, get_by_list=self.get_by_list, sens_param=self.sens_param)
         self._get_attr_from_cell(network)

     def construct(self, *inputs):
@@ -385,8 +382,8 @@ class TrainOneStepCell(Cell):
         self.network.set_grad()
         self.optimizer = optimizer
         self.weights = self.optimizer.parameters
-        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
-        self.grad_no_sens = C.GradOperation(get_by_list=True)
+        self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
+        self.grad_no_sens = ops.GradOperation(get_by_list=True)
         self.sens = sens
         if self.sens == 0:
             raise ValueError("The input argument of 'sens' can not be 0.")
@@ -400,8 +397,7 @@ class TrainOneStepCell(Cell):
         self.reducer_flag = False
         self.grad_reducer = nn.Identity()
         self.parallel_mode = _get_parallel_mode()
-        self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL) or \
-            _is_pynative_parallel()
+        self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
         if self.reducer_flag:
             self.mean = _get_gradients_mean()
             self.degree = _get_device_num()
@@ -428,12 +424,12 @@ class TrainOneStepCell(Cell):
         if not self.sense_flag:
             return self._no_sens_impl(*inputs)
         loss = self.network(*inputs)
-        sens = F.fill(loss.dtype, loss.shape, self.sens)
+        sens = ops.fill(loss.dtype, loss.shape, self.sens)
         grads = self.grad(self.network, self.weights)(*inputs, sens)
         grads = self.grad_reducer(grads)
         if self.use_graceful_exit:
             grads = self.graceful_exit.exit_by_request(grads, self.init_param, self.exit_param)
-        loss = F.depend(loss, self.optimizer(grads))
+        loss = ops.depend(loss, self.optimizer(grads))
         if self.return_grad:
             grad_with_param_name = {}
             for index, value in enumerate(grads):
@@ -448,7 +444,7 @@ class TrainOneStepCell(Cell):
         grads = self.grad_reducer(grads)
         if self.use_graceful_exit:
             grads = self.graceful_exit.exit_by_request(grads, self.init_param, self.exit_param)
-        loss = F.depend(loss, self.optimizer(grads))
+        loss = ops.depend(loss, self.optimizer(grads))
         if self.return_grad:
             grad_with_param_name = {}
             for index, value in enumerate(grads):
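The TrainOneStepCell changes are mechanical (C/F aliases replaced by ops) except for one behavioral tweak: reducer_flag no longer special-cases PyNative parallel, so the gradient reducer is attached purely based on the parallel mode. Public usage is unchanged; a minimal sketch, assuming a standard MindSpore 2.x install:

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor

    net = nn.Dense(16, 8)  # toy network
    net_with_loss = nn.WithLossCell(net, nn.MSELoss())
    optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
    # sens scales the initial gradient that ops.fill builds in construct above.
    train_step = nn.TrainOneStepCell(net_with_loss, optimizer, sens=1.0)

    data = Tensor(np.random.randn(4, 16).astype(np.float32))
    label = Tensor(np.random.randn(4, 8).astype(np.float32))
    loss = train_step(data, label)  # one forward/backward/update step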
@@ -496,7 +492,7 @@ class GetNextSingleOp(Cell):

     def __init__(self, dataset_types, dataset_shapes, queue_name):
         super(GetNextSingleOp, self).__init__()
-        self.get_next = P.GetNext(dataset_types, dataset_shapes, len(dataset_types), queue_name)
+        self.get_next = ops.GetNext(dataset_types, dataset_shapes, len(dataset_types), queue_name)

     def construct(self):
         return self.get_next()
@@ -533,22 +529,22 @@ class _VirtualDatasetCell(Cell):


 def _pipeline_clear_grad(accu_grad, grad):
-    accu_grad = F.depend(accu_grad, grad)
-    zeros = F.zeros_like(accu_grad)
-    return F.assign(accu_grad, zeros)
+    accu_grad = ops.depend(accu_grad, grad)
+    zeros = ops.zeros_like(accu_grad)
+    return ops.assign(accu_grad, zeros)

 def grad_scale(scale, grad):
     """grad_scale"""
     new_grad = scale * grad
     grad = ops.depend(grad, new_grad)
-    zeros = F.zeros_like(grad)
-    new_grad = ops.depend(new_grad, F.assign(grad, zeros))
+    zeros = ops.zeros_like(grad)
+    new_grad = ops.depend(new_grad, ops.assign(grad, zeros))
     return new_grad


 @_primexpr
 def _check_shape_value_on_axis_divided_by_target_value(input_shape, micro_size):
-    if F.isconstant(input_shape[0]) is False:
+    if ops.isconstant(input_shape[0]) is False:
         return
     if input_shape[0] % micro_size != 0:
         raise ValueError(f"For micro batch initialization, the 0th dimension shape of input({input_shape[0]}) must be "
@@ -564,9 +560,9 @@ class _MicroBatch(Cell):
     """
     def __init__(self, micro_size):
         super(_MicroBatch, self).__init__()
-        self.shape = P.Shape()
+        self.shape = ops.Shape()
         self.micro_size = micro_size
-        self.strided_slice = P.StridedSlice()
+        self.strided_slice = ops.StridedSlice()

     def construct(self, i, *inputs):
         """construct for _MicroBatch."""
@@ -628,7 +624,7 @@ class GradAccumulationCell(Cell):
             micro_input = _MicroBatch(micro_size)
             micro_input.strided_slice.add_prim_attr("grad_accu_num", micro_size)
             self.micro_inputs.append(micro_input)
-            self.add = P.Add().add_prim_attr("forward_end", i)
+            self.add = ops.Add().add_prim_attr("forward_end", i)
             self.add_list.append(self.add)
         self._get_attr_from_cell(network)

@@ -685,10 +681,10 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
         grads = self.grad_no_sens(self.network, self.weights)(*inputs)
         accu_grads = ops.depend(self.accu_grads, grads)
         if self.opt_shard:
-            grads = self.hyper_map(F.partial(grad_scale, self.sens), grads)
+            grads = self.hyper_map(ops.partial(grad_scale, self.sens), grads)
             succ = self.optimizer(grads)
         else:
-            accu_grads = self.hyper_map(F.partial(grad_scale, self.sens), accu_grads)
+            accu_grads = self.hyper_map(ops.partial(grad_scale, self.sens), accu_grads)
             succ = self.optimizer(accu_grads)
         loss = ops.depend(loss, succ)
         clear = self.hyper_map(_pipeline_clear_grad, accu_grads, grads)
@@ -794,8 +790,8 @@ class WithEvalCell(Cell):
     def construct(self, data, label):
         outputs = self._network(data)
         if self.add_cast_fp32:
-            label = F.mixed_precision_cast(mstype.float32, label)
-            outputs = F.cast(outputs, mstype.float32)
+            label = ops.mixed_precision_cast(mstype.float32, label)
+            outputs = ops.cast(outputs, mstype.float32)
         loss = self._loss_fn(outputs, label)
         return loss, outputs, label

@@ -845,7 +841,7 @@ class ParameterUpdate(Cell):
         self._param = param

     def construct(self, x):
-        F.assign(self._param, x)
+        ops.assign(self._param, x)
         return x


@@ -861,21 +857,21 @@ class _BroadCastCell(Cell):
         super(_BroadCastCell, self).__init__()
         from mindspore.communication.management import get_group_size, create_group
         from mindspore import context
-        self.map_ = C.Map()
+        self.map_ = ops.Map()
         self.params = tuple(params)
-        if context.get_context("device_target") == "Ascend" and context.get_context("mode") != context.PYNATIVE_MODE:
+        if context.get_context("device_target") == "Ascend":
             rank_list = [id for id in range(0, get_group_size())]
             create_group("BroadcastWorldGroup", rank_list)
-            self.broadcast = P.Broadcast(0, group="BroadcastWorldGroup")
+            self.broadcast = ops.Broadcast(0, group="BroadcastWorldGroup")
         else:
-            self.broadcast = P.Broadcast(0)
+            self.broadcast = ops.Broadcast(0)
         self.add_flags(skip_auto_parallel_compile=True)

     def construct(self):
-        datatypes = self.map_(F.partial(_get_datatype), self.params)
-        params = self.map_(F.partial(_cast_datatype, mstype.float32), self.params)
+        datatypes = self.map_(ops.partial(_get_datatype), self.params)
+        params = self.map_(ops.partial(_cast_datatype, mstype.float32), self.params)
         params = self.broadcast(params)
-        new_params = self.map_(F.partial(_cast_datatype), datatypes, params)
+        new_params = self.map_(ops.partial(_cast_datatype), datatypes, params)
         return new_params
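_pipeline_clear_grad and grad_scale above lean on ops.depend to order side effects inside a compiled graph: depend(a, b) returns a while guaranteeing b executes before anything that consumes the result. A minimal sketch of the idiom, assuming graph mode via ms.jit (names are illustrative):

    import numpy as np
    import mindspore as ms
    from mindspore import ops, Tensor, Parameter

    accu = Parameter(Tensor(np.array([5.0, 5.0], np.float32)), name="accu")

    @ms.jit
    def read_then_clear(grad):
        # The assign clears accu; depend ties it to grad so the clear is
        # not pruned from the graph and runs before grad's consumers.
        cleared = ops.assign(accu, ops.zeros_like(accu))
        return ops.depend(grad, cleared)

    out = read_then_clear(Tensor(np.array([1.0, 2.0], np.float32)))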
@@ -892,6 +888,8 @@ class PipelineCell(Cell):
         micro_size (int): MicroBatch size.
         stage_config (dict, optional): The stage configuration for each cell's execution in pipeline parallel.
             Default ``None``.
+        segment_config (dict, optional): The segment configuration for each cell's execution in pipeline parallel.
+            Default ``None``.

     Supported Platforms:
         ``Ascend`` ``GPU``
@@ -903,7 +901,7 @@ class PipelineCell(Cell):
         >>> net = LeNet5()
         >>> net = nn.PipelineCell(net, 4)
     """
-    def __init__(self, network, micro_size, stage_config=None):
+    def __init__(self, network, micro_size, stage_config=None, segment_config=None):
         super(PipelineCell, self).__init__(auto_prefix=False)
         self.network = network
         self.micro_inputs = nn.CellList()
@@ -921,7 +919,7 @@ class PipelineCell(Cell):
         for i in range(micro_size):
             micro_input = _MicroBatch(micro_size)
             self.micro_inputs.append(micro_input)
-            self.add = P.Add().add_prim_attr("pipeline_end", i)
+            self.add = ops.Add().add_prim_attr("pipeline_end", i)
             self.add_list.append(self.add)
         self._get_attr_from_cell(network)

@@ -959,6 +957,39 @@ class PipelineCell(Cell):
                 print(cell_name)
             raise KeyError("For 'PipelineCell', the argument 'stage_config' : {} is not "
                           "found in 'network' : {}".format(config_dict, network))
+        if segment_config is None:
+            return
+        self._config_segment(segment_config)
+
+
+    def _config_segment(self, segment_config=None):
+        """
+        Config segment num for cell.
+        """
+        config_dict = segment_config.copy()
+        for cell_name, cell in self.network.cells_and_names():
+            if cell_name in segment_config:
+                setattr(cell, "pipeline_segment", segment_config[cell_name])
+                del config_dict[cell_name]
+        if str(self.network) in segment_config:
+            setattr(self.network, "pipeline_segment", segment_config[str(self.network)])
+            del config_dict[str(self.network)]
+        # if there are any config elements left, print them
+        if config_dict:
+            for config_cell_name, config_segment_num in config_dict.items():
+                logger.error("pipeline_cell segment_config set pipeline_segment fail!")
+                logger.warning("config cell name:" + str(config_cell_name) +
+                               " config segment num:" + str(config_segment_num))
+            logger.warning("network:" + str(self.network))
+            logger.warning("cell name available:")
+            for cell_name, _ in self.network.cells_and_names():
+                logger.warning(cell_name)
+            raise KeyError("For 'PipelineCell', the argument 'segment_config' : {} is not "
+                           "found in 'network' : {}".format(config_dict, self.network))
+
+
+    def shard(self, in_strategy, out_strategy=None, parameter_plan=None, device="Ascend", level=0):
+        raise ValueError("For 'PipelineCell', no 'shard' on 'PipelineCell' is allowed.")

     def construct(self, *inputs):
         ret = None
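The new segment_config keyword mirrors stage_config: it maps cell names to a pipeline_segment attribute instead of pipeline_stage, and any name that does not match cells_and_names() raises a KeyError the same way. A hypothetical sketch (MyNet and the sub-cell names "backbone" and "head" are illustrative):

    import mindspore.nn as nn

    net = MyNet()  # hypothetical Cell with sub-cells named backbone/head
    pipeline_net = nn.PipelineCell(
        net, micro_size=4,
        stage_config={"backbone": 0, "head": 1},    # sets pipeline_stage
        segment_config={"backbone": 0, "head": 1},  # new in 2.7: sets pipeline_segment
    )

Note also that 2.7 makes calling shard() on a PipelineCell an explicit ValueError.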
@@ -1011,7 +1042,7 @@ class MicroBatchInterleaved(Cell):
         self.network = network
         self.interleave_num = interleave_num
         self.interleave_inputs = nn.CellList()
-        self.add = P.Add().add_prim_attr("micro_interleaved_add_flag", True)
+        self.add = ops.Add().add_prim_attr("micro_interleaved_add_flag", True)
         for _ in range(interleave_num):
             interleave_data = _MicroBatch(interleave_num)
             interleave_data.strided_slice.add_prim_attr("strided_slice_flag", True)
mindspore/nn/wrap/grad_reducer.py

@@ -20,7 +20,7 @@ from mindspore import log as logger
 from mindspore.nn.cell import Cell
 from mindspore.communication.management import GlobalComm, get_group_size
 from mindspore.common.sparse_tensor import RowTensorInner
-from mindspore.ops import functional as F, composite as C, operations as P
+from mindspore import ops
 from mindspore.ops.operations.comm_ops import AllReduce, AllGather
 from mindspore.parallel._auto_parallel_context import auto_parallel_context
 import mindspore.common.dtype as mstype
@@ -33,7 +33,7 @@ from mindspore.parallel._utils import _get_enable_parallel_optimizer
 __all__ = ['DistributedGradReducer']


-reduce_opt = C.MultitypeFuncGraph("reduce_opt")
+reduce_opt = ops.MultitypeFuncGraph("reduce_opt")


 def _init_allreduce_operators(length, split_indices, group=GlobalComm.WORLD_COMM_GROUP):
@@ -114,7 +114,7 @@ def _tensors_allreduce(degree, mean, allgather, allreduce, allreduce_filter, gra
     if allreduce_filter:
         grad = allreduce(grad)
         if mean:
-            grad = F.tensor_mul(grad, F.cast(degree, F.dtype(grad)))
+            grad = ops.tensor_mul(grad, ops.cast(degree, ops.dtype(grad)))
         return grad
     return grad

@@ -135,7 +135,7 @@ def _tensors_allreduce_post(degree, mean, allreduce_filter, grad):
     """
     if allreduce_filter:
         if mean:
-            grad = F.tensor_mul(grad, F.cast(degree, F.dtype(grad)))
+            grad = ops.tensor_mul(grad, ops.cast(degree, ops.dtype(grad)))
         return grad
     return grad

@@ -163,7 +163,7 @@ def _tensors_allreduce_ps(degree, mean, allgather, allreduce, allreduce_filter,
     if allreduce_filter:
         grad = allreduce(grad)
         if mean:
-            grad = F.tensor_mul(grad, F.cast(degree, F.dtype(grad)))
+            grad = ops.tensor_mul(grad, ops.cast(degree, ops.dtype(grad)))
         return grad
     return grad

@@ -189,7 +189,7 @@ def _tensors_allreduce_with_sparse(degree, mean, allgather, allreduce, allreduce
     indices = allgather(grad.indices)
     dout = allgather(grad.values)
     if mean:
-        dout = F.tensor_mul(dout, F.cast(degree, F.dtype(dout)))
+        dout = ops.tensor_mul(dout, ops.cast(degree, ops.dtype(dout)))
     grad = RowTensorInner(indices, dout, grad.dense_shape)
     return grad

@@ -219,12 +219,12 @@ def _tensors_allreduce_with_sparse_ps(degree, mean, allgather, allreduce, allred
     indices = allgather(grad.indices)
     dout = allgather(grad.values)
     if mean:
-        dout = F.tensor_mul(dout, F.cast(degree, F.dtype(dout)))
+        dout = ops.tensor_mul(dout, ops.cast(degree, ops.dtype(dout)))
     grad = RowTensorInner(indices, dout, grad.dense_shape)
     return grad


-_get_datatype = C.MultitypeFuncGraph("_get_datatype")
+_get_datatype = ops.MultitypeFuncGraph("_get_datatype")


 @_get_datatype.register("Tensor")
@@ -238,7 +238,7 @@ def _tensors_get_datatype(grad):
     Returns:
         mstype, the datatype of gradient.
     """
-    return F.dtype(grad)
+    return ops.dtype(grad)


 @_get_datatype.register("RowTensor")
@@ -252,10 +252,10 @@ def _tensors_get_datatype_with_sparse(grad):
     Returns:
         mstype, the datatype of gradient.
     """
-    return F.dtype(grad.values)
+    return ops.dtype(grad.values)


-_cast_datatype = C.MultitypeFuncGraph("_cast_datatype")
+_cast_datatype = ops.MultitypeFuncGraph("_cast_datatype")


 @_cast_datatype.register("TypeType", "Tensor")
@@ -270,7 +270,7 @@ def _tensors_cast_datatype(datatype, grad):
     Returns:
         Tensor, the gradient tensor after operation.
     """
-    return F.cast(grad, datatype)
+    return ops.cast(grad, datatype)


 @_cast_datatype.register("TypeType", "RowTensor")
@@ -285,7 +285,7 @@ def _tensors_cast_datatype_with_sparse(datatype, grad):
     Returns:
         RowTensor, the gradient after operation.
     """
-    dout = F.cast(grad.values, datatype)
+    dout = ops.cast(grad.values, datatype)
     return RowTensorInner(grad.indices, dout, grad.dense_shape)


@@ -361,7 +361,7 @@ class DistributedGradReducer(Cell):
     ...     def construct(self, *args):
     ...         weights = self.weights
     ...         loss = self.network(*args)
-    ...         sens = F.fill(ops.DType()(loss), ops.Shape()(loss), self.sens)
+    ...         sens = ops.fill(ops.DType()(loss), ops.Shape()(loss), self.sens)
     ...         grads = self.grad(self.network, weights)(*args, sens)
     ...         if self.reducer_flag:
     ...             # apply grad reducer on grads
@@ -394,8 +394,7 @@ class DistributedGradReducer(Cell):

     def __init__(self, parameters, mean=None, degree=None, fusion_type=1, group=GlobalComm.WORLD_COMM_GROUP):
         super(DistributedGradReducer, self).__init__(auto_prefix=False)
-        self._check_parallel_mode()
-        self.map_ = C.Map()
+        self.map_ = ops.Map()
         self.mean = mean
         if mean is None:
             self.mean = auto_parallel_context().get_gradients_mean()
@@ -443,54 +442,47 @@ class DistributedGradReducer(Cell):
         Returns:
             new_grads (Union[Tensor, tuple[Tensor]]), the gradient tensor or tuple after operation.
         """
-        datatypes = self.map_(F.partial(_get_datatype), grads)
-        grads = self.map_(F.partial(_cast_datatype, mstype.float32), grads)
+        datatypes = self.map_(ops.partial(_get_datatype), grads)
+        grads = self.map_(ops.partial(_cast_datatype, mstype.float32), grads)

         if self.split_fusion:
             if self.enable_parameter_server:
-                new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather),
+                new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather),
                                      self.op_list, self.allreduce_filter, grads, self.ps_parameters)
             else:
-                new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather),
+                new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather),
                                      self.op_list, self.allreduce_filter, grads)
         else:
             if self.enable_parameter_server:
-                new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather,
-                                               self.allreduce), self.allreduce_filter, grads, self.ps_parameters)
+                new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather,
+                                                 self.allreduce), self.allreduce_filter, grads, self.ps_parameters)
             else:
-                new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather,
-                                               self.allreduce), self.allreduce_filter, grads)
-        new_grad = self.map_(F.partial(_cast_datatype), datatypes, new_grad)
+                new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather,
+                                                 self.allreduce), self.allreduce_filter, grads)
+        new_grad = self.map_(ops.partial(_cast_datatype), datatypes, new_grad)
         return new_grad

-    def _check_parallel_mode(self):
-        """check parallel mode"""
-        parallel_mode = context.get_auto_parallel_context('parallel_mode')
-        if context.get_context('mode') == context.GRAPH_MODE and parallel_mode in (
-                context.ParallelMode.SEMI_AUTO_PARALLEL, context.ParallelMode.AUTO_PARALLEL):
-            raise RuntimeError("{} can not use DistributedGradReducer in graph mode".format(parallel_mode))

-
-grad_scale = C.MultitypeFuncGraph("grad_scale")
-shard_grad_scale = C.MultitypeFuncGraph("shard_grad_scale")
-reciprocal = P.Reciprocal()
+grad_scale = ops.MultitypeFuncGraph("grad_scale")
+shard_grad_scale = ops.MultitypeFuncGraph("shard_grad_scale")
+reciprocal = ops.Reciprocal()


 @grad_scale.register("Tensor", "Tensor", "Tensor")
 def tensor_grad_scale_pipeline(scale, grad, accu_grad):
-    accu_grad = F.depend(accu_grad, grad)
+    accu_grad = ops.depend(accu_grad, grad)
     new_grad = accu_grad * reciprocal(scale)
-    accu_grad = F.depend(accu_grad, new_grad)
-    zeros = F.tensor_mul(accu_grad, 0.0)
-    new_grad = F.depend(new_grad, F.assign(accu_grad, zeros))
+    accu_grad = ops.depend(accu_grad, new_grad)
+    zeros = ops.tensor_mul(accu_grad, 0.0)
+    new_grad = ops.depend(new_grad, ops.assign(accu_grad, zeros))
     return new_grad


 @shard_grad_scale.register("Tensor", "Tensor", "Tensor")
 def tensor_shard_grad_scale_pipeline(scale, grad, accu_grad):
     new_grad = grad * reciprocal(scale)
-    accu_grad = F.depend(accu_grad, new_grad)
-    new_grad = F.depend(new_grad, F.assign(accu_grad, F.zeros_like(accu_grad)))
+    accu_grad = ops.depend(accu_grad, new_grad)
+    new_grad = ops.depend(new_grad, ops.assign(accu_grad, ops.zeros_like(accu_grad)))
     return new_grad
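With _check_parallel_mode deleted, constructing a DistributedGradReducer no longer raises under SEMI_AUTO_PARALLEL or AUTO_PARALLEL in graph mode; behavior is governed only by the mean/degree/group arguments. A minimal data-parallel sketch, assuming a launched distributed job (e.g. via msrun or mpirun) and a network cell named net:

    from mindspore.communication import init, get_group_size
    import mindspore.nn as nn

    init()  # requires an initialized communication environment
    grad_reducer = nn.DistributedGradReducer(net.trainable_params(),
                                             mean=True, degree=get_group_size())
    # inside a training step, after computing grads:
    #     grads = grad_reducer(grads)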
@@ -563,7 +555,7 @@ class PipelineGradReducer(Cell):
     >>> net.layer3.pipeline_stage = 1
     >>> loss_fn = nn.CrossEntropyLoss()
     >>> optimizer = nn.SGD(net.trainable_params(), 1e-2)
-    >>> net_with_loss = nn.Pipeline(nn.WithLossCell(net, loss_fn), 2)
+    >>> net_with_loss = nn.PipelineCell(nn.WithLossCell(net, loss_fn), 2)
     >>> net_with_loss.set_train()
     >>> def forward_fn(inputs, target):
     ...     loss = net_with_loss(inputs, target)
@@ -587,12 +579,11 @@ class PipelineGradReducer(Cell):
     """
     def __init__(self, parameters, scale_sense=1.0, opt_shard=None):
         super(PipelineGradReducer, self).__init__(auto_prefix=False)
-        self._check_mode()
         self.accu_grads = parameters.clone(prefix="accu_grads", init="zeros")
         self.grad_reducer = Identity()
         self.degree = Tensor(1, mstype.float32)
         self.scale_sense = Parameter(scale_sense, name='scale_sense')
-        self.hyper_map = C.HyperMap()
+        self.hyper_map = ops.HyperMap()
         if opt_shard is None:
             self.opt_shard = _get_enable_parallel_optimizer()
         else:
@@ -603,15 +594,9 @@ class PipelineGradReducer(Cell):
         new_grads = None
         if self.opt_shard:
             grads = self.grad_reducer(grads)
-            new_grads = self.hyper_map(F.partial(shard_grad_scale, self.scale_sense * self.degree),
+            new_grads = self.hyper_map(ops.partial(shard_grad_scale, self.scale_sense * self.degree),
                                        grads, self.accu_grads)
         else:
             accu_grads = self.grad_reducer(self.accu_grads)
-            new_grads = self.hyper_map(F.partial(grad_scale, self.scale_sense * self.degree), grads, accu_grads)
+            new_grads = self.hyper_map(ops.partial(grad_scale, self.scale_sense * self.degree), grads, accu_grads)
         return new_grads
-
-    def _check_mode(self):
-        """check parallel mode"""
-        mode = context.get_context('mode')
-        if mode != context.GRAPH_MODE:
-            raise RuntimeError(f"PipelineGradReducer only support graph mode, but get {mode}")
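Similarly, the deleted _check_mode guard means PipelineGradReducer no longer hard-fails at construction time outside graph mode. Usage follows the docstring example above; a condensed sketch (forward_fn, inputs, target and optimizer as defined in that example):

    import mindspore.nn as nn
    from mindspore import ops

    pg_reducer = nn.PipelineGradReducer(optimizer.parameters)
    grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters)
    loss, grads = grad_fn(inputs, target)
    grads = pg_reducer(grads)  # scale and flush the accumulated grads
    optimizer(grads)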