mindspore 2.6.0__cp311-cp311-win_amd64.whl → 2.7.0rc1__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic. Click here for more details.

Files changed (403)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +1 -1
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +40 -9
  9. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  10. mindspore/_extends/optimize/cell_utils.py +96 -0
  11. mindspore/_extends/parse/__init__.py +2 -2
  12. mindspore/_extends/parse/compile_config.py +44 -22
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  14. mindspore/_extends/parse/parser.py +36 -61
  15. mindspore/_extends/parse/resources.py +39 -0
  16. mindspore/_extends/parse/standard_method.py +32 -13
  17. mindspore/_extends/parse/trope.py +8 -1
  18. mindspore/_extends/pijit/__init__.py +1 -2
  19. mindspore/amp.py +4 -4
  20. mindspore/atlprov.dll +0 -0
  21. mindspore/avcodec-59.dll +0 -0
  22. mindspore/avdevice-59.dll +0 -0
  23. mindspore/avfilter-8.dll +0 -0
  24. mindspore/avformat-59.dll +0 -0
  25. mindspore/avutil-57.dll +0 -0
  26. mindspore/boost/adasum.py +1 -1
  27. mindspore/boost/boost_cell_wrapper.py +4 -4
  28. mindspore/c1.dll +0 -0
  29. mindspore/c1xx.dll +0 -0
  30. mindspore/c2.dll +0 -0
  31. mindspore/common/__init__.py +27 -2
  32. mindspore/common/_grad_function.py +2 -1
  33. mindspore/common/_pijit_context.py +28 -7
  34. mindspore/common/_stub_tensor.py +1 -209
  35. mindspore/common/_tensor_cpp_method.py +1 -1
  36. mindspore/common/_tensor_docs.py +76 -15
  37. mindspore/common/api.py +193 -112
  38. mindspore/common/dtype.py +21 -11
  39. mindspore/common/dump.py +10 -15
  40. mindspore/common/generator.py +2 -3
  41. mindspore/common/hook_handle.py +11 -2
  42. mindspore/common/jit_config.py +1 -1
  43. mindspore/common/jit_trace.py +84 -105
  44. mindspore/common/parameter.py +26 -12
  45. mindspore/common/recompute.py +3 -3
  46. mindspore/common/sparse_tensor.py +0 -3
  47. mindspore/common/symbol.py +0 -1
  48. mindspore/common/tensor.py +48 -83
  49. mindspore/communication/_comm_helper.py +46 -4
  50. mindspore/communication/management.py +79 -7
  51. mindspore/context.py +38 -23
  52. mindspore/dataset/core/config.py +3 -3
  53. mindspore/dataset/engine/datasets.py +20 -7
  54. mindspore/dataset/engine/datasets_user_defined.py +32 -2
  55. mindspore/dataset/engine/iterators.py +2 -2
  56. mindspore/dataset/engine/obs/config_loader.py +2 -2
  57. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  58. mindspore/dataset/transforms/py_transforms.py +7 -3
  59. mindspore/dataset/transforms/transforms.py +7 -3
  60. mindspore/dataset/vision/validators.py +1 -0
  61. mindspore/device_context/ascend/device.py +1 -1
  62. mindspore/device_context/gpu/__init__.py +2 -2
  63. mindspore/device_context/gpu/device.py +1 -1
  64. mindspore/device_context/gpu/op_precision.py +4 -2
  65. mindspore/device_context/gpu/op_tuning.py +6 -3
  66. mindspore/device_manager.py +16 -9
  67. mindspore/dnnl.dll +0 -0
  68. mindspore/dpcmi.dll +0 -0
  69. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -5
  70. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  71. mindspore/experimental/optim/adadelta.py +13 -20
  72. mindspore/experimental/optim/adagrad.py +15 -22
  73. mindspore/experimental/optim/adam.py +17 -24
  74. mindspore/experimental/optim/adamax.py +14 -22
  75. mindspore/experimental/optim/adamw.py +28 -34
  76. mindspore/experimental/optim/asgd.py +15 -25
  77. mindspore/experimental/optim/lr_scheduler.py +27 -45
  78. mindspore/experimental/optim/nadam.py +14 -24
  79. mindspore/experimental/optim/optimizer.py +13 -23
  80. mindspore/experimental/optim/radam.py +18 -24
  81. mindspore/experimental/optim/rmsprop.py +14 -25
  82. mindspore/experimental/optim/rprop.py +15 -26
  83. mindspore/experimental/optim/sgd.py +9 -19
  84. mindspore/hal/__init__.py +4 -4
  85. mindspore/hal/contiguous_tensors_handle.py +2 -2
  86. mindspore/hal/memory.py +1 -0
  87. mindspore/include/api/cell.h +37 -1
  88. mindspore/include/api/delegate.h +10 -0
  89. mindspore/include/api/model.h +3 -0
  90. mindspore/include/api/types.h +2 -2
  91. mindspore/include/c_api/model_c.h +0 -58
  92. mindspore/include/c_api/tensor_c.h +0 -26
  93. mindspore/include/dataset/vision_ascend.h +1 -1
  94. mindspore/jpeg62.dll +0 -0
  95. mindspore/mindrecord/tools/cifar10.py +60 -11
  96. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  97. mindspore/mindspore_backend_common.dll +0 -0
  98. mindspore/mindspore_backend_manager.dll +0 -0
  99. mindspore/mindspore_common.dll +0 -0
  100. mindspore/mindspore_core.dll +0 -0
  101. mindspore/mindspore_cpu_res_manager.dll +0 -0
  102. mindspore/mindspore_dump.dll +0 -0
  103. mindspore/mindspore_frontend.dll +0 -0
  104. mindspore/mindspore_glog.dll +0 -0
  105. mindspore/mindspore_memory_pool.dll +0 -0
  106. mindspore/mindspore_ms_backend.dll +0 -0
  107. mindspore/mindspore_ops.dll +0 -0
  108. mindspore/mindspore_ops_host.dll +0 -0
  109. mindspore/mindspore_ops_kernel_common.dll +0 -0
  110. mindspore/mindspore_profiler.dll +0 -0
  111. mindspore/mindspore_pyboost.dll +0 -0
  112. mindspore/mindspore_pynative.dll +0 -0
  113. mindspore/mindspore_res_manager.dll +0 -0
  114. mindspore/mindspore_runtime_pipeline.dll +0 -0
  115. mindspore/mint/__init__.py +4 -44
  116. mindspore/mint/distributed/__init__.py +1 -0
  117. mindspore/mint/distributed/distributed.py +208 -5
  118. mindspore/mint/nn/__init__.py +1 -1
  119. mindspore/mint/nn/functional.py +53 -6
  120. mindspore/mint/nn/layer/_functions.py +164 -294
  121. mindspore/mint/nn/layer/activation.py +8 -6
  122. mindspore/mint/nn/layer/conv.py +122 -98
  123. mindspore/mint/nn/layer/normalization.py +8 -22
  124. mindspore/mint/optim/adam.py +19 -18
  125. mindspore/mint/optim/adamw.py +14 -8
  126. mindspore/mint/optim/sgd.py +5 -5
  127. mindspore/msobj140.dll +0 -0
  128. mindspore/mspdb140.dll +0 -0
  129. mindspore/mspdbcore.dll +0 -0
  130. mindspore/mspdbst.dll +0 -0
  131. mindspore/mspft140.dll +0 -0
  132. mindspore/msvcdis140.dll +0 -0
  133. mindspore/msvcp140_1.dll +0 -0
  134. mindspore/msvcp140_2.dll +0 -0
  135. mindspore/msvcp140_atomic_wait.dll +0 -0
  136. mindspore/msvcp140_codecvt_ids.dll +0 -0
  137. mindspore/nn/cell.py +325 -499
  138. mindspore/nn/grad/cell_grad.py +11 -12
  139. mindspore/nn/layer/activation.py +32 -34
  140. mindspore/nn/layer/basic.py +67 -64
  141. mindspore/nn/layer/channel_shuffle.py +4 -4
  142. mindspore/nn/layer/combined.py +4 -2
  143. mindspore/nn/layer/conv.py +86 -85
  144. mindspore/nn/layer/dense.py +9 -7
  145. mindspore/nn/layer/embedding.py +50 -52
  146. mindspore/nn/layer/image.py +37 -39
  147. mindspore/nn/layer/math.py +111 -112
  148. mindspore/nn/layer/normalization.py +56 -44
  149. mindspore/nn/layer/pooling.py +58 -63
  150. mindspore/nn/layer/rnn_cells.py +33 -33
  151. mindspore/nn/layer/rnns.py +56 -56
  152. mindspore/nn/layer/thor_layer.py +74 -73
  153. mindspore/nn/layer/transformer.py +11 -1
  154. mindspore/nn/learning_rate_schedule.py +20 -20
  155. mindspore/nn/loss/loss.py +79 -81
  156. mindspore/nn/optim/adam.py +1 -1
  157. mindspore/nn/optim/adasum.py +2 -2
  158. mindspore/nn/optim/optimizer.py +1 -1
  159. mindspore/nn/optim/thor.py +2 -2
  160. mindspore/nn/probability/distribution/exponential.py +2 -1
  161. mindspore/nn/probability/distribution/poisson.py +2 -1
  162. mindspore/nn/sparse/sparse.py +3 -3
  163. mindspore/nn/wrap/cell_wrapper.py +34 -37
  164. mindspore/nn/wrap/grad_reducer.py +37 -37
  165. mindspore/nn/wrap/loss_scale.py +72 -74
  166. mindspore/numpy/array_creations.py +5 -5
  167. mindspore/numpy/fft.py +1 -1
  168. mindspore/numpy/math_ops.py +1 -1
  169. mindspore/opencv_core452.dll +0 -0
  170. mindspore/opencv_imgcodecs452.dll +0 -0
  171. mindspore/opencv_imgproc452.dll +0 -0
  172. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  173. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  174. mindspore/ops/_vmap/vmap_array_ops.py +6 -13
  175. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  176. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +17 -8
  177. mindspore/ops/auto_generate/gen_extend_func.py +1 -51
  178. mindspore/ops/auto_generate/gen_ops_def.py +463 -257
  179. mindspore/ops/auto_generate/gen_ops_prim.py +1127 -885
  180. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  181. mindspore/ops/composite/__init__.py +10 -0
  182. mindspore/ops/composite/base.py +8 -4
  183. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  184. mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
  185. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  186. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  187. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  188. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  189. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  190. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  191. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  192. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  193. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  194. mindspore/ops/function/__init__.py +3 -1
  195. mindspore/ops/function/_add_attr_func.py +11 -6
  196. mindspore/ops/function/array_func.py +7 -94
  197. mindspore/ops/function/debug_func.py +4 -3
  198. mindspore/ops/function/grad/grad_func.py +1 -1
  199. mindspore/ops/function/math_func.py +21 -367
  200. mindspore/ops/function/nn_func.py +26 -41
  201. mindspore/ops/function/other_func.py +4 -1
  202. mindspore/ops/function/random_func.py +31 -4
  203. mindspore/ops/functional.py +0 -2
  204. mindspore/ops/functional_overload.py +463 -6
  205. mindspore/ops/op_info_register.py +21 -0
  206. mindspore/ops/operations/__init__.py +5 -2
  207. mindspore/ops/operations/_custom_ops_utils.py +675 -8
  208. mindspore/ops/operations/_inner_ops.py +3 -6
  209. mindspore/ops/operations/_sequence_ops.py +1 -1
  210. mindspore/ops/operations/comm_ops.py +185 -26
  211. mindspore/ops/operations/custom_ops.py +235 -172
  212. mindspore/ops/operations/debug_ops.py +55 -4
  213. mindspore/ops/operations/image_ops.py +13 -13
  214. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  215. mindspore/ops/operations/math_ops.py +3 -4
  216. mindspore/ops/operations/nn_ops.py +5 -6
  217. mindspore/ops/primitive.py +6 -10
  218. mindspore/ops/tensor_method.py +36 -4
  219. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  220. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  221. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  222. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  223. mindspore/ops_generate/common/base_generator.py +14 -0
  224. mindspore/ops_generate/common/gen_constants.py +7 -2
  225. mindspore/ops_generate/common/gen_utils.py +0 -19
  226. mindspore/ops_generate/common/op_proto.py +11 -4
  227. mindspore/ops_generate/common/template.py +88 -11
  228. mindspore/ops_generate/gen_ops.py +1 -1
  229. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  230. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  231. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  232. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  233. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  234. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  235. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  236. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  237. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  238. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  239. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  240. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  241. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  242. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  243. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  244. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  245. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  246. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  247. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  248. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  249. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  250. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  251. mindspore/parallel/_auto_parallel_context.py +4 -2
  252. mindspore/parallel/_cell_wrapper.py +106 -40
  253. mindspore/parallel/_parallel_serialization.py +1 -1
  254. mindspore/parallel/_ps_context.py +4 -6
  255. mindspore/parallel/_tensor.py +167 -12
  256. mindspore/parallel/_transformer/moe.py +1 -1
  257. mindspore/parallel/_transformer/transformer.py +13 -8
  258. mindspore/parallel/auto_parallel.py +12 -5
  259. mindspore/parallel/checkpoint_convert.py +3 -3
  260. mindspore/parallel/checkpoint_transform.py +3 -1
  261. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  262. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  263. mindspore/parallel/cluster/run.py +43 -4
  264. mindspore/parallel/function/__init__.py +8 -1
  265. mindspore/parallel/function/reshard_func.py +1 -1
  266. mindspore/parallel/nn/__init__.py +15 -2
  267. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  268. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  269. mindspore/parallel/shard.py +2 -2
  270. mindspore/parallel/transform_safetensors.py +462 -174
  271. mindspore/pgodb140.dll +0 -0
  272. mindspore/pgort140.dll +0 -0
  273. mindspore/profiler/__init__.py +2 -1
  274. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  275. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  276. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
  277. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  278. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  279. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  280. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  281. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  282. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  283. mindspore/profiler/analysis/task_manager.py +1 -1
  284. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  285. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  286. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  287. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  288. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  289. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  290. mindspore/profiler/common/constant.py +16 -0
  291. mindspore/profiler/common/profiler_context.py +25 -27
  292. mindspore/profiler/common/profiler_info.py +0 -16
  293. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  294. mindspore/profiler/common/profiler_output_path.py +23 -8
  295. mindspore/profiler/common/profiler_parameters.py +128 -35
  296. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  297. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  298. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  299. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  300. mindspore/profiler/dynamic_profiler.py +305 -314
  301. mindspore/profiler/envprofiler.py +12 -7
  302. mindspore/profiler/experimental_config.py +96 -6
  303. mindspore/profiler/mstx.py +33 -12
  304. mindspore/profiler/platform/__init__.py +2 -3
  305. mindspore/profiler/platform/npu_profiler.py +29 -19
  306. mindspore/profiler/profiler.py +35 -19
  307. mindspore/profiler/profiler_action_controller.py +64 -76
  308. mindspore/profiler/schedule.py +10 -4
  309. mindspore/rewrite/common/config.py +1 -0
  310. mindspore/rewrite/common/namer.py +1 -0
  311. mindspore/rewrite/common/namespace.py +1 -0
  312. mindspore/rewrite/node/node.py +31 -11
  313. mindspore/rewrite/parsers/assign_parser.py +1 -1
  314. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  315. mindspore/run_check/_check_version.py +7 -10
  316. mindspore/runtime/__init__.py +5 -5
  317. mindspore/runtime/event.py +10 -4
  318. mindspore/runtime/executor.py +60 -45
  319. mindspore/runtime/memory.py +21 -30
  320. mindspore/runtime/thread_bind_core.py +298 -164
  321. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  322. mindspore/swresample-4.dll +0 -0
  323. mindspore/swscale-6.dll +0 -0
  324. mindspore/tbbmalloc.dll +0 -0
  325. mindspore/tinyxml2.dll +0 -0
  326. mindspore/train/_utils.py +6 -2
  327. mindspore/train/amp.py +43 -20
  328. mindspore/train/callback/__init__.py +5 -5
  329. mindspore/train/callback/_checkpoint.py +3 -6
  330. mindspore/train/callback/_flops_collector.py +1 -1
  331. mindspore/train/callback/_landscape.py +0 -1
  332. mindspore/train/callback/_train_fault_tolerance.py +71 -13
  333. mindspore/train/data_sink.py +11 -2
  334. mindspore/train/dataset_helper.py +9 -0
  335. mindspore/train/model.py +51 -33
  336. mindspore/train/serialization.py +133 -111
  337. mindspore/train/summary/summary_record.py +13 -2
  338. mindspore/turbojpeg.dll +0 -0
  339. mindspore/utils/__init__.py +3 -2
  340. mindspore/utils/dryrun.py +0 -6
  341. mindspore/utils/runtime_execution_order_check.py +162 -78
  342. mindspore/utils/sdc_detect.py +68 -0
  343. mindspore/utils/utils.py +6 -9
  344. mindspore/vcmeta.dll +0 -0
  345. mindspore/vcruntime140.dll +0 -0
  346. mindspore/vcruntime140_1.dll +0 -0
  347. mindspore/version.py +1 -1
  348. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  349. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +352 -390
  350. mindspore/_deprecated/jit.py +0 -198
  351. mindspore/experimental/es/__init__.py +0 -22
  352. mindspore/experimental/es/embedding_service.py +0 -891
  353. mindspore/experimental/es/embedding_service_layer.py +0 -581
  354. mindspore/profiler/parser/__init__.py +0 -14
  355. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  356. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  357. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  358. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  359. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  360. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  361. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  362. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  363. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  364. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  365. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  366. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  367. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  368. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  369. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  370. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  371. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  372. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  373. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  374. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  375. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  376. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  377. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  378. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  379. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  380. mindspore/profiler/parser/container.py +0 -229
  381. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  382. mindspore/profiler/parser/flops_parser.py +0 -531
  383. mindspore/profiler/parser/framework_enum.py +0 -111
  384. mindspore/profiler/parser/framework_parser.py +0 -464
  385. mindspore/profiler/parser/framework_struct.py +0 -61
  386. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  387. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  388. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  389. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  390. mindspore/profiler/parser/hccl_parser.py +0 -573
  391. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  392. mindspore/profiler/parser/integrator.py +0 -526
  393. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  394. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  395. mindspore/profiler/parser/minddata_parser.py +0 -186
  396. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  397. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  398. mindspore/profiler/parser/optime_parser.py +0 -250
  399. mindspore/profiler/parser/profiler_info.py +0 -213
  400. mindspore/profiler/parser/step_trace_parser.py +0 -666
  401. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  402. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  403. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -31,15 +31,12 @@ from mindspore.common import dtype as mstype
31
31
  from mindspore.common.parameter import Parameter, ParameterTuple
32
32
  from mindspore.common.tensor import Tensor
33
33
  from mindspore.ops.primitive import _primexpr
34
- from mindspore.ops import composite as C
35
- from mindspore.ops import functional as F
36
- from mindspore.ops import operations as P
37
34
  from mindspore.ops.operations.comm_ops import _VirtualDataset
38
35
  from mindspore.nn.cell import Cell
39
36
  from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
40
37
  from mindspore.utils import ExitByRequest
41
38
 
42
- _get_datatype = C.MultitypeFuncGraph("_get_datatype")
39
+ _get_datatype = ops.MultitypeFuncGraph("_get_datatype")
43
40
 
44
41
 
45
42
  @_get_datatype.register("Tensor")
@@ -53,10 +50,10 @@ def _tensors_get_datatype(param):
53
50
  Returns:
54
51
  mstype, the datatype of parameter.
55
52
  """
56
- return F.dtype(param)
53
+ return ops.dtype(param)
57
54
 
58
55
 
59
- _cast_datatype = C.MultitypeFuncGraph("_cast_datatype")
56
+ _cast_datatype = ops.MultitypeFuncGraph("_cast_datatype")
60
57
 
61
58
 
62
59
  @_cast_datatype.register("TypeType", "Tensor")
@@ -71,7 +68,7 @@ def _tensors_cast_datatype(datatype, param):
71
68
  Returns:
72
69
  Tensor, the parameter after operation.
73
70
  """
74
- return F.cast(param, datatype)
71
+ return ops.cast(param, datatype)
75
72
 
76
73
 
77
74
  class WithLossCell(Cell):
@@ -195,7 +192,7 @@ class WithGradCell(Cell):
195
192
  self.network = network
196
193
  self.loss_fn = loss_fn
197
194
  self.weights = ParameterTuple(network.trainable_params())
198
- self.grad = C.GradOperation(get_by_list=True, sens_param=(sens is not None))
195
+ self.grad = ops.GradOperation(get_by_list=True, sens_param=(sens is not None))
199
196
  self.sens = sens
200
197
  if loss_fn is None:
201
198
  self.network_with_loss = network
@@ -303,7 +300,7 @@ class ForwardValueAndGrad(Cell):
303
300
  self.get_all = get_all
304
301
  self.get_by_list = get_by_list
305
302
  self.sens_param = sens_param
306
- self.grad = C.GradOperation(get_all=self.get_all, get_by_list=self.get_by_list, sens_param=self.sens_param)
303
+ self.grad = ops.GradOperation(get_all=self.get_all, get_by_list=self.get_by_list, sens_param=self.sens_param)
307
304
  self._get_attr_from_cell(network)
308
305
 
309
306
  def construct(self, *inputs):
@@ -385,8 +382,8 @@ class TrainOneStepCell(Cell):
385
382
  self.network.set_grad()
386
383
  self.optimizer = optimizer
387
384
  self.weights = self.optimizer.parameters
388
- self.grad = C.GradOperation(get_by_list=True, sens_param=True)
389
- self.grad_no_sens = C.GradOperation(get_by_list=True)
385
+ self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
386
+ self.grad_no_sens = ops.GradOperation(get_by_list=True)
390
387
  self.sens = sens
391
388
  if self.sens == 0:
392
389
  raise ValueError("The input argument of 'sens' can not be 0.")
@@ -428,12 +425,12 @@ class TrainOneStepCell(Cell):
428
425
  if not self.sense_flag:
429
426
  return self._no_sens_impl(*inputs)
430
427
  loss = self.network(*inputs)
431
- sens = F.fill(loss.dtype, loss.shape, self.sens)
428
+ sens = ops.fill(loss.dtype, loss.shape, self.sens)
432
429
  grads = self.grad(self.network, self.weights)(*inputs, sens)
433
430
  grads = self.grad_reducer(grads)
434
431
  if self.use_graceful_exit:
435
432
  grads = self.graceful_exit.exit_by_request(grads, self.init_param, self.exit_param)
436
- loss = F.depend(loss, self.optimizer(grads))
433
+ loss = ops.depend(loss, self.optimizer(grads))
437
434
  if self.return_grad:
438
435
  grad_with_param_name = {}
439
436
  for index, value in enumerate(grads):
@@ -448,7 +445,7 @@ class TrainOneStepCell(Cell):
448
445
  grads = self.grad_reducer(grads)
449
446
  if self.use_graceful_exit:
450
447
  grads = self.graceful_exit.exit_by_request(grads, self.init_param, self.exit_param)
451
- loss = F.depend(loss, self.optimizer(grads))
448
+ loss = ops.depend(loss, self.optimizer(grads))
452
449
  if self.return_grad:
453
450
  grad_with_param_name = {}
454
451
  for index, value in enumerate(grads):
@@ -496,7 +493,7 @@ class GetNextSingleOp(Cell):
496
493
 
497
494
  def __init__(self, dataset_types, dataset_shapes, queue_name):
498
495
  super(GetNextSingleOp, self).__init__()
499
- self.get_next = P.GetNext(dataset_types, dataset_shapes, len(dataset_types), queue_name)
496
+ self.get_next = ops.GetNext(dataset_types, dataset_shapes, len(dataset_types), queue_name)
500
497
 
501
498
  def construct(self):
502
499
  return self.get_next()
@@ -533,22 +530,22 @@ class _VirtualDatasetCell(Cell):
533
530
 
534
531
 
535
532
  def _pipeline_clear_grad(accu_grad, grad):
536
- accu_grad = F.depend(accu_grad, grad)
537
- zeros = F.zeros_like(accu_grad)
538
- return F.assign(accu_grad, zeros)
533
+ accu_grad = ops.depend(accu_grad, grad)
534
+ zeros = ops.zeros_like(accu_grad)
535
+ return ops.assign(accu_grad, zeros)
539
536
 
540
537
  def grad_scale(scale, grad):
541
538
  """grad_scale"""
542
539
  new_grad = scale * grad
543
540
  grad = ops.depend(grad, new_grad)
544
- zeros = F.zeros_like(grad)
545
- new_grad = ops.depend(new_grad, F.assign(grad, zeros))
541
+ zeros = ops.zeros_like(grad)
542
+ new_grad = ops.depend(new_grad, ops.assign(grad, zeros))
546
543
  return new_grad
547
544
 
548
545
 
549
546
  @_primexpr
550
547
  def _check_shape_value_on_axis_divided_by_target_value(input_shape, micro_size):
551
- if F.isconstant(input_shape[0]) is False:
548
+ if ops.isconstant(input_shape[0]) is False:
552
549
  return
553
550
  if input_shape[0] % micro_size != 0:
554
551
  raise ValueError(f"For micro batch initialization, the 0th dimension shape of input({input_shape[0]}) must be "
@@ -564,9 +561,9 @@ class _MicroBatch(Cell):
564
561
  """
565
562
  def __init__(self, micro_size):
566
563
  super(_MicroBatch, self).__init__()
567
- self.shape = P.Shape()
564
+ self.shape = ops.Shape()
568
565
  self.micro_size = micro_size
569
- self.strided_slice = P.StridedSlice()
566
+ self.strided_slice = ops.StridedSlice()
570
567
 
571
568
  def construct(self, i, *inputs):
572
569
  """construct for _MicroBatch."""
@@ -628,7 +625,7 @@ class GradAccumulationCell(Cell):
628
625
  micro_input = _MicroBatch(micro_size)
629
626
  micro_input.strided_slice.add_prim_attr("grad_accu_num", micro_size)
630
627
  self.micro_inputs.append(micro_input)
631
- self.add = P.Add().add_prim_attr("forward_end", i)
628
+ self.add = ops.Add().add_prim_attr("forward_end", i)
632
629
  self.add_list.append(self.add)
633
630
  self._get_attr_from_cell(network)
634
631
 
@@ -685,10 +682,10 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
685
682
  grads = self.grad_no_sens(self.network, self.weights)(*inputs)
686
683
  accu_grads = ops.depend(self.accu_grads, grads)
687
684
  if self.opt_shard:
688
- grads = self.hyper_map(F.partial(grad_scale, self.sens), grads)
685
+ grads = self.hyper_map(ops.partial(grad_scale, self.sens), grads)
689
686
  succ = self.optimizer(grads)
690
687
  else:
691
- accu_grads = self.hyper_map(F.partial(grad_scale, self.sens), accu_grads)
688
+ accu_grads = self.hyper_map(ops.partial(grad_scale, self.sens), accu_grads)
692
689
  succ = self.optimizer(accu_grads)
693
690
  loss = ops.depend(loss, succ)
694
691
  clear = self.hyper_map(_pipeline_clear_grad, accu_grads, grads)
@@ -794,8 +791,8 @@ class WithEvalCell(Cell):
794
791
  def construct(self, data, label):
795
792
  outputs = self._network(data)
796
793
  if self.add_cast_fp32:
797
- label = F.mixed_precision_cast(mstype.float32, label)
798
- outputs = F.cast(outputs, mstype.float32)
794
+ label = ops.mixed_precision_cast(mstype.float32, label)
795
+ outputs = ops.cast(outputs, mstype.float32)
799
796
  loss = self._loss_fn(outputs, label)
800
797
  return loss, outputs, label
801
798
 
@@ -845,7 +842,7 @@ class ParameterUpdate(Cell):
845
842
  self._param = param
846
843
 
847
844
  def construct(self, x):
848
- F.assign(self._param, x)
845
+ ops.assign(self._param, x)
849
846
  return x
850
847
 
851
848
 
@@ -861,21 +858,21 @@ class _BroadCastCell(Cell):
861
858
  super(_BroadCastCell, self).__init__()
862
859
  from mindspore.communication.management import get_group_size, create_group
863
860
  from mindspore import context
864
- self.map_ = C.Map()
861
+ self.map_ = ops.Map()
865
862
  self.params = tuple(params)
866
863
  if context.get_context("device_target") == "Ascend" and context.get_context("mode") != context.PYNATIVE_MODE:
867
864
  rank_list = [id for id in range(0, get_group_size())]
868
865
  create_group("BroadcastWorldGroup", rank_list)
869
- self.broadcast = P.Broadcast(0, group="BroadcastWorldGroup")
866
+ self.broadcast = ops.Broadcast(0, group="BroadcastWorldGroup")
870
867
  else:
871
- self.broadcast = P.Broadcast(0)
868
+ self.broadcast = ops.Broadcast(0)
872
869
  self.add_flags(skip_auto_parallel_compile=True)
873
870
 
874
871
  def construct(self):
875
- datatypes = self.map_(F.partial(_get_datatype), self.params)
876
- params = self.map_(F.partial(_cast_datatype, mstype.float32), self.params)
872
+ datatypes = self.map_(ops.partial(_get_datatype), self.params)
873
+ params = self.map_(ops.partial(_cast_datatype, mstype.float32), self.params)
877
874
  params = self.broadcast(params)
878
- new_params = self.map_(F.partial(_cast_datatype), datatypes, params)
875
+ new_params = self.map_(ops.partial(_cast_datatype), datatypes, params)
879
876
  return new_params
880
877
 
881
878
 
@@ -921,7 +918,7 @@ class PipelineCell(Cell):
921
918
  for i in range(micro_size):
922
919
  micro_input = _MicroBatch(micro_size)
923
920
  self.micro_inputs.append(micro_input)
924
- self.add = P.Add().add_prim_attr("pipeline_end", i)
921
+ self.add = ops.Add().add_prim_attr("pipeline_end", i)
925
922
  self.add_list.append(self.add)
926
923
  self._get_attr_from_cell(network)
927
924
 
@@ -1011,7 +1008,7 @@ class MicroBatchInterleaved(Cell):
1011
1008
  self.network = network
1012
1009
  self.interleave_num = interleave_num
1013
1010
  self.interleave_inputs = nn.CellList()
1014
- self.add = P.Add().add_prim_attr("micro_interleaved_add_flag", True)
1011
+ self.add = ops.Add().add_prim_attr("micro_interleaved_add_flag", True)
1015
1012
  for _ in range(interleave_num):
1016
1013
  interleave_data = _MicroBatch(interleave_num)
1017
1014
  interleave_data.strided_slice.add_prim_attr("strided_slice_flag", True)
@@ -20,7 +20,7 @@ from mindspore import log as logger
20
20
  from mindspore.nn.cell import Cell
21
21
  from mindspore.communication.management import GlobalComm, get_group_size
22
22
  from mindspore.common.sparse_tensor import RowTensorInner
23
- from mindspore.ops import functional as F, composite as C, operations as P
23
+ from mindspore import ops
24
24
  from mindspore.ops.operations.comm_ops import AllReduce, AllGather
25
25
  from mindspore.parallel._auto_parallel_context import auto_parallel_context
26
26
  import mindspore.common.dtype as mstype
@@ -33,7 +33,7 @@ from mindspore.parallel._utils import _get_enable_parallel_optimizer
33
33
  __all__ = ['DistributedGradReducer']
34
34
 
35
35
 
36
- reduce_opt = C.MultitypeFuncGraph("reduce_opt")
36
+ reduce_opt = ops.MultitypeFuncGraph("reduce_opt")
37
37
 
38
38
 
39
39
  def _init_allreduce_operators(length, split_indices, group=GlobalComm.WORLD_COMM_GROUP):
@@ -114,7 +114,7 @@ def _tensors_allreduce(degree, mean, allgather, allreduce, allreduce_filter, gra
114
114
  if allreduce_filter:
115
115
  grad = allreduce(grad)
116
116
  if mean:
117
- grad = F.tensor_mul(grad, F.cast(degree, F.dtype(grad)))
117
+ grad = ops.tensor_mul(grad, ops.cast(degree, ops.dtype(grad)))
118
118
  return grad
119
119
  return grad
120
120
 
@@ -135,7 +135,7 @@ def _tensors_allreduce_post(degree, mean, allreduce_filter, grad):
135
135
  """
136
136
  if allreduce_filter:
137
137
  if mean:
138
- grad = F.tensor_mul(grad, F.cast(degree, F.dtype(grad)))
138
+ grad = ops.tensor_mul(grad, ops.cast(degree, ops.dtype(grad)))
139
139
  return grad
140
140
  return grad
141
141
 
@@ -163,7 +163,7 @@ def _tensors_allreduce_ps(degree, mean, allgather, allreduce, allreduce_filter,
163
163
  if allreduce_filter:
164
164
  grad = allreduce(grad)
165
165
  if mean:
166
- grad = F.tensor_mul(grad, F.cast(degree, F.dtype(grad)))
166
+ grad = ops.tensor_mul(grad, ops.cast(degree, ops.dtype(grad)))
167
167
  return grad
168
168
  return grad
169
169
 
@@ -189,7 +189,7 @@ def _tensors_allreduce_with_sparse(degree, mean, allgather, allreduce, allreduce
189
189
  indices = allgather(grad.indices)
190
190
  dout = allgather(grad.values)
191
191
  if mean:
192
- dout = F.tensor_mul(dout, F.cast(degree, F.dtype(dout)))
192
+ dout = ops.tensor_mul(dout, ops.cast(degree, ops.dtype(dout)))
193
193
  grad = RowTensorInner(indices, dout, grad.dense_shape)
194
194
  return grad
195
195
 
@@ -219,12 +219,12 @@ def _tensors_allreduce_with_sparse_ps(degree, mean, allgather, allreduce, allred
219
219
  indices = allgather(grad.indices)
220
220
  dout = allgather(grad.values)
221
221
  if mean:
222
- dout = F.tensor_mul(dout, F.cast(degree, F.dtype(dout)))
222
+ dout = ops.tensor_mul(dout, ops.cast(degree, ops.dtype(dout)))
223
223
  grad = RowTensorInner(indices, dout, grad.dense_shape)
224
224
  return grad
225
225
 
226
226
 
227
- _get_datatype = C.MultitypeFuncGraph("_get_datatype")
227
+ _get_datatype = ops.MultitypeFuncGraph("_get_datatype")
228
228
 
229
229
 
230
230
  @_get_datatype.register("Tensor")
@@ -238,7 +238,7 @@ def _tensors_get_datatype(grad):
238
238
  Returns:
239
239
  mstype, the datatype of gradient.
240
240
  """
241
- return F.dtype(grad)
241
+ return ops.dtype(grad)
242
242
 
243
243
 
244
244
  @_get_datatype.register("RowTensor")
@@ -252,10 +252,10 @@ def _tensors_get_datatype_with_sparse(grad):
252
252
  Returns:
253
253
  mstype, the datatype of gradient.
254
254
  """
255
- return F.dtype(grad.values)
255
+ return ops.dtype(grad.values)
256
256
 
257
257
 
258
- _cast_datatype = C.MultitypeFuncGraph("_cast_datatype")
258
+ _cast_datatype = ops.MultitypeFuncGraph("_cast_datatype")
259
259
 
260
260
 
261
261
  @_cast_datatype.register("TypeType", "Tensor")
@@ -270,7 +270,7 @@ def _tensors_cast_datatype(datatype, grad):
270
270
  Returns:
271
271
  Tensor, the gradient tensor after operation.
272
272
  """
273
- return F.cast(grad, datatype)
273
+ return ops.cast(grad, datatype)
274
274
 
275
275
 
276
276
  @_cast_datatype.register("TypeType", "RowTensor")
@@ -285,7 +285,7 @@ def _tensors_cast_datatype_with_sparse(datatype, grad):
285
285
  Returns:
286
286
  RowTensor, the gradient after operation.
287
287
  """
288
- dout = F.cast(grad.values, datatype)
288
+ dout = ops.cast(grad.values, datatype)
289
289
  return RowTensorInner(grad.indices, dout, grad.dense_shape)
290
290
 
291
291
 
@@ -361,7 +361,7 @@ class DistributedGradReducer(Cell):
361
361
  ... def construct(self, *args):
362
362
  ... weights = self.weights
363
363
  ... loss = self.network(*args)
364
- ... sens = F.fill(ops.DType()(loss), ops.Shape()(loss), self.sens)
364
+ ... sens = ops.fill(ops.DType()(loss), ops.Shape()(loss), self.sens)
365
365
  ... grads = self.grad(self.network, weights)(*args, sens)
366
366
  ... if self.reducer_flag:
367
367
  ... # apply grad reducer on grads
@@ -395,7 +395,7 @@ class DistributedGradReducer(Cell):
395
395
  def __init__(self, parameters, mean=None, degree=None, fusion_type=1, group=GlobalComm.WORLD_COMM_GROUP):
396
396
  super(DistributedGradReducer, self).__init__(auto_prefix=False)
397
397
  self._check_parallel_mode()
398
- self.map_ = C.Map()
398
+ self.map_ = ops.Map()
399
399
  self.mean = mean
400
400
  if mean is None:
401
401
  self.mean = auto_parallel_context().get_gradients_mean()
@@ -443,24 +443,24 @@ class DistributedGradReducer(Cell):
443
443
  Returns:
444
444
  new_grads (Union[Tensor, tuple[Tensor]]), the gradient tensor or tuple after operation.
445
445
  """
446
- datatypes = self.map_(F.partial(_get_datatype), grads)
447
- grads = self.map_(F.partial(_cast_datatype, mstype.float32), grads)
446
+ datatypes = self.map_(ops.partial(_get_datatype), grads)
447
+ grads = self.map_(ops.partial(_cast_datatype, mstype.float32), grads)
448
448
 
449
449
  if self.split_fusion:
450
450
  if self.enable_parameter_server:
451
- new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather),
451
+ new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather),
452
452
  self.op_list, self.allreduce_filter, grads, self.ps_parameters)
453
453
  else:
454
- new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather),
454
+ new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather),
455
455
  self.op_list, self.allreduce_filter, grads)
456
456
  else:
457
457
  if self.enable_parameter_server:
458
- new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather,
459
- self.allreduce), self.allreduce_filter, grads, self.ps_parameters)
458
+ new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather,
459
+ self.allreduce), self.allreduce_filter, grads, self.ps_parameters)
460
460
  else:
461
- new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather,
462
- self.allreduce), self.allreduce_filter, grads)
463
- new_grad = self.map_(F.partial(_cast_datatype), datatypes, new_grad)
461
+ new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather,
462
+ self.allreduce), self.allreduce_filter, grads)
463
+ new_grad = self.map_(ops.partial(_cast_datatype), datatypes, new_grad)
464
464
  return new_grad
465
465
 
466
466
  def _check_parallel_mode(self):
@@ -471,26 +471,26 @@ class DistributedGradReducer(Cell):
471
471
  raise RuntimeError("{} can not use DistributedGradReducer in graph mode".format(parallel_mode))
472
472
 
473
473
 
474
- grad_scale = C.MultitypeFuncGraph("grad_scale")
475
- shard_grad_scale = C.MultitypeFuncGraph("shard_grad_scale")
476
- reciprocal = P.Reciprocal()
474
+ grad_scale = ops.MultitypeFuncGraph("grad_scale")
475
+ shard_grad_scale = ops.MultitypeFuncGraph("shard_grad_scale")
476
+ reciprocal = ops.Reciprocal()
477
477
 
478
478
 
479
479
  @grad_scale.register("Tensor", "Tensor", "Tensor")
480
480
  def tensor_grad_scale_pipeline(scale, grad, accu_grad):
481
- accu_grad = F.depend(accu_grad, grad)
481
+ accu_grad = ops.depend(accu_grad, grad)
482
482
  new_grad = accu_grad * reciprocal(scale)
483
- accu_grad = F.depend(accu_grad, new_grad)
484
- zeros = F.tensor_mul(accu_grad, 0.0)
485
- new_grad = F.depend(new_grad, F.assign(accu_grad, zeros))
483
+ accu_grad = ops.depend(accu_grad, new_grad)
484
+ zeros = ops.tensor_mul(accu_grad, 0.0)
485
+ new_grad = ops.depend(new_grad, ops.assign(accu_grad, zeros))
486
486
  return new_grad
487
487
 
488
488
 
489
489
  @shard_grad_scale.register("Tensor", "Tensor", "Tensor")
490
490
  def tensor_shard_grad_scale_pipeline(scale, grad, accu_grad):
491
491
  new_grad = grad * reciprocal(scale)
492
- accu_grad = F.depend(accu_grad, new_grad)
493
- new_grad = F.depend(new_grad, F.assign(accu_grad, F.zeros_like(accu_grad)))
492
+ accu_grad = ops.depend(accu_grad, new_grad)
493
+ new_grad = ops.depend(new_grad, ops.assign(accu_grad, ops.zeros_like(accu_grad)))
494
494
  return new_grad
495
495
 
496
496
 
@@ -563,7 +563,7 @@ class PipelineGradReducer(Cell):
563
563
  >>> net.layer3.pipeline_stage = 1
564
564
  >>> loss_fn = nn.CrossEntropyLoss()
565
565
  >>> optimizer = nn.SGD(net.trainable_params(), 1e-2)
566
- >>> net_with_loss = nn.Pipeline(nn.WithLossCell(net, loss_fn), 2)
566
+ >>> net_with_loss = nn.PipelineCell(nn.WithLossCell(net, loss_fn), 2)
567
567
  >>> net_with_loss.set_train()
568
568
  >>> def forward_fn(inputs, target):
569
569
  ... loss = net_with_loss(inputs, target)
@@ -592,7 +592,7 @@ class PipelineGradReducer(Cell):
592
592
  self.grad_reducer = Identity()
593
593
  self.degree = Tensor(1, mstype.float32)
594
594
  self.scale_sense = Parameter(scale_sense, name='scale_sense')
595
- self.hyper_map = C.HyperMap()
595
+ self.hyper_map = ops.HyperMap()
596
596
  if opt_shard is None:
597
597
  self.opt_shard = _get_enable_parallel_optimizer()
598
598
  else:
@@ -603,11 +603,11 @@ class PipelineGradReducer(Cell):
603
603
  new_grads = None
604
604
  if self.opt_shard:
605
605
  grads = self.grad_reducer(grads)
606
- new_grads = self.hyper_map(F.partial(shard_grad_scale, self.scale_sense * self.degree),
606
+ new_grads = self.hyper_map(ops.partial(shard_grad_scale, self.scale_sense * self.degree),
607
607
  grads, self.accu_grads)
608
608
  else:
609
609
  accu_grads = self.grad_reducer(self.accu_grads)
610
- new_grads = self.hyper_map(F.partial(grad_scale, self.scale_sense * self.degree), grads, accu_grads)
610
+ new_grads = self.hyper_map(ops.partial(grad_scale, self.scale_sense * self.degree), grads, accu_grads)
611
611
  return new_grads
612
612
 
613
613
  def _check_mode(self):