mindspore 2.6.0__cp311-cp311-win_amd64.whl → 2.7.0rc1__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore has been flagged as potentially problematic. See the package registry's advisory page for more details.

Files changed (403)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +1 -1
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +40 -9
  9. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  10. mindspore/_extends/optimize/cell_utils.py +96 -0
  11. mindspore/_extends/parse/__init__.py +2 -2
  12. mindspore/_extends/parse/compile_config.py +44 -22
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  14. mindspore/_extends/parse/parser.py +36 -61
  15. mindspore/_extends/parse/resources.py +39 -0
  16. mindspore/_extends/parse/standard_method.py +32 -13
  17. mindspore/_extends/parse/trope.py +8 -1
  18. mindspore/_extends/pijit/__init__.py +1 -2
  19. mindspore/amp.py +4 -4
  20. mindspore/atlprov.dll +0 -0
  21. mindspore/avcodec-59.dll +0 -0
  22. mindspore/avdevice-59.dll +0 -0
  23. mindspore/avfilter-8.dll +0 -0
  24. mindspore/avformat-59.dll +0 -0
  25. mindspore/avutil-57.dll +0 -0
  26. mindspore/boost/adasum.py +1 -1
  27. mindspore/boost/boost_cell_wrapper.py +4 -4
  28. mindspore/c1.dll +0 -0
  29. mindspore/c1xx.dll +0 -0
  30. mindspore/c2.dll +0 -0
  31. mindspore/common/__init__.py +27 -2
  32. mindspore/common/_grad_function.py +2 -1
  33. mindspore/common/_pijit_context.py +28 -7
  34. mindspore/common/_stub_tensor.py +1 -209
  35. mindspore/common/_tensor_cpp_method.py +1 -1
  36. mindspore/common/_tensor_docs.py +76 -15
  37. mindspore/common/api.py +193 -112
  38. mindspore/common/dtype.py +21 -11
  39. mindspore/common/dump.py +10 -15
  40. mindspore/common/generator.py +2 -3
  41. mindspore/common/hook_handle.py +11 -2
  42. mindspore/common/jit_config.py +1 -1
  43. mindspore/common/jit_trace.py +84 -105
  44. mindspore/common/parameter.py +26 -12
  45. mindspore/common/recompute.py +3 -3
  46. mindspore/common/sparse_tensor.py +0 -3
  47. mindspore/common/symbol.py +0 -1
  48. mindspore/common/tensor.py +48 -83
  49. mindspore/communication/_comm_helper.py +46 -4
  50. mindspore/communication/management.py +79 -7
  51. mindspore/context.py +38 -23
  52. mindspore/dataset/core/config.py +3 -3
  53. mindspore/dataset/engine/datasets.py +20 -7
  54. mindspore/dataset/engine/datasets_user_defined.py +32 -2
  55. mindspore/dataset/engine/iterators.py +2 -2
  56. mindspore/dataset/engine/obs/config_loader.py +2 -2
  57. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  58. mindspore/dataset/transforms/py_transforms.py +7 -3
  59. mindspore/dataset/transforms/transforms.py +7 -3
  60. mindspore/dataset/vision/validators.py +1 -0
  61. mindspore/device_context/ascend/device.py +1 -1
  62. mindspore/device_context/gpu/__init__.py +2 -2
  63. mindspore/device_context/gpu/device.py +1 -1
  64. mindspore/device_context/gpu/op_precision.py +4 -2
  65. mindspore/device_context/gpu/op_tuning.py +6 -3
  66. mindspore/device_manager.py +16 -9
  67. mindspore/dnnl.dll +0 -0
  68. mindspore/dpcmi.dll +0 -0
  69. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -5
  70. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  71. mindspore/experimental/optim/adadelta.py +13 -20
  72. mindspore/experimental/optim/adagrad.py +15 -22
  73. mindspore/experimental/optim/adam.py +17 -24
  74. mindspore/experimental/optim/adamax.py +14 -22
  75. mindspore/experimental/optim/adamw.py +28 -34
  76. mindspore/experimental/optim/asgd.py +15 -25
  77. mindspore/experimental/optim/lr_scheduler.py +27 -45
  78. mindspore/experimental/optim/nadam.py +14 -24
  79. mindspore/experimental/optim/optimizer.py +13 -23
  80. mindspore/experimental/optim/radam.py +18 -24
  81. mindspore/experimental/optim/rmsprop.py +14 -25
  82. mindspore/experimental/optim/rprop.py +15 -26
  83. mindspore/experimental/optim/sgd.py +9 -19
  84. mindspore/hal/__init__.py +4 -4
  85. mindspore/hal/contiguous_tensors_handle.py +2 -2
  86. mindspore/hal/memory.py +1 -0
  87. mindspore/include/api/cell.h +37 -1
  88. mindspore/include/api/delegate.h +10 -0
  89. mindspore/include/api/model.h +3 -0
  90. mindspore/include/api/types.h +2 -2
  91. mindspore/include/c_api/model_c.h +0 -58
  92. mindspore/include/c_api/tensor_c.h +0 -26
  93. mindspore/include/dataset/vision_ascend.h +1 -1
  94. mindspore/jpeg62.dll +0 -0
  95. mindspore/mindrecord/tools/cifar10.py +60 -11
  96. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  97. mindspore/mindspore_backend_common.dll +0 -0
  98. mindspore/mindspore_backend_manager.dll +0 -0
  99. mindspore/mindspore_common.dll +0 -0
  100. mindspore/mindspore_core.dll +0 -0
  101. mindspore/mindspore_cpu_res_manager.dll +0 -0
  102. mindspore/mindspore_dump.dll +0 -0
  103. mindspore/mindspore_frontend.dll +0 -0
  104. mindspore/mindspore_glog.dll +0 -0
  105. mindspore/mindspore_memory_pool.dll +0 -0
  106. mindspore/mindspore_ms_backend.dll +0 -0
  107. mindspore/mindspore_ops.dll +0 -0
  108. mindspore/mindspore_ops_host.dll +0 -0
  109. mindspore/mindspore_ops_kernel_common.dll +0 -0
  110. mindspore/mindspore_profiler.dll +0 -0
  111. mindspore/mindspore_pyboost.dll +0 -0
  112. mindspore/mindspore_pynative.dll +0 -0
  113. mindspore/mindspore_res_manager.dll +0 -0
  114. mindspore/mindspore_runtime_pipeline.dll +0 -0
  115. mindspore/mint/__init__.py +4 -44
  116. mindspore/mint/distributed/__init__.py +1 -0
  117. mindspore/mint/distributed/distributed.py +208 -5
  118. mindspore/mint/nn/__init__.py +1 -1
  119. mindspore/mint/nn/functional.py +53 -6
  120. mindspore/mint/nn/layer/_functions.py +164 -294
  121. mindspore/mint/nn/layer/activation.py +8 -6
  122. mindspore/mint/nn/layer/conv.py +122 -98
  123. mindspore/mint/nn/layer/normalization.py +8 -22
  124. mindspore/mint/optim/adam.py +19 -18
  125. mindspore/mint/optim/adamw.py +14 -8
  126. mindspore/mint/optim/sgd.py +5 -5
  127. mindspore/msobj140.dll +0 -0
  128. mindspore/mspdb140.dll +0 -0
  129. mindspore/mspdbcore.dll +0 -0
  130. mindspore/mspdbst.dll +0 -0
  131. mindspore/mspft140.dll +0 -0
  132. mindspore/msvcdis140.dll +0 -0
  133. mindspore/msvcp140_1.dll +0 -0
  134. mindspore/msvcp140_2.dll +0 -0
  135. mindspore/msvcp140_atomic_wait.dll +0 -0
  136. mindspore/msvcp140_codecvt_ids.dll +0 -0
  137. mindspore/nn/cell.py +325 -499
  138. mindspore/nn/grad/cell_grad.py +11 -12
  139. mindspore/nn/layer/activation.py +32 -34
  140. mindspore/nn/layer/basic.py +67 -64
  141. mindspore/nn/layer/channel_shuffle.py +4 -4
  142. mindspore/nn/layer/combined.py +4 -2
  143. mindspore/nn/layer/conv.py +86 -85
  144. mindspore/nn/layer/dense.py +9 -7
  145. mindspore/nn/layer/embedding.py +50 -52
  146. mindspore/nn/layer/image.py +37 -39
  147. mindspore/nn/layer/math.py +111 -112
  148. mindspore/nn/layer/normalization.py +56 -44
  149. mindspore/nn/layer/pooling.py +58 -63
  150. mindspore/nn/layer/rnn_cells.py +33 -33
  151. mindspore/nn/layer/rnns.py +56 -56
  152. mindspore/nn/layer/thor_layer.py +74 -73
  153. mindspore/nn/layer/transformer.py +11 -1
  154. mindspore/nn/learning_rate_schedule.py +20 -20
  155. mindspore/nn/loss/loss.py +79 -81
  156. mindspore/nn/optim/adam.py +1 -1
  157. mindspore/nn/optim/adasum.py +2 -2
  158. mindspore/nn/optim/optimizer.py +1 -1
  159. mindspore/nn/optim/thor.py +2 -2
  160. mindspore/nn/probability/distribution/exponential.py +2 -1
  161. mindspore/nn/probability/distribution/poisson.py +2 -1
  162. mindspore/nn/sparse/sparse.py +3 -3
  163. mindspore/nn/wrap/cell_wrapper.py +34 -37
  164. mindspore/nn/wrap/grad_reducer.py +37 -37
  165. mindspore/nn/wrap/loss_scale.py +72 -74
  166. mindspore/numpy/array_creations.py +5 -5
  167. mindspore/numpy/fft.py +1 -1
  168. mindspore/numpy/math_ops.py +1 -1
  169. mindspore/opencv_core452.dll +0 -0
  170. mindspore/opencv_imgcodecs452.dll +0 -0
  171. mindspore/opencv_imgproc452.dll +0 -0
  172. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  173. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  174. mindspore/ops/_vmap/vmap_array_ops.py +6 -13
  175. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  176. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +17 -8
  177. mindspore/ops/auto_generate/gen_extend_func.py +1 -51
  178. mindspore/ops/auto_generate/gen_ops_def.py +463 -257
  179. mindspore/ops/auto_generate/gen_ops_prim.py +1127 -885
  180. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  181. mindspore/ops/composite/__init__.py +10 -0
  182. mindspore/ops/composite/base.py +8 -4
  183. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  184. mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
  185. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  186. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  187. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  188. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  189. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  190. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  191. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  192. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  193. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  194. mindspore/ops/function/__init__.py +3 -1
  195. mindspore/ops/function/_add_attr_func.py +11 -6
  196. mindspore/ops/function/array_func.py +7 -94
  197. mindspore/ops/function/debug_func.py +4 -3
  198. mindspore/ops/function/grad/grad_func.py +1 -1
  199. mindspore/ops/function/math_func.py +21 -367
  200. mindspore/ops/function/nn_func.py +26 -41
  201. mindspore/ops/function/other_func.py +4 -1
  202. mindspore/ops/function/random_func.py +31 -4
  203. mindspore/ops/functional.py +0 -2
  204. mindspore/ops/functional_overload.py +463 -6
  205. mindspore/ops/op_info_register.py +21 -0
  206. mindspore/ops/operations/__init__.py +5 -2
  207. mindspore/ops/operations/_custom_ops_utils.py +675 -8
  208. mindspore/ops/operations/_inner_ops.py +3 -6
  209. mindspore/ops/operations/_sequence_ops.py +1 -1
  210. mindspore/ops/operations/comm_ops.py +185 -26
  211. mindspore/ops/operations/custom_ops.py +235 -172
  212. mindspore/ops/operations/debug_ops.py +55 -4
  213. mindspore/ops/operations/image_ops.py +13 -13
  214. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  215. mindspore/ops/operations/math_ops.py +3 -4
  216. mindspore/ops/operations/nn_ops.py +5 -6
  217. mindspore/ops/primitive.py +6 -10
  218. mindspore/ops/tensor_method.py +36 -4
  219. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  220. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  221. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  222. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  223. mindspore/ops_generate/common/base_generator.py +14 -0
  224. mindspore/ops_generate/common/gen_constants.py +7 -2
  225. mindspore/ops_generate/common/gen_utils.py +0 -19
  226. mindspore/ops_generate/common/op_proto.py +11 -4
  227. mindspore/ops_generate/common/template.py +88 -11
  228. mindspore/ops_generate/gen_ops.py +1 -1
  229. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  230. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  231. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  232. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  233. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  234. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  235. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  236. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  237. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  238. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  239. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  240. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  241. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  242. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  243. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  244. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  245. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  246. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  247. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  248. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  249. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  250. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  251. mindspore/parallel/_auto_parallel_context.py +4 -2
  252. mindspore/parallel/_cell_wrapper.py +106 -40
  253. mindspore/parallel/_parallel_serialization.py +1 -1
  254. mindspore/parallel/_ps_context.py +4 -6
  255. mindspore/parallel/_tensor.py +167 -12
  256. mindspore/parallel/_transformer/moe.py +1 -1
  257. mindspore/parallel/_transformer/transformer.py +13 -8
  258. mindspore/parallel/auto_parallel.py +12 -5
  259. mindspore/parallel/checkpoint_convert.py +3 -3
  260. mindspore/parallel/checkpoint_transform.py +3 -1
  261. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  262. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  263. mindspore/parallel/cluster/run.py +43 -4
  264. mindspore/parallel/function/__init__.py +8 -1
  265. mindspore/parallel/function/reshard_func.py +1 -1
  266. mindspore/parallel/nn/__init__.py +15 -2
  267. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  268. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  269. mindspore/parallel/shard.py +2 -2
  270. mindspore/parallel/transform_safetensors.py +462 -174
  271. mindspore/pgodb140.dll +0 -0
  272. mindspore/pgort140.dll +0 -0
  273. mindspore/profiler/__init__.py +2 -1
  274. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  275. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  276. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
  277. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  278. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  279. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  280. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  281. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  282. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  283. mindspore/profiler/analysis/task_manager.py +1 -1
  284. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  285. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  286. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  287. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  288. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  289. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  290. mindspore/profiler/common/constant.py +16 -0
  291. mindspore/profiler/common/profiler_context.py +25 -27
  292. mindspore/profiler/common/profiler_info.py +0 -16
  293. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  294. mindspore/profiler/common/profiler_output_path.py +23 -8
  295. mindspore/profiler/common/profiler_parameters.py +128 -35
  296. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  297. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  298. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  299. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  300. mindspore/profiler/dynamic_profiler.py +305 -314
  301. mindspore/profiler/envprofiler.py +12 -7
  302. mindspore/profiler/experimental_config.py +96 -6
  303. mindspore/profiler/mstx.py +33 -12
  304. mindspore/profiler/platform/__init__.py +2 -3
  305. mindspore/profiler/platform/npu_profiler.py +29 -19
  306. mindspore/profiler/profiler.py +35 -19
  307. mindspore/profiler/profiler_action_controller.py +64 -76
  308. mindspore/profiler/schedule.py +10 -4
  309. mindspore/rewrite/common/config.py +1 -0
  310. mindspore/rewrite/common/namer.py +1 -0
  311. mindspore/rewrite/common/namespace.py +1 -0
  312. mindspore/rewrite/node/node.py +31 -11
  313. mindspore/rewrite/parsers/assign_parser.py +1 -1
  314. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  315. mindspore/run_check/_check_version.py +7 -10
  316. mindspore/runtime/__init__.py +5 -5
  317. mindspore/runtime/event.py +10 -4
  318. mindspore/runtime/executor.py +60 -45
  319. mindspore/runtime/memory.py +21 -30
  320. mindspore/runtime/thread_bind_core.py +298 -164
  321. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  322. mindspore/swresample-4.dll +0 -0
  323. mindspore/swscale-6.dll +0 -0
  324. mindspore/tbbmalloc.dll +0 -0
  325. mindspore/tinyxml2.dll +0 -0
  326. mindspore/train/_utils.py +6 -2
  327. mindspore/train/amp.py +43 -20
  328. mindspore/train/callback/__init__.py +5 -5
  329. mindspore/train/callback/_checkpoint.py +3 -6
  330. mindspore/train/callback/_flops_collector.py +1 -1
  331. mindspore/train/callback/_landscape.py +0 -1
  332. mindspore/train/callback/_train_fault_tolerance.py +71 -13
  333. mindspore/train/data_sink.py +11 -2
  334. mindspore/train/dataset_helper.py +9 -0
  335. mindspore/train/model.py +51 -33
  336. mindspore/train/serialization.py +133 -111
  337. mindspore/train/summary/summary_record.py +13 -2
  338. mindspore/turbojpeg.dll +0 -0
  339. mindspore/utils/__init__.py +3 -2
  340. mindspore/utils/dryrun.py +0 -6
  341. mindspore/utils/runtime_execution_order_check.py +162 -78
  342. mindspore/utils/sdc_detect.py +68 -0
  343. mindspore/utils/utils.py +6 -9
  344. mindspore/vcmeta.dll +0 -0
  345. mindspore/vcruntime140.dll +0 -0
  346. mindspore/vcruntime140_1.dll +0 -0
  347. mindspore/version.py +1 -1
  348. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  349. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +352 -390
  350. mindspore/_deprecated/jit.py +0 -198
  351. mindspore/experimental/es/__init__.py +0 -22
  352. mindspore/experimental/es/embedding_service.py +0 -891
  353. mindspore/experimental/es/embedding_service_layer.py +0 -581
  354. mindspore/profiler/parser/__init__.py +0 -14
  355. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  356. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  357. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  358. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  359. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  360. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  361. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  362. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  363. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  364. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  365. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  366. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  367. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  368. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  369. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  370. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  371. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  372. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  373. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  374. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  375. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  376. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  377. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  378. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  379. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  380. mindspore/profiler/parser/container.py +0 -229
  381. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  382. mindspore/profiler/parser/flops_parser.py +0 -531
  383. mindspore/profiler/parser/framework_enum.py +0 -111
  384. mindspore/profiler/parser/framework_parser.py +0 -464
  385. mindspore/profiler/parser/framework_struct.py +0 -61
  386. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  387. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  388. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  389. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  390. mindspore/profiler/parser/hccl_parser.py +0 -573
  391. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  392. mindspore/profiler/parser/integrator.py +0 -526
  393. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  394. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  395. mindspore/profiler/parser/minddata_parser.py +0 -186
  396. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  397. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  398. mindspore/profiler/parser/optime_parser.py +0 -250
  399. mindspore/profiler/parser/profiler_info.py +0 -213
  400. mindspore/profiler/parser/step_trace_parser.py +0 -666
  401. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  402. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  403. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/common/dump.py CHANGED
@@ -25,15 +25,19 @@ def set_dump(target, enabled=True):
25
25
  Enable or disable dump for the `target` and its contents.
26
26
 
27
27
  `target` should be an instance of :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` .
28
- Please note that this API takes effect only when Synchronous Dump is enabled and the `dump_mode`
29
- field in dump config file is ``"2"`` . See the `dump document
30
- <https://www.mindspore.cn/tutorials/en/master/debug/dump.html>`_ for details.
28
+ Please note that this API takes effect only when the Dump function is enabled, and the `dump_mode`
29
+ field in the Dump configuration file is set to `"2"` with the `ms_backend` compilation backend
30
+ (please refer to the backend parameter in
31
+ `jit <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.jit.html>`_).
32
+ See the `dump document <https://www.mindspore.cn/tutorials/en/master/debug/dump.html>`_ for details.
31
33
  The default enabled status for
32
34
  a :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` is False.
33
35
 
34
36
  Note:
35
- 1. This API is only effective for GRAPH_MODE whose graph compilation level is O0/O1 with Ascend backend,
36
- and can not work for fusion Primitive operators.
37
+ 1. This API is only available for JIT compilation, requires 'Ascend' as the device_target and
38
+ `ms_backend` as the compilation backend (please refer to the backend parameter in
39
+ `jit <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.jit.html>`_),
40
+ and does not support fused operators.
37
41
  2. This API only supports being called before training starts.
38
42
  If you call this API during training, it may not be effective.
39
43
  3. After using `set_dump(Cell, True)` , operators in forward and backward
@@ -66,7 +70,6 @@ def set_dump(target, enabled=True):
66
70
  >>> import mindspore.nn as nn
67
71
  >>> from mindspore import Tensor, set_dump
68
72
  >>>
69
- >>> ms.set_context(mode=ms.GRAPH_MODE)
70
73
  >>> ms.set_device(device_target="Ascend")
71
74
  >>>
72
75
  >>> class MyNet(nn.Cell):
@@ -75,6 +78,7 @@ def set_dump(target, enabled=True):
75
78
  ... self.conv1 = nn.Conv2d(5, 6, 5, pad_mode='valid')
76
79
  ... self.relu1 = nn.ReLU()
77
80
  ...
81
+ ... @jit
78
82
  ... def construct(self, x):
79
83
  ... x = self.conv1(x)
80
84
  ... x = self.relu1(x)
@@ -109,15 +113,6 @@ def set_dump(target, enabled=True):
109
113
  "If you have Ascend device, consider set device_target to Ascend "
110
114
  "before calling set_dump.".format(current_target))
111
115
 
112
- current_mode = context.get_context("mode")
113
- if current_mode != context.GRAPH_MODE:
114
- # We will not return here in case user changed mode later.
115
- warn(
116
- "Current mode is PYNATIVE_MODE, which is not supported by set_dump. "
117
- "Only GRAPH_MODE is supported currently. "
118
- "Consider set mode to GRAPH_MODE "
119
- "before calling set_dump.")
120
-
121
116
  # The actual set dump logic.
122
117
  if isinstance(target, nn.Cell):
123
118
  target.add_flags(dump=enabled)
@@ -77,10 +77,9 @@ class Generator:
77
77
  """
78
78
 
79
79
  def __init__(self):
80
- self._seed = Parameter(Tensor(0, mstype.int64),
81
- name="seed", requires_grad=False)
80
+ self._seed = Parameter(Tensor(0, mstype.int64), requires_grad=False)
82
81
  self._offset = Parameter(
83
- Tensor(0, mstype.int64), name="offset", requires_grad=False)
82
+ Tensor(0, mstype.int64), requires_grad=False)
84
83
 
85
84
  self._generator = GeneratorOp().set_device("CPU")
86
85
  self._generator.add_prim_attr("manual_seed", False)
@@ -81,19 +81,23 @@ class HookHandle:
81
81
  It is only supported in pynative mode and works when registering or removing hook function for Cell object.
82
82
 
83
83
  Args:
84
- hook_dict (Dict): The hook object with hook function registered on. Default value: None.
84
+ hook_dict (Dict, optional): The hook object with hook function registered on. Default value: ``None`` .
85
+ extra_dict (Dict, optional): The extra dict. Default value: ``None`` .
85
86
 
86
87
  Supported Platforms:
87
88
  ``Ascend`` ``GPU`` ``CPU``
88
89
  """
89
90
  unique_id = 0
90
91
 
91
- def __init__(self, hook_dict=None):
92
+ def __init__(self, hook_dict=None, *, extra_dict=None):
92
93
  self.hook_dict_ref = None
94
+ self.extra_dict_ref = None
93
95
  if hook_dict is not None:
94
96
  self.hook_dict_ref = weakref.ref(hook_dict)
95
97
  self.handle_id = HookHandle.unique_id
96
98
  HookHandle.unique_id += 1
99
+ if extra_dict is not None:
100
+ self.extra_dict_ref = weakref.ref(extra_dict)
97
101
 
98
102
  def remove(self):
99
103
  """
@@ -145,3 +149,8 @@ class HookHandle:
145
149
  hook_dict = self.hook_dict_ref()
146
150
  if hook_dict is not None and self.handle_id in hook_dict:
147
151
  del hook_dict[self.handle_id]
152
+
153
+ if self.extra_dict_ref is not None:
154
+ extra_dict = self.extra_dict_ref()
155
+ if extra_dict is not None and self.handle_id in extra_dict:
156
+ del extra_dict[self.handle_id]
@@ -97,7 +97,7 @@ class JitConfig:
97
97
  self.jit_config_dict["debug_level"] = debug_level
98
98
  self.jit_config_dict["infer_boost"] = infer_boost
99
99
  if "backend" not in self.jit_config_dict:
100
- if jit_level == "O0" or jit_level == "O1":
100
+ if jit_level in ["O0", "O1"]:
101
101
  self.jit_config_dict["backend"] = "ms_backend"
102
102
  elif jit_level == "O2":
103
103
  self.jit_config_dict["backend"] = "GE"
@@ -17,16 +17,15 @@
17
17
 
18
18
  import inspect
19
19
  import re
20
+ import types
20
21
  from functools import wraps
21
22
  import mindspore as ms
22
23
  from mindspore import log as logger
23
24
  from mindspore import context
24
25
  from mindspore.common.jit_context import JitContext, set_jit_context, jit_context
25
26
  from mindspore.common.tensor import Tensor as PythonTensor
26
- from mindspore._checkparam import is_stub_tensor
27
27
  from mindspore._c_expression import TraceRecorder as tr
28
28
  from mindspore._c_expression import JitExecutor_
29
- from mindspore._c_expression import TensorNode
30
29
  from mindspore._c_expression import TensorPy as Tensor, CSRTensor, COOTensor
31
30
  from mindspore._c_expression import typing
32
31
 
@@ -35,7 +34,7 @@ class TraceJitContext(JitContext):
35
34
  """JIT Context for trace JIT."""
36
35
 
37
36
  def __init__(self):
38
- JitContext.__init__(self)
37
+ super().__init__()
39
38
  self._is_nested = False
40
39
 
41
40
  def set_is_nested(self, status):
@@ -44,29 +43,34 @@ class TraceJitContext(JitContext):
44
43
  def is_nested(self):
45
44
  return self._is_nested
46
45
 
46
+ def args_preprocess(self, prim_name, prim_res, *args):
47
+ args = tuple(_convert_arg_for_operators(arg, prim_name)
48
+ for arg in args)
49
+ file_names, linenos = _get_caller_lines()
50
+ return prim_res, file_names, linenos, args
51
+
47
52
  def run_op(self, prim, prim_res, *args):
48
53
  """Capture op"""
49
54
  logger.debug(f'prim: {prim}, args: {args}, prim_res: {prim_res}')
50
- if isinstance(prim_res, TensorNode):
51
- prim_res = prim_res.get_value()
52
- prim_res = _sync_stub_tensor(prim_res)
53
- args = tuple(_sync_stub_tensor(arg) for arg in args)
54
- args = tuple(_convert_arg_for_operators(arg, prim.name) for arg in args)
55
- file_names, linenos = _get_caller_lines()
56
- tr.get_instance().new_node(prim, prim_res, file_names, linenos, False, *args)
55
+ prim_res, file_names, linenos, args = self.args_preprocess(prim.name, prim_res, *args)
56
+ tr.get_instance().new_node(prim, (prim_res, file_names, linenos, False), *args)
57
57
  return prim_res
58
58
 
59
+ def prepare_op(self, prim_name, prim_res, *args):
60
+ """Prepare op"""
61
+ logger.debug(f'prim: {prim_name}, args: {args}, prim_res: {prim_res}')
62
+ return self.args_preprocess(prim_name, prim_res, *args)
63
+
59
64
  def run_graph(self, phase, prim_res, *args):
60
65
  """Capture func_graph generated from ast"""
61
66
  logger.debug(f'phase: {phase}, args: {args}, prim_res: {prim_res}')
62
- if isinstance(prim_res, TensorNode):
63
- prim_res = prim_res.get_value()
64
- prim_res = _sync_stub_tensor(prim_res)
65
- args = tuple(_sync_stub_tensor(arg) for arg in args)
66
67
  file_names, linenos = _get_caller_lines()
67
- tr.get_instance().new_fg_node((phase, prim_res, file_names, linenos, self._is_nested), *args)
68
+ tr.get_instance().new_fg_node((prim_res, file_names, linenos, phase, self._is_nested), *args)
68
69
  return prim_res
69
70
 
71
+ def default_output(self):
72
+ return PythonTensor(0)
73
+
70
74
 
71
75
  _compile_only = False
72
76
  _trace_jit_context = TraceJitContext()
@@ -80,20 +84,6 @@ def _set_compile_only(compile_only=True):
80
84
  _compile_only = compile_only
81
85
 
82
86
 
83
- def _sync_stub_tensor(stub):
84
- """Synchronize stub tensor"""
85
- if is_stub_tensor(stub):
86
- real_tensor = stub.stub_sync()
87
- logger.debug(f'Convert stub tensor, stub: [{type(stub)}] {id(stub)}/{stub}, '
88
- f'tensor: [{type(real_tensor)}] {id(real_tensor)}/{real_tensor}')
89
- return real_tensor
90
- if isinstance(stub, tuple):
91
- return tuple(_sync_stub_tensor(item) for item in stub)
92
- if isinstance(stub, list):
93
- return list(_sync_stub_tensor(item) for item in stub)
94
- return stub
95
-
96
-
97
87
  def convert_tensorpy(args):
98
88
  new_args = []
99
89
  for arg in args:
@@ -129,84 +119,73 @@ def nested_run(obj, cell, *args):
129
119
  if res is not tuple:
130
120
  res = (res,)
131
121
  file_names, linenos = _get_caller_lines()
132
- res = _sync_stub_tensor(res)
133
122
  set_jit_context(None)
134
123
  return file_names, linenos, res
135
124
 
136
125
 
137
- def _jit_trace(fn):
138
- """
139
- Create a callable MindSpore graph from a Python function by trace method.
140
-
141
- This allows the MindSpore runtime to apply optimizations based on traced func graph.
142
-
143
- Args:
144
- fn (Function): The Python function that will be run as a graph. Default: ``None`` .
145
-
146
- Returns:
147
- Function, if `fn` is not None, returns a callable function that will execute the compiled function; If `fn` is
148
- None, returns a decorator and when this decorator invokes with a single `fn` argument, the callable function is
149
- equal to the case when `fn` is not None.
150
-
151
- Supported Platforms:
152
- ``Ascend`` ``GPU`` ``CPU``
153
-
154
- Examples:
155
- >>> import numpy as np
156
- >>> from mindspore import Tensor
157
- >>> from mindspore.common.jit_trace import _jit_trace as jit_trace
158
- ...
159
- >>> x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
160
- >>> y = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
161
- ...
162
- >>> # To create a callable MindSpore graph by calling decorator @jit_trace
163
- >>> def tensor_add(x, y):
164
- ... z = x + y
165
- ... return z
166
- ...
167
- >>> tensor_add_graph = jit_trace(fn=tensor_add)
168
- >>> out = tensor_add_graph(x, y)
169
- """
170
-
171
- @wraps(fn)
172
- def jit_trace_wrap(*args, **kwargs):
173
- # If a trace graph is already built, keep going without building a new trace graph.
174
- if jit_context():
175
- return fn(*args, **kwargs)
176
- # Start trace process.
177
- if kwargs:
178
- bound_arguments = inspect.signature(fn).bind(*args, **kwargs)
179
- bound_arguments.apply_defaults()
180
- args = bound_arguments.args
181
- kwargs = bound_arguments.kwargs
182
- generate_name = fn.__module__
183
- if args:
184
- jit_args = args[1:] if hasattr(args[0], fn.__name__) else args
185
- obj = args[0]
186
- if hasattr(obj, fn.__name__): # Add class name for Cell.
187
- generate_name = generate_name + "." + obj.__class__.__name__
188
- else:
189
- jit_args = args
190
- generate_name = generate_name + "." + fn.__name__ + "#" + str(id(fn))
191
- # Add create time for Cell.
192
- if args and hasattr(obj, fn.__name__):
193
- generate_name = generate_name + '#created_' + str(args[0].create_time)
194
- line_str = fn.__code__.co_filename + ":" + str(fn.__code__.co_firstlineno)
195
- generate_name = generate_name + '#[' + line_str + ']'
196
-
197
- new_compile = _jit_trace_begin(generate_name, *jit_args)
198
- if new_compile:
199
- fn_res = fn(*args, **kwargs)
200
- logger.debug(f'fn: {fn}, fn_res: {fn_res}, line: {line_str}')
201
- # Use fn's output to build func graph's output.
202
- output = _jit_trace_end(fn_res)
203
- else:
204
- output = _jit_trace_end(None) # Run with compilation.
205
- logger.debug(f'output: {output}')
206
- return output
207
-
208
- jit_trace_wrap.__trace_func__ = True
209
- return jit_trace_wrap
126
+ def _jit_trace():
127
+ """Return the wrapped function for trace mode jit."""
128
+ def wrap_func(fn):
129
+ if hasattr(fn, "construct"):
130
+ if isinstance(fn, ms.nn.Cell):
131
+ # Bound the cell object to get the self arg.
132
+ return types.MethodType(_jit_trace()(fn.construct.__func__), fn)
133
+ if isinstance(fn, type) and issubclass(fn, ms.nn.Cell):
134
+ fn.construct = _jit_trace()(fn.construct)
135
+ return fn
136
+
137
+ if isinstance(fn, types.MethodType):
138
+ return types.MethodType(_jit_trace()(fn.__func__), fn.__self__)
139
+
140
+ if not isinstance(fn, types.FunctionType):
141
+ logger.warning(f"The fn should be function, method or cell instance/class, but got {fn}")
142
+ return fn
143
+
144
+ if hasattr(fn, "__wrapped_by_jit__"):
145
+ logger.warning(f"The fn {fn} should be wrapped by jit only once.")
146
+
147
+ @wraps(fn)
148
+ def jit_trace_wrap(*args, **kwargs):
149
+ # If a trace graph is already built, keep going without building a new trace graph.
150
+ if jit_context():
151
+ return fn(*args, **kwargs)
152
+ # Start trace process.
153
+ if kwargs:
154
+ bound_arguments = inspect.signature(fn).bind(*args, **kwargs)
155
+ bound_arguments.apply_defaults()
156
+ args = bound_arguments.args
157
+ kwargs = bound_arguments.kwargs
158
+ generate_name = fn.__module__
159
+ if args:
160
+ jit_args = args[1:] if hasattr(args[0], fn.__name__) else args
161
+ obj = args[0]
162
+ if hasattr(obj, fn.__name__): # Add class name for Cell.
163
+ generate_name = generate_name + "." + obj.__class__.__name__
164
+ else:
165
+ jit_args = args
166
+ generate_name = generate_name + "." + fn.__name__ + "#" + str(id(fn))
167
+ # Add create time for Cell.
168
+ if args and hasattr(obj, fn.__name__):
169
+ generate_name = generate_name + '#created_' + str(args[0].create_time)
170
+ line_str = fn.__code__.co_filename + ":" + str(fn.__code__.co_firstlineno)
171
+ generate_name = generate_name + '#[' + line_str + ']'
172
+
173
+ new_compile = _jit_trace_begin(generate_name, *jit_args)
174
+ if new_compile:
175
+ fn_res = fn(*args, **kwargs)
176
+ logger.debug(f'fn: {fn}, fn_res: {fn_res}, line: {line_str}')
177
+ # Use fn's output to build func graph's output.
178
+ output = _jit_trace_end(fn_res)
179
+ else:
180
+ output = _jit_trace_end(None) # Run with compilation.
181
+ logger.debug(f'output: {output}')
182
+ return output
183
+
184
+ jit_trace_wrap.__trace_func__ = True
185
+ setattr(jit_trace_wrap, "__wrapped_by_jit__", True)
186
+ return jit_trace_wrap
187
+
188
+ return wrap_func
210
189
 
211
190
 
212
191
  def _get_caller_lines():
@@ -240,6 +219,8 @@ def _get_args_for_run(args):
240
219
  new_args.append(arg)
241
220
  elif isinstance(arg, dict) and hasattr(arg, "__ms_mutable__"):
242
221
  new_args.append(tuple(arg.values()))
222
+ elif isinstance(arg, (tuple, list)) and hasattr(arg, "__ms_mutable__"):
223
+ new_args.append(arg)
243
224
  return tuple(new_args)
244
225
 
245
226
 
@@ -284,7 +265,6 @@ def _jit_trace_begin(fn_name, *args):
284
265
  logger.debug(f'_jit_trace_begin, args: {args}')
285
266
  _trace_jit_context.set_is_nested(False)
286
267
  set_jit_context(_trace_jit_context)
287
- args = tuple(_sync_stub_tensor(arg) for arg in args)
288
268
  for arg in args:
289
269
  logger.debug(f'_jit_trace_begin, arg: {arg}, {type(arg)}')
290
270
 
@@ -355,7 +335,6 @@ def _jit_trace_end(*output_args):
355
335
  logger.debug(f'jit trace result: {output}')
356
336
  else:
357
337
  logger.debug(f'output_args: {output_args}')
358
- output_args = tuple(_sync_stub_tensor(arg) for arg in output_args)
359
338
  file_names, linenos = _get_caller_lines()
360
339
  tr.get_instance().end_graph(file_names, linenos, *output_args)
361
340
  if _compile_only:
@@ -49,17 +49,37 @@ import mindspore.common._monad as monad
49
49
  __all__ = ['Parameter', 'ParameterTuple']
50
50
 
51
51
  PARAMETER_NAME_DEFAULT = "Parameter"
52
+ _GENERATED_PARAMETER_NAME_PREFIX = PARAMETER_NAME_DEFAULT + '#'
52
53
  PARAMETER_NAME_PREFIX_MAX_LEN = 1024
53
54
 
55
+ _PARAMETER_NAME_ID = 0
56
+
57
+
58
+ def _generate_parameter_name():
59
+ global _PARAMETER_NAME_ID
60
+ name = _GENERATED_PARAMETER_NAME_PREFIX + str(_PARAMETER_NAME_ID)
61
+ _PARAMETER_NAME_ID += 1
62
+ return name
63
+
64
+
65
+ def _is_parameter_generated(param_name):
66
+ if not param_name or not isinstance(param_name, str):
67
+ return False
68
+ return param_name.startswith(_GENERATED_PARAMETER_NAME_PREFIX)
69
+
70
+
54
71
  # Global variable for parameter unique key.
55
72
  _GLOBAL_PARAMETER_KEY = -1
56
73
 
57
74
  # Global variable to mark the hook of parameter is updated
58
75
  _parameter_hook_updated = True
76
+
77
+
59
78
  def set_parameter_hook_updated(value):
60
79
  global _parameter_hook_updated
61
80
  _parameter_hook_updated = value
62
81
 
82
+
63
83
  def parameter_hook_updated():
64
84
  global _parameter_hook_updated
65
85
  return _parameter_hook_updated
@@ -496,11 +516,11 @@ class Parameter(Tensor_):
496
516
  the default value `PARAMETER_NAME_DEFAULT` is used.
497
517
  """
498
518
  if name_ is None:
499
- name_ = PARAMETER_NAME_DEFAULT
519
+ name_ = _generate_parameter_name()
500
520
  elif isinstance(name_, str):
501
521
  name_ = name_.strip()
502
522
  if name_ == '':
503
- name_ = PARAMETER_NAME_DEFAULT
523
+ name_ = _generate_parameter_name()
504
524
  if len(name_) > PARAMETER_NAME_PREFIX_MAX_LEN:
505
525
  raise ValueError("The length of the '{}' name should be less than {}.".
506
526
  format(name_, PARAMETER_NAME_PREFIX_MAX_LEN))
@@ -904,13 +924,10 @@ class Parameter(Tensor_):
904
924
  incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
905
925
  current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
906
926
  if self.dtype != data.dtype:
907
- if mstype.implicit_conversion_seq.get(self.dtype) < mstype.implicit_conversion_seq.get(data.dtype):
908
- self._raise_type_error(data.dtype)
909
- else:
910
- from mindspore.ops import functional as F
911
- if isinstance(data, Tensor) and data.init is not None:
912
- data.init_data()
913
- data = F.cast(data, self.dtype)
927
+ from mindspore.ops import functional as F
928
+ if isinstance(data, Tensor) and data.init is not None:
929
+ data.init_data()
930
+ data = F.cast(data, self.dtype)
914
931
  if isinstance(data, Tensor) and data.has_init:
915
932
  # The parameter has been initialized, directly update by the data
916
933
  if current_tensor_is_init:
@@ -938,7 +955,6 @@ class Parameter(Tensor_):
938
955
  init_data_args += (slice_index, layout[2], layout[5])
939
956
  return init_data_args
940
957
 
941
-
942
958
  def init_data(self, layout=None, set_sliced=False):
943
959
  """
944
960
  Initialize the parameter's data.
@@ -1030,7 +1046,6 @@ class Parameter(Tensor_):
1030
1046
  """
1031
1047
  return Tensor_._offload(self, True)
1032
1048
 
1033
-
1034
1049
  def _load(self):
1035
1050
  r"""
1036
1051
  Load parameter to device.
@@ -1160,6 +1175,5 @@ class ParameterTuple(tuple):
1160
1175
  _insert_accumu_init_info(x1.name, init_to_value(init))
1161
1176
  return ParameterTuple(new)
1162
1177
 
1163
-
1164
1178
  def __parameter_tuple__(self):
1165
1179
  """For parse check."""
@@ -25,7 +25,7 @@ from mindspore.ops.composite import GradOperation
25
25
  from mindspore.common._register_for_recompute import recompute_registry
26
26
  from mindspore.common.api import _pynative_executor, _no_grad
27
27
  from mindspore.common.generator import get_rng_state, set_rng_state
28
- from mindspore.train.amp import amp_decorator
28
+ from mindspore.train.amp import AmpDecorator
29
29
  from mindspore._c_expression.amp import get_curr_amp_strategy
30
30
 
31
31
 
@@ -104,8 +104,8 @@ class _RecomputeCell(Cell):
104
104
  set_rng_state(self.cpu_rng_state)
105
105
  _pynative_executor.set_is_run_recompute(True)
106
106
  if self.amp_strategy:
107
- with amp_decorator(self.amp_strategy.get_amp_level(), self.amp_strategy.get_amp_dtype(),
108
- self.amp_strategy.get_white_list(), self.amp_strategy.get_black_list()):
107
+ with AmpDecorator(self.amp_strategy.get_amp_level(), self.amp_strategy.get_amp_dtype(),
108
+ self.amp_strategy.get_white_list(), self.amp_strategy.get_black_list()):
109
109
  grads = self.grad(self.net, self.internal_params)(*input_args, **kwargs)
110
110
  else:
111
111
  grads = self.grad(self.net, self.internal_params)(*input_args, **kwargs)
@@ -98,7 +98,6 @@ class RowTensor(RowTensorInner):
98
98
 
99
99
  .. warning::
100
100
  - This is an experimental API that is subjected to change or deletion.
101
- - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
102
101
 
103
102
  Args:
104
103
  indices (Tensor): A 1-D integer Tensor of shape :math:`(d_0)` . Default: ``None``.
@@ -232,7 +231,6 @@ class COOTensor(COOTensor_):
232
231
 
233
232
  .. warning::
234
233
  - This is an experimental API that is subject to change or deletion.
235
- - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
236
234
  - Currently, duplicate coordinates in the indices will not be coalesced.
237
235
  If the indices contain out-of-bound values, the result will be undefined.
238
236
 
@@ -681,7 +679,6 @@ class CSRTensor(CSRTensor_):
681
679
 
682
680
  .. warning::
683
681
  - This is an experimental API that is subjected to change.
684
- - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
685
682
  - If the values given by `indptr` or `indices` are invalid, the results may be undefined. Invalid values include
686
683
  when the length of `values` or `indices` exceeds the range indicated by `indptr`, and when the columns
687
684
  indicated by `indices` are repeated on the same row.
@@ -104,7 +104,6 @@ class Symbol:
104
104
  if not isinstance(unique, bool):
105
105
  raise TypeError(f"For 'Symbol', the argument 'unique' must be bool, but got {type(unique)}")
106
106
 
107
- # pylint: disable=missing-docstring
108
107
  def to_dict(self):
109
108
  # Convert the symbolic info to dictionary.
110
109
  # This method is not necessary to show in public api document, use comment instead of docstring.