mindspore-2.6.0-cp39-cp39-win_amd64.whl → mindspore-2.7.0rc1-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (380)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +40 -9
  7. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  8. mindspore/_extends/optimize/cell_utils.py +96 -0
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +44 -22
  11. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  12. mindspore/_extends/parse/parser.py +36 -61
  13. mindspore/_extends/parse/resources.py +39 -0
  14. mindspore/_extends/parse/standard_method.py +32 -13
  15. mindspore/_extends/parse/trope.py +8 -1
  16. mindspore/_extends/pijit/__init__.py +1 -2
  17. mindspore/amp.py +4 -4
  18. mindspore/avcodec-59.dll +0 -0
  19. mindspore/avdevice-59.dll +0 -0
  20. mindspore/avfilter-8.dll +0 -0
  21. mindspore/avformat-59.dll +0 -0
  22. mindspore/avutil-57.dll +0 -0
  23. mindspore/boost/adasum.py +1 -1
  24. mindspore/boost/boost_cell_wrapper.py +4 -4
  25. mindspore/common/__init__.py +27 -2
  26. mindspore/common/_grad_function.py +2 -1
  27. mindspore/common/_pijit_context.py +28 -7
  28. mindspore/common/_stub_tensor.py +1 -209
  29. mindspore/common/_tensor_cpp_method.py +1 -1
  30. mindspore/common/_tensor_docs.py +76 -15
  31. mindspore/common/api.py +193 -112
  32. mindspore/common/dtype.py +21 -11
  33. mindspore/common/dump.py +10 -15
  34. mindspore/common/generator.py +2 -3
  35. mindspore/common/hook_handle.py +11 -2
  36. mindspore/common/jit_config.py +1 -1
  37. mindspore/common/jit_trace.py +84 -105
  38. mindspore/common/parameter.py +26 -12
  39. mindspore/common/recompute.py +3 -3
  40. mindspore/common/sparse_tensor.py +0 -3
  41. mindspore/common/symbol.py +0 -1
  42. mindspore/common/tensor.py +48 -83
  43. mindspore/communication/_comm_helper.py +46 -4
  44. mindspore/communication/management.py +79 -7
  45. mindspore/context.py +38 -23
  46. mindspore/dataset/core/config.py +3 -3
  47. mindspore/dataset/engine/datasets.py +20 -7
  48. mindspore/dataset/engine/datasets_user_defined.py +32 -2
  49. mindspore/dataset/engine/iterators.py +2 -2
  50. mindspore/dataset/engine/obs/config_loader.py +2 -2
  51. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  52. mindspore/dataset/transforms/py_transforms.py +7 -3
  53. mindspore/dataset/transforms/transforms.py +7 -3
  54. mindspore/dataset/vision/validators.py +1 -0
  55. mindspore/device_context/ascend/device.py +1 -1
  56. mindspore/device_context/gpu/__init__.py +2 -2
  57. mindspore/device_context/gpu/device.py +1 -1
  58. mindspore/device_context/gpu/op_precision.py +4 -2
  59. mindspore/device_context/gpu/op_tuning.py +6 -3
  60. mindspore/device_manager.py +16 -9
  61. mindspore/dnnl.dll +0 -0
  62. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -5
  63. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  64. mindspore/experimental/optim/adadelta.py +13 -20
  65. mindspore/experimental/optim/adagrad.py +15 -22
  66. mindspore/experimental/optim/adam.py +17 -24
  67. mindspore/experimental/optim/adamax.py +14 -22
  68. mindspore/experimental/optim/adamw.py +28 -34
  69. mindspore/experimental/optim/asgd.py +15 -25
  70. mindspore/experimental/optim/lr_scheduler.py +27 -45
  71. mindspore/experimental/optim/nadam.py +14 -24
  72. mindspore/experimental/optim/optimizer.py +13 -23
  73. mindspore/experimental/optim/radam.py +18 -24
  74. mindspore/experimental/optim/rmsprop.py +14 -25
  75. mindspore/experimental/optim/rprop.py +15 -26
  76. mindspore/experimental/optim/sgd.py +9 -19
  77. mindspore/hal/__init__.py +4 -4
  78. mindspore/hal/contiguous_tensors_handle.py +2 -2
  79. mindspore/hal/memory.py +1 -0
  80. mindspore/include/api/cell.h +37 -1
  81. mindspore/include/api/delegate.h +10 -0
  82. mindspore/include/api/model.h +3 -0
  83. mindspore/include/api/types.h +2 -2
  84. mindspore/include/c_api/model_c.h +0 -58
  85. mindspore/include/c_api/tensor_c.h +0 -26
  86. mindspore/include/dataset/vision_ascend.h +1 -1
  87. mindspore/jpeg62.dll +0 -0
  88. mindspore/mindrecord/tools/cifar10.py +60 -11
  89. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  90. mindspore/mindspore_backend_common.dll +0 -0
  91. mindspore/mindspore_backend_manager.dll +0 -0
  92. mindspore/mindspore_common.dll +0 -0
  93. mindspore/mindspore_core.dll +0 -0
  94. mindspore/mindspore_cpu_res_manager.dll +0 -0
  95. mindspore/mindspore_dump.dll +0 -0
  96. mindspore/mindspore_frontend.dll +0 -0
  97. mindspore/mindspore_glog.dll +0 -0
  98. mindspore/mindspore_memory_pool.dll +0 -0
  99. mindspore/mindspore_ms_backend.dll +0 -0
  100. mindspore/mindspore_ops.dll +0 -0
  101. mindspore/mindspore_ops_host.dll +0 -0
  102. mindspore/mindspore_ops_kernel_common.dll +0 -0
  103. mindspore/mindspore_profiler.dll +0 -0
  104. mindspore/mindspore_pyboost.dll +0 -0
  105. mindspore/mindspore_pynative.dll +0 -0
  106. mindspore/mindspore_res_manager.dll +0 -0
  107. mindspore/mindspore_runtime_pipeline.dll +0 -0
  108. mindspore/mint/__init__.py +4 -44
  109. mindspore/mint/distributed/__init__.py +1 -0
  110. mindspore/mint/distributed/distributed.py +208 -5
  111. mindspore/mint/nn/__init__.py +1 -1
  112. mindspore/mint/nn/functional.py +53 -6
  113. mindspore/mint/nn/layer/_functions.py +164 -294
  114. mindspore/mint/nn/layer/activation.py +8 -6
  115. mindspore/mint/nn/layer/conv.py +122 -98
  116. mindspore/mint/nn/layer/normalization.py +8 -22
  117. mindspore/mint/optim/adam.py +19 -18
  118. mindspore/mint/optim/adamw.py +14 -8
  119. mindspore/mint/optim/sgd.py +5 -5
  120. mindspore/nn/cell.py +325 -499
  121. mindspore/nn/grad/cell_grad.py +11 -12
  122. mindspore/nn/layer/activation.py +32 -34
  123. mindspore/nn/layer/basic.py +67 -64
  124. mindspore/nn/layer/channel_shuffle.py +4 -4
  125. mindspore/nn/layer/combined.py +4 -2
  126. mindspore/nn/layer/conv.py +86 -85
  127. mindspore/nn/layer/dense.py +9 -7
  128. mindspore/nn/layer/embedding.py +50 -52
  129. mindspore/nn/layer/image.py +37 -39
  130. mindspore/nn/layer/math.py +111 -112
  131. mindspore/nn/layer/normalization.py +56 -44
  132. mindspore/nn/layer/pooling.py +58 -63
  133. mindspore/nn/layer/rnn_cells.py +33 -33
  134. mindspore/nn/layer/rnns.py +56 -56
  135. mindspore/nn/layer/thor_layer.py +74 -73
  136. mindspore/nn/layer/transformer.py +11 -1
  137. mindspore/nn/learning_rate_schedule.py +20 -20
  138. mindspore/nn/loss/loss.py +79 -81
  139. mindspore/nn/optim/adam.py +1 -1
  140. mindspore/nn/optim/adasum.py +2 -2
  141. mindspore/nn/optim/optimizer.py +1 -1
  142. mindspore/nn/optim/thor.py +2 -2
  143. mindspore/nn/probability/distribution/exponential.py +2 -1
  144. mindspore/nn/probability/distribution/poisson.py +2 -1
  145. mindspore/nn/sparse/sparse.py +3 -3
  146. mindspore/nn/wrap/cell_wrapper.py +34 -37
  147. mindspore/nn/wrap/grad_reducer.py +37 -37
  148. mindspore/nn/wrap/loss_scale.py +72 -74
  149. mindspore/numpy/array_creations.py +5 -5
  150. mindspore/numpy/fft.py +1 -1
  151. mindspore/numpy/math_ops.py +1 -1
  152. mindspore/opencv_core452.dll +0 -0
  153. mindspore/opencv_imgcodecs452.dll +0 -0
  154. mindspore/opencv_imgproc452.dll +0 -0
  155. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  156. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  157. mindspore/ops/_vmap/vmap_array_ops.py +6 -13
  158. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  159. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +17 -8
  160. mindspore/ops/auto_generate/gen_extend_func.py +1 -51
  161. mindspore/ops/auto_generate/gen_ops_def.py +463 -257
  162. mindspore/ops/auto_generate/gen_ops_prim.py +1127 -885
  163. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  164. mindspore/ops/composite/__init__.py +10 -0
  165. mindspore/ops/composite/base.py +8 -4
  166. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  167. mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
  168. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  169. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  170. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  171. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  172. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  173. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  174. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  175. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  176. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  177. mindspore/ops/function/__init__.py +3 -1
  178. mindspore/ops/function/_add_attr_func.py +11 -6
  179. mindspore/ops/function/array_func.py +7 -94
  180. mindspore/ops/function/debug_func.py +4 -3
  181. mindspore/ops/function/grad/grad_func.py +1 -1
  182. mindspore/ops/function/math_func.py +21 -367
  183. mindspore/ops/function/nn_func.py +26 -41
  184. mindspore/ops/function/other_func.py +4 -1
  185. mindspore/ops/function/random_func.py +31 -4
  186. mindspore/ops/functional.py +0 -2
  187. mindspore/ops/functional_overload.py +463 -6
  188. mindspore/ops/op_info_register.py +21 -0
  189. mindspore/ops/operations/__init__.py +5 -2
  190. mindspore/ops/operations/_custom_ops_utils.py +675 -8
  191. mindspore/ops/operations/_inner_ops.py +3 -6
  192. mindspore/ops/operations/_sequence_ops.py +1 -1
  193. mindspore/ops/operations/comm_ops.py +185 -26
  194. mindspore/ops/operations/custom_ops.py +235 -172
  195. mindspore/ops/operations/debug_ops.py +55 -4
  196. mindspore/ops/operations/image_ops.py +13 -13
  197. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  198. mindspore/ops/operations/math_ops.py +3 -4
  199. mindspore/ops/operations/nn_ops.py +5 -6
  200. mindspore/ops/primitive.py +6 -10
  201. mindspore/ops/tensor_method.py +36 -4
  202. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  203. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  204. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  205. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  206. mindspore/ops_generate/common/base_generator.py +14 -0
  207. mindspore/ops_generate/common/gen_constants.py +7 -2
  208. mindspore/ops_generate/common/gen_utils.py +0 -19
  209. mindspore/ops_generate/common/op_proto.py +11 -4
  210. mindspore/ops_generate/common/template.py +88 -11
  211. mindspore/ops_generate/gen_ops.py +1 -1
  212. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  213. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  214. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  215. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  216. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  217. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  218. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  219. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  220. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  221. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  222. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  223. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  224. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  225. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  226. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  227. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  228. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  229. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  230. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  231. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  232. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  233. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  234. mindspore/parallel/_auto_parallel_context.py +4 -2
  235. mindspore/parallel/_cell_wrapper.py +106 -40
  236. mindspore/parallel/_parallel_serialization.py +1 -1
  237. mindspore/parallel/_ps_context.py +4 -6
  238. mindspore/parallel/_tensor.py +167 -12
  239. mindspore/parallel/_transformer/moe.py +1 -1
  240. mindspore/parallel/_transformer/transformer.py +13 -8
  241. mindspore/parallel/auto_parallel.py +12 -5
  242. mindspore/parallel/checkpoint_convert.py +3 -3
  243. mindspore/parallel/checkpoint_transform.py +3 -1
  244. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  245. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  246. mindspore/parallel/cluster/run.py +43 -4
  247. mindspore/parallel/function/__init__.py +8 -1
  248. mindspore/parallel/function/reshard_func.py +1 -1
  249. mindspore/parallel/nn/__init__.py +15 -2
  250. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  251. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  252. mindspore/parallel/shard.py +2 -2
  253. mindspore/parallel/transform_safetensors.py +462 -174
  254. mindspore/profiler/__init__.py +2 -1
  255. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  256. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  257. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
  258. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  259. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  260. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  261. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  262. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  263. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  264. mindspore/profiler/analysis/task_manager.py +1 -1
  265. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  266. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  267. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  268. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  269. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  270. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  271. mindspore/profiler/common/constant.py +16 -0
  272. mindspore/profiler/common/profiler_context.py +25 -27
  273. mindspore/profiler/common/profiler_info.py +0 -16
  274. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  275. mindspore/profiler/common/profiler_output_path.py +23 -8
  276. mindspore/profiler/common/profiler_parameters.py +128 -35
  277. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  278. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  279. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  280. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  281. mindspore/profiler/dynamic_profiler.py +305 -314
  282. mindspore/profiler/envprofiler.py +12 -7
  283. mindspore/profiler/experimental_config.py +96 -6
  284. mindspore/profiler/mstx.py +33 -12
  285. mindspore/profiler/platform/__init__.py +2 -3
  286. mindspore/profiler/platform/npu_profiler.py +29 -19
  287. mindspore/profiler/profiler.py +35 -19
  288. mindspore/profiler/profiler_action_controller.py +64 -76
  289. mindspore/profiler/schedule.py +10 -4
  290. mindspore/rewrite/common/config.py +1 -0
  291. mindspore/rewrite/common/namer.py +1 -0
  292. mindspore/rewrite/common/namespace.py +1 -0
  293. mindspore/rewrite/node/node.py +31 -11
  294. mindspore/rewrite/parsers/assign_parser.py +1 -1
  295. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  296. mindspore/run_check/_check_version.py +7 -10
  297. mindspore/runtime/__init__.py +5 -5
  298. mindspore/runtime/event.py +10 -4
  299. mindspore/runtime/executor.py +60 -45
  300. mindspore/runtime/memory.py +21 -30
  301. mindspore/runtime/thread_bind_core.py +298 -164
  302. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  303. mindspore/swresample-4.dll +0 -0
  304. mindspore/swscale-6.dll +0 -0
  305. mindspore/tinyxml2.dll +0 -0
  306. mindspore/train/_utils.py +6 -2
  307. mindspore/train/amp.py +43 -20
  308. mindspore/train/callback/__init__.py +5 -5
  309. mindspore/train/callback/_checkpoint.py +3 -6
  310. mindspore/train/callback/_flops_collector.py +1 -1
  311. mindspore/train/callback/_landscape.py +0 -1
  312. mindspore/train/callback/_train_fault_tolerance.py +71 -13
  313. mindspore/train/data_sink.py +11 -2
  314. mindspore/train/dataset_helper.py +9 -0
  315. mindspore/train/model.py +51 -33
  316. mindspore/train/serialization.py +133 -111
  317. mindspore/train/summary/summary_record.py +13 -2
  318. mindspore/turbojpeg.dll +0 -0
  319. mindspore/utils/__init__.py +3 -2
  320. mindspore/utils/dryrun.py +0 -6
  321. mindspore/utils/runtime_execution_order_check.py +162 -78
  322. mindspore/utils/sdc_detect.py +68 -0
  323. mindspore/utils/utils.py +6 -9
  324. mindspore/version.py +1 -1
  325. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  326. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +329 -367
  327. mindspore/_deprecated/jit.py +0 -198
  328. mindspore/experimental/es/__init__.py +0 -22
  329. mindspore/experimental/es/embedding_service.py +0 -891
  330. mindspore/experimental/es/embedding_service_layer.py +0 -581
  331. mindspore/profiler/parser/__init__.py +0 -14
  332. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  333. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  334. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  335. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  336. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  337. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  338. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  339. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  340. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  341. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  342. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  343. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  344. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  345. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  346. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  347. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  348. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  349. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  350. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  351. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  352. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  353. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  354. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  355. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  356. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  357. mindspore/profiler/parser/container.py +0 -229
  358. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  359. mindspore/profiler/parser/flops_parser.py +0 -531
  360. mindspore/profiler/parser/framework_enum.py +0 -111
  361. mindspore/profiler/parser/framework_parser.py +0 -464
  362. mindspore/profiler/parser/framework_struct.py +0 -61
  363. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  364. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  365. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  366. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  367. mindspore/profiler/parser/hccl_parser.py +0 -573
  368. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  369. mindspore/profiler/parser/integrator.py +0 -526
  370. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  371. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  372. mindspore/profiler/parser/minddata_parser.py +0 -186
  373. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  374. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  375. mindspore/profiler/parser/optime_parser.py +0 -250
  376. mindspore/profiler/parser/profiler_info.py +0 -213
  377. mindspore/profiler/parser/step_trace_parser.py +0 -666
  378. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  379. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  380. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/common/api.py CHANGED
@@ -1,6 +1,6 @@
  # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  #
- # Copyright 2020-2024 Huawei Technologies Co., Ltd
+ # Copyright 2020-2025 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -17,6 +17,8 @@
  """Providing interface methods."""
  from __future__ import absolute_import

+ __all__ = ['ms_memory_recycle', 'jit', 'jit_class', 'flops_collection']
+
  import gc
  import types
  import sys
@@ -42,7 +44,7 @@ from mindspore.common.sparse_tensor import RowTensor as PythonRowTensor
  from mindspore._c_expression.amp import get_curr_amp_strategy
  from mindspore._c_expression import GraphExecutor_, JitExecutor_, CSRTensor, RowTensor, COOTensor, \
      PyNativeExecutor_, verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline, \
-     _run_jit_pipeline, _ms_memory_recycle, _bind_device_ctx, StubNode, MSContext, TensorPy as Tensor
+     _run_jit_pipeline, _ms_memory_recycle, _bind_device_ctx, MSContext, TensorPy as Tensor
  from mindspore.parallel._ps_context import _is_role_sched
  from mindspore.parallel._utils import _check_full_batch, _get_parameter_broadcast, _is_in_auto_parallel_mode, \
      _is_parallel_mode
@@ -58,7 +60,7 @@ from mindspore.common.jit_context import jit_context
  from mindspore.common.jit_trace import _jit_trace
  from mindspore.parallel._utils import _init_auto_parallel_context, _clear_auto_parallel_context

- # Store ms_function class compiled pipeline cache.
+ # Store jit class compiled pipeline cache.
  ms_compile_cache = set()
  # Store cell compiled pipeline cache.
  cells_compile_cache = {}
@@ -134,8 +136,6 @@ def _convert_python_data(data):
      """
      if isinstance(data, PythonTensor):
          return data
-     if isinstance(data, StubNode):
-         return ms.common._stub_tensor._convert_stub(data)
      if data.__class__ is tuple:
          # Handle namedtuple since its type is tuple.
          if hasattr(data, "_fields"):
@@ -278,13 +278,13 @@ def __get_compile_cache_dep_files(file_path, compile_cache_dep_files, pkg):
          module = importlib.util.module_from_spec(module_spec)
          if hasattr(module, '__file__'):
              dep_file_path = module.__file__
+             # Exclude the installed modules.
+             if not _in_sys_path(dep_file_path) and dep_file_path not in compile_cache_dep_files:
+                 logger.debug(f"dependent file path: {dep_file_path}")
+                 compile_cache_dep_files.append(dep_file_path)
+                 __get_compile_cache_dep_files(dep_file_path, compile_cache_dep_files, module.__package__)
          else:
              continue
-         # Exclude the installed modules.
-         if not _in_sys_path(dep_file_path) and dep_file_path not in compile_cache_dep_files:
-             logger.debug(f"dependent file path: {dep_file_path}")
-             compile_cache_dep_files.append(dep_file_path)
-             __get_compile_cache_dep_files(dep_file_path, compile_cache_dep_files, module.__package__)


  def _get_compile_cache_dep_files():
@@ -342,7 +342,7 @@ def _get_parameter_layout():
      return layout


- def _handle_arg(obj, arg, compile_arg):
+ def _handle_arg(obj, arg, has_mutable_arg):
      """Handle arg for runtime .If need handle the arg, return True"""
      from mindspore._extends.parse import compile_config
      if isinstance(arg, PythonTensor):
@@ -352,7 +352,7 @@ def _handle_arg(obj, arg, compile_arg):
          return arg
      elif isinstance(arg, (Tensor, CSRTensor, COOTensor)):
          return arg
-     elif compile_arg is not None and hasattr(compile_arg, "__ms_mutable__") and getattr(compile_arg, "__ms_mutable__"):
+     elif has_mutable_arg:
          # mutable([]) will be eliminated by FuncGraphSpecializer, and empty list is not supported by backend.
          if isinstance(arg, list) and not arg:
              return None
@@ -366,7 +366,7 @@
      return None


- def _handle_arg_predict(obj, arg, compile_arg):
+ def _handle_arg_predict(obj, arg, has_mutable_arg):
      """Handle arg for runtime .If need handle the arg, return True"""
      if arg is None:
          return None
@@ -375,8 +375,7 @@ def _handle_arg_predict(obj, arg, compile_arg):
          return None

      if isinstance(arg, (list, tuple)):
-         if compile_arg is not None and hasattr(compile_arg, "__ms_mutable__") and \
-                 getattr(compile_arg, "__ms_mutable__"):
+         if has_mutable_arg:
              # mutable([]) will be eliminated by FuncGraphSpecializer, and empty list is not supported by backend.
              if isinstance(arg, list) and not arg:
                  return None
388
387
  return arg
389
388
 
390
389
 
391
- def _get_args_for_run(obj, args, kwargs, compile_args):
390
+ def _get_args_for_run(obj, args, kwargs, has_mutable_args_list, is_predict):
392
391
  """Get the actual input args and kwargs for runtime."""
393
392
  new_args = []
394
- for arg, compile_arg in zip(args, compile_args):
395
- new_arg = _handle_arg(obj, arg, compile_arg)
393
+ fn = _handle_arg_predict if is_predict else _handle_arg
394
+ for arg, has_mutable_arg in zip(args, has_mutable_args_list):
395
+ new_arg = fn(obj, arg, has_mutable_arg)
396
396
  if new_arg is not None:
397
397
  new_args.append(new_arg)
398
398
 
399
399
  for _, value in kwargs.items():
400
- new_value = _handle_arg(obj, value, None)
400
+ new_value = fn(obj, value, None)
401
401
  if new_value is not None:
402
402
  new_args.append(new_value)
403
403
 
404
404
  return new_args
405
405
 
406
406
 
407
- def _get_args_for_run_predict(obj, args, kwargs, compile_args):
408
- """Get the actual input args and kwargs for runtime."""
407
+ def _get_mutable_flags(compile_args):
408
+ """Get a list of booleans indicating whether each argument is marked as mutable"""
409
409
  new_args = []
410
- for arg, compile_arg in zip(args, compile_args):
411
- new_arg = _handle_arg_predict(obj, arg, compile_arg)
412
- if new_arg is not None:
413
- new_args.append(new_arg)
414
-
415
- for _, value in kwargs.items():
416
- new_value = _handle_arg_predict(obj, value, None)
417
- if new_value is not None:
418
- new_args.append(new_value)
419
-
410
+ for compile_arg in compile_args:
411
+ has_mutable_arg = compile_arg is not None and hasattr(compile_arg, "__ms_mutable__") and \
412
+ getattr(compile_arg, "__ms_mutable__")
413
+ new_args.append(has_mutable_arg)
420
414
  return new_args
421
415
 
422
416
 
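Note on the refactor above: mutable detection is now computed once from the compile-time arguments and passed into `_get_args_for_run` as a list of booleans, with `is_predict` selecting between `_handle_arg` and `_handle_arg_predict`. `_get_mutable_flags` only looks for the `__ms_mutable__` marker that the public `mindspore.mutable` API attaches. A minimal sketch of that check (the argument values here are illustrative, not taken from this diff):

    import mindspore as ms
    from mindspore import Tensor

    # One argument marked mutable, one plain tuple, one None.
    compile_args = (ms.mutable([Tensor([1.0]), Tensor([2.0])]),
                    (Tensor([3.0]),),
                    None)

    # The same check _get_mutable_flags performs above.
    flags = [arg is not None and getattr(arg, "__ms_mutable__", False)
             for arg in compile_args]
    print(flags)  # expected: [True, False, False]
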
@@ -586,7 +580,8 @@
          The result of pipeline running in graph mode.
      """

-     def __init__(self, fn, ms_create_time, input_signature=None, obj=None, jit_config=None, dynamic=0):
+     def __init__(self, fn, ms_create_time, input_signature=None, obj=None, jit_config=None, dynamic=0,
+                  cell_cache_key_extend=''):
          init_pipeline()
          if not isinstance(fn, (types.FunctionType, types.MethodType)):
              raise RuntimeError('fn {} is not function or method'.format(fn))
@@ -606,6 +601,7 @@
          self._compile_args = None
          self._enable_auto_dynamic = dynamic == 1
          self.jit_config_dict = jit_config.jit_config_dict if jit_config else None
+         self._cell_cache_key_extend = cell_cache_key_extend

      def _predict(self, *args, **kwargs):
          """Dedicated routine for predict."""
@@ -639,6 +635,11 @@
          self._compile_args = compile_args

          new_inputs = self._generate_run_args(args_list, kwargs)
+         if self.jit_config_dict:
+             jit_config_dict = self.jit_config_dict
+         else:
+             jit_config_dict = JitConfig().jit_config_dict
+         self._graph_executor.set_jit_config(jit_config_dict)
          output = self._graph_executor(
              tuple(new_inputs),
              self.obj.phase_cache[self.obj.phase]
@@ -658,12 +659,9 @@
              args_list = args_list[1:]
          phase = ""
          try:
-             if context.get_context("mode") == context.PYNATIVE_MODE:
-                 _pynative_executor.set_jit_compile_status(True, phase)
-                 phase = self.compile(self.fn.__name__, *args_list, **kwargs)
-                 _pynative_executor.set_jit_compile_status(False, phase)
-             else:
-                 phase = self.compile(self.fn.__name__, *args_list, **kwargs)
+             _pynative_executor.set_jit_compile_status(True, phase)
+             phase = self.compile(self.fn.__name__, *args_list, **kwargs)
+             _pynative_executor.set_jit_compile_status(False, phase)
          except Exception as err:
              _pynative_executor.clear_res()
              raise err
@@ -672,15 +670,16 @@
              return None

          new_inputs = self._generate_run_args(args_list, kwargs)
-         if context.get_context("mode") == context.PYNATIVE_MODE and not jit_context():
-             output = _pynative_executor.grad_jit(*new_inputs)
+         if self.jit_config_dict:
+             jit_config_dict = self.jit_config_dict
          else:
-             output = self._graph_executor(tuple(new_inputs), phase)
-             if jit_context():
-                 if is_stub_tensor(output):
-                     output = output.stub_sync()
-                 return jit_context().run_graph(phase, output, *tuple(new_inputs))
-
+             jit_config_dict = JitConfig().jit_config_dict
+         self._graph_executor.set_jit_config(jit_config_dict)
+         output = _pynative_executor.grad_jit(*new_inputs)
+         if jit_context():
+             if is_stub_tensor(output):
+                 output = output.stub_sync()
+             return jit_context().run_graph(phase, output, *tuple(new_inputs))
          return output

      def compile(self, method_name, *args, **kwargs):
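With this change a `JitConfig` is resolved and pushed to the graph executor on every run (here and in `_predict` above), not just at compile time. From user code the config still enters through the public `JitConfig` class; a hedged sketch of attaching one to a network (`Net` is illustrative, and `Cell.set_jit_config` is assumed to behave as in current MindSpore releases):

    import mindspore as ms
    from mindspore import nn

    class Net(nn.Cell):  # illustrative network
        def construct(self, x):
            return x * 2

    net = Net()
    # The attached config is forwarded to the graph executor at compile/run time.
    net.set_jit_config(ms.JitConfig(jit_level="O1", backend="ms_backend"))
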
@@ -747,6 +746,8 @@

          update_auto_dynamic_shape_phase_with_check_input_signature(compile_args, key_id, phase, self.input_signature)

+         phase = phase + self._cell_cache_key_extend
+
          if phase in ms_compile_cache and self._graph_executor.has_compiled(phase) and not parameter_hook_updated():
              # Release resource should be released when CompileInner won't be executed, such as cur_convert_input_
              # generated in generate_arguments_key.
@@ -758,10 +759,9 @@
              # If enable compile cache, get the dependency files list and set to graph executor.
              self._set_compile_cache_dep_files()
          if self.jit_config_dict:
-             self._graph_executor.set_jit_config(self.jit_config_dict)
+             jit_config_dict = self.jit_config_dict
          else:
              jit_config_dict = JitConfig().jit_config_dict
-             self._graph_executor.set_jit_config(jit_config_dict)

          if self.obj is None:
              # Set an attribute to fn as an identifier.
@@ -769,7 +769,8 @@
                  setattr(self.fn.__func__, "__jit_function__", True)
              else:
                  setattr(self.fn, "__jit_function__", True)
-             is_compile = self._graph_executor.compile(self.fn, compile_args, kwargs, phase)
+             is_compile = self._graph_executor.compile(
+                 self.fn, compile_args, kwargs, phase, jit_config_dict)
              if isinstance(self.fn, types.MethodType):
                  delattr(self.fn.__func__, "__jit_function__")
              else:
@@ -777,7 +778,8 @@
          else:
              if isinstance(self.obj, ms.nn.Cell):
                  self._graph_executor.set_weights_values(self.obj.parameters_dict())
-             is_compile = self._graph_executor.compile(self.obj, compile_args, kwargs, phase)
+             is_compile = self._graph_executor.compile(
+                 self.obj, compile_args, kwargs, phase, jit_config_dict)

          if not is_compile:
              raise RuntimeError("Executor compile failed.")
@@ -876,7 +878,7 @@
          Returns:
              new_inputs, new input args, which are required for running.
          """
-         return _get_args_for_run(self, args_list, kwargs, self._compile_args)
+         return _get_args_for_run(self, args_list, kwargs, _get_mutable_flags(self._compile_args), False)

      def _get_func_graph_proto(self, obj, exec_id, ir_type="onnx_ir", use_prefix=False, incremental=False):
          """Get graph proto from pipeline."""
@@ -1037,6 +1039,68 @@ def _check_options(options, backend):
          _check_option_value(option, value)


+ def _jit_ast(hash_obj, dynamic, jit_config, jit_graph_name):
+     """Return the wrapped function for ast mode jit."""
+     def wrap_func(func):
+         nonlocal hash_obj
+         if hasattr(func, "construct"):
+             if isinstance(func, ms.nn.Cell):
+                 # Bound the cell object to get the self arg.
+                 return types.MethodType(_jit_ast(
+                     hash_obj, dynamic, jit_config, func._jit_graph_name)(func.construct.__func__), func)
+             if isinstance(func, type) and issubclass(func, ms.nn.Cell):
+                 func.construct = _jit_ast(
+                     hash_obj, dynamic, jit_config, '')(func.construct)
+                 return func
+
+         if isinstance(func, types.MethodType):
+             return types.MethodType(_jit_ast(hash_obj, dynamic, jit_config, '')(func.__func__), func.__self__)
+
+         if not isinstance(func, types.FunctionType):
+             logger.warning(f"The func should be function, method or cell instance/class, but got {func}")
+             return func
+
+         if hasattr(func, "__wrapped_by_jit__"):
+             logger.warning(f"The func {func} should be wrapped by jit only once.")
+
+         if hash_obj is None or not _is_inner_func(func):
+             hash_obj = int(time.time() * 1e9)
+
+         @wraps(func)
+         def staging_specialize(*args, **kwargs):
+             if os.getenv("MS_JIT") == '0':
+                 return func(*args, **kwargs)
+
+             args, kwargs = _handle_func_args(func, *args, **kwargs)
+             process_obj = None
+             if args and not isinstance(args[0], PythonTensor) and hasattr(args[0], func.__name__):
+                 process_obj = args[0]
+             # Handle auto mixed precision strategy.
+             if not hasattr(func, "amp_strategy"):
+                 if isinstance(func, types.MethodType):
+                     setattr(func.__func__, "amp_strategy", get_curr_amp_strategy())
+                 else:
+                     setattr(func, "amp_strategy", get_curr_amp_strategy())
+
+             jit_graph_name = ''
+             if hasattr(staging_specialize, "__jit_graph_name__"):
+                 jit_graph_name = staging_specialize.__jit_graph_name__
+             jit_executor = _JitExecutor(
+                 func, hash_obj, None, process_obj, jit_config, dynamic, jit_graph_name)
+             out = jit_executor(*args, **kwargs)
+             return out
+
+         # `inspect.getfullargspec(func)` will get the specification of the decorated function by default. By set
+         # `__signature__` for the decorated function, `inspect.getfullargspec(func)` will get the specification of
+         # original `func`.
+         staging_specialize.__signature__ = inspect.signature(func)
+         setattr(staging_specialize, "__wrapped_by_jit__", True)
+         setattr(staging_specialize, "__jit_graph_name__", jit_graph_name)
+         return staging_specialize
+
+     return wrap_func
+
+
  def jit(
      function: Optional[Callable] = None,
      *,
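The new `_jit_ast` helper factors the old inline `wrap_func` out of `jit` and widens what it accepts: besides plain functions it now dispatches on Cell instances (wrapping `construct` and re-binding it), Cell subclasses, and bound methods, and it threads a per-cell graph name through to `_JitExecutor` for cache keying. A sketch of the accepted call forms via the public `jit` entry point (`Net` is an illustrative Cell):

    from mindspore import jit, nn

    class Net(nn.Cell):  # illustrative Cell
        def construct(self, x):
            return x * 2

    compiled_instance = jit(Net())   # Cell instance: construct is wrapped and re-bound
    CompiledClass = jit(Net)         # Cell subclass: its construct attribute is wrapped

    def add_one(x):                  # plain function, handled as before
        return x + 1

    compiled_fn = jit(add_one)
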
@@ -1059,22 +1123,22 @@
      and the decoration @jit(capture_mode=“bytecode”) is considered invalid.

      Args:
-         function (Function, optional): The Python function that will be run as a graph. Default: ``None``.
+         function (Callable, optional): The Python function or Cell that will be run as a graph. Default: ``None``.

      Keyword Args:
          capture_mode (str, optional): The method to create a callable MindSpore graph. The value of capture_mode
              should be ``ast`` , ``bytecode`` or ``trace`` . Default: ``ast`` .

-             - `ast <https://www.mindspore.cn/docs/en/r2.5.0/model_train/program_form/static_graph.html>`_ :
+             - `ast <https://www.mindspore.cn/docs/en/master/features/compile/graph_construction.html#ast>`_ :
                Parse Python ast to build graph.
-             - `bytecode <https://www.mindspore.cn/docs/en/r2.5.0/model_train/program_form/pynative.html#pijit>`_ :
+             - `bytecode <https://www.mindspore.cn/docs/en/master/features/compile/graph_construction.html#bytecode>`_ :
                Parse Python bytecode to build graph at runtime. This is an experimental prototype that is subject to
                change and/or deletion.
-             - `trace` : Trace the execution of Python code to build graph. This is an experimental prototype that is
+             - `trace <https://www.mindspore.cn/docs/en/master/features/compile/graph_construction.html#trace>`_ : Trace the execution of Python code to build graph. This is an experimental prototype that is
                subject to change and/or deletion.

          jit_level (str, optional): Used to control the compilation optimization level. Currently is only effective
-             with default backend. The value of jit_level should be ``O0`` or ``O1`` . Default: ``O0`` .
+             with ms_backend. The value of jit_level should be ``O0`` or ``O1`` . Default: ``O0`` .

              - `O0`: Except for optimizations that may affect functionality, all other optimizations are turned off.
              - `O1`: Using commonly used optimizations and automatic operator fusion optimizations. This optimization
@@ -1089,8 +1153,8 @@
          fullgraph (bool, optional): Whether to capture the entire function into graph. If False, jit attempts to
              be compatible with all Python syntax in the function as much as possible. If True, we require that the
              entire function can be captured into graph. If this is not possible (that is, if there is Python syntax
-             not supported), then it will raise an exception. This currently only applies when capture_mode is ast.
-             Default: ``False``.
+             not supported), then it will raise an exception. This currently only applies when capture_mode is ``ast``
+             or ``bytecode``. Default: ``False``.
          backend (str, optional): The compilation backend to be used. If this parameter is not set, the framework will
              use ``GE`` backend for Atlas training series products and ``ms_backend`` backend for others including Atlas
              A2 training series products by default.
@@ -1158,29 +1222,84 @@
      >>> x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
      >>> y = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
      ...
-     >>> # create a callable MindSpore graph by calling jit
+     >>> # Create a callable MindSpore graph by calling jit.
      >>> def tensor_add(x, y):
      ...     z = x + y
      ...     return z
      ...
      >>> tensor_add_graph = jit(function=tensor_add)
      >>> out = tensor_add_graph(x, y)
+     >>> print(out)
+     Tensor(shape=[1, 1, 3, 3], dtype=Float32, value=
+     [[[[ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00]]]])
      ...
-     >>> # create a callable MindSpore graph through decorator @jit
+     >>> # Create a callable MindSpore graph through decorator @jit.
      >>> @jit
      ... def tensor_add_with_dec(x, y):
      ...     z = x + y
      ...     return z
      ...
      >>> out = tensor_add_with_dec(x, y)
+     >>> print(out)
+     Tensor(shape=[1, 1, 3, 3], dtype=Float32, value=
+     [[[[ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00]]]])
      ...
-     >>> # create a callable MindSpore graph and capture the entire function into the graph
+     >>> # Create a callable MindSpore graph and capture the entire function into the graph.
      >>> @jit(fullgraph=True)
      ... def tensor_add_fullgraph(x, y):
      ...     z = x + y
      ...     return z
      ...
      >>> out = tensor_add_fullgraph(x, y)
+     >>> print(out)
+     Tensor(shape=[1, 1, 3, 3], dtype=Float32, value=
+     [[[[ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00]]]])
+     ...
+     >>> # Create a callable MindSpore graph by trace mode.
+     >>> @jit(capture_mode="trace")
+     ... def tensor_add_by_trace(x, y):
+     ...     z = x + y
+     ...     return z
+     ...
+     >>> out = tensor_add_by_trace(x, y)
+     >>> print(out)
+     Tensor(shape=[1, 1, 3, 3], dtype=Float32, value=
+     [[[[ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00]]]])
+     ...
+     >>> # Create a callable MindSpore graph with ms_backend and jit_level="O1".
+     >>> @jit(backend="ms_backend", jit_level="O1")
+     ... def tensor_add_by_trace(x, y):
+     ...     z = x + y
+     ...     return z
+     ...
+     >>> out = tensor_add_by_trace(x, y)
+     >>> print(out)
+     Tensor(shape=[1, 1, 3, 3], dtype=Float32, value=
+     [[[[ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00]]]])
+     ...
+     >>> # Create a callable MindSpore graph with GE backend and some ge options on Ascend.
+     >>> @jit(backend="GE", ge_options={"global": {"ge.opSelectImplmode": "high_precision"}})
+     ... def tensor_add_by_trace(x, y):
+     ...     z = x + y
+     ...     return z
+     ...
+     >>> out = tensor_add_by_trace(x, y)
+     >>> print(out)
+     Tensor(shape=[1, 1, 3, 3], dtype=Float32, value=
+     [[[[ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00],
+        [ 2.00000000e+00, 2.00000000e+00, 2.00000000e+00]]]])
+     ...
      """

      capture_mode = Validator.check_string(capture_mode, ["ast", "bytecode", "trace"], "capture_mode", "jit")
@@ -1199,39 +1318,12 @@
      jit_config = JitConfig(jit_level=jit_level, exc_mode=exc_mode, jit_syntax_level=jit_syntax_level,
                             infer_boost=infer_boost, backend=backend, options=options_str)

-     def wrap_func(func):
-         nonlocal hash_obj
-         if hash_obj is None or not _is_inner_func(func):
-             hash_obj = int(time.time() * 1e9)
-
-         @wraps(func)
-         def staging_specialize(*args, **kwargs):
-             if os.getenv("MS_JIT") == '0':
-                 return func(*args, **kwargs)
-
-             args, kwargs = _handle_func_args(func, *args, **kwargs)
-             process_obj = None
-             if args and not isinstance(args[0], PythonTensor) and hasattr(args[0], func.__name__):
-                 process_obj = args[0]
-             # Handle auto mixed precision strategy.
-             if not hasattr(func, "amp_strategy"):
-                 if isinstance(func, types.MethodType):
-                     setattr(func.__func__, "amp_strategy", get_curr_amp_strategy())
-                 else:
-                     setattr(func, "amp_strategy", get_curr_amp_strategy())
-
-             ms_function_executor = _JitExecutor(func, hash_obj, None, process_obj, jit_config, dynamic)
-             out = ms_function_executor(*args, **kwargs)
-             return out
-
-         return staging_specialize
-
-     if capture_mode == "bytecode":
-         wrap_func = PIJitCaptureContext(jit_config)
-     elif capture_mode == "trace":
-         if function is not None:
-             return _jit_trace(function)
-         return _jit_trace
+     if capture_mode == "ast":
+         wrap_func = _jit_ast(hash_obj, dynamic, jit_config, '')
+     elif capture_mode == "bytecode":
+         wrap_func = PIJitCaptureContext(fullgraph=fullgraph, jit_config=jit_config)
+     else:
+         wrap_func = _jit_trace()

      if function is not None:
          return wrap_func(function)
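The dispatch above replaces the removed inline wrapper: ``ast`` routes through the new `_jit_ast`, ``bytecode`` now also receives the `fullgraph` flag, and ``trace`` uses the `_jit_trace()` factory. A short sketch of the three modes from user code, mirroring the docstring examples added earlier in this diff:

    from mindspore import jit

    @jit                                           # default: capture_mode="ast"
    def f_ast(x, y):
        return x + y

    @jit(capture_mode="bytecode", fullgraph=True)  # fullgraph now applies here too
    def f_bytecode(x, y):
        return x + y

    @jit(capture_mode="trace")                     # build the graph by tracing execution
    def f_trace(x, y):
        return x + y
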
@@ -1547,7 +1639,7 @@ class _PyNativeExecutor:
          """
          self._executor.end_graph(obj, output, *args, *(kwargs.values()))

-     def check_run(self, grad, obj, weights, grad_hash_id, *args):
+     def check_run(self, grad, obj, weights, grad_hash_id, *args, **kwargs):
          """
          Whether the forward graph need to construct.

@@ -1560,7 +1652,7 @@
          Return:
              bool, specifies whether the forward graph needs to construct.
          """
-         return self._executor.check_run(grad, obj, weights, grad_hash_id, *args)
+         return self._executor.check_run(grad, obj, weights, grad_hash_id, *args, **kwargs)

      def grad(self, obj, grad, weights, grad_position, *args):
          """
@@ -1878,13 +1970,6 @@ class _CellGraphExecutor:
          else:
              _set_dataset_mode_config('normal')

-     @staticmethod
-     def _use_vm_mode():
-         enable_ge = context.get_context("enable_ge")
-         enable_debug_runtime = context.get_context("enable_debug_runtime")
-         exe_mode = context.get_context("mode") == context.PYNATIVE_MODE
-         return not enable_ge or (enable_debug_runtime and exe_mode)
-
      def _build_data_graph(self, obj, phase):
          self._graph_executor.build_data_graph(obj.parameters_dict(), phase)

@@ -1916,7 +2001,7 @@
          obj.__parse_method__ = 'construct'
          if not hasattr(obj, obj.__parse_method__):
              raise AttributeError(
-                 'The class {} dose not have method {}'.format(obj.__class__.__name__, obj.__parse_method__))
+                 'The class {} does not have method {}'.format(obj.__class__.__name__, obj.__parse_method__))
          key_id = str(id(obj)) + str(obj.create_time)
          args = get_auto_dynamic_shape_args(args, key_id)

@@ -1948,7 +2033,7 @@
              _clear_auto_parallel_context(obj)
              return phase, False

-         full_function_name = obj.__class__.__name__ + '.' + str(obj.instance_count) + '.' + str(id(type(obj)))
+         full_function_name = obj.__class__.__name__ + '.' + str(obj.total_instance_count) + '.' + str(id(type(obj)))
          echo_function_name = obj.__class__.__name__
          _check_recompile(obj, args, kwargs, full_function_name, obj.create_time, echo_function_name)

@@ -1958,13 +2043,11 @@
              self._set_compile_cache_dep_files(phase)

          self._graph_executor.set_weights_values(obj.parameters_dict())
-         if jit_config_dict:
-             self._graph_executor.set_jit_config(jit_config_dict)
-         else:
+         if not jit_config_dict:
              jit_config_dict = JitConfig().jit_config_dict
-             self._graph_executor.set_jit_config(jit_config_dict)
          gc.collect()
-         result = self._graph_executor.compile(obj, args, kwargs, phase)
+         result = self._graph_executor.compile(
+             obj, args, kwargs, phase, jit_config_dict)
          obj.compile_cache.add(phase)
          if not result:
              raise RuntimeError("Executor compile failed.")
@@ -2165,5 +2248,3 @@ def flops_collection(phase='train'):

  _cell_graph_executor = _CellGraphExecutor()
  _pynative_executor = _PyNativeExecutor()
-
- __all__ = ['ms_memory_recycle', 'jit', 'jit_class', 'flops_collection']
mindspore/common/dtype.py CHANGED
@@ -23,6 +23,7 @@ import numpy as np
  from mindspore._c_expression import typing
  from mindspore._c_expression.typing import Type
  from mindspore._c_expression.np_dtypes import np_version_valid
+
  if np_version_valid(False):
      from mindspore._c_expression.np_dtypes import bfloat16 as np_bfloat16

@@ -46,7 +47,9 @@ __dtype__ = [
      "TensorType", "_null",
      "Type", "Int",
      "complex64", "complex128",
-     "bfloat16", "qint4x2"
+     "bfloat16", "qint4x2",
+     "float8_e4m3fn", "float8_e5m2",
+     "hifloat8"
  ]

  __method__ = [
@@ -86,6 +89,9 @@ float32 = typing.kFloat32
  single = float32
  float64 = typing.kFloat64
  double = float64
+ float8_e4m3fn = typing.kFloat8E4M3FN
+ float8_e5m2 = typing.kFloat8E5M2
+ hifloat8 = typing.kHiFloat8
  bfloat16 = typing.kBFloat16
  complex64 = typing.kComplex64
  complex128 = typing.kComplex128
@@ -145,17 +151,19 @@ number_type = (int8,
                 bfloat16,
                 complex64,
                 complex128,
-                qint4x2,)
+                qint4x2,
+                float8_e4m3fn,
+                float8_e5m2,
+                hifloat8)

  int_type = (int8, int16, int32, int64,)
  uint_type = (uint8, uint16, uint32, uint64,)
- float_type = (float16, float32, float64, bfloat16,)
- signed_type = (int8, byte, int16, short, int32, intc, int64,
-                intp, float16, half, float32, single, float64,
-                double, bfloat16, complex64, complex128)
+ float_type = (float16, float32, float64, bfloat16, float8_e4m3fn, float8_e5m2, hifloat8)
+ signed_type = (int8, byte, int16, short, int32, intc, int64, intp, float16, half, float32, single, float64, double,
+                bfloat16, complex64, complex128, float8_e4m3fn, float8_e5m2, hifloat8)
  complex_type = (complex64, complex128,)
- all_types = (bool_, int8, uint8, int16, int32, int64, float16, float32, float64, bfloat16, complex64, complex128)
- implicit_conversion_seq = {t: idx for idx, t in enumerate(all_types)}
+ all_types = (bool_, int8, uint8, int16, int32, int64, float16, float32, float64, bfloat16, complex64, complex128,
+              float8_e4m3fn, float8_e5m2, hifloat8)

  _simple_types = {
      list: list_,
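The three new low-precision dtypes join every exported dtype tuple above. A quick membership check, assuming they are re-exported at the package top level like the existing `bfloat16` (`mstype` is the conventional alias for `mindspore.common.dtype`):

    import mindspore as ms
    from mindspore import dtype as mstype

    print(ms.float8_e4m3fn in mstype.float_type)   # expected: True
    print(ms.hifloat8 in mstype.number_type)       # expected: True
    print(len(mstype.all_types))                   # expected: 15 (was 12 before this diff)
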
@@ -281,8 +289,11 @@ def dtype_to_nptype(type_):
      }
      if type_ == bfloat16:
          if not np_version_valid(True):
-             raise TypeError("The Numpy bfloat16 data type is not supported now, please ensure that the current "
-                             "Numpy version is not less than the version when the mindspore is compiled.")
+             raise TypeError(
+                 "The Numpy bfloat16 data type is not supported now, please ensure that the current "
+                 "Numpy version is not less than the version when the mindspore is compiled, "
+                 "and the major versions are same."
+             )
          return np_bfloat16
      return _dtype_nptype_dict[type_]

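Behavior of the NumPy mapping is otherwise unchanged; the reworded error adds the requirement that the installed NumPy share the major version MindSpore was compiled against. A hedged usage sketch of the public `dtype_to_nptype` helper:

    import numpy as np
    import mindspore as ms

    assert ms.dtype_to_nptype(ms.float32) is np.float32
    # ms.dtype_to_nptype(ms.bfloat16) returns the custom np_bfloat16 type, or raises
    # the TypeError above when the NumPy version check fails.
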
@@ -335,7 +346,6 @@ def _issubclass_(type_, dtype):
      return typing.is_subclass(type_, dtype)


-
  def type_size_in_bytes(dtype):
      """
      Return type size in bytes.