mindspore-2.4.10-cp39-cp39-win_amd64.whl → mindspore-2.5.0-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (389)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +8 -3
  5. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +0 -5
  9. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  10. mindspore/_extends/parse/compile_config.py +64 -0
  11. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  12. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
  13. mindspore/_extends/parse/parser.py +23 -5
  14. mindspore/_extends/parse/standard_method.py +123 -27
  15. mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
  16. mindspore/amp.py +7 -1
  17. mindspore/atlprov.dll +0 -0
  18. mindspore/avcodec-59.dll +0 -0
  19. mindspore/avdevice-59.dll +0 -0
  20. mindspore/avfilter-8.dll +0 -0
  21. mindspore/avformat-59.dll +0 -0
  22. mindspore/avutil-57.dll +0 -0
  23. mindspore/boost/boost_cell_wrapper.py +136 -41
  24. mindspore/c1.dll +0 -0
  25. mindspore/c1xx.dll +0 -0
  26. mindspore/c2.dll +0 -0
  27. mindspore/common/__init__.py +3 -1
  28. mindspore/common/_register_for_tensor.py +0 -1
  29. mindspore/common/_stub_tensor.py +25 -4
  30. mindspore/common/_tensor_cpp_method.py +17 -0
  31. mindspore/common/_tensor_docs.py +6132 -0
  32. mindspore/common/api.py +98 -21
  33. mindspore/common/dtype.py +34 -34
  34. mindspore/common/dump.py +2 -1
  35. mindspore/common/file_system.py +8 -3
  36. mindspore/common/generator.py +2 -0
  37. mindspore/common/hook_handle.py +3 -1
  38. mindspore/common/initializer.py +3 -4
  39. mindspore/common/lazy_inline.py +8 -2
  40. mindspore/common/mindir_util.py +10 -2
  41. mindspore/common/parameter.py +31 -15
  42. mindspore/common/tensor.py +713 -1337
  43. mindspore/communication/__init__.py +1 -1
  44. mindspore/communication/_comm_helper.py +5 -0
  45. mindspore/communication/comm_func.py +215 -173
  46. mindspore/communication/management.py +23 -20
  47. mindspore/context.py +285 -191
  48. mindspore/dataset/__init__.py +23 -19
  49. mindspore/dataset/callback/ds_callback.py +2 -1
  50. mindspore/dataset/core/config.py +84 -3
  51. mindspore/dataset/engine/cache_admin.py +3 -3
  52. mindspore/dataset/engine/cache_client.py +5 -4
  53. mindspore/dataset/engine/datasets.py +192 -149
  54. mindspore/dataset/engine/datasets_audio.py +14 -0
  55. mindspore/dataset/engine/datasets_standard_format.py +11 -11
  56. mindspore/dataset/engine/datasets_text.py +38 -1
  57. mindspore/dataset/engine/datasets_user_defined.py +100 -66
  58. mindspore/dataset/engine/datasets_vision.py +81 -8
  59. mindspore/dataset/engine/iterators.py +281 -63
  60. mindspore/dataset/engine/obs/util.py +8 -0
  61. mindspore/dataset/engine/queue.py +40 -0
  62. mindspore/dataset/engine/samplers.py +26 -2
  63. mindspore/dataset/engine/serializer_deserializer.py +1 -1
  64. mindspore/dataset/engine/validators.py +43 -11
  65. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  66. mindspore/dataset/transforms/transforms.py +29 -12
  67. mindspore/dataset/vision/validators.py +1 -2
  68. mindspore/device_context/__init__.py +21 -0
  69. mindspore/device_context/ascend/__init__.py +25 -0
  70. mindspore/device_context/ascend/device.py +72 -0
  71. mindspore/device_context/ascend/op_debug.py +94 -0
  72. mindspore/device_context/ascend/op_precision.py +193 -0
  73. mindspore/device_context/ascend/op_tuning.py +127 -0
  74. mindspore/device_context/cpu/__init__.py +25 -0
  75. mindspore/device_context/cpu/device.py +62 -0
  76. mindspore/device_context/cpu/op_tuning.py +43 -0
  77. mindspore/device_context/gpu/__init__.py +21 -0
  78. mindspore/device_context/gpu/device.py +70 -0
  79. mindspore/device_context/gpu/op_precision.py +67 -0
  80. mindspore/device_context/gpu/op_tuning.py +175 -0
  81. mindspore/device_manager.py +134 -0
  82. mindspore/dnnl.dll +0 -0
  83. mindspore/dpcmi.dll +0 -0
  84. mindspore/experimental/llm_boost/__init__.py +1 -0
  85. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  86. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  87. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  88. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  89. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  90. mindspore/experimental/llm_boost/register.py +1 -0
  91. mindspore/experimental/optim/adadelta.py +26 -22
  92. mindspore/experimental/optim/adam.py +3 -0
  93. mindspore/experimental/optim/lr_scheduler.py +33 -24
  94. mindspore/experimental/optim/radam.py +33 -30
  95. mindspore/hal/device.py +28 -0
  96. mindspore/hal/event.py +17 -0
  97. mindspore/hal/memory.py +94 -3
  98. mindspore/hal/stream.py +91 -6
  99. mindspore/include/api/context.h +0 -1
  100. mindspore/jpeg62.dll +0 -0
  101. mindspore/log.py +12 -0
  102. mindspore/mindrecord/__init__.py +1 -1
  103. mindspore/mindrecord/config.py +17 -316
  104. mindspore/mindrecord/filereader.py +1 -9
  105. mindspore/mindrecord/filewriter.py +5 -15
  106. mindspore/mindrecord/mindpage.py +1 -9
  107. mindspore/mindspore_backend.dll +0 -0
  108. mindspore/mindspore_common.dll +0 -0
  109. mindspore/mindspore_core.dll +0 -0
  110. mindspore/mindspore_glog.dll +0 -0
  111. mindspore/mindspore_ops.dll +0 -0
  112. mindspore/mint/__init__.py +824 -218
  113. mindspore/mint/distributed/__init__.py +66 -4
  114. mindspore/mint/distributed/distributed.py +2594 -44
  115. mindspore/mint/linalg/__init__.py +6 -0
  116. mindspore/mint/nn/__init__.py +473 -14
  117. mindspore/mint/nn/functional.py +486 -11
  118. mindspore/mint/nn/layer/__init__.py +17 -4
  119. mindspore/mint/nn/layer/_functions.py +330 -0
  120. mindspore/mint/nn/layer/activation.py +169 -1
  121. mindspore/mint/nn/layer/basic.py +123 -0
  122. mindspore/mint/nn/layer/conv.py +727 -0
  123. mindspore/mint/nn/layer/normalization.py +215 -19
  124. mindspore/mint/nn/layer/padding.py +797 -0
  125. mindspore/mint/nn/layer/pooling.py +170 -0
  126. mindspore/mint/optim/__init__.py +2 -1
  127. mindspore/mint/optim/adam.py +223 -0
  128. mindspore/mint/optim/adamw.py +26 -19
  129. mindspore/mint/special/__init__.py +2 -1
  130. mindspore/msobj140.dll +0 -0
  131. mindspore/mspdb140.dll +0 -0
  132. mindspore/mspdbcore.dll +0 -0
  133. mindspore/mspdbst.dll +0 -0
  134. mindspore/mspft140.dll +0 -0
  135. mindspore/msvcdis140.dll +0 -0
  136. mindspore/msvcp140_1.dll +0 -0
  137. mindspore/msvcp140_2.dll +0 -0
  138. mindspore/msvcp140_atomic_wait.dll +0 -0
  139. mindspore/msvcp140_codecvt_ids.dll +0 -0
  140. mindspore/multiprocessing/__init__.py +5 -0
  141. mindspore/nn/cell.py +126 -19
  142. mindspore/nn/dynamic_lr.py +2 -1
  143. mindspore/nn/layer/activation.py +6 -6
  144. mindspore/nn/layer/basic.py +35 -25
  145. mindspore/nn/layer/channel_shuffle.py +3 -3
  146. mindspore/nn/layer/embedding.py +3 -3
  147. mindspore/nn/layer/normalization.py +8 -7
  148. mindspore/nn/layer/padding.py +4 -3
  149. mindspore/nn/layer/pooling.py +47 -13
  150. mindspore/nn/layer/rnn_cells.py +1 -1
  151. mindspore/nn/layer/rnns.py +2 -1
  152. mindspore/nn/layer/timedistributed.py +5 -5
  153. mindspore/nn/layer/transformer.py +48 -26
  154. mindspore/nn/learning_rate_schedule.py +5 -3
  155. mindspore/nn/loss/loss.py +31 -36
  156. mindspore/nn/optim/ada_grad.py +1 -0
  157. mindspore/nn/optim/adadelta.py +2 -2
  158. mindspore/nn/optim/adam.py +1 -1
  159. mindspore/nn/optim/lars.py +1 -4
  160. mindspore/nn/optim/optimizer.py +1 -1
  161. mindspore/nn/optim/rprop.py +2 -2
  162. mindspore/nn/optim/thor.py +2 -1
  163. mindspore/nn/utils/init.py +13 -11
  164. mindspore/nn/wrap/cell_wrapper.py +4 -6
  165. mindspore/nn/wrap/loss_scale.py +3 -4
  166. mindspore/numpy/array_creations.py +60 -62
  167. mindspore/numpy/array_ops.py +148 -143
  168. mindspore/numpy/logic_ops.py +41 -42
  169. mindspore/numpy/math_ops.py +361 -359
  170. mindspore/numpy/utils.py +16 -16
  171. mindspore/numpy/utils_const.py +4 -4
  172. mindspore/opencv_core452.dll +0 -0
  173. mindspore/opencv_imgcodecs452.dll +0 -0
  174. mindspore/opencv_imgproc452.dll +0 -0
  175. mindspore/ops/__init__.py +2 -1
  176. mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
  177. mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
  178. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  179. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  180. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  181. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  182. mindspore/ops/_vmap/vmap_array_ops.py +20 -19
  183. mindspore/ops/_vmap/vmap_base.py +0 -2
  184. mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
  185. mindspore/ops/_vmap/vmap_math_ops.py +11 -9
  186. mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
  187. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
  188. mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
  189. mindspore/ops/auto_generate/gen_extend_func.py +554 -60
  190. mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
  191. mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
  192. mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
  193. mindspore/ops/composite/base.py +1 -1
  194. mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
  195. mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
  196. mindspore/ops/function/__init__.py +12 -0
  197. mindspore/ops/function/array_func.py +561 -159
  198. mindspore/ops/function/clip_func.py +64 -0
  199. mindspore/ops/function/debug_func.py +28 -20
  200. mindspore/ops/function/image_func.py +1 -1
  201. mindspore/ops/function/linalg_func.py +5 -4
  202. mindspore/ops/function/math_func.py +1659 -290
  203. mindspore/ops/function/nn_func.py +988 -317
  204. mindspore/ops/function/parameter_func.py +3 -56
  205. mindspore/ops/function/random_func.py +243 -33
  206. mindspore/ops/function/sparse_unary_func.py +1 -1
  207. mindspore/ops/functional.py +18 -5
  208. mindspore/ops/functional_overload.py +897 -0
  209. mindspore/ops/operations/__init__.py +3 -2
  210. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  211. mindspore/ops/operations/_grad_ops.py +2 -34
  212. mindspore/ops/operations/_infer_ops.py +2 -1
  213. mindspore/ops/operations/_inner_ops.py +38 -8
  214. mindspore/ops/operations/array_ops.py +45 -303
  215. mindspore/ops/operations/comm_ops.py +19 -16
  216. mindspore/ops/operations/custom_ops.py +11 -55
  217. mindspore/ops/operations/debug_ops.py +42 -47
  218. mindspore/ops/operations/inner_ops.py +6 -4
  219. mindspore/ops/operations/linalg_ops.py +3 -2
  220. mindspore/ops/operations/manually_defined/ops_def.py +185 -104
  221. mindspore/ops/operations/math_ops.py +11 -216
  222. mindspore/ops/operations/nn_ops.py +146 -308
  223. mindspore/ops/primitive.py +23 -21
  224. mindspore/ops/tensor_method.py +1669 -0
  225. mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
  226. mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
  227. mindspore/ops_generate/arg_handler.py +0 -61
  228. mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
  229. mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
  230. mindspore/ops_generate/base_generator.py +11 -0
  231. mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
  232. mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
  233. mindspore/ops_generate/functional_overload_py_generator.py +110 -0
  234. mindspore/ops_generate/functions_cc_generator.py +233 -0
  235. mindspore/ops_generate/gen_aclnn_implement.py +110 -114
  236. mindspore/ops_generate/gen_constants.py +157 -3
  237. mindspore/ops_generate/gen_ops.py +245 -990
  238. mindspore/ops_generate/gen_pyboost_func.py +97 -998
  239. mindspore/ops_generate/gen_utils.py +119 -33
  240. mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
  241. mindspore/ops_generate/op_api_proto.py +206 -0
  242. mindspore/ops_generate/op_def_py_generator.py +131 -0
  243. mindspore/ops_generate/op_prim_py_generator.py +480 -0
  244. mindspore/ops_generate/op_proto.py +373 -108
  245. mindspore/ops_generate/op_template_parser.py +436 -0
  246. mindspore/ops_generate/ops_def_cc_generator.py +288 -0
  247. mindspore/ops_generate/ops_def_h_generator.py +74 -0
  248. mindspore/ops_generate/ops_name_h_generator.py +68 -0
  249. mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
  250. mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
  251. mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
  252. mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
  253. mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
  254. mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
  255. mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
  256. mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
  257. mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
  258. mindspore/ops_generate/pyboost_utils.py +92 -33
  259. mindspore/ops_generate/template.py +294 -44
  260. mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
  261. mindspore/parallel/__init__.py +3 -3
  262. mindspore/parallel/_auto_parallel_context.py +24 -33
  263. mindspore/parallel/_parallel_serialization.py +13 -2
  264. mindspore/parallel/_utils.py +4 -1
  265. mindspore/parallel/algo_parameter_config.py +1 -1
  266. mindspore/parallel/checkpoint_transform.py +44 -0
  267. mindspore/parallel/cluster/process_entity/_api.py +131 -37
  268. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  269. mindspore/parallel/cluster/run.py +20 -3
  270. mindspore/parallel/parameter_broadcast.py +1 -1
  271. mindspore/parallel/shard.py +3 -0
  272. mindspore/parallel/transform_safetensors.py +119 -253
  273. mindspore/pgodb140.dll +0 -0
  274. mindspore/pgort140.dll +0 -0
  275. mindspore/profiler/__init__.py +17 -4
  276. mindspore/profiler/analysis/__init__.py +0 -0
  277. mindspore/profiler/analysis/parser/__init__.py +0 -0
  278. mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
  279. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  280. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  281. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  282. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  283. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  284. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
  285. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  286. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
  287. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  288. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  289. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  290. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  291. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  292. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  293. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  294. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  295. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  296. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  297. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
  298. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  299. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  300. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  301. mindspore/profiler/analysis/task_manager.py +131 -0
  302. mindspore/profiler/analysis/time_converter.py +84 -0
  303. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  304. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
  305. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  306. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
  307. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
  308. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
  309. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
  310. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  311. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  312. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
  313. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  314. mindspore/profiler/analysis/work_flow.py +73 -0
  315. mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
  316. mindspore/profiler/common/command_executor.py +90 -0
  317. mindspore/profiler/common/constant.py +174 -3
  318. mindspore/profiler/common/file_manager.py +208 -0
  319. mindspore/profiler/common/log.py +130 -0
  320. mindspore/profiler/common/msprof_cmd_tool.py +202 -0
  321. mindspore/profiler/common/path_manager.py +371 -0
  322. mindspore/profiler/common/process_bar.py +168 -0
  323. mindspore/profiler/common/process_pool.py +9 -3
  324. mindspore/profiler/common/profiler_context.py +476 -0
  325. mindspore/profiler/common/profiler_info.py +304 -0
  326. mindspore/profiler/common/profiler_output_path.py +284 -0
  327. mindspore/profiler/common/profiler_parameters.py +210 -0
  328. mindspore/profiler/common/profiler_path_manager.py +120 -0
  329. mindspore/profiler/common/record_function.py +76 -0
  330. mindspore/profiler/common/tlv_decoder.py +76 -0
  331. mindspore/profiler/common/util.py +75 -2
  332. mindspore/profiler/dynamic_profiler.py +270 -37
  333. mindspore/profiler/envprofiler.py +138 -0
  334. mindspore/profiler/mstx.py +199 -0
  335. mindspore/profiler/platform/__init__.py +21 -0
  336. mindspore/profiler/platform/base_profiler.py +40 -0
  337. mindspore/profiler/platform/cpu_profiler.py +124 -0
  338. mindspore/profiler/platform/gpu_profiler.py +74 -0
  339. mindspore/profiler/platform/npu_profiler.py +309 -0
  340. mindspore/profiler/profiler.py +580 -93
  341. mindspore/profiler/profiler_action_controller.py +187 -0
  342. mindspore/profiler/profiler_interface.py +114 -0
  343. mindspore/profiler/schedule.py +208 -0
  344. mindspore/rewrite/api/symbol_tree.py +1 -2
  345. mindspore/run_check/_check_version.py +2 -6
  346. mindspore/runtime/__init__.py +37 -0
  347. mindspore/runtime/device.py +27 -0
  348. mindspore/runtime/event.py +209 -0
  349. mindspore/runtime/executor.py +148 -0
  350. mindspore/runtime/memory.py +392 -0
  351. mindspore/runtime/stream.py +460 -0
  352. mindspore/runtime/thread_bind_core.py +401 -0
  353. mindspore/swresample-4.dll +0 -0
  354. mindspore/swscale-6.dll +0 -0
  355. mindspore/tbbmalloc.dll +0 -0
  356. mindspore/tinyxml2.dll +0 -0
  357. mindspore/train/__init__.py +2 -2
  358. mindspore/train/_utils.py +53 -18
  359. mindspore/train/amp.py +8 -4
  360. mindspore/train/callback/_checkpoint.py +32 -18
  361. mindspore/train/callback/_early_stop.py +1 -1
  362. mindspore/train/callback/_flops_collector.py +105 -69
  363. mindspore/train/callback/_history.py +1 -1
  364. mindspore/train/callback/_summary_collector.py +44 -6
  365. mindspore/train/callback/_tft_register.py +31 -10
  366. mindspore/train/dataset_helper.py +11 -11
  367. mindspore/train/metrics/precision.py +4 -5
  368. mindspore/train/mind_ir_pb2.py +167 -46
  369. mindspore/train/model.py +13 -15
  370. mindspore/train/serialization.py +462 -76
  371. mindspore/train/summary/summary_record.py +1 -2
  372. mindspore/train/train_thor/model_thor.py +1 -1
  373. mindspore/turbojpeg.dll +0 -0
  374. mindspore/utils/__init__.py +4 -2
  375. mindspore/utils/dryrun.py +138 -0
  376. mindspore/utils/runtime_execution_order_check.py +550 -0
  377. mindspore/vcmeta.dll +0 -0
  378. mindspore/vcruntime140.dll +0 -0
  379. mindspore/vcruntime140_1.dll +0 -0
  380. mindspore/version.py +1 -1
  381. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
  382. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +385 -261
  383. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
  384. mindspore/common/_tensor_overload.py +0 -139
  385. mindspore/mindspore_np_dtype.dll +0 -0
  386. mindspore/profiler/envprofiling.py +0 -254
  387. mindspore/profiler/profiling.py +0 -1926
  388. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
  389. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
mindspore/experimental/optim/lr_scheduler.py CHANGED
@@ -24,7 +24,6 @@ from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 from mindspore import _checkparam as Validator
 
-
 __all__ = ['StepLR', 'LinearLR', 'LRScheduler', 'ExponentialLR', 'PolynomialLR',
            'MultiplicativeLR', 'ConstantLR', 'MultiStepLR', 'LambdaLR', 'SequentialLR', 'ReduceLROnPlateau',
            'CyclicLR', 'CosineAnnealingWarmRestarts', 'CosineAnnealingLR']
@@ -82,6 +81,7 @@ class LRScheduler:
     [Tensor(shape=[], dtype=Float32, value= 0.01)]
     [Tensor(shape=[], dtype=Float32, value= 0.01)]
     """
+
     def __init__(self, optimizer, last_epoch=-1):
         if not isinstance(optimizer, Optimizer):
             raise TypeError('{} is not an Optimizer'.format(
@@ -192,6 +192,7 @@ class StepLR(LRScheduler):
     ...     scheduler.step()
     ...     current_lr = scheduler.get_last_lr()
     """
+
     def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1):
         if not isinstance(step_size, int) and not isinstance(step_size, bool):
             raise TypeError(f"For 'StepLR', the 'step_size' must be int, but got {type(step_size)}.")
@@ -297,8 +298,8 @@ class LinearLR(LRScheduler):
         if self.last_epoch > self.total_iters:
             return [lr * 1. for lr in self._last_lr]
 
-        factor = 1. + (self.end_factor - self.start_factor) / (
-            self.total_iters * self.start_factor + (self.last_epoch - 1) * (self.end_factor - self.start_factor))
+        factor = 1. + (self.end_factor - self.start_factor) / \
+            (self.total_iters * self.start_factor + (self.last_epoch - 1) * (self.end_factor - self.start_factor))
         return [lr * factor for lr in self._last_lr]
 
     def _get_closed_form_lr(self):
@@ -419,6 +420,7 @@ class PolynomialLR(LRScheduler):
     [Tensor(shape=[], dtype=Float32, value= 0)]
     [Tensor(shape=[], dtype=Float32, value= 0)]
     """
+
     def __init__(self, optimizer, total_iters=5, power=1.0, last_epoch=-1):
         if not isinstance(power, float):
             raise TypeError(f"For 'PolynomialLR', the 'power' must be float, but got {type(power)}.")
@@ -435,8 +437,8 @@ class PolynomialLR(LRScheduler):
     def get_lr(self):
         if self.last_epoch == 0 or self.last_epoch > self.total_iters:
             return [lr * 1. for lr in self._last_lr]
-        factor = ((1.0 - self.last_epoch / self.total_iters) / (
-            1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power
+        factor = ((1.0 - self.last_epoch / self.total_iters) /
+                  (1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power
         return [lr * factor for lr in self._last_lr]
 
     def _get_closed_form_lr(self):
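The PolynomialLR rewrite above only reflows the expression; the per-step factor still telescopes to the closed form. A quick standalone check, no MindSpore required (the names T, power, and lr0 are illustrative, not from the source):

# The incremental factor used by PolynomialLR.get_lr telescopes to
# lr0 * (1 - t/T) ** power for t = 1..T.
T, power, lr0 = 5, 2.0, 0.1
lr = lr0
for t in range(1, T + 1):
    factor = ((1.0 - t / T) / (1.0 - (t - 1) / T)) ** power
    lr *= factor
    assert abs(lr - lr0 * (1.0 - t / T) ** power) < 1e-9
print("incremental update matches the closed form")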
@@ -483,14 +485,16 @@ class LambdaLR(LRScheduler):
     [Tensor(shape=[], dtype=Float32, value= 0.0081)]
     [Tensor(shape=[], dtype=Float32, value= 0.00729)]
     """
+
     def __init__(self, optimizer, lr_lambda, last_epoch=-1):
-        if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
-            self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
-        else:
-            if len(lr_lambda) != len(optimizer.param_groups):
+        param_groups_length = len(optimizer.param_groups)
+        if isinstance(lr_lambda, (list, tuple)):
+            if len(lr_lambda) != param_groups_length:
                 raise ValueError("Expected {} lr_lambdas, but got {}".format(
-                    len(optimizer.param_groups), len(lr_lambda)))
+                    param_groups_length, len(lr_lambda)))
             self.lr_lambdas = list(lr_lambda)
+        else:
+            self.lr_lambdas = [lr_lambda] * param_groups_length
         super(LambdaLR, self).__init__(optimizer, last_epoch)
 
     def get_lr(self):
@@ -533,14 +537,16 @@ class MultiplicativeLR(LRScheduler):
     [Tensor(shape=[], dtype=Float32, value= 0.009025)]
     [Tensor(shape=[], dtype=Float32, value= 0.00857375)]
     """
+
     def __init__(self, optimizer, lr_lambda, last_epoch=-1):
-        if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
-            self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
-        else:
-            if len(lr_lambda) != len(optimizer.param_groups):
+        if isinstance(lr_lambda, (list, tuple)):
+            if len(lr_lambda) == len(optimizer.param_groups):
+                self.lr_lambdas = list(lr_lambda)
+            else:
                 raise ValueError("Expected {} lr_lambdas, but got {}".format(
                     len(optimizer.param_groups), len(lr_lambda)))
-            self.lr_lambdas = list(lr_lambda)
+        else:
+            self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
         super(MultiplicativeLR, self).__init__(optimizer, last_epoch)
 
     def get_lr(self):
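Both refactors above preserve behavior: a single callable is broadcast to every param group, while a list or tuple must match len(optimizer.param_groups) or a ValueError is raised. A minimal usage sketch, assuming a MindSpore install (SGD stands in for any mindspore.experimental.optim optimizer):

from mindspore import nn
from mindspore.experimental import optim

net = nn.Dense(2, 3)
optimizer = optim.SGD(net.trainable_params(), lr=0.1)
# One callable is broadcast to every param group; a list must match
# len(optimizer.param_groups) or the refactored __init__ raises
# ValueError("Expected {} lr_lambdas, but got {}").
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda epoch: 0.9 ** epoch)
scheduler.step()
print(scheduler.get_last_lr())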
@@ -599,6 +605,7 @@ class MultiStepLR(LRScheduler):
     [Tensor(shape=[], dtype=Float32, value= 0.0005)]
     [Tensor(shape=[], dtype=Float32, value= 0.0005)]
     """
+
     def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):
         Validator.check_value_type('milestones', milestones, [list])
         for milestone in milestones:
@@ -668,6 +675,7 @@ class ConstantLR(LRScheduler):
     [Tensor(shape=[], dtype=Float32, value= 0.05)]
     [Tensor(shape=[], dtype=Float32, value= 0.05)]
     """
+
     def __init__(self, optimizer, factor=1.0 / 3, total_iters=5, last_epoch=-1):
         if factor > 1.0 or factor < 0:
             raise ValueError('Constant multiplicative factor expected to be between 0 and 1.')
@@ -735,6 +743,7 @@ class SequentialLR:
     [Tensor(shape=[], dtype=Float32, value= 0.0729)]
     [Tensor(shape=[], dtype=Float32, value= 0.06561)]
     """
+
     def __init__(self, optimizer, schedulers, milestones, last_epoch=-1):
         for sched_idx in range(len(schedulers)):
             if schedulers[sched_idx].optimizer != optimizer:
@@ -863,6 +872,7 @@ class ReduceLROnPlateau:
     [Tensor(shape=[], dtype=Float32, value= 0.001)]
     [Tensor(shape=[], dtype=Float32, value= 0.0001)]
     """
+
     def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
                  threshold=1e-4, threshold_mode='rel', cooldown=0,
                  min_lr=0, eps=1e-8):
@@ -1053,6 +1063,7 @@ class CyclicLR(LRScheduler):
     [Tensor(shape=[], dtype=Float32, value= 0.01018)]
     [Tensor(shape=[], dtype=Float32, value= 0.010225)]
     """
+
     def __init__(self,
                  optimizer,
                  base_lr,
@@ -1127,12 +1138,12 @@ class CyclicLR(LRScheduler):
     def _triangular_scale_fn(self, x):
         return 1.
 
-    def _triangular2_scale_fn(self, x):
-        return 1 / (2. ** (x - 1))
-
     def _exp_range_scale_fn(self, x):
         return self.gamma ** (x)
 
+    def _triangular2_scale_fn(self, x):
+        return 1 / (2. ** (x - 1))
+
     def get_lr(self):
         cycle = self.floor(1 + self.last_epoch / self.total_step_size)
         x = 1. + self.last_epoch / self.total_step_size - cycle
@@ -1143,13 +1154,9 @@ class CyclicLR(LRScheduler):
         lrs = []
         for base_lr, max_lr in zip(self.base_lrs, self.max_lrs):
             base_height = (max_lr - base_lr) * scale_factor
-
-            if self.scale_mode == 'cycle':
-                lr = base_lr + base_height * self.scale_fn(cycle)
-            else:
-                lr = base_lr + base_height * self.scale_fn(self.last_epoch)
+            cycle_or_epoch = cycle if self.scale_mode == 'cycle' else self.last_epoch
+            lr = base_lr + base_height * self.scale_fn(cycle_or_epoch)
             lrs.append(lr)
-
         return lrs
 
 
@@ -1211,6 +1218,7 @@ class CosineAnnealingWarmRestarts(LRScheduler):
     [Tensor(shape=[], dtype=Float32, value= 0.025)]
     [Tensor(shape=[], dtype=Float32, value= 0.00669873)]
     """
+
     def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1):
         if T_0 <= 0 or not isinstance(T_0, int):
             raise ValueError("T_0 should be an integer and equal or greater than 0, but got {}".format(T_0))
@@ -1336,6 +1344,7 @@ class CosineAnnealingLR(LRScheduler):
     [Tensor(shape=[], dtype=Float32, value= 0.05)]
     [Tensor(shape=[], dtype=Float32, value= 0)]
     """
+
     def __init__(self, optimizer, T_max, eta_min=0.0, last_epoch=-1):
         if not isinstance(eta_min, (float, int)):
             raise TypeError(f"For 'CosineAnnealingLR', the 'eta_min' must be float or int, but got {type(eta_min)}.")
mindspore/experimental/optim/radam.py CHANGED
@@ -55,36 +55,39 @@ class RAdam(Optimizer):
     Implements RAdam algorithm.
 
     .. math::
-        \begin{aligned}
-            &\rule{110mm}{0.4pt} \\
-            &\textbf{input} : \gamma \text{ (lr)}, \: \beta_1, \beta_2
-                \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \:
-                \lambda \text{ (weightdecay)}, \\
-            &\hspace{13mm} \epsilon \text{ (epsilon)} \\
-            &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
-                v_0 \leftarrow 0 \text{ ( second moment)}, \\
-            &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1 \\[-1.ex]
-            &\rule{110mm}{0.4pt} \\
-            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
-            &\hspace{6mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
-            &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\
-            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
-            &\hspace{6mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
-            &\hspace{6mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
-            &\hspace{6mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
-            &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} -
-                2 t \beta^t_2 /\big(1-\beta_2^t \big) \\[-1.ex]
-            &\hspace{6mm}\textbf{if} \: \rho_t > 5 \\
-            &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon } \\
-            &\hspace{12mm} r_t \leftarrow
-                \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\
-            &\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t} r_t l_t \\
-            &\hspace{6mm}\textbf{else} \\
-            &\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t} \\
-            &\rule{110mm}{0.4pt} \\[-1.ex]
-            &\bf{return} \: \theta_t \\[-1.ex]
-            &\rule{110mm}{0.4pt} \\[-1.ex]
-        \end{aligned}
+        \begin{align*}
+            &\rule{110mm}{0.4pt} \\
+            &\textbf{Input}:
+                \gamma \text{ (lr)}, \: \beta_1, \beta_2 \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta)
+                \text{ (objective)}, \:
+                \lambda \text{ (weightdecay)}, \: \epsilon \text{ (epsilon)} \\
+            &\textbf{Initialize}:
+                \begin{cases}
+                    m_0 \leftarrow 0 \text{ (first moment)} \\
+                    v_0 \leftarrow 0 \text{ (second moment)} \\
+                    \rho_{\infty} \xleftarrow{\text{def}} \dfrac{2}{1 - \beta_2} - 1
+                \end{cases} \\
+            &\rule{110mm}{0.4pt} \\
+            &\textbf{For } t = 1 \text{ to } \ldots \text{ do}: \\
+            &\quad g_t \leftarrow \nabla_{\theta} f_t(\theta_{t - 1}) \\
+            &\quad \text{If } \lambda \neq 0: \\
+            &\quad\quad g_t \leftarrow g_t + \lambda \theta_{t - 1} \\
+            &\quad m_t \leftarrow \beta_1 m_{t - 1} + (1 - \beta_1) g_t \\
+            &\quad v_t \leftarrow \beta_2 v_{t - 1} + (1 - \beta_2) g_t^2 \\
+            &\quad \widehat{m_t} \leftarrow \dfrac{m_t}{1 - \beta_1^t} \\
+            &\quad \text{Let } \rho_t' = 2 t \beta_2^t /(1 - \beta_2^t) \quad \text{(auxiliary variable)} \\
+            &\quad \rho_t \leftarrow \rho_{\infty} - \rho_t' \\
+            &\quad \text{If } \rho_t > 5: \\
+            &\quad\quad l_t \leftarrow \dfrac{\sqrt{1 - \beta_2^t}}{\sqrt{v_t} + \epsilon} \\
+            &\quad\quad r_t \leftarrow \sqrt{\dfrac{(\rho_t - 4)(\rho_t - 2)\rho_{\infty}}{(\rho_{\infty} - 4)
+                (\rho_{\infty} - 2) \rho_t}} \\
+            &\quad\quad \theta_t \leftarrow \theta_{t - 1} - \gamma \widehat{m_t} r_t l_t \\
+            &\quad \text{Else}: \\
+            &\quad\quad \theta_t \leftarrow \theta_{t - 1} - \gamma \widehat{m_t} \\
+            &\rule{110mm}{0.4pt} \\
+            &\bf{Return}: \theta_t \\
+            &\rule{110mm}{0.4pt}
+        \end{align*}
 
     .. warning::
         This is an experimental optimizer API that is subject to change.
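A standalone sketch of the rectification term from the reformatted math above, useful for seeing when the ρ_t > 5 branch activates (pure Python; radam_rectification is an illustrative name, not an API in this package):

import math

def radam_rectification(t, beta2=0.999):
    # rho_inf and rho_t follow the docstring formulas above.
    rho_inf = 2.0 / (1.0 - beta2) - 1.0
    rho_t = rho_inf - 2.0 * t * beta2 ** t / (1.0 - beta2 ** t)
    if rho_t <= 5.0:
        return None  # variance not yet tractable: the un-rectified step is taken
    return math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf
                     / ((rho_inf - 4) * (rho_inf - 2) * rho_t))

print(radam_rectification(1))    # None: early steps use the un-rectified update
print(radam_rectification(100))  # ~0.215, growing toward 1 as t increases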
mindspore/hal/device.py CHANGED
@@ -122,6 +122,9 @@ def is_initialized(device_target):
     """
     Returns whether specified backend is initialized.
 
+    Note:
+        - The api will be deprecated.
+
     Note:
         MindSpore's backends "CPU", "GPU" and "Ascend" will be initialized in the following scenarios:
 
@@ -162,6 +165,12 @@ def is_available(device_target):
     Returns whether specified backend is available.
     All dependent libraries should be successfully loaded if this backend is available.
 
+    Note:
+        - The api will be deprecated.
+        - CPU hardware, please use the interface :func:`mindspore.device_context.cpu.is_available`.
+        - GPU hardware, please use the interface :func:`mindspore.device_context.gpu.is_available`.
+        - Ascend hardware, please use the interface :func:`mindspore.device_context.ascend.is_available`.
+
     Args:
         device_target (str): The device name of backend, should be one of "CPU", "GPU" and "Ascend".
 
@@ -194,6 +203,13 @@ def device_count(device_target=None):
     """
     Returns device count of specified backend.
 
+    Note:
+        - The api will be deprecated.
+        - CPU hardware, please use the interface :func:`mindspore.device_context.cpu.device_count`.
+        - GPU hardware, please use the interface :func:`mindspore.device_context.gpu.device_count`.
+        - Ascend hardware, please use the interface :func:`mindspore.device_context.ascend.device_count`.
+
+
     Note:
         If `device_target` is not specified, get the device count of the current backend set by context.
         For CPU backend, this method always returns 1.
@@ -221,6 +237,9 @@ def get_device_capability(device_id, device_target=None):
     """
     Get specified device's capability.
 
+    Note:
+        - The api will be deprecated.
+
     Note:
         If `device_target` is not specified, get the device capability of the current backend set by context.
 
@@ -253,6 +272,9 @@ def get_device_properties(device_id, device_target=None):
     """
     Get specified device's properties.
 
+    Note:
+        - The api will be deprecated.
+
     Note:
         If `device_target` is not specified, get the device properties of the current backend set by context.
         For Ascend, backend must be initialized before calling this method,
@@ -308,6 +330,9 @@ def get_device_name(device_id, device_target=None):
     """
     Get specified device's name.
 
+    Note:
+        - The api will be deprecated.
+
     Note:
         If `device_target` is not specified, get the device name of the current backend set by context.
         This method always returns "CPU" for CPU backend.
@@ -335,6 +360,9 @@ def get_arch_list(device_target=None):
     """
     Get the architecture list this MindSpore was compiled for.
 
+    Note:
+        - The api will be deprecated.
+
     Note:
         If `device_target` is not specified, get the device name of the current backend set by context.
 
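The deprecation notes above point at the new per-backend mindspore.device_context package added in this release (files 68-80 in the list). A minimal migration sketch, assuming MindSpore 2.5.0:

import mindspore as ms

# Before (mindspore.hal, now marked for deprecation):
#   ms.hal.is_available("Ascend")
#   ms.hal.device_count("Ascend")

# After, using the per-backend interfaces named in the notes:
print(ms.device_context.ascend.is_available())
print(ms.device_context.ascend.device_count())
print(ms.device_context.cpu.device_count())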
mindspore/hal/event.py CHANGED
@@ -17,6 +17,9 @@ from mindspore._c_expression import Event as Event_
 from mindspore._c_expression import Stream as Stream_
 from mindspore._c_expression import current_stream as current_stream_
 from mindspore import _checkparam as Validator
+from mindspore import log as logger
+
+function_event_status = {'Event': False, 'wait': False}
 
 
 class Event(Event_):
@@ -28,6 +31,9 @@ class Event(Event_):
 
     The underlying device events are lazily initialized when the event is first recorded.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.Event` instead.
+
     Args:
         enable_timing (bool, optional): indicates if the event should measure time (default: ``False``)
         blocking (bool, optional): if ``True``, `wait` will be blocking (default: ``False``)
@@ -58,7 +64,13 @@ class Event(Event_):
         [5. 5.]]
         >>> elapsed_time = start.elapsed_time(end)
     """
+
     def __init__(self, enable_timing=False, blocking=False):
+        if not function_event_status['Event']:
+            function_event_status['Event'] = True
+            logger.warning(
+                "WARN_DEPRECATED: The usage of mindspore.hal.Event(enable_timing=True) is deprecated."
+                " Please use mindspore.runtime.Event(enable_timing=True)")
         # pylint: disable=useless-super-delegation
         Validator.check_bool(enable_timing, "enable_timing", "Event")
         Validator.check_bool(blocking, "blocking", "Event")
@@ -118,6 +130,11 @@ class Event(Event_):
         [[4. 4.]
          [4. 4.]]
         """
+        if not function_event_status['wait']:
+            function_event_status['wait'] = True
+            logger.warning(
+                "WARN_DEPRECATED: The usage of mindspore.hal.Event() is deprecated."
+                " Please use mindspore.runtime.Event()")
         if stream is None:
             stream = current_stream_()
         if not isinstance(stream, Stream_):
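The one-shot warnings gate on function_event_status, so each message is logged only once per process. Migrating is a rename; a sketch that assumes ms.runtime.Event keeps the record/synchronize/elapsed_time surface shown in the hal docstring example:

import mindspore as ms

start = ms.runtime.Event(enable_timing=True)  # was ms.hal.Event(enable_timing=True)
end = ms.runtime.Event(enable_timing=True)
start.record()
# ... enqueue device work here ...
end.record()
end.synchronize()
print(start.elapsed_time(end))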
mindspore/hal/memory.py CHANGED
@@ -18,12 +18,20 @@ from mindspore._c_expression import _memory_stats, _reset_max_mem_reserved, _reset_max_mem_allocated
 from mindspore import log as logger
 from .device import _check_inputs_validation, is_initialized
 
+function_memory_status = {'memory_stats': False, 'memory_reserved': False, 'max_memory_reserved': False,
+                          'empty_cache': False, 'reset_peak_memory_stats': False, 'memory_summary': False,
+                          'memory_allocated': False, 'max_memory_allocated': False,
+                          'reset_max_memory_reserved': False, 'reset_max_memory_allocated': False}
+
 
 @_check_inputs_validation
 def memory_stats(device_target=None):
     """
     Returns status information queried from the memory pool.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.memory_stats` instead.
+
     Note:
         - If `device_target` is not specified, get the device capability of the current backend set by context.
         - For the `CPU` backend, a dictionary with empty data is always returned.
@@ -49,6 +57,12 @@ def memory_stats(device_target=None):
         {<capsule object NULL at 0x7f7e8c27b030>: {'block_stream_id': 0, 'block_memory_size': 1073741824}}},
         'persistent_mem_pool_stats': {'block_unit_size': 1073741824, 'block_counts': 0, 'blocks_info': {}}}
     """
+    if not function_memory_status['memory_stats']:
+        function_memory_status['memory_stats'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.memory_stats() is deprecated."
+            " Please use mindspore.runtime.memory_stats()"
+        )
     if not is_initialized(device_target):
         logger.warning(f"Backend {device_target} is not initialized yet. Return empty dict.")
         return {}
@@ -60,6 +74,9 @@ def memory_reserved(device_target=None):
     """
     Returns the total amount of memory currently managed by the memory pool.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.memory_reserved` instead.
+
     Note:
         - If `device_target` is not specified, get the device capability of the current backend set by context.
         - For the `CPU` backend, 0 is always returned.
@@ -81,6 +98,12 @@ def memory_reserved(device_target=None):
     >>> print(ms.hal.memory_reserved())
     1073741824
     """
+    if not function_memory_status['memory_reserved']:
+        function_memory_status['memory_reserved'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.memory_reserved() is deprecated."
+            " Please use mindspore.runtime.memory_reserved()"
+        )
     return _memory_stats(device_target).get("total_reserved_memory", 0)
 
 
@@ -89,6 +112,9 @@ def max_memory_reserved(device_target=None):
     """
     Returns the peak value of the total memory managed by the memory pool since the process was started.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.max_memory_reserved` instead.
+
     Note:
         - If `device_target` is not specified, get the device capability of the current backend set by context.
         - For the `CPU` backend, 0 is always returned.
@@ -110,6 +136,12 @@ def max_memory_reserved(device_target=None):
     >>> print(ms.hal.max_memory_reserved())
     1073741824
     """
+    if not function_memory_status['max_memory_reserved']:
+        function_memory_status['max_memory_reserved'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.max_memory_reserved() is deprecated."
+            " Please use mindspore.runtime.max_memory_reserved()"
+        )
     return _memory_stats(device_target).get("max_reserved_memory", 0)
 
 
@@ -119,11 +151,16 @@ def empty_cache():
     Release all memory fragments in the memory pool, so that memory arrangement
     will be optimized.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.empty_cache` instead.
+
     Note:
         Currently, the MindSpore memory pool does not have the function of releasing memory fragments.
         This interface is reserved but implemented as an empty method and prompted in log mode.
     """
-    logger.warning(f"The empty_cache operation is currently not supported.")
+    if not function_memory_status['empty_cache']:
+        function_memory_status['empty_cache'] = True
+        logger.warning(f"The empty_cache operation is currently not supported.")
 
 
 @_check_inputs_validation
@@ -131,6 +168,9 @@ def reset_peak_memory_stats(device_target=None):
     """
     Reset the "peak" stats tracked by memory manager.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.reset_peak_memory_stats` instead.
+
     Note:
         If `device_target` is not specified, get the device capability of the current backend set by context.
 
@@ -155,6 +195,12 @@ def reset_peak_memory_stats(device_target=None):
     >>> print(ms.hal.max_memory_allocated())
     0
     """
+    if not function_memory_status['reset_peak_memory_stats']:
+        function_memory_status['reset_peak_memory_stats'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.reset_peak_memory_stats() is deprecated."
+            " Please use mindspore.runtime.reset_peak_memory_stats()"
+        )
     _reset_max_mem_reserved(device_target)
     _reset_max_mem_allocated(device_target)
 
@@ -164,6 +210,9 @@ def memory_summary(device_target=None):
     """
     Returns readable memory pool status information.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.memory_summary` instead.
+
     Note:
         If `device_target` is not specified, get the device capability of the current backend set by context.
 
@@ -174,6 +223,12 @@ def memory_summary(device_target=None):
     Returns:
         str, readable memory pool status information in tabular form.
     """
+    if not function_memory_status['memory_summary']:
+        function_memory_status['memory_summary'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.memory_summary() is deprecated."
+            " Please use mindspore.runtime.memory_summary()"
+        )
     stats = _memory_stats(device_target)
 
     def _format_size(sz, pref_sz):
@@ -189,7 +244,7 @@ def memory_summary(device_target=None):
 
     metrics_to_display = [
         ("total_reserved_memory", "Reserved memory", _format_size),
-        ("total_allocatd_memory", "Allocated memory", _format_size),
+        ("total_allocated_memory", "Allocated memory", _format_size),
         ("total_idle_memory", "Idle memory", _format_size),
         ("total_eager_free_memory", "Eager free memory", _format_size),
         ("max_reserved_memory", "Max reserved memory", _format_size),
@@ -217,6 +272,9 @@ def memory_allocated(device_target=None):
     """
     Returns the actual memory size currently occupied by Tensor.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.memory_allocated` instead.
+
     Note:
         - If `device_target` is not specified, get the device capability of the current backend set by context.
         - For the `CPU` backend, 0 is always returned.
@@ -238,7 +296,13 @@ def memory_allocated(device_target=None):
     >>> print(ms.hal.memory_allocated())
     1024
     """
-    return _memory_stats(device_target).get("total_allocatd_memory", 0)
+    if not function_memory_status['memory_allocated']:
+        function_memory_status['memory_allocated'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.memory_allocated() is deprecated."
+            " Please use mindspore.runtime.memory_allocated()"
+        )
+    return _memory_stats(device_target).get("total_allocated_memory", 0)
 
 
 @_check_inputs_validation
@@ -246,6 +310,9 @@ def max_memory_allocated(device_target=None):
     """
     Returns the peak memory size of the memory pool actually occupied by Tensor since the process was started.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.max_memory_allocated` instead.
+
     Note:
         - If `device_target` is not specified, get the device capability of the current backend set by context.
         - For the `CPU` backend, 0 is always returned.
@@ -267,6 +334,12 @@ def max_memory_allocated(device_target=None):
     >>> print(ms.hal.max_memory_allocated())
     1536
     """
+    if not function_memory_status['max_memory_allocated']:
+        function_memory_status['max_memory_allocated'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.max_memory_allocated() is deprecated."
+            " Please use mindspore.runtime.max_memory_allocated()"
+        )
     return _memory_stats(device_target).get("max_allocated_memory", 0)
 
 
@@ -275,6 +348,9 @@ def reset_max_memory_reserved(device_target=None):
     """
     Reset the peak memory size managed by the memory pool.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.reset_max_memory_reserved` instead.
+
     Note:
         If `device_target` is not specified, get the device capability of the current backend set by context.
 
@@ -295,6 +371,12 @@ def reset_max_memory_reserved(device_target=None):
     >>> print(ms.hal.max_memory_reserved())
     0
     """
+    if not function_memory_status['reset_max_memory_reserved']:
+        function_memory_status['reset_max_memory_reserved'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.reset_max_memory_reserved() is deprecated."
+            " Please use mindspore.runtime.reset_max_memory_reserved()"
+        )
     _reset_max_mem_reserved(device_target)
 
 
@@ -303,6 +385,9 @@ def reset_max_memory_allocated(device_target=None):
    """
     Reset the peak memory size of the memory pool actually occupied by Tensor.
 
+    Note:
+        - The api will be deprecated, please use the api :func:`mindspore.runtime.reset_max_memory_allocated` instead.
+
     Note:
         If `device_target` is not specified, get the device capability of the current backend set by context.
 
@@ -323,4 +408,10 @@ def reset_max_memory_allocated(device_target=None):
     >>> print(ms.hal.max_memory_allocated())
     0
     """
+    if not function_memory_status['reset_max_memory_allocated']:
+        function_memory_status['reset_max_memory_allocated'] = True
+        logger.warning(
+            "WARN_DEPRECATED: The usage of mindspore.hal.reset_max_memory_allocated() is deprecated."
+            " Please use mindspore.runtime.reset_max_memory_allocated()"
+        )
     _reset_max_mem_allocated(device_target)
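As with events, every hal memory query now warns once per process (gated by function_memory_status) and forwards to the same _memory_stats backend; the replacement names are spelled out in each WARN_DEPRECATED message. A migration sketch, assuming MindSpore 2.5.0 with an initialized device backend:

import mindspore as ms

# Before: ms.hal.memory_allocated(), ms.hal.max_memory_reserved(), ...
print(ms.runtime.memory_allocated())
print(ms.runtime.max_memory_reserved())
print(ms.runtime.memory_summary())
# The stats-key typo was also fixed in this release:
# "total_allocatd_memory" -> "total_allocated_memory".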