mindspore-2.6.0rc1-cp311-cp311-win_amd64.whl → mindspore-2.7.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (458)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +2 -2
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +42 -11
  9. mindspore/_extends/builtin_operations.py +3 -3
  10. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  11. mindspore/_extends/optimize/cell_utils.py +96 -0
  12. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +3 -3
  15. mindspore/_extends/parse/compile_config.py +44 -22
  16. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
  17. mindspore/_extends/parse/parser.py +65 -84
  18. mindspore/_extends/parse/resources.py +39 -0
  19. mindspore/_extends/parse/standard_method.py +58 -14
  20. mindspore/_extends/parse/trope.py +8 -1
  21. mindspore/_extends/pijit/__init__.py +1 -2
  22. mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
  23. mindspore/amp.py +4 -22
  24. mindspore/atlprov.dll +0 -0
  25. mindspore/avcodec-59.dll +0 -0
  26. mindspore/avdevice-59.dll +0 -0
  27. mindspore/avfilter-8.dll +0 -0
  28. mindspore/avformat-59.dll +0 -0
  29. mindspore/avutil-57.dll +0 -0
  30. mindspore/boost/adasum.py +1 -1
  31. mindspore/boost/boost_cell_wrapper.py +4 -4
  32. mindspore/c1.dll +0 -0
  33. mindspore/c1xx.dll +0 -0
  34. mindspore/c2.dll +0 -0
  35. mindspore/common/__init__.py +43 -12
  36. mindspore/common/_grad_function.py +2 -1
  37. mindspore/common/_pijit_context.py +28 -7
  38. mindspore/common/_stub_tensor.py +1 -209
  39. mindspore/common/_tensor_cpp_method.py +1 -1
  40. mindspore/common/_tensor_docs.py +178 -53
  41. mindspore/common/_utils.py +9 -1
  42. mindspore/common/api.py +377 -203
  43. mindspore/common/dtype.py +108 -57
  44. mindspore/common/dump.py +11 -16
  45. mindspore/common/dynamic_shape/__init__.py +0 -0
  46. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
  47. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  48. mindspore/common/file_system.py +59 -9
  49. mindspore/common/generator.py +5 -3
  50. mindspore/common/hook_handle.py +33 -5
  51. mindspore/common/jit_config.py +1 -1
  52. mindspore/common/jit_trace.py +84 -105
  53. mindspore/common/np_dtype.py +3 -3
  54. mindspore/common/parameter.py +27 -29
  55. mindspore/common/recompute.py +5 -7
  56. mindspore/common/sparse_tensor.py +0 -3
  57. mindspore/common/symbol.py +0 -1
  58. mindspore/common/tensor.py +117 -131
  59. mindspore/communication/_comm_helper.py +46 -4
  60. mindspore/communication/management.py +79 -7
  61. mindspore/context.py +67 -55
  62. mindspore/dataset/__init__.py +1 -1
  63. mindspore/dataset/audio/transforms.py +1 -1
  64. mindspore/dataset/core/config.py +38 -4
  65. mindspore/dataset/engine/datasets.py +350 -322
  66. mindspore/dataset/engine/datasets_user_defined.py +70 -24
  67. mindspore/dataset/engine/iterators.py +2 -2
  68. mindspore/dataset/engine/obs/config_loader.py +2 -2
  69. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  70. mindspore/dataset/transforms/c_transforms.py +2 -2
  71. mindspore/dataset/transforms/py_transforms.py +7 -3
  72. mindspore/dataset/transforms/transforms.py +10 -6
  73. mindspore/dataset/vision/__init__.py +1 -1
  74. mindspore/dataset/vision/py_transforms.py +8 -8
  75. mindspore/dataset/vision/transforms.py +17 -5
  76. mindspore/dataset/vision/utils.py +632 -21
  77. mindspore/dataset/vision/validators.py +1 -0
  78. mindspore/device_context/ascend/device.py +1 -1
  79. mindspore/device_context/ascend/op_tuning.py +35 -1
  80. mindspore/device_context/gpu/__init__.py +2 -2
  81. mindspore/device_context/gpu/device.py +1 -1
  82. mindspore/device_context/gpu/op_precision.py +4 -2
  83. mindspore/device_context/gpu/op_tuning.py +6 -3
  84. mindspore/device_manager.py +16 -9
  85. mindspore/dnnl.dll +0 -0
  86. mindspore/dpcmi.dll +0 -0
  87. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -4
  88. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  89. mindspore/experimental/optim/adadelta.py +13 -20
  90. mindspore/experimental/optim/adagrad.py +15 -22
  91. mindspore/experimental/optim/adam.py +17 -24
  92. mindspore/experimental/optim/adamax.py +14 -22
  93. mindspore/experimental/optim/adamw.py +28 -34
  94. mindspore/experimental/optim/asgd.py +15 -25
  95. mindspore/experimental/optim/lr_scheduler.py +27 -45
  96. mindspore/experimental/optim/nadam.py +14 -24
  97. mindspore/experimental/optim/optimizer.py +13 -23
  98. mindspore/experimental/optim/radam.py +18 -24
  99. mindspore/experimental/optim/rmsprop.py +14 -25
  100. mindspore/experimental/optim/rprop.py +15 -26
  101. mindspore/experimental/optim/sgd.py +9 -19
  102. mindspore/hal/__init__.py +4 -4
  103. mindspore/hal/contiguous_tensors_handle.py +2 -2
  104. mindspore/hal/memory.py +27 -7
  105. mindspore/include/api/cell.h +65 -5
  106. mindspore/include/api/cfg.h +24 -7
  107. mindspore/include/api/context.h +1 -0
  108. mindspore/include/api/delegate.h +10 -2
  109. mindspore/include/api/dual_abi_helper.h +100 -19
  110. mindspore/include/api/graph.h +14 -1
  111. mindspore/include/api/kernel.h +16 -3
  112. mindspore/include/api/kernel_api.h +9 -1
  113. mindspore/include/api/metrics/accuracy.h +9 -0
  114. mindspore/include/api/model.h +8 -1
  115. mindspore/include/api/model_group.h +4 -0
  116. mindspore/include/api/model_parallel_runner.h +2 -0
  117. mindspore/include/api/status.h +48 -10
  118. mindspore/include/api/types.h +8 -3
  119. mindspore/include/c_api/model_c.h +0 -58
  120. mindspore/include/c_api/tensor_c.h +0 -26
  121. mindspore/include/dataset/constants.h +9 -0
  122. mindspore/include/dataset/vision_ascend.h +1 -1
  123. mindspore/jpeg62.dll +0 -0
  124. mindspore/mindrecord/tools/cifar10.py +61 -11
  125. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  126. mindspore/mindspore_backend_common.dll +0 -0
  127. mindspore/mindspore_backend_manager.dll +0 -0
  128. mindspore/mindspore_common.dll +0 -0
  129. mindspore/mindspore_core.dll +0 -0
  130. mindspore/mindspore_cpu_res_manager.dll +0 -0
  131. mindspore/mindspore_dump.dll +0 -0
  132. mindspore/mindspore_frontend.dll +0 -0
  133. mindspore/mindspore_glog.dll +0 -0
  134. mindspore/mindspore_memory_pool.dll +0 -0
  135. mindspore/mindspore_ms_backend.dll +0 -0
  136. mindspore/mindspore_ops.dll +0 -0
  137. mindspore/mindspore_ops_host.dll +0 -0
  138. mindspore/mindspore_ops_kernel_common.dll +0 -0
  139. mindspore/mindspore_profiler.dll +0 -0
  140. mindspore/mindspore_pyboost.dll +0 -0
  141. mindspore/mindspore_pynative.dll +0 -0
  142. mindspore/mindspore_res_manager.dll +0 -0
  143. mindspore/mindspore_runtime_pipeline.dll +0 -0
  144. mindspore/mint/__init__.py +6 -46
  145. mindspore/mint/distributed/__init__.py +5 -0
  146. mindspore/mint/distributed/distributed.py +429 -23
  147. mindspore/mint/nn/__init__.py +1 -1
  148. mindspore/mint/nn/functional.py +53 -6
  149. mindspore/mint/nn/layer/_functions.py +163 -294
  150. mindspore/mint/nn/layer/activation.py +8 -6
  151. mindspore/mint/nn/layer/conv.py +140 -104
  152. mindspore/mint/nn/layer/normalization.py +11 -25
  153. mindspore/mint/optim/adam.py +19 -18
  154. mindspore/mint/optim/adamw.py +14 -8
  155. mindspore/mint/optim/sgd.py +5 -5
  156. mindspore/msobj140.dll +0 -0
  157. mindspore/mspdb140.dll +0 -0
  158. mindspore/mspdbcore.dll +0 -0
  159. mindspore/mspdbst.dll +0 -0
  160. mindspore/mspft140.dll +0 -0
  161. mindspore/msvcdis140.dll +0 -0
  162. mindspore/msvcp140_1.dll +0 -0
  163. mindspore/msvcp140_2.dll +0 -0
  164. mindspore/msvcp140_atomic_wait.dll +0 -0
  165. mindspore/msvcp140_codecvt_ids.dll +0 -0
  166. mindspore/nn/cell.py +491 -623
  167. mindspore/nn/grad/cell_grad.py +11 -12
  168. mindspore/nn/layer/activation.py +36 -36
  169. mindspore/nn/layer/basic.py +74 -77
  170. mindspore/nn/layer/channel_shuffle.py +4 -4
  171. mindspore/nn/layer/combined.py +4 -2
  172. mindspore/nn/layer/conv.py +117 -110
  173. mindspore/nn/layer/dense.py +9 -7
  174. mindspore/nn/layer/embedding.py +50 -52
  175. mindspore/nn/layer/image.py +38 -40
  176. mindspore/nn/layer/math.py +111 -112
  177. mindspore/nn/layer/normalization.py +56 -44
  178. mindspore/nn/layer/pooling.py +58 -63
  179. mindspore/nn/layer/rnn_cells.py +33 -33
  180. mindspore/nn/layer/rnns.py +56 -56
  181. mindspore/nn/layer/thor_layer.py +74 -73
  182. mindspore/nn/layer/transformer.py +11 -1
  183. mindspore/nn/learning_rate_schedule.py +20 -20
  184. mindspore/nn/loss/loss.py +79 -81
  185. mindspore/nn/optim/adam.py +4 -6
  186. mindspore/nn/optim/adasum.py +2 -2
  187. mindspore/nn/optim/asgd.py +2 -0
  188. mindspore/nn/optim/lamb.py +1 -3
  189. mindspore/nn/optim/optimizer.py +1 -1
  190. mindspore/nn/optim/tft_wrapper.py +2 -3
  191. mindspore/nn/optim/thor.py +2 -2
  192. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  193. mindspore/nn/probability/distribution/exponential.py +2 -1
  194. mindspore/nn/probability/distribution/poisson.py +2 -1
  195. mindspore/nn/sparse/sparse.py +3 -3
  196. mindspore/nn/wrap/cell_wrapper.py +73 -42
  197. mindspore/nn/wrap/grad_reducer.py +37 -52
  198. mindspore/nn/wrap/loss_scale.py +72 -74
  199. mindspore/numpy/array_creations.py +7 -7
  200. mindspore/numpy/fft.py +1 -1
  201. mindspore/numpy/math_ops.py +5 -5
  202. mindspore/numpy/utils_const.py +1 -1
  203. mindspore/opencv_core452.dll +0 -0
  204. mindspore/opencv_imgcodecs452.dll +0 -0
  205. mindspore/opencv_imgproc452.dll +0 -0
  206. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  207. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  208. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  209. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  210. mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
  211. mindspore/ops/_vmap/vmap_array_ops.py +31 -13
  212. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  213. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +54 -13
  214. mindspore/ops/auto_generate/gen_extend_func.py +27 -145
  215. mindspore/ops/auto_generate/gen_ops_def.py +1027 -347
  216. mindspore/ops/auto_generate/gen_ops_prim.py +2341 -1117
  217. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  218. mindspore/ops/composite/__init__.py +10 -0
  219. mindspore/ops/composite/base.py +9 -5
  220. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  221. mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
  222. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  223. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  224. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  225. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  226. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  227. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  228. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  229. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  230. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  231. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  232. mindspore/ops/function/__init__.py +4 -1
  233. mindspore/ops/function/_add_attr_func.py +11 -6
  234. mindspore/ops/function/array_func.py +19 -102
  235. mindspore/ops/function/debug_func.py +8 -5
  236. mindspore/ops/function/grad/grad_func.py +5 -13
  237. mindspore/ops/function/math_func.py +77 -572
  238. mindspore/ops/function/nn_func.py +46 -94
  239. mindspore/ops/function/other_func.py +4 -1
  240. mindspore/ops/function/random_func.py +44 -5
  241. mindspore/ops/function/vmap_func.py +2 -1
  242. mindspore/ops/functional.py +4 -4
  243. mindspore/ops/functional_overload.py +594 -18
  244. mindspore/ops/op_info_register.py +21 -0
  245. mindspore/ops/operations/__init__.py +16 -11
  246. mindspore/ops/operations/_custom_ops_utils.py +689 -34
  247. mindspore/ops/operations/_inner_ops.py +14 -18
  248. mindspore/ops/operations/_sequence_ops.py +1 -1
  249. mindspore/ops/operations/array_ops.py +5 -51
  250. mindspore/ops/operations/comm_ops.py +186 -41
  251. mindspore/ops/operations/custom_ops.py +303 -177
  252. mindspore/ops/operations/debug_ops.py +59 -4
  253. mindspore/ops/operations/image_ops.py +13 -13
  254. mindspore/ops/operations/manually_defined/ops_def.py +27 -28
  255. mindspore/ops/operations/math_ops.py +8 -9
  256. mindspore/ops/operations/nn_ops.py +8 -40
  257. mindspore/ops/primitive.py +9 -20
  258. mindspore/ops/tensor_method.py +63 -15
  259. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  260. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  261. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  262. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  263. mindspore/ops_generate/common/base_generator.py +14 -0
  264. mindspore/ops_generate/common/gen_constants.py +8 -3
  265. mindspore/ops_generate/common/gen_utils.py +0 -19
  266. mindspore/ops_generate/common/op_proto.py +11 -4
  267. mindspore/ops_generate/common/template.py +88 -11
  268. mindspore/ops_generate/gen_ops.py +1 -1
  269. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  270. mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
  271. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  272. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  273. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  274. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  275. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  276. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
  277. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  278. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  279. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  280. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  281. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  282. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  283. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  284. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  285. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  286. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  287. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  288. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  289. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  290. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  291. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  292. mindspore/parallel/_auto_parallel_context.py +16 -23
  293. mindspore/parallel/_cell_wrapper.py +113 -45
  294. mindspore/parallel/_parallel_serialization.py +4 -3
  295. mindspore/parallel/_ps_context.py +4 -6
  296. mindspore/parallel/_tensor.py +167 -12
  297. mindspore/parallel/_transformer/moe.py +1 -1
  298. mindspore/parallel/_transformer/transformer.py +17 -12
  299. mindspore/parallel/_utils.py +5 -11
  300. mindspore/parallel/auto_parallel.py +35 -14
  301. mindspore/parallel/checkpoint_convert.py +3 -3
  302. mindspore/parallel/checkpoint_transform.py +13 -7
  303. mindspore/parallel/cluster/process_entity/_api.py +88 -49
  304. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  305. mindspore/parallel/cluster/run.py +48 -7
  306. mindspore/parallel/function/__init__.py +8 -1
  307. mindspore/parallel/function/reshard_func.py +12 -12
  308. mindspore/parallel/nn/__init__.py +15 -2
  309. mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
  310. mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
  311. mindspore/parallel/shard.py +10 -25
  312. mindspore/parallel/transform_safetensors.py +469 -174
  313. mindspore/pgodb140.dll +0 -0
  314. mindspore/pgort140.dll +0 -0
  315. mindspore/profiler/__init__.py +2 -1
  316. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  317. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  318. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
  319. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  320. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  321. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  322. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  323. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  324. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  325. mindspore/profiler/analysis/task_manager.py +1 -1
  326. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  327. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  328. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
  329. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
  330. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  331. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  332. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  333. mindspore/profiler/common/constant.py +16 -0
  334. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  335. mindspore/profiler/common/path_manager.py +9 -0
  336. mindspore/profiler/common/profiler_context.py +50 -29
  337. mindspore/profiler/common/profiler_info.py +0 -16
  338. mindspore/profiler/common/profiler_meta_data.py +1 -0
  339. mindspore/profiler/common/profiler_op_analyse.py +239 -0
  340. mindspore/profiler/common/profiler_output_path.py +23 -8
  341. mindspore/profiler/common/profiler_parameters.py +128 -35
  342. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  343. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  344. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  345. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  346. mindspore/profiler/dynamic_profiler.py +374 -338
  347. mindspore/profiler/envprofiler.py +42 -12
  348. mindspore/profiler/experimental_config.py +112 -7
  349. mindspore/profiler/mstx.py +33 -12
  350. mindspore/profiler/platform/__init__.py +2 -3
  351. mindspore/profiler/platform/cpu_profiler.py +10 -4
  352. mindspore/profiler/platform/npu_profiler.py +30 -20
  353. mindspore/profiler/profiler.py +218 -154
  354. mindspore/profiler/profiler_action_controller.py +65 -77
  355. mindspore/profiler/profiler_interface.py +2 -2
  356. mindspore/profiler/schedule.py +10 -4
  357. mindspore/rewrite/common/config.py +1 -0
  358. mindspore/rewrite/common/namer.py +1 -0
  359. mindspore/rewrite/common/namespace.py +1 -0
  360. mindspore/rewrite/node/node.py +31 -11
  361. mindspore/rewrite/parsers/assign_parser.py +1 -1
  362. mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
  363. mindspore/run_check/_check_version.py +7 -10
  364. mindspore/runtime/__init__.py +8 -6
  365. mindspore/runtime/event.py +10 -4
  366. mindspore/runtime/executor.py +87 -45
  367. mindspore/runtime/memory.py +31 -32
  368. mindspore/runtime/thread_bind_core.py +299 -165
  369. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  370. mindspore/swresample-4.dll +0 -0
  371. mindspore/swscale-6.dll +0 -0
  372. mindspore/tbbmalloc.dll +0 -0
  373. mindspore/tinyxml2.dll +0 -0
  374. mindspore/train/_utils.py +17 -7
  375. mindspore/train/amp.py +43 -23
  376. mindspore/train/callback/__init__.py +5 -5
  377. mindspore/train/callback/_callback.py +2 -1
  378. mindspore/train/callback/_checkpoint.py +4 -14
  379. mindspore/train/callback/_flops_collector.py +11 -7
  380. mindspore/train/callback/_landscape.py +0 -1
  381. mindspore/train/callback/_train_fault_tolerance.py +98 -21
  382. mindspore/train/data_sink.py +15 -6
  383. mindspore/train/dataset_helper.py +14 -5
  384. mindspore/train/model.py +133 -69
  385. mindspore/train/serialization.py +168 -126
  386. mindspore/train/summary/summary_record.py +13 -2
  387. mindspore/train/train_thor/model_thor.py +2 -2
  388. mindspore/turbojpeg.dll +0 -0
  389. mindspore/utils/__init__.py +3 -2
  390. mindspore/utils/dryrun.py +0 -6
  391. mindspore/utils/runtime_execution_order_check.py +163 -77
  392. mindspore/utils/sdc_detect.py +68 -0
  393. mindspore/utils/utils.py +14 -17
  394. mindspore/vcmeta.dll +0 -0
  395. mindspore/vcruntime140.dll +0 -0
  396. mindspore/vcruntime140_1.dll +0 -0
  397. mindspore/version.py +1 -1
  398. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
  399. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/RECORD +403 -442
  400. mindspore/_deprecated/jit.py +0 -198
  401. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  402. mindspore/communication/_hccl_management.py +0 -297
  403. mindspore/experimental/es/embedding_service.py +0 -891
  404. mindspore/experimental/es/embedding_service_layer.py +0 -581
  405. mindspore/profiler/common/validator/__init__.py +0 -14
  406. mindspore/profiler/common/validator/validate_path.py +0 -84
  407. mindspore/profiler/parser/__init__.py +0 -14
  408. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  409. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  410. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  411. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  412. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  413. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  414. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  415. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  416. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  417. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  418. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  419. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  420. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  421. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  422. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  423. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  424. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  425. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  426. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  427. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  428. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  429. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  430. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  431. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  432. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  433. mindspore/profiler/parser/container.py +0 -229
  434. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  435. mindspore/profiler/parser/flops_parser.py +0 -531
  436. mindspore/profiler/parser/framework_enum.py +0 -111
  437. mindspore/profiler/parser/framework_parser.py +0 -464
  438. mindspore/profiler/parser/framework_struct.py +0 -61
  439. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  440. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  441. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  442. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  443. mindspore/profiler/parser/hccl_parser.py +0 -573
  444. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  445. mindspore/profiler/parser/integrator.py +0 -526
  446. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  447. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  448. mindspore/profiler/parser/minddata_parser.py +0 -186
  449. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  450. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  451. mindspore/profiler/parser/optime_parser.py +0 -250
  452. mindspore/profiler/parser/profiler_info.py +0 -213
  453. mindspore/profiler/parser/step_trace_parser.py +0 -666
  454. mindspore/utils/hooks.py +0 -81
  455. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  456. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
  457. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
  458. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
mindspore/experimental/optim/rmsprop.py CHANGED
@@ -1,48 +1,37 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# The code implementation refers to the following files from pytorch:
+# - https://github.com/pytorch/pytorch/blob/v1.13.0/torch/optim/rmsprop.py
+# Additional modifications are made by Huawei Technologies Co., Ltd in 2023.
 # ============================================================================
 """rmsprop"""
 from __future__ import absolute_import
 
-from mindspore.ops import functional as F, composite as C, operations as P
 import mindspore.common.dtype as mstype
 from mindspore.experimental.optim.optimizer import Optimizer, check_not_less_than, check_not_less_than_without_equal
 from mindspore import ops
 from mindspore import jit
 
-_rmsprop_opt = C.MultitypeFuncGraph("rmsprop_opt")
+_rmsprop_opt = ops.MultitypeFuncGraph("rmsprop_opt")
 
-op_mul = P.Mul()
-op_sqrt = P.Sqrt()
+op_mul = ops.Mul()
+op_sqrt = ops.Sqrt()
 
 
 @_rmsprop_opt.register("Bool", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
 def _run_rmsprop_opt(centered, alpha, eps, momentum, lr, weight, mean_square, mean_grad, mom, grad):
     """Apply rmsprop optimizer to the weight parameter using dynamic learning rate."""
-    F.assign(mean_square, ops.addcmul(op_mul(mean_square, alpha), grad, grad, 1 - alpha))
+    ops.assign(mean_square, ops.addcmul(op_mul(mean_square, alpha), grad, grad, 1 - alpha))
 
     if centered:
-        F.assign(mean_grad, op_mul(mean_grad, alpha) + op_mul(grad, 1 - alpha))
+        ops.assign(mean_grad, op_mul(mean_grad, alpha) + op_mul(grad, 1 - alpha))
         avg = op_sqrt(ops.addcmul(mean_square, mean_grad, mean_grad, -1.)) + eps
     else:
         avg = op_sqrt(mean_square) + eps
 
     if momentum > 0:
-        F.assign(mom, op_mul(mom, momentum) + grad / avg)
-        F.assign(weight, weight - mom * lr)
+        ops.assign(mom, op_mul(mom, momentum) + grad / avg)
+        ops.assign(weight, weight - mom * lr)
     else:
-        F.assign(weight, weight - lr * grad / avg)
+        ops.assign(weight, weight - lr * grad / avg)
     return True
 
 
@@ -124,7 +113,7 @@ class RMSprop(Optimizer):
         self.mean_grad = self.parameters.clone(prefix="mean_grad", init='zeros')
         self.mean_square = self.parameters.clone(prefix="mean_square", init='zeros')
         self.moment = self.parameters.clone(prefix="moment", init='zeros')
-        self.op_cast = P.Cast()
+        self.op_cast = ops.Cast()
 
     @jit
     def implementation(self, group_id, lr, gradients, maximize, weight_decay, centered, alpha, eps, momentum):
@@ -132,12 +121,12 @@ class RMSprop(Optimizer):
         start_id = self.group_start_id[group_id]
         end_id = self.group_start_id[group_id + 1]
         params = self.parameters[start_id: end_id]
-        grads = tuple([grad if not maximize else F.neg(grad) for grad in gradients[start_id: end_id]])
+        grads = tuple([grad if not maximize else ops.neg(grad) for grad in gradients[start_id: end_id]])
         grads = self._decay_weight(weight_decay, params, grads)
         mean_grad = self.mean_grad[start_id: end_id]
         mean_square = self.mean_square[start_id: end_id]
         moment = self.moment[start_id: end_id]
-        self.hyper_map(F.partial(_rmsprop_opt, centered, alpha, eps, momentum, lr),
+        self.hyper_map(ops.partial(_rmsprop_opt, centered, alpha, eps, momentum, lr),
                        params, mean_square, mean_grad, moment, grads)
         return True
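Note: the update that `_run_rmsprop_opt` applies per parameter is the standard (optionally centered) RMSprop rule from the referenced `torch/optim/rmsprop.py`. A minimal NumPy paraphrase for reference (illustrative only, not MindSpore API; buffer names mirror the hunk above):

```python
import numpy as np

def rmsprop_step(weight, grad, mean_square, mean_grad, mom,
                 lr=0.01, alpha=0.99, eps=1e-8, momentum=0.0, centered=False):
    """In-place paraphrase of _run_rmsprop_opt on NumPy arrays."""
    # mean_square <- alpha * mean_square + (1 - alpha) * grad^2  (the addcmul call)
    mean_square[...] = alpha * mean_square + (1 - alpha) * grad * grad
    if centered:
        # track the mean gradient and subtract its square before the sqrt
        mean_grad[...] = alpha * mean_grad + (1 - alpha) * grad
        avg = np.sqrt(mean_square - mean_grad * mean_grad) + eps
    else:
        avg = np.sqrt(mean_square) + eps
    if momentum > 0:
        mom[...] = momentum * mom + grad / avg
        weight[...] = weight - lr * mom
    else:
        weight[...] = weight - lr * grad / avg
```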
mindspore/experimental/optim/rprop.py CHANGED
@@ -1,37 +1,26 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# The code implementation refers to the following files from pytorch:
+# - https://github.com/pytorch/pytorch/blob/v1.13.0/torch/optim/rprop.py
+# Additional modifications are made by Huawei Technologies Co., Ltd in 2023.
 # ============================================================================
 """rprop"""
 from __future__ import absolute_import
 
-from mindspore.ops import functional as F, composite as C, operations as P
+from mindspore import ops
 from mindspore.common import Tensor, Parameter
 import mindspore.common.dtype as mstype
 from mindspore import _checkparam as validator
 from mindspore.experimental.optim.optimizer import Optimizer, check_not_less_than_without_equal
-from mindspore import ops
 from mindspore import jit
 
-_rprop_opt = C.MultitypeFuncGraph("rprop_opt")
+_rprop_opt = ops.MultitypeFuncGraph("rprop_opt")
 
-op_sign = P.Sign()
-op_fill = P.FillV2()
-op_assign = P.Assign()
-op_assignadd = P.AssignAdd()
-op_cast = P.Cast()
-op_select = P.Select()
-op_oneslike = P.OnesLike()
+op_sign = ops.Sign()
+op_fill = ops.FillV2()
+op_assign = ops.Assign()
+op_assignadd = ops.AssignAdd()
+op_cast = ops.Cast()
+op_select = ops.Select()
+op_oneslike = ops.OnesLike()
 
 
 @_rprop_opt.register("Tensor", "Tensor", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
@@ -131,7 +120,7 @@ class Rprop(Optimizer):
         self.step_size = self.parameters.clone(prefix="step_size", init='zeros')
         self.step_t = Parameter(Tensor(0, mstype.int32), "step_t")
         self.increase_tensor = Tensor(1, mstype.int32)
-        self.op_cast = P.Cast()
+        self.op_cast = ops.Cast()
 
     @jit(backend="ms_backend")
     def implementation(self, etaminus, etaplus, group_id, lr, gradients, maximize, step_size_min, step_size_max):
@@ -141,10 +130,10 @@
         end_id = self.group_start_id[group_id + 1]
 
         params = self.parameters[start_id: end_id]
-        grads = tuple([grad if not maximize else F.neg(grad) for grad in gradients[start_id: end_id]])
+        grads = tuple([grad if not maximize else ops.neg(grad) for grad in gradients[start_id: end_id]])
         prev = self.prev[start_id: end_id]
         step_size = self.step_size[start_id: end_id]
-        self.hyper_map(F.partial(_rprop_opt, etaminus, etaplus, step_size_min, step_size_max, self.step_t, lr),
+        self.hyper_map(ops.partial(_rprop_opt, etaminus, etaplus, step_size_min, step_size_max, self.step_t, lr),
                        params, prev, step_size, grads)
         return True
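Note: these hunks show only the module header and class setup; the body of the registered `_rprop_opt` is unchanged. For context, the classic sign-based Rprop rule it implements (per the referenced `torch/optim/rprop.py`) looks roughly like this NumPy paraphrase (illustrative only):

```python
import numpy as np

def rprop_step(weight, grad, prev, step_size,
               etaminus=0.5, etaplus=1.2,
               step_size_min=1e-6, step_size_max=50.0):
    """In-place paraphrase of one classic Rprop update on NumPy arrays."""
    sign = np.sign(grad * prev)
    # grow the step where the gradient kept its sign, shrink where it flipped
    factor = np.where(sign > 0, etaplus, np.where(sign < 0, etaminus, 1.0))
    step_size[...] = np.clip(step_size * factor, step_size_min, step_size_max)
    grad = np.where(sign < 0, 0.0, grad)  # skip the update where the sign flipped
    weight[...] = weight - np.sign(grad) * step_size
    prev[...] = grad
```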
mindspore/experimental/optim/sgd.py CHANGED
@@ -1,28 +1,18 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# The code implementation refers to the following files from pytorch:
+# - https://github.com/pytorch/pytorch/blob/v1.13.0/torch/optim/sgd.py
+# Additional modifications are made by Huawei Technologies Co., Ltd in 2023.
 # ============================================================================
 """sgd"""
 from __future__ import absolute_import
 
-from mindspore.ops import functional as F, composite as C, operations as P
+from mindspore import ops
 from mindspore.common.tensor import Tensor
 import mindspore.common.dtype as mstype
 from mindspore import _checkparam as Validator
 from mindspore.experimental.optim.optimizer import Optimizer
 from mindspore import jit
 
-_sgd_opt = C.MultitypeFuncGraph("sgd_opt")
+_sgd_opt = ops.MultitypeFuncGraph("sgd_opt")
 
 
 @_sgd_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
@@ -129,7 +119,7 @@ class SGD(Optimizer):
                              "equal to 0.0, but got 'momentum' {}, 'dampening' {}".format(momentum, dampening))
         self.accum = self.parameters.clone(prefix="accum", init='zeros')
         self.stat = self.parameters.clone(prefix="stat", init='ones')
-        self.op_cast = P.Cast()
+        self.op_cast = ops.Cast()
 
     @jit
     def implementation(self, momentum, lr, group_id, gradients, maximize, dampening, weight_decay, nesterov):
@@ -137,9 +127,9 @@ class SGD(Optimizer):
         start_id = self.group_start_id[group_id]
         end_id = self.group_start_id[group_id + 1]
         momentum = self.op_cast(momentum, mstype.float32)
-        opt = P.SGD(dampening, weight_decay, nesterov)
-        grads = tuple([grad if not maximize else F.neg(grad) for grad in gradients[start_id: end_id]])
-        self.hyper_map(F.partial(_sgd_opt, opt, momentum, lr), grads,
+        opt = ops.SGD(dampening, weight_decay, nesterov)
+        grads = tuple([grad if not maximize else ops.neg(grad) for grad in gradients[start_id: end_id]])
+        self.hyper_map(ops.partial(_sgd_opt, opt, momentum, lr), grads,
                        self.parameters[start_id: end_id], self.accum[start_id: end_id],
                        self.stat[start_id: end_id])
         return True
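Note: the same refactor repeats across `rmsprop.py`, `rprop.py`, and `sgd.py`: the internal `F`/`C`/`P` aliases are replaced by the public `mindspore.ops` namespace, which re-exports the same symbols. A minimal sketch of the pattern (assumes a working MindSpore install):

```python
import mindspore as ms
from mindspore import ops

# 2.6.0rc1 style (removed in these hunks):
#   from mindspore.ops import functional as F, composite as C, operations as P
# 2.7.0 style: the same symbols through the public namespace.
x = ms.Parameter(ms.Tensor(1.0, ms.float32), name="x")
ops.assign(x, ms.Tensor(2.0, ms.float32))    # was F.assign(...)
mul = ops.Mul()                              # was P.Mul()
mtfg = ops.MultitypeFuncGraph("demo")        # was C.MultitypeFuncGraph(...)
```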
mindspore/hal/__init__.py CHANGED
@@ -19,13 +19,13 @@ MindSpore abstracts the preceding modules from different backends and allows use
 resources at the Python layer. Currently, these interfaces take effect only in PyNative mode.
 """
 
-from mindspore.hal.device import is_initialized, is_available, device_count, get_device_capability,\
+from mindspore.hal.device import is_initialized, is_available, device_count, get_device_capability, \
     get_device_properties, get_device_name, get_arch_list
-from mindspore.hal.stream import Stream, synchronize, set_cur_stream, current_stream, default_stream,\
+from mindspore.hal.stream import Stream, synchronize, set_cur_stream, current_stream, default_stream, \
     communication_stream, StreamCtx
 from mindspore.hal.event import Event
-from mindspore.hal.memory import memory_stats, memory_reserved, max_memory_reserved, empty_cache,\
-    reset_peak_memory_stats, memory_summary, memory_allocated,\
+from mindspore.hal.memory import memory_stats, memory_reserved, max_memory_reserved, empty_cache, \
+    reset_peak_memory_stats, memory_summary, memory_allocated, \
     max_memory_allocated, reset_max_memory_reserved, reset_max_memory_allocated
 
 __all__ = [
mindspore/hal/contiguous_tensors_handle.py CHANGED
@@ -27,7 +27,7 @@ def combine_tensor_list_contiguous(tensor_list, enable_mem_align=True):
     Return a contiguous memory handle where contiguous memory has been requested and slicing functionality is provided.
 
     Args:
-        tensor_list (list[Tensor], Tuple[Tensor]): The tensor list to be stored.
+        tensor_list (list[Tensor], tuple[Tensor]): The tensor list to be stored.
         enable_mem_align (bool, optional): Whether to enable the memory alignment function.
             False is not supported. Default ``True`` .
 
@@ -57,7 +57,7 @@ class ContiguousTensorsHandle:
     ContiguousTensorsHandle is a handle manage continuous memory.
 
     Args:
-        tensor_list (list[Tensor], Tuple[Tensor]): The tensor list to be stored.
+        tensor_list (list[Tensor], tuple[Tensor]): The tensor list to be stored.
         enable_mem_align (bool, optional): Whether to enable the memory alignment function.
             False is not supported. Default ``True`` .
 
mindspore/hal/memory.py CHANGED
@@ -14,14 +14,18 @@
 # ============================================================================
 
 """Hardware memory interfaces."""
-from mindspore._c_expression import _memory_stats, _reset_max_mem_reserved, _reset_max_mem_allocated
+from mindspore._c_expression import _memory_stats, _reset_max_mem_reserved, _reset_max_mem_allocated, _empty_cache, \
+    DeviceContextManager
 from mindspore import log as logger
+import mindspore as ms
 from .device import _check_inputs_validation, is_initialized
 
+
 function_memory_status = {'memory_stats': False, 'memory_reserved': False, 'max_memory_reserved': False,
                           'empty_cache': False, 'reset_peak_memory_stats': False, 'memory_summary': False,
                           'memory_allocated': False, 'max_memory_allocated': False,
                           'reset_max_memory_reserved': False, 'reset_max_memory_allocated': False}
+_device_context_mgr = DeviceContextManager.get_instance()
 
 
 @_check_inputs_validation
@@ -131,20 +135,36 @@ def max_memory_reserved(device_target=None):
     return _memory_stats(device_target).get("max_reserved_memory", 0)
 
 
+def _is_initialized(device_target):
+    """
+    Returns whether specified backend is initialized.
+    """
+    _device_context = _device_context_mgr.get_device_context(device_target)
+    if _device_context is None:
+        return False
+    return _device_context.initialized()
+
+
 @_check_inputs_validation
 def empty_cache():
     """
-    Release all memory fragments in the memory pool, so that memory arrangement
-    will be optimized, this api will be deprecated and removed in future versions, please use
-    the api :func:`mindspore.runtime.empty_cache` instead.
+    Empty cache in the memory pool, this api will be deprecated and removed in future versions.
+    Please use the api :func:`mindspore.runtime.empty_cache` instead.
 
     Note:
-        Currently, the MindSpore memory pool does not have the function of releasing memory fragments.
-        This interface is reserved but implemented as an empty method and prompted in log mode when using.
+        - Empty cache help reduce the fragmentation of device memory.
+        - Support Atlas A2 series products.
+
+    Supported Platforms:
+        ``Ascend``
     """
     if not function_memory_status['empty_cache']:
         function_memory_status['empty_cache'] = True
-        logger.warning(f"The empty_cache operation is currently not supported.")
+    device_target = ms.context.get_context("device_target")
+    if not _is_initialized(device_target):
+        logger.warning(f"Backend {device_target} is not initialized yet.")
+        return
+    _empty_cache(device_target)
 
 
 @_check_inputs_validation
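Note: `empty_cache` is no longer a logged no-op; it now resolves the current device context and forwards to the C++ `_empty_cache` binding, warning and returning if the backend is uninitialized. A hedged usage sketch (assumes an initialized Ascend backend; `mindspore.runtime.empty_cache` is the replacement API the docstring points to):

```python
import mindspore as ms

# Allocate some device memory, drop the reference, then release cached
# blocks back to the device to reduce fragmentation.
x = ms.ops.ones((1024, 1024), ms.float32)
del x
ms.runtime.empty_cache()   # preferred API; ms.hal.empty_cache() now forwards similarly
```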
mindspore/include/api/cell.h CHANGED
@@ -31,10 +31,26 @@ using Output = InputAndOutput;
 
 class MS_API CellBase {
  public:
+  /// \brief Constructor of Cellbase.
   CellBase() = default;
+  /// \brief Destructor of Cellbase.
   virtual ~CellBase() = default;
+  /// \brief Construct using inputs.
+  ///
+  /// \param[in] inputs Vector of inputs.
+  ///
+  /// \return Vector of outputs.
   virtual std::vector<Output> Construct(const std::vector<Input> &inputs) { return {}; }
+  /// \brief Clone a cellbase.
+  ///
+  /// \return Shared pointer of Cellbase.
   virtual std::shared_ptr<CellBase> Clone() const = 0;
+  /// \brief Run a cellbase.
+  ///
+  /// \param[in] inputs Vector of MSTensor as inputs.
+  /// \param[in] outputs Vector of MSTensor as outputs.
+  ///
+  /// \return Status of the operation.
   virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) { return kSuccess; }
   std::vector<Output> operator()(const std::vector<Input> &inputs) const;
 };
@@ -49,19 +65,50 @@ class MS_API Cell : public CellBase {
 class MS_API GraphCell final : public Cell<GraphCell> {
  public:
   class GraphImpl;
-
+  /// \brief Constructor of GraphCell.
   GraphCell() = default;
+  /// \brief Destructor of GraphCell.
   ~GraphCell() override = default;
-
+  /// \brief Constructor of GraphCell.
+  ///
+  /// \param[in] graph Graph to construct.
   explicit GraphCell(const Graph &graph);
+  /// \brief Constructor of GraphCell.
+  ///
+  /// \param[in] graph Graph to construct.
   explicit GraphCell(Graph &&graph);
+  /// \brief Constructor of GraphCell.
+  ///
+  /// \param[in] graph Graph to construct.
   explicit GraphCell(const std::shared_ptr<Graph> &graph);
-
+  /// \brief Set a context.
+  ///
+  /// \param[in] context Context to be set.
   void SetContext(const std::shared_ptr<Context> &context);
+  /// \brief Get back the graph.
+  ///
+  /// \return Graph of the graphcell.
   const std::shared_ptr<Graph> &GetGraph() const { return graph_; }
+  /// \brief Run the graphcell.
+  ///
+  /// \param[in] inputs Vector of MSTensor as inputs.
+  /// \param[in] outputs Vector of MSTensor as outputs.
+  ///
+  /// \return Status of the operation.
   Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;
+  /// \brief Get the inputs.
+  ///
+  /// \return Inputs.
   std::vector<MSTensor> GetInputs();
+  /// \brief Get the outputs.
+  ///
+  /// \return Outputs.
   std::vector<MSTensor> GetOutputs();
+  /// \brief Load the device.
+  ///
+  /// \param[in] device_id Device id to be loaded.
+  ///
+  /// \return Status of the operation.
   Status Load(uint32_t device_id);
 
  private:
@@ -73,12 +120,25 @@ class MS_API GraphCell final : public Cell<GraphCell> {
 
 class MS_API InputAndOutput {
  public:
+  /// \brief Constructor of InputAndOutput.
   InputAndOutput();
+  /// \brief Destructor of InputAndOutput.
   ~InputAndOutput() = default;
-
+  /// \brief Constructor of InputAndOutput.
+  ///
+  /// \param[in] cell The cellbase.
+  ///
+  /// \param[in] prev The previous inputs/outputs.
+  ///
+  /// \param[in] index Index of inputs/outputs.
   InputAndOutput(const std::shared_ptr<CellBase> &cell, const std::vector<InputAndOutput> &prev, int32_t index);
-
+  /// \brief Get index.
+  ///
+  /// \return index Index of inputs/outputs.
   int32_t GetIndex() const { return index_; }
+  /// \brief Set index.
+  ///
+  /// \param[in] index Index to be set.
   void SetIndex(int32_t index) { index_ = index; }
 
  private:
mindspore/include/api/cfg.h CHANGED
@@ -28,35 +28,50 @@ namespace mindspore {
 constexpr int iter_th = 1000;
 class MS_API MixPrecisionCfg {
  public:
+  /// \brief Constructor of mix precision training config.
   MixPrecisionCfg() {
     this->dynamic_loss_scale_ = false;
     this->loss_scale_ = 128.0f;
     this->keep_batchnorm_fp32_ = true;
     this->num_of_not_nan_iter_th_ = iter_th;
   }
+  /// \brief Constructor of mix precision training config.
+  ///
+  /// \param[in] rhs The config of mix precision.
   MixPrecisionCfg(const MixPrecisionCfg &rhs) {
     this->dynamic_loss_scale_ = rhs.dynamic_loss_scale_;
     this->loss_scale_ = rhs.loss_scale_;
     this->keep_batchnorm_fp32_ = rhs.keep_batchnorm_fp32_;
     this->num_of_not_nan_iter_th_ = rhs.num_of_not_nan_iter_th_;
   }
+  /// \brief Destructor of mix precision config.
   ~MixPrecisionCfg() = default;
 
-  bool dynamic_loss_scale_ = false; /**< Enable/disable dynamic loss scale during mix precision training */
-  float loss_scale_; /**< Initial loss scale factor */
-  bool keep_batchnorm_fp32_ = true; /**< Keep batch norm in FP32 while training */
-  uint32_t num_of_not_nan_iter_th_; /**< a threshold for modifying loss scale when dynamic loss scale is enabled */
-  bool is_raw_mix_precision_ = false; /**< Is mix precision model export from mindspore */
+  /// \brief Enable/disable dynamic loss scale during mix precision training.
+  bool dynamic_loss_scale_ = false;
+  /// \brief Initial loss scale factor.
+  float loss_scale_;
+  /// \brief Keep batch norm in FP32 while training.
+  bool keep_batchnorm_fp32_ = true;
+  /// \brief A threshold for modifying loss scale when dynamic loss scale is enabled.
+  uint32_t num_of_not_nan_iter_th_;
+  /// \brief Is mix precision model export from mindspore.
+  bool is_raw_mix_precision_ = false;
 };
 
 class MS_API TrainCfg {
  public:
+  /// \brief Constructor of training config.
   TrainCfg() = default;
+  /// \brief Constructor of training config.
+  ///
+  /// \param[in] rhs The training config.
   TrainCfg(const TrainCfg &rhs) {
     this->loss_name_ = rhs.loss_name_;
     this->mix_precision_cfg_ = rhs.mix_precision_cfg_;
     this->accumulate_gradients_ = rhs.accumulate_gradients_;
   }
+  /// \brief Destructor of training config.
   ~TrainCfg() = default;
 
   /// \brief obtain part of the name that identify a loss kernel.
@@ -67,9 +82,11 @@ class MS_API TrainCfg {
   ///
   /// \param[in] loss_name define part of the name that identify a loss kernel.
   inline void SetLossName(const std::vector<std::string> &loss_name);
-
+  /// \brief Optimization level.
   OptimizationLevel optimization_level_ = kO0;
-  MixPrecisionCfg mix_precision_cfg_; /**< Mix precision configuration */
+  /// \brief Mix precision configuration.
+  MixPrecisionCfg mix_precision_cfg_;
+  /// \brief If accumulate gradients is used.
   bool accumulate_gradients_ = false;
 
  private:
mindspore/include/api/context.h CHANGED
@@ -50,6 +50,7 @@ class DeviceInfoContext;
 /// \brief Context is used to store environment variables during execution.
 class MS_API Context {
  public:
+  /// \brief Data of context.
   struct Data;
   Context();
   ~Context() = default;
mindspore/include/api/delegate.h CHANGED
@@ -129,12 +129,20 @@ class Delegate : public IDelegate<LiteDelegateGraph, kernel::Kernel, kernel::Ker
   ///
   /// \return Status. If Status is kLiteNotSupport, the program will return to the MindSpore Lite inner inference.
   virtual Status Init() = 0;
-
+  /// \brief Create kernel.
+  ///
+  /// \param[in] node The kernel to be created.
+  ///
+  /// \return Created kernel.
   std::shared_ptr<kernel::Kernel> CreateKernel(const std::shared_ptr<kernel::Kernel> &node) override {
     // return node as kernel since they are same one.
     return node;
   }
-
+  /// \brief Check if the node is delegate node.
+  ///
+  /// \param[in] node The kernel to verify.
+  ///
+  /// \return True if the node is delegate.
   bool IsDelegateNode(const std::shared_ptr<kernel::Kernel> &node) override { return false; }
 
   /// \brief Replace the nodes in model with delegate nodes, delegate will create kernels by its delegate nodes.