mindspore 2.6.0__cp311-cp311-win_amd64.whl → 2.7.0rc1__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (403)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +1 -1
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +40 -9
  9. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  10. mindspore/_extends/optimize/cell_utils.py +96 -0
  11. mindspore/_extends/parse/__init__.py +2 -2
  12. mindspore/_extends/parse/compile_config.py +44 -22
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  14. mindspore/_extends/parse/parser.py +36 -61
  15. mindspore/_extends/parse/resources.py +39 -0
  16. mindspore/_extends/parse/standard_method.py +32 -13
  17. mindspore/_extends/parse/trope.py +8 -1
  18. mindspore/_extends/pijit/__init__.py +1 -2
  19. mindspore/amp.py +4 -4
  20. mindspore/atlprov.dll +0 -0
  21. mindspore/avcodec-59.dll +0 -0
  22. mindspore/avdevice-59.dll +0 -0
  23. mindspore/avfilter-8.dll +0 -0
  24. mindspore/avformat-59.dll +0 -0
  25. mindspore/avutil-57.dll +0 -0
  26. mindspore/boost/adasum.py +1 -1
  27. mindspore/boost/boost_cell_wrapper.py +4 -4
  28. mindspore/c1.dll +0 -0
  29. mindspore/c1xx.dll +0 -0
  30. mindspore/c2.dll +0 -0
  31. mindspore/common/__init__.py +27 -2
  32. mindspore/common/_grad_function.py +2 -1
  33. mindspore/common/_pijit_context.py +28 -7
  34. mindspore/common/_stub_tensor.py +1 -209
  35. mindspore/common/_tensor_cpp_method.py +1 -1
  36. mindspore/common/_tensor_docs.py +76 -15
  37. mindspore/common/api.py +193 -112
  38. mindspore/common/dtype.py +21 -11
  39. mindspore/common/dump.py +10 -15
  40. mindspore/common/generator.py +2 -3
  41. mindspore/common/hook_handle.py +11 -2
  42. mindspore/common/jit_config.py +1 -1
  43. mindspore/common/jit_trace.py +84 -105
  44. mindspore/common/parameter.py +26 -12
  45. mindspore/common/recompute.py +3 -3
  46. mindspore/common/sparse_tensor.py +0 -3
  47. mindspore/common/symbol.py +0 -1
  48. mindspore/common/tensor.py +48 -83
  49. mindspore/communication/_comm_helper.py +46 -4
  50. mindspore/communication/management.py +79 -7
  51. mindspore/context.py +38 -23
  52. mindspore/dataset/core/config.py +3 -3
  53. mindspore/dataset/engine/datasets.py +20 -7
  54. mindspore/dataset/engine/datasets_user_defined.py +32 -2
  55. mindspore/dataset/engine/iterators.py +2 -2
  56. mindspore/dataset/engine/obs/config_loader.py +2 -2
  57. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  58. mindspore/dataset/transforms/py_transforms.py +7 -3
  59. mindspore/dataset/transforms/transforms.py +7 -3
  60. mindspore/dataset/vision/validators.py +1 -0
  61. mindspore/device_context/ascend/device.py +1 -1
  62. mindspore/device_context/gpu/__init__.py +2 -2
  63. mindspore/device_context/gpu/device.py +1 -1
  64. mindspore/device_context/gpu/op_precision.py +4 -2
  65. mindspore/device_context/gpu/op_tuning.py +6 -3
  66. mindspore/device_manager.py +16 -9
  67. mindspore/dnnl.dll +0 -0
  68. mindspore/dpcmi.dll +0 -0
  69. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -5
  70. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  71. mindspore/experimental/optim/adadelta.py +13 -20
  72. mindspore/experimental/optim/adagrad.py +15 -22
  73. mindspore/experimental/optim/adam.py +17 -24
  74. mindspore/experimental/optim/adamax.py +14 -22
  75. mindspore/experimental/optim/adamw.py +28 -34
  76. mindspore/experimental/optim/asgd.py +15 -25
  77. mindspore/experimental/optim/lr_scheduler.py +27 -45
  78. mindspore/experimental/optim/nadam.py +14 -24
  79. mindspore/experimental/optim/optimizer.py +13 -23
  80. mindspore/experimental/optim/radam.py +18 -24
  81. mindspore/experimental/optim/rmsprop.py +14 -25
  82. mindspore/experimental/optim/rprop.py +15 -26
  83. mindspore/experimental/optim/sgd.py +9 -19
  84. mindspore/hal/__init__.py +4 -4
  85. mindspore/hal/contiguous_tensors_handle.py +2 -2
  86. mindspore/hal/memory.py +1 -0
  87. mindspore/include/api/cell.h +37 -1
  88. mindspore/include/api/delegate.h +10 -0
  89. mindspore/include/api/model.h +3 -0
  90. mindspore/include/api/types.h +2 -2
  91. mindspore/include/c_api/model_c.h +0 -58
  92. mindspore/include/c_api/tensor_c.h +0 -26
  93. mindspore/include/dataset/vision_ascend.h +1 -1
  94. mindspore/jpeg62.dll +0 -0
  95. mindspore/mindrecord/tools/cifar10.py +60 -11
  96. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  97. mindspore/mindspore_backend_common.dll +0 -0
  98. mindspore/mindspore_backend_manager.dll +0 -0
  99. mindspore/mindspore_common.dll +0 -0
  100. mindspore/mindspore_core.dll +0 -0
  101. mindspore/mindspore_cpu_res_manager.dll +0 -0
  102. mindspore/mindspore_dump.dll +0 -0
  103. mindspore/mindspore_frontend.dll +0 -0
  104. mindspore/mindspore_glog.dll +0 -0
  105. mindspore/mindspore_memory_pool.dll +0 -0
  106. mindspore/mindspore_ms_backend.dll +0 -0
  107. mindspore/mindspore_ops.dll +0 -0
  108. mindspore/mindspore_ops_host.dll +0 -0
  109. mindspore/mindspore_ops_kernel_common.dll +0 -0
  110. mindspore/mindspore_profiler.dll +0 -0
  111. mindspore/mindspore_pyboost.dll +0 -0
  112. mindspore/mindspore_pynative.dll +0 -0
  113. mindspore/mindspore_res_manager.dll +0 -0
  114. mindspore/mindspore_runtime_pipeline.dll +0 -0
  115. mindspore/mint/__init__.py +4 -44
  116. mindspore/mint/distributed/__init__.py +1 -0
  117. mindspore/mint/distributed/distributed.py +208 -5
  118. mindspore/mint/nn/__init__.py +1 -1
  119. mindspore/mint/nn/functional.py +53 -6
  120. mindspore/mint/nn/layer/_functions.py +164 -294
  121. mindspore/mint/nn/layer/activation.py +8 -6
  122. mindspore/mint/nn/layer/conv.py +122 -98
  123. mindspore/mint/nn/layer/normalization.py +8 -22
  124. mindspore/mint/optim/adam.py +19 -18
  125. mindspore/mint/optim/adamw.py +14 -8
  126. mindspore/mint/optim/sgd.py +5 -5
  127. mindspore/msobj140.dll +0 -0
  128. mindspore/mspdb140.dll +0 -0
  129. mindspore/mspdbcore.dll +0 -0
  130. mindspore/mspdbst.dll +0 -0
  131. mindspore/mspft140.dll +0 -0
  132. mindspore/msvcdis140.dll +0 -0
  133. mindspore/msvcp140_1.dll +0 -0
  134. mindspore/msvcp140_2.dll +0 -0
  135. mindspore/msvcp140_atomic_wait.dll +0 -0
  136. mindspore/msvcp140_codecvt_ids.dll +0 -0
  137. mindspore/nn/cell.py +325 -499
  138. mindspore/nn/grad/cell_grad.py +11 -12
  139. mindspore/nn/layer/activation.py +32 -34
  140. mindspore/nn/layer/basic.py +67 -64
  141. mindspore/nn/layer/channel_shuffle.py +4 -4
  142. mindspore/nn/layer/combined.py +4 -2
  143. mindspore/nn/layer/conv.py +86 -85
  144. mindspore/nn/layer/dense.py +9 -7
  145. mindspore/nn/layer/embedding.py +50 -52
  146. mindspore/nn/layer/image.py +37 -39
  147. mindspore/nn/layer/math.py +111 -112
  148. mindspore/nn/layer/normalization.py +56 -44
  149. mindspore/nn/layer/pooling.py +58 -63
  150. mindspore/nn/layer/rnn_cells.py +33 -33
  151. mindspore/nn/layer/rnns.py +56 -56
  152. mindspore/nn/layer/thor_layer.py +74 -73
  153. mindspore/nn/layer/transformer.py +11 -1
  154. mindspore/nn/learning_rate_schedule.py +20 -20
  155. mindspore/nn/loss/loss.py +79 -81
  156. mindspore/nn/optim/adam.py +1 -1
  157. mindspore/nn/optim/adasum.py +2 -2
  158. mindspore/nn/optim/optimizer.py +1 -1
  159. mindspore/nn/optim/thor.py +2 -2
  160. mindspore/nn/probability/distribution/exponential.py +2 -1
  161. mindspore/nn/probability/distribution/poisson.py +2 -1
  162. mindspore/nn/sparse/sparse.py +3 -3
  163. mindspore/nn/wrap/cell_wrapper.py +34 -37
  164. mindspore/nn/wrap/grad_reducer.py +37 -37
  165. mindspore/nn/wrap/loss_scale.py +72 -74
  166. mindspore/numpy/array_creations.py +5 -5
  167. mindspore/numpy/fft.py +1 -1
  168. mindspore/numpy/math_ops.py +1 -1
  169. mindspore/opencv_core452.dll +0 -0
  170. mindspore/opencv_imgcodecs452.dll +0 -0
  171. mindspore/opencv_imgproc452.dll +0 -0
  172. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  173. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  174. mindspore/ops/_vmap/vmap_array_ops.py +6 -13
  175. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  176. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +17 -8
  177. mindspore/ops/auto_generate/gen_extend_func.py +1 -51
  178. mindspore/ops/auto_generate/gen_ops_def.py +463 -257
  179. mindspore/ops/auto_generate/gen_ops_prim.py +1127 -885
  180. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  181. mindspore/ops/composite/__init__.py +10 -0
  182. mindspore/ops/composite/base.py +8 -4
  183. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  184. mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
  185. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  186. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  187. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  188. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  189. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  190. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  191. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  192. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  193. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  194. mindspore/ops/function/__init__.py +3 -1
  195. mindspore/ops/function/_add_attr_func.py +11 -6
  196. mindspore/ops/function/array_func.py +7 -94
  197. mindspore/ops/function/debug_func.py +4 -3
  198. mindspore/ops/function/grad/grad_func.py +1 -1
  199. mindspore/ops/function/math_func.py +21 -367
  200. mindspore/ops/function/nn_func.py +26 -41
  201. mindspore/ops/function/other_func.py +4 -1
  202. mindspore/ops/function/random_func.py +31 -4
  203. mindspore/ops/functional.py +0 -2
  204. mindspore/ops/functional_overload.py +463 -6
  205. mindspore/ops/op_info_register.py +21 -0
  206. mindspore/ops/operations/__init__.py +5 -2
  207. mindspore/ops/operations/_custom_ops_utils.py +675 -8
  208. mindspore/ops/operations/_inner_ops.py +3 -6
  209. mindspore/ops/operations/_sequence_ops.py +1 -1
  210. mindspore/ops/operations/comm_ops.py +185 -26
  211. mindspore/ops/operations/custom_ops.py +235 -172
  212. mindspore/ops/operations/debug_ops.py +55 -4
  213. mindspore/ops/operations/image_ops.py +13 -13
  214. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  215. mindspore/ops/operations/math_ops.py +3 -4
  216. mindspore/ops/operations/nn_ops.py +5 -6
  217. mindspore/ops/primitive.py +6 -10
  218. mindspore/ops/tensor_method.py +36 -4
  219. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  220. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  221. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  222. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  223. mindspore/ops_generate/common/base_generator.py +14 -0
  224. mindspore/ops_generate/common/gen_constants.py +7 -2
  225. mindspore/ops_generate/common/gen_utils.py +0 -19
  226. mindspore/ops_generate/common/op_proto.py +11 -4
  227. mindspore/ops_generate/common/template.py +88 -11
  228. mindspore/ops_generate/gen_ops.py +1 -1
  229. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  230. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  231. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  232. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  233. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  234. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  235. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  236. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  237. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  238. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  239. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  240. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  241. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  242. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  243. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  244. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  245. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  246. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  247. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  248. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  249. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  250. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  251. mindspore/parallel/_auto_parallel_context.py +4 -2
  252. mindspore/parallel/_cell_wrapper.py +106 -40
  253. mindspore/parallel/_parallel_serialization.py +1 -1
  254. mindspore/parallel/_ps_context.py +4 -6
  255. mindspore/parallel/_tensor.py +167 -12
  256. mindspore/parallel/_transformer/moe.py +1 -1
  257. mindspore/parallel/_transformer/transformer.py +13 -8
  258. mindspore/parallel/auto_parallel.py +12 -5
  259. mindspore/parallel/checkpoint_convert.py +3 -3
  260. mindspore/parallel/checkpoint_transform.py +3 -1
  261. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  262. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  263. mindspore/parallel/cluster/run.py +43 -4
  264. mindspore/parallel/function/__init__.py +8 -1
  265. mindspore/parallel/function/reshard_func.py +1 -1
  266. mindspore/parallel/nn/__init__.py +15 -2
  267. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  268. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  269. mindspore/parallel/shard.py +2 -2
  270. mindspore/parallel/transform_safetensors.py +462 -174
  271. mindspore/pgodb140.dll +0 -0
  272. mindspore/pgort140.dll +0 -0
  273. mindspore/profiler/__init__.py +2 -1
  274. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  275. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  276. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
  277. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  278. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  279. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  280. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  281. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  282. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  283. mindspore/profiler/analysis/task_manager.py +1 -1
  284. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  285. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  286. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  287. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  288. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  289. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  290. mindspore/profiler/common/constant.py +16 -0
  291. mindspore/profiler/common/profiler_context.py +25 -27
  292. mindspore/profiler/common/profiler_info.py +0 -16
  293. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  294. mindspore/profiler/common/profiler_output_path.py +23 -8
  295. mindspore/profiler/common/profiler_parameters.py +128 -35
  296. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  297. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  298. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  299. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  300. mindspore/profiler/dynamic_profiler.py +305 -314
  301. mindspore/profiler/envprofiler.py +12 -7
  302. mindspore/profiler/experimental_config.py +96 -6
  303. mindspore/profiler/mstx.py +33 -12
  304. mindspore/profiler/platform/__init__.py +2 -3
  305. mindspore/profiler/platform/npu_profiler.py +29 -19
  306. mindspore/profiler/profiler.py +35 -19
  307. mindspore/profiler/profiler_action_controller.py +64 -76
  308. mindspore/profiler/schedule.py +10 -4
  309. mindspore/rewrite/common/config.py +1 -0
  310. mindspore/rewrite/common/namer.py +1 -0
  311. mindspore/rewrite/common/namespace.py +1 -0
  312. mindspore/rewrite/node/node.py +31 -11
  313. mindspore/rewrite/parsers/assign_parser.py +1 -1
  314. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  315. mindspore/run_check/_check_version.py +7 -10
  316. mindspore/runtime/__init__.py +5 -5
  317. mindspore/runtime/event.py +10 -4
  318. mindspore/runtime/executor.py +60 -45
  319. mindspore/runtime/memory.py +21 -30
  320. mindspore/runtime/thread_bind_core.py +298 -164
  321. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  322. mindspore/swresample-4.dll +0 -0
  323. mindspore/swscale-6.dll +0 -0
  324. mindspore/tbbmalloc.dll +0 -0
  325. mindspore/tinyxml2.dll +0 -0
  326. mindspore/train/_utils.py +6 -2
  327. mindspore/train/amp.py +43 -20
  328. mindspore/train/callback/__init__.py +5 -5
  329. mindspore/train/callback/_checkpoint.py +3 -6
  330. mindspore/train/callback/_flops_collector.py +1 -1
  331. mindspore/train/callback/_landscape.py +0 -1
  332. mindspore/train/callback/_train_fault_tolerance.py +71 -13
  333. mindspore/train/data_sink.py +11 -2
  334. mindspore/train/dataset_helper.py +9 -0
  335. mindspore/train/model.py +51 -33
  336. mindspore/train/serialization.py +133 -111
  337. mindspore/train/summary/summary_record.py +13 -2
  338. mindspore/turbojpeg.dll +0 -0
  339. mindspore/utils/__init__.py +3 -2
  340. mindspore/utils/dryrun.py +0 -6
  341. mindspore/utils/runtime_execution_order_check.py +162 -78
  342. mindspore/utils/sdc_detect.py +68 -0
  343. mindspore/utils/utils.py +6 -9
  344. mindspore/vcmeta.dll +0 -0
  345. mindspore/vcruntime140.dll +0 -0
  346. mindspore/vcruntime140_1.dll +0 -0
  347. mindspore/version.py +1 -1
  348. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  349. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +352 -390
  350. mindspore/_deprecated/jit.py +0 -198
  351. mindspore/experimental/es/__init__.py +0 -22
  352. mindspore/experimental/es/embedding_service.py +0 -891
  353. mindspore/experimental/es/embedding_service_layer.py +0 -581
  354. mindspore/profiler/parser/__init__.py +0 -14
  355. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  356. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  357. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  358. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  359. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  360. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  361. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  362. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  363. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  364. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  365. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  366. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  367. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  368. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  369. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  370. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  371. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  372. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  373. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  374. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  375. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  376. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  377. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  378. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  379. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  380. mindspore/profiler/parser/container.py +0 -229
  381. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  382. mindspore/profiler/parser/flops_parser.py +0 -531
  383. mindspore/profiler/parser/framework_enum.py +0 -111
  384. mindspore/profiler/parser/framework_parser.py +0 -464
  385. mindspore/profiler/parser/framework_struct.py +0 -61
  386. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  387. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  388. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  389. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  390. mindspore/profiler/parser/hccl_parser.py +0 -573
  391. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  392. mindspore/profiler/parser/integrator.py +0 -526
  393. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  394. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  395. mindspore/profiler/parser/minddata_parser.py +0 -186
  396. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  397. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  398. mindspore/profiler/parser/optime_parser.py +0 -250
  399. mindspore/profiler/parser/profiler_info.py +0 -213
  400. mindspore/profiler/parser/step_trace_parser.py +0 -666
  401. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  402. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  403. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/experimental/optim/optimizer.py CHANGED
@@ -1,22 +1,12 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# The code implementation refers to the following files from pytorch:
+# - https://github.com/pytorch/pytorch/blob/v1.13.0/torch/optim/optimizer.py
+# Additional modifications are made by Huawei Technologies Co., Ltd in 2023.
 # ============================================================================
 """optimizer"""
 from __future__ import absolute_import
 from collections import defaultdict
 from typing import Iterable
-from mindspore.ops import functional as F, composite as C, operations as P
+from mindspore import ops

 from mindspore.nn.cell import Cell
 from mindspore.common.parameter import Parameter, ParameterTuple
@@ -98,7 +88,7 @@ class Optimizer(Cell):
         self.param_groups = []
         self.parameters = []
         self.lrs = []
-        self.map_ = C.Map()
+        self.map_ = ops.Map()
         self.group_start_id = [0]
         if not isinstance(param_groups[0], dict):
             param_groups = [{'params': param_groups}]
@@ -106,7 +96,7 @@ class Optimizer(Cell):
         for param_group in param_groups:
             self.add_param_group(param_group)
         self.parameters = ParameterTuple(self.parameters)
-        self.hyper_map = C.HyperMap()
+        self.hyper_map = ops.HyperMap()
         self.enable_tuple_broaden = True

     def __repr__(self):
@@ -167,7 +157,7 @@ class Optimizer(Cell):
         """Apply weight decay."""
         if weight_decay != 0.:
             weight_decay = Tensor(weight_decay, mstype.float32)
-            gradients = self.map_(F.partial(_apply_decay, weight_decay), params, gradients)
+            gradients = self.map_(ops.partial(_apply_decay, weight_decay), params, gradients)
         return gradients

     def _preprocess_param_group(self, param_group):
@@ -228,18 +218,18 @@ class Optimizer(Cell):
     def construct(self, *hyper_params):
         raise NotImplementedError

-op_add = P.AddN()
-op_gather = P.Gather()
-op_mul = P.Mul()
+op_add = ops.AddN()
+op_gather = ops.Gather()
+op_mul = ops.Mul()

-_apply_decay = C.MultitypeFuncGraph("apply_decay")
+_apply_decay = ops.MultitypeFuncGraph("apply_decay")


 @_apply_decay.register("Tensor", "Tensor", "RowTensor")
 def _tensor_apply_decay_with_sparse(weight_decay, weight, gradient):
     """Get grad with weight_decay."""
     indices = gradient.indices
-    values = op_add((op_gather(weight, indices, 0) * F.cast(weight_decay, F.dtype(weight)), gradient.values))
+    values = op_add((op_gather(weight, indices, 0) * ops.cast(weight_decay, ops.dtype(weight)), gradient.values))
     shape = gradient.dense_shape
     return RowTensorInner(indices, values, shape)

@@ -247,7 +237,7 @@ def _tensor_apply_decay_with_sparse(weight_decay, weight, gradient):
 @_apply_decay.register("Tensor", "Tensor", "Tensor")
 def _tensor_apply_decay(weight_decay, weight, gradient):
     """Get grad with weight_decay."""
-    return op_add((op_mul(weight, F.cast(weight_decay, F.dtype(weight))), gradient))
+    return op_add((op_mul(weight, ops.cast(weight_decay, ops.dtype(weight))), gradient))


 def check_not_less_than(arg_value, arg_name, prim, value=0.0):
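
Note: the recurring change across this and the following experimental/optim files is the retirement of the three legacy import aliases (functional as F, composite as C, operations as P) in favor of the unified public mindspore.ops namespace. A minimal sketch of the two spellings, using only identifiers that appear in the hunks above (both resolve to the same primitives):

    # Old style (2.6.0):
    #   from mindspore.ops import functional as F, composite as C, operations as P
    #   F.cast(...), C.MultitypeFuncGraph(...), P.AddN()
    # New style (2.7.0rc1):
    from mindspore import ops

    op_add = ops.AddN()                                   # was P.AddN()
    _apply_decay = ops.MultitypeFuncGraph("apply_decay")  # was C.MultitypeFuncGraph(...)
    # ops.cast, ops.dtype, ops.partial and ops.assign cover the old F.* helpers.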
mindspore/experimental/optim/radam.py CHANGED
@@ -1,32 +1,22 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# The code implementation refers to the following files from pytorch:
+# - https://github.com/pytorch/pytorch/blob/v1.13.0/torch/optim/radam.py
+# Additional modifications are made by Huawei Technologies Co., Ltd in 2023.
 # ============================================================================
 """radam"""
 from __future__ import absolute_import

-from mindspore.ops import functional as F, composite as C, operations as P
+from mindspore import ops
 from mindspore.common import Tensor, Parameter
 import mindspore.common.dtype as mstype
 from mindspore import _checkparam as validator
 from mindspore.experimental.optim.optimizer import Optimizer, check_not_less_than, check_not_less_than_without_equal
 from mindspore import jit

-_radam_opt = C.MultitypeFuncGraph("radam_opt")
+_radam_opt = ops.MultitypeFuncGraph("radam_opt")

-op_pow = P.Pow()
-op_sqrt = P.Sqrt()
-op_cast = P.Cast()
+op_pow = ops.Pow()
+op_sqrt = ops.Sqrt()
+op_cast = ops.Cast()


 @_radam_opt.register("Number", "Number", "Number", "Tensor", "Number", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
@@ -35,17 +25,17 @@ def _tensor_run_opt(beta1, beta2, eps, lr, rho_inf, rho_t, bias_correction1, bia
                     exp_avg_sq):
     """Apply radam optimizer to the weight parameter."""

-    F.assign(exp_avg, exp_avg * beta1 + grad * (1 - beta1))
-    F.assign(exp_avg_sq, exp_avg_sq * beta2 + grad * grad * (1 - beta2))
+    ops.assign(exp_avg, exp_avg * beta1 + grad * (1 - beta1))
+    ops.assign(exp_avg_sq, exp_avg_sq * beta2 + grad * grad * (1 - beta2))
     bias_corrected_exp_avg = exp_avg / bias_correction1

     if rho_t > 5.0:
         rect = op_sqrt((rho_t - 4) * (rho_t - 2) * rho_inf / ((rho_inf - 4) * (rho_inf - 2) * rho_t))
         exp_avg_sq_sqrt = op_sqrt(exp_avg_sq) + eps
         adaptive_lr = op_sqrt(bias_correction2) / exp_avg_sq_sqrt
-        F.assign(param, param - bias_corrected_exp_avg * lr * adaptive_lr * rect)
+        ops.assign(param, param - bias_corrected_exp_avg * lr * adaptive_lr * rect)
     else:
-        F.assign(param, param - bias_corrected_exp_avg * lr)
+        ops.assign(param, param - bias_corrected_exp_avg * lr)

     return True

@@ -89,6 +79,9 @@ class RAdam(Optimizer):
         &\rule{180mm}{0.4pt}
     \end{align*}

+    For more details about RAdam algorithm, please refer to `On the Variance of the Adaptive Learning Rate and Beyond
+    <https://arxiv.org/abs/1908.03265>`_.
+
     .. warning::
         This is an experimental optimizer API that is subject to change.
         This module must be used with lr scheduler module in `LRScheduler Class
@@ -155,7 +148,7 @@ class RAdam(Optimizer):
         self.exp_avg = self.parameters.clone(prefix="exp_avg", init='zeros')
         self.exp_avg_sq = self.parameters.clone(prefix="exp_avg_sq", init='zeros')
         self.increase_tensor = Tensor(1, mstype.int32)
-        self.assignadd = P.AssignAdd()
+        self.assignadd = ops.AssignAdd()

     @jit(backend="ms_backend")
     def implementation(self, lr, beta1, beta2, weight_decay, eps, start_id, end_id, gradients):
@@ -175,7 +168,8 @@

         rho_t = rho_inf - right

-        self.hyper_map(F.partial(_radam_opt, beta1, beta2, eps, lr, rho_inf, rho_t, bias_correction1, bias_correction2),
+        self.hyper_map(ops.partial(_radam_opt, beta1, beta2, eps, lr, rho_inf,
+                                   rho_t, bias_correction1, bias_correction2),
                        params, grads, exp_avg, exp_avg_sq)
         return True
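
Note: written out as math, the rectified branch above (taken when rho_t > 5.0) is the standard RAdam update. With m_t = exp_avg, v_t = exp_avg_sq, and bias corrections 1 - beta_i^t, the code computes (the rho_t recursion sits outside this hunk, so its definition below is taken from the cited paper rather than from the diff):

    \rho_\infty = \frac{2}{1 - \beta_2} - 1, \qquad
    \rho_t = \rho_\infty - \frac{2 t \beta_2^t}{1 - \beta_2^t}

    r_t = \sqrt{\frac{(\rho_t - 4)(\rho_t - 2)\,\rho_\infty}
                     {(\rho_\infty - 4)(\rho_\infty - 2)\,\rho_t}}, \qquad
    \theta_t = \theta_{t-1} - \mathrm{lr} \cdot r_t \cdot
               \frac{\sqrt{1 - \beta_2^t}}{\sqrt{v_t} + \epsilon} \cdot
               \frac{m_t}{1 - \beta_1^t}

When rho_t <= 5.0 the variance term is treated as intractable and the update falls back to theta_t = theta_{t-1} - lr * m_t / (1 - beta_1^t), exactly as in the else branch.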
 
mindspore/experimental/optim/rmsprop.py CHANGED
@@ -1,48 +1,37 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# The code implementation refers to the following files from pytorch:
+# - https://github.com/pytorch/pytorch/blob/v1.13.0/torch/optim/rmsprop.py
+# Additional modifications are made by Huawei Technologies Co., Ltd in 2023.
 # ============================================================================
 """rmsprop"""
 from __future__ import absolute_import

-from mindspore.ops import functional as F, composite as C, operations as P
 import mindspore.common.dtype as mstype
 from mindspore.experimental.optim.optimizer import Optimizer, check_not_less_than, check_not_less_than_without_equal
 from mindspore import ops
 from mindspore import jit

-_rmsprop_opt = C.MultitypeFuncGraph("rmsprop_opt")
+_rmsprop_opt = ops.MultitypeFuncGraph("rmsprop_opt")

-op_mul = P.Mul()
-op_sqrt = P.Sqrt()
+op_mul = ops.Mul()
+op_sqrt = ops.Sqrt()


 @_rmsprop_opt.register("Bool", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
 def _run_rmsprop_opt(centered, alpha, eps, momentum, lr, weight, mean_square, mean_grad, mom, grad):
     """Apply rmsprop optimizer to the weight parameter using dynamic learning rate."""
-    F.assign(mean_square, ops.addcmul(op_mul(mean_square, alpha), grad, grad, 1 - alpha))
+    ops.assign(mean_square, ops.addcmul(op_mul(mean_square, alpha), grad, grad, 1 - alpha))

     if centered:
-        F.assign(mean_grad, op_mul(mean_grad, alpha) + op_mul(grad, 1 - alpha))
+        ops.assign(mean_grad, op_mul(mean_grad, alpha) + op_mul(grad, 1 - alpha))
         avg = op_sqrt(ops.addcmul(mean_square, mean_grad, mean_grad, -1.)) + eps
     else:
         avg = op_sqrt(mean_square) + eps

     if momentum > 0:
-        F.assign(mom, op_mul(mom, momentum) + grad / avg)
-        F.assign(weight, weight - mom * lr)
+        ops.assign(mom, op_mul(mom, momentum) + grad / avg)
+        ops.assign(weight, weight - mom * lr)
     else:
-        F.assign(weight, weight - lr * grad / avg)
+        ops.assign(weight, weight - lr * grad / avg)
     return True


@@ -124,7 +113,7 @@ class RMSprop(Optimizer):
         self.mean_grad = self.parameters.clone(prefix="mean_grad", init='zeros')
         self.mean_square = self.parameters.clone(prefix="mean_square", init='zeros')
         self.moment = self.parameters.clone(prefix="moment", init='zeros')
-        self.op_cast = P.Cast()
+        self.op_cast = ops.Cast()

     @jit
     def implementation(self, group_id, lr, gradients, maximize, weight_decay, centered, alpha, eps, momentum):
@@ -132,12 +121,12 @@ class RMSprop(Optimizer):
         start_id = self.group_start_id[group_id]
         end_id = self.group_start_id[group_id + 1]
         params = self.parameters[start_id: end_id]
-        grads = tuple([grad if not maximize else F.neg(grad) for grad in gradients[start_id: end_id]])
+        grads = tuple([grad if not maximize else ops.neg(grad) for grad in gradients[start_id: end_id]])
         grads = self._decay_weight(weight_decay, params, grads)
         mean_grad = self.mean_grad[start_id: end_id]
         mean_square = self.mean_square[start_id: end_id]
         moment = self.moment[start_id: end_id]
-        self.hyper_map(F.partial(_rmsprop_opt, centered, alpha, eps, momentum, lr),
+        self.hyper_map(ops.partial(_rmsprop_opt, centered, alpha, eps, momentum, lr),
                        params, mean_square, mean_grad, moment, grads)
         return True
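
Note: ops.addcmul(x, t1, t2, v) computes x + v * t1 * t2, so the graph registered above is the standard (optionally centered) RMSprop rule. A scalar restatement for checking the algebra, written as plain Python rather than the MindSpore kernel:

    def rmsprop_step(w, grad, mean_square, mean_grad, mom,
                     lr=0.01, alpha=0.99, eps=1e-8, momentum=0.0, centered=False):
        # mean_square <- alpha * mean_square + (1 - alpha) * grad^2
        mean_square = alpha * mean_square + (1 - alpha) * grad * grad
        if centered:
            mean_grad = alpha * mean_grad + (1 - alpha) * grad
            avg = (mean_square - mean_grad * mean_grad) ** 0.5 + eps
        else:
            avg = mean_square ** 0.5 + eps
        if momentum > 0:
            mom = momentum * mom + grad / avg
            w = w - lr * mom
        else:
            w = w - lr * grad / avg
        return w, mean_square, mean_grad, mom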
 
mindspore/experimental/optim/rprop.py CHANGED
@@ -1,37 +1,26 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# The code implementation refers to the following files from pytorch:
+# - https://github.com/pytorch/pytorch/blob/v1.13.0/torch/optim/rprop.py
+# Additional modifications are made by Huawei Technologies Co., Ltd in 2023.
 # ============================================================================
 """rprop"""
 from __future__ import absolute_import

-from mindspore.ops import functional as F, composite as C, operations as P
+from mindspore import ops
 from mindspore.common import Tensor, Parameter
 import mindspore.common.dtype as mstype
 from mindspore import _checkparam as validator
 from mindspore.experimental.optim.optimizer import Optimizer, check_not_less_than_without_equal
-from mindspore import ops
 from mindspore import jit

-_rprop_opt = C.MultitypeFuncGraph("rprop_opt")
+_rprop_opt = ops.MultitypeFuncGraph("rprop_opt")

-op_sign = P.Sign()
-op_fill = P.FillV2()
-op_assign = P.Assign()
-op_assignadd = P.AssignAdd()
-op_cast = P.Cast()
-op_select = P.Select()
-op_oneslike = P.OnesLike()
+op_sign = ops.Sign()
+op_fill = ops.FillV2()
+op_assign = ops.Assign()
+op_assignadd = ops.AssignAdd()
+op_cast = ops.Cast()
+op_select = ops.Select()
+op_oneslike = ops.OnesLike()


 @_rprop_opt.register("Tensor", "Tensor", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
@@ -131,7 +120,7 @@ class Rprop(Optimizer):
         self.step_size = self.parameters.clone(prefix="step_size", init='zeros')
         self.step_t = Parameter(Tensor(0, mstype.int32), "step_t")
         self.increase_tensor = Tensor(1, mstype.int32)
-        self.op_cast = P.Cast()
+        self.op_cast = ops.Cast()

     @jit(backend="ms_backend")
     def implementation(self, etaminus, etaplus, group_id, lr, gradients, maximize, step_size_min, step_size_max):
@@ -141,10 +130,10 @@ class Rprop(Optimizer):
         end_id = self.group_start_id[group_id + 1]

         params = self.parameters[start_id: end_id]
-        grads = tuple([grad if not maximize else F.neg(grad) for grad in gradients[start_id: end_id]])
+        grads = tuple([grad if not maximize else ops.neg(grad) for grad in gradients[start_id: end_id]])
         prev = self.prev[start_id: end_id]
         step_size = self.step_size[start_id: end_id]
-        self.hyper_map(F.partial(_rprop_opt, etaminus, etaplus, step_size_min, step_size_max, self.step_t, lr),
+        self.hyper_map(ops.partial(_rprop_opt, etaminus, etaplus, step_size_min, step_size_max, self.step_t, lr),
                        params, prev, step_size, grads)
         return True
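
Note: the hunks above only show the renamed primitives (op_sign, op_select, the step-size clamps), not the update rule itself. For orientation, the textbook sign-based Rprop step that these pieces implement looks like this as a runnable scalar sketch (a restatement of the classic algorithm, not the MindSpore kernel):

    def rprop_step(w, grad, prev_grad, step,
                   etaminus=0.5, etaplus=1.2, step_min=1e-6, step_max=50.0):
        if grad * prev_grad > 0:            # same sign: grow the step size
            step = min(step * etaplus, step_max)
        elif grad * prev_grad < 0:          # sign flipped: shrink, skip this update
            step = max(step * etaminus, step_min)
            grad = 0.0
        w -= step * ((grad > 0) - (grad < 0))  # w <- w - step * sign(grad)
        return w, grad, step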
 
mindspore/experimental/optim/sgd.py CHANGED
@@ -1,28 +1,18 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# The code implementation refers to the following files from pytorch:
+# - https://github.com/pytorch/pytorch/blob/v1.13.0/torch/optim/sgd.py
+# Additional modifications are made by Huawei Technologies Co., Ltd in 2023.
 # ============================================================================
 """sgd"""
 from __future__ import absolute_import

-from mindspore.ops import functional as F, composite as C, operations as P
+from mindspore import ops
 from mindspore.common.tensor import Tensor
 import mindspore.common.dtype as mstype
 from mindspore import _checkparam as Validator
 from mindspore.experimental.optim.optimizer import Optimizer
 from mindspore import jit

-_sgd_opt = C.MultitypeFuncGraph("sgd_opt")
+_sgd_opt = ops.MultitypeFuncGraph("sgd_opt")


 @_sgd_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
@@ -129,7 +119,7 @@ class SGD(Optimizer):
                              "equal to 0.0, but got 'momentum' {}, 'dampening' {}".format(momentum, dampening))
         self.accum = self.parameters.clone(prefix="accum", init='zeros')
         self.stat = self.parameters.clone(prefix="stat", init='ones')
-        self.op_cast = P.Cast()
+        self.op_cast = ops.Cast()

     @jit
     def implementation(self, momentum, lr, group_id, gradients, maximize, dampening, weight_decay, nesterov):
@@ -137,9 +127,9 @@ class SGD(Optimizer):
         start_id = self.group_start_id[group_id]
         end_id = self.group_start_id[group_id + 1]
         momentum = self.op_cast(momentum, mstype.float32)
-        opt = P.SGD(dampening, weight_decay, nesterov)
-        grads = tuple([grad if not maximize else F.neg(grad) for grad in gradients[start_id: end_id]])
-        self.hyper_map(F.partial(_sgd_opt, opt, momentum, lr), grads,
+        opt = ops.SGD(dampening, weight_decay, nesterov)
+        grads = tuple([grad if not maximize else ops.neg(grad) for grad in gradients[start_id: end_id]])
+        self.hyper_map(ops.partial(_sgd_opt, opt, momentum, lr), grads,
                        self.parameters[start_id: end_id], self.accum[start_id: end_id],
                        self.stat[start_id: end_id])
         return True
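
Note: for context, a minimal usage sketch of the torch-style optimizer API these files back. The constructor keywords follow the current MindSpore docs for mindspore.experimental.optim; the gradient-application call is an assumption based on those docs (the optimizer is a Cell invoked with the gradients):

    from mindspore import nn
    from mindspore.experimental import optim

    net = nn.Dense(4, 2)
    optimizer = optim.SGD(net.trainable_params(), lr=0.01, momentum=0.9)
    # In a train step one would compute grads (e.g. via mindspore.value_and_grad)
    # and then apply them with: optimizer(grads)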
mindspore/hal/__init__.py CHANGED
@@ -19,13 +19,13 @@ MindSpore abstracts the preceding modules from different backends and allows use
 resources at the Python layer. Currently, these interfaces take effect only in PyNative mode.
 """

-from mindspore.hal.device import is_initialized, is_available, device_count, get_device_capability,\
+from mindspore.hal.device import is_initialized, is_available, device_count, get_device_capability, \
     get_device_properties, get_device_name, get_arch_list
-from mindspore.hal.stream import Stream, synchronize, set_cur_stream, current_stream, default_stream,\
+from mindspore.hal.stream import Stream, synchronize, set_cur_stream, current_stream, default_stream, \
     communication_stream, StreamCtx
 from mindspore.hal.event import Event
-from mindspore.hal.memory import memory_stats, memory_reserved, max_memory_reserved, empty_cache,\
-    reset_peak_memory_stats, memory_summary, memory_allocated,\
+from mindspore.hal.memory import memory_stats, memory_reserved, max_memory_reserved, empty_cache, \
+    reset_peak_memory_stats, memory_summary, memory_allocated, \
     max_memory_allocated, reset_max_memory_reserved, reset_max_memory_allocated

 __all__ = [
mindspore/hal/contiguous_tensors_handle.py CHANGED
@@ -27,7 +27,7 @@ def combine_tensor_list_contiguous(tensor_list, enable_mem_align=True):
     Return a contiguous memory handle where contiguous memory has been requested and slicing functionality is provided.

     Args:
-        tensor_list (list[Tensor], Tuple[Tensor]): The tensor list to be stored.
+        tensor_list (list[Tensor], tuple[Tensor]): The tensor list to be stored.
         enable_mem_align (bool, optional): Whether to enable the memory alignment function.
             False is not supported. Default ``True`` .

@@ -57,7 +57,7 @@ class ContiguousTensorsHandle:
     ContiguousTensorsHandle is a handle manage continuous memory.

     Args:
-        tensor_list (list[Tensor], Tuple[Tensor]): The tensor list to be stored.
+        tensor_list (list[Tensor], tuple[Tensor]): The tensor list to be stored.
         enable_mem_align (bool, optional): Whether to enable the memory alignment function.
             False is not supported. Default ``True`` .
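
Note: a small usage sketch of the signature documented above (the function name, parameter, and default are taken from the docstring shown; running it assumes a device backend that supports contiguous handles):

    import mindspore as ms
    from mindspore.hal.contiguous_tensors_handle import combine_tensor_list_contiguous

    tensors = [ms.Tensor([1.0, 2.0]), ms.Tensor([3.0, 4.0])]
    handle = combine_tensor_list_contiguous(tensors, enable_mem_align=True)
    # The returned handle owns one contiguous block and exposes slicing over it.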
 
mindspore/hal/memory.py CHANGED
@@ -144,6 +144,7 @@ def _is_initialized(device_target):
         return False
     return _device_context.initialized()

+
 @_check_inputs_validation
 def empty_cache():
     """
mindspore/include/api/cell.h CHANGED
@@ -33,8 +33,22 @@ class MS_API CellBase {
  public:
   CellBase() = default;
   virtual ~CellBase() = default;
+  /// \brief Construct using inputs.
+  ///
+  /// \param[in] inputs Vector of inputs.
+  ///
+  /// \return Vector of outputs.
   virtual std::vector<Output> Construct(const std::vector<Input> &inputs) { return {}; }
+  /// \brief Clone a cellbase.
+  ///
+  /// \return Shared pointer of Cellbase.
   virtual std::shared_ptr<CellBase> Clone() const = 0;
+  /// \brief Run a cellbase.
+  ///
+  /// \param[in] inputs Vector of MSTensor as inputs.
+  /// \param[in] outputs Vector of MSTensor as outputs.
+  ///
+  /// \return Status of the operation.
   virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) { return kSuccess; }
   std::vector<Output> operator()(const std::vector<Input> &inputs) const;
 };
@@ -56,12 +70,34 @@ class MS_API GraphCell final : public Cell<GraphCell> {
   explicit GraphCell(const Graph &graph);
   explicit GraphCell(Graph &&graph);
   explicit GraphCell(const std::shared_ptr<Graph> &graph);
-
+  /// \brief Set a context.
+  ///
+  /// \param[in] context Context to be set.
   void SetContext(const std::shared_ptr<Context> &context);
+  /// \brief Get back the graph.
+  ///
+  /// \return Graph of the graphcell.
   const std::shared_ptr<Graph> &GetGraph() const { return graph_; }
+  /// \brief Run the graphcell.
+  ///
+  /// \param[in] inputs Vector of MSTensor as inputs.
+  /// \param[in] outputs Vector of MSTensor as outputs.
+  ///
+  /// \return Status of the operation.
   Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;
+  /// \brief Get the inputs.
+  ///
+  /// \return Inputs.
   std::vector<MSTensor> GetInputs();
+  /// \brief Get the outputs.
+  ///
+  /// \return Outputs.
   std::vector<MSTensor> GetOutputs();
+  /// \brief Load the device.
+  ///
+  /// \param[in] device_id Device id to be loaded.
+  ///
+  /// \return Status of the operation.
   Status Load(uint32_t device_id);

  private:
mindspore/include/api/delegate.h CHANGED
@@ -130,11 +130,21 @@ class Delegate : public IDelegate<LiteDelegateGraph, kernel::Kernel, kernel::Ker
   /// \return Status. If Status is kLiteNotSupport, the program will return to the MindSpore Lite inner inference.
   virtual Status Init() = 0;

+  /// \brief Create kernel.
+  ///
+  /// \param[in] node The kernel to be created.
+  ///
+  /// \return Created kernel.
   std::shared_ptr<kernel::Kernel> CreateKernel(const std::shared_ptr<kernel::Kernel> &node) override {
     // return node as kernel since they are same one.
     return node;
   }

+  /// \brief Check if the node is delegate node.
+  ///
+  /// \param[in] node The kernel to verify.
+  ///
+  /// \return True if the node is delegate.
   bool IsDelegateNode(const std::shared_ptr<kernel::Kernel> &node) override { return false; }

   /// \brief Replace the nodes in model with delegate nodes, delegate will create kernels by its delegate nodes.
mindspore/include/api/model.h CHANGED
@@ -370,6 +370,9 @@ class MS_API Model {
   /// \return The value of the model info associated with the given key.
   inline std::string GetModelInfo(const std::string &key);

+  /// \brief Finish the model. (Only used for mindspore_lite's ascend backend.)
+  ///
+  /// \return Status of the operation.
   // release inference resourcec, only used for mindspore_lite's ascend backend now.
   Status Finalize();
mindspore/include/api/types.h CHANGED
@@ -243,14 +243,14 @@ class MS_API MSTensor {

   /// \brief Get the boolean value that indicates whether the MSTensor equals tensor.
   ///
-  /// \param[in] another MSTensor.
+  /// \param[in] tensor MSTensor.
   ///
   /// \return The boolean value that indicates whether the MSTensor equals tensor.
   bool operator==(const MSTensor &tensor) const;

   /// \brief Get the boolean value that indicates whether the MSTensor not equals tensor.
   ///
-  /// \param[in] another MSTensor.
+  /// \param[in] tensor MSTensor.
   ///
   /// \return The boolean value that indicates whether the MSTensor not equals tensor.
   bool operator!=(const MSTensor &tensor) const;