mindspore 2.6.0rc1__cp310-cp310-win_amd64.whl → 2.7.0rc1__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (407)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +1 -1
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +40 -9
  9. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  10. mindspore/_extends/optimize/cell_utils.py +96 -0
  11. mindspore/_extends/parse/__init__.py +2 -2
  12. mindspore/_extends/parse/compile_config.py +44 -22
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  14. mindspore/_extends/parse/parser.py +37 -62
  15. mindspore/_extends/parse/resources.py +39 -0
  16. mindspore/_extends/parse/standard_method.py +43 -13
  17. mindspore/_extends/parse/trope.py +8 -1
  18. mindspore/_extends/pijit/__init__.py +1 -2
  19. mindspore/amp.py +4 -4
  20. mindspore/atlprov.dll +0 -0
  21. mindspore/avcodec-59.dll +0 -0
  22. mindspore/avdevice-59.dll +0 -0
  23. mindspore/avfilter-8.dll +0 -0
  24. mindspore/avformat-59.dll +0 -0
  25. mindspore/avutil-57.dll +0 -0
  26. mindspore/boost/adasum.py +1 -1
  27. mindspore/boost/boost_cell_wrapper.py +4 -4
  28. mindspore/c1.dll +0 -0
  29. mindspore/c1xx.dll +0 -0
  30. mindspore/c2.dll +0 -0
  31. mindspore/common/__init__.py +27 -2
  32. mindspore/common/_grad_function.py +2 -1
  33. mindspore/common/_pijit_context.py +28 -7
  34. mindspore/common/_stub_tensor.py +1 -209
  35. mindspore/common/_tensor_cpp_method.py +1 -1
  36. mindspore/common/_tensor_docs.py +77 -16
  37. mindspore/common/api.py +238 -113
  38. mindspore/common/dtype.py +21 -11
  39. mindspore/common/dump.py +10 -15
  40. mindspore/common/generator.py +5 -3
  41. mindspore/common/hook_handle.py +11 -2
  42. mindspore/common/jit_config.py +1 -1
  43. mindspore/common/jit_trace.py +84 -105
  44. mindspore/common/parameter.py +26 -12
  45. mindspore/common/recompute.py +3 -3
  46. mindspore/common/sparse_tensor.py +0 -3
  47. mindspore/common/symbol.py +0 -1
  48. mindspore/common/tensor.py +81 -81
  49. mindspore/communication/_comm_helper.py +46 -4
  50. mindspore/communication/management.py +79 -7
  51. mindspore/context.py +58 -40
  52. mindspore/dataset/core/config.py +3 -3
  53. mindspore/dataset/engine/datasets.py +20 -7
  54. mindspore/dataset/engine/datasets_user_defined.py +33 -3
  55. mindspore/dataset/engine/iterators.py +2 -2
  56. mindspore/dataset/engine/obs/config_loader.py +2 -2
  57. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  58. mindspore/dataset/transforms/py_transforms.py +7 -3
  59. mindspore/dataset/transforms/transforms.py +7 -3
  60. mindspore/dataset/vision/validators.py +1 -0
  61. mindspore/device_context/ascend/device.py +1 -1
  62. mindspore/device_context/gpu/__init__.py +2 -2
  63. mindspore/device_context/gpu/device.py +1 -1
  64. mindspore/device_context/gpu/op_precision.py +4 -2
  65. mindspore/device_context/gpu/op_tuning.py +6 -3
  66. mindspore/device_manager.py +16 -9
  67. mindspore/dnnl.dll +0 -0
  68. mindspore/dpcmi.dll +0 -0
  69. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
  70. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  71. mindspore/experimental/optim/adadelta.py +13 -20
  72. mindspore/experimental/optim/adagrad.py +15 -22
  73. mindspore/experimental/optim/adam.py +17 -24
  74. mindspore/experimental/optim/adamax.py +14 -22
  75. mindspore/experimental/optim/adamw.py +28 -34
  76. mindspore/experimental/optim/asgd.py +15 -25
  77. mindspore/experimental/optim/lr_scheduler.py +27 -45
  78. mindspore/experimental/optim/nadam.py +14 -24
  79. mindspore/experimental/optim/optimizer.py +13 -23
  80. mindspore/experimental/optim/radam.py +18 -24
  81. mindspore/experimental/optim/rmsprop.py +14 -25
  82. mindspore/experimental/optim/rprop.py +15 -26
  83. mindspore/experimental/optim/sgd.py +9 -19
  84. mindspore/hal/__init__.py +4 -4
  85. mindspore/hal/contiguous_tensors_handle.py +2 -2
  86. mindspore/hal/memory.py +27 -7
  87. mindspore/include/api/cell.h +37 -1
  88. mindspore/include/api/delegate.h +10 -0
  89. mindspore/include/api/model.h +3 -0
  90. mindspore/include/api/types.h +2 -2
  91. mindspore/include/c_api/model_c.h +0 -58
  92. mindspore/include/c_api/tensor_c.h +0 -26
  93. mindspore/include/dataset/vision_ascend.h +1 -1
  94. mindspore/jpeg62.dll +0 -0
  95. mindspore/mindrecord/tools/cifar10.py +60 -11
  96. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  97. mindspore/mindspore_backend_common.dll +0 -0
  98. mindspore/mindspore_backend_manager.dll +0 -0
  99. mindspore/mindspore_common.dll +0 -0
  100. mindspore/mindspore_core.dll +0 -0
  101. mindspore/mindspore_cpu_res_manager.dll +0 -0
  102. mindspore/mindspore_dump.dll +0 -0
  103. mindspore/mindspore_frontend.dll +0 -0
  104. mindspore/mindspore_glog.dll +0 -0
  105. mindspore/mindspore_memory_pool.dll +0 -0
  106. mindspore/mindspore_ms_backend.dll +0 -0
  107. mindspore/mindspore_ops.dll +0 -0
  108. mindspore/mindspore_ops_host.dll +0 -0
  109. mindspore/mindspore_ops_kernel_common.dll +0 -0
  110. mindspore/mindspore_profiler.dll +0 -0
  111. mindspore/mindspore_pyboost.dll +0 -0
  112. mindspore/mindspore_pynative.dll +0 -0
  113. mindspore/mindspore_res_manager.dll +0 -0
  114. mindspore/mindspore_runtime_pipeline.dll +0 -0
  115. mindspore/mint/__init__.py +6 -46
  116. mindspore/mint/distributed/__init__.py +1 -0
  117. mindspore/mint/distributed/distributed.py +212 -9
  118. mindspore/mint/nn/__init__.py +1 -1
  119. mindspore/mint/nn/functional.py +53 -6
  120. mindspore/mint/nn/layer/_functions.py +164 -294
  121. mindspore/mint/nn/layer/activation.py +8 -6
  122. mindspore/mint/nn/layer/conv.py +137 -101
  123. mindspore/mint/nn/layer/normalization.py +8 -22
  124. mindspore/mint/optim/adam.py +19 -18
  125. mindspore/mint/optim/adamw.py +14 -8
  126. mindspore/mint/optim/sgd.py +5 -5
  127. mindspore/msobj140.dll +0 -0
  128. mindspore/mspdb140.dll +0 -0
  129. mindspore/mspdbcore.dll +0 -0
  130. mindspore/mspdbst.dll +0 -0
  131. mindspore/mspft140.dll +0 -0
  132. mindspore/msvcdis140.dll +0 -0
  133. mindspore/msvcp140_1.dll +0 -0
  134. mindspore/msvcp140_2.dll +0 -0
  135. mindspore/msvcp140_atomic_wait.dll +0 -0
  136. mindspore/msvcp140_codecvt_ids.dll +0 -0
  137. mindspore/nn/cell.py +328 -502
  138. mindspore/nn/grad/cell_grad.py +11 -12
  139. mindspore/nn/layer/activation.py +32 -34
  140. mindspore/nn/layer/basic.py +67 -64
  141. mindspore/nn/layer/channel_shuffle.py +4 -4
  142. mindspore/nn/layer/combined.py +4 -2
  143. mindspore/nn/layer/conv.py +117 -110
  144. mindspore/nn/layer/dense.py +9 -7
  145. mindspore/nn/layer/embedding.py +50 -52
  146. mindspore/nn/layer/image.py +37 -39
  147. mindspore/nn/layer/math.py +111 -112
  148. mindspore/nn/layer/normalization.py +56 -44
  149. mindspore/nn/layer/pooling.py +58 -63
  150. mindspore/nn/layer/rnn_cells.py +33 -33
  151. mindspore/nn/layer/rnns.py +56 -56
  152. mindspore/nn/layer/thor_layer.py +74 -73
  153. mindspore/nn/layer/transformer.py +11 -1
  154. mindspore/nn/learning_rate_schedule.py +20 -20
  155. mindspore/nn/loss/loss.py +79 -81
  156. mindspore/nn/optim/adam.py +3 -3
  157. mindspore/nn/optim/adasum.py +2 -2
  158. mindspore/nn/optim/asgd.py +2 -0
  159. mindspore/nn/optim/optimizer.py +1 -1
  160. mindspore/nn/optim/thor.py +2 -2
  161. mindspore/nn/probability/distribution/exponential.py +2 -1
  162. mindspore/nn/probability/distribution/poisson.py +2 -1
  163. mindspore/nn/sparse/sparse.py +3 -3
  164. mindspore/nn/wrap/cell_wrapper.py +34 -37
  165. mindspore/nn/wrap/grad_reducer.py +37 -37
  166. mindspore/nn/wrap/loss_scale.py +72 -74
  167. mindspore/numpy/array_creations.py +5 -5
  168. mindspore/numpy/fft.py +1 -1
  169. mindspore/numpy/math_ops.py +5 -5
  170. mindspore/opencv_core452.dll +0 -0
  171. mindspore/opencv_imgcodecs452.dll +0 -0
  172. mindspore/opencv_imgproc452.dll +0 -0
  173. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  174. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  175. mindspore/ops/_vmap/vmap_array_ops.py +31 -13
  176. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  177. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
  178. mindspore/ops/auto_generate/gen_extend_func.py +23 -141
  179. mindspore/ops/auto_generate/gen_ops_def.py +727 -321
  180. mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
  181. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  182. mindspore/ops/composite/__init__.py +10 -0
  183. mindspore/ops/composite/base.py +8 -4
  184. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  185. mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
  186. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  187. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  188. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  189. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  190. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  191. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  192. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  193. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  194. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  195. mindspore/ops/function/__init__.py +3 -1
  196. mindspore/ops/function/_add_attr_func.py +11 -6
  197. mindspore/ops/function/array_func.py +9 -96
  198. mindspore/ops/function/debug_func.py +4 -3
  199. mindspore/ops/function/grad/grad_func.py +1 -1
  200. mindspore/ops/function/math_func.py +33 -540
  201. mindspore/ops/function/nn_func.py +28 -74
  202. mindspore/ops/function/other_func.py +4 -1
  203. mindspore/ops/function/random_func.py +44 -5
  204. mindspore/ops/function/vmap_func.py +2 -1
  205. mindspore/ops/functional.py +2 -3
  206. mindspore/ops/functional_overload.py +571 -6
  207. mindspore/ops/op_info_register.py +21 -0
  208. mindspore/ops/operations/__init__.py +16 -11
  209. mindspore/ops/operations/_custom_ops_utils.py +689 -34
  210. mindspore/ops/operations/_inner_ops.py +3 -6
  211. mindspore/ops/operations/_sequence_ops.py +1 -1
  212. mindspore/ops/operations/array_ops.py +2 -2
  213. mindspore/ops/operations/comm_ops.py +185 -26
  214. mindspore/ops/operations/custom_ops.py +294 -174
  215. mindspore/ops/operations/debug_ops.py +59 -4
  216. mindspore/ops/operations/image_ops.py +13 -13
  217. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  218. mindspore/ops/operations/math_ops.py +3 -4
  219. mindspore/ops/operations/nn_ops.py +7 -39
  220. mindspore/ops/primitive.py +6 -10
  221. mindspore/ops/tensor_method.py +47 -8
  222. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  223. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  224. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  225. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  226. mindspore/ops_generate/common/base_generator.py +14 -0
  227. mindspore/ops_generate/common/gen_constants.py +8 -3
  228. mindspore/ops_generate/common/gen_utils.py +0 -19
  229. mindspore/ops_generate/common/op_proto.py +11 -4
  230. mindspore/ops_generate/common/template.py +88 -11
  231. mindspore/ops_generate/gen_ops.py +1 -1
  232. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  233. mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
  234. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  235. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  236. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  237. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  238. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  239. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  240. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  241. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  242. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  243. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  244. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  245. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  246. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  247. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  248. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  249. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  250. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  251. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  252. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  253. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  254. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  255. mindspore/parallel/_auto_parallel_context.py +11 -8
  256. mindspore/parallel/_cell_wrapper.py +113 -45
  257. mindspore/parallel/_parallel_serialization.py +1 -1
  258. mindspore/parallel/_ps_context.py +4 -6
  259. mindspore/parallel/_tensor.py +167 -12
  260. mindspore/parallel/_transformer/moe.py +1 -1
  261. mindspore/parallel/_transformer/transformer.py +13 -8
  262. mindspore/parallel/auto_parallel.py +14 -7
  263. mindspore/parallel/checkpoint_convert.py +3 -3
  264. mindspore/parallel/checkpoint_transform.py +11 -7
  265. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  266. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  267. mindspore/parallel/cluster/run.py +43 -4
  268. mindspore/parallel/function/__init__.py +8 -1
  269. mindspore/parallel/function/reshard_func.py +6 -7
  270. mindspore/parallel/nn/__init__.py +15 -2
  271. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  272. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  273. mindspore/parallel/shard.py +3 -4
  274. mindspore/parallel/transform_safetensors.py +463 -174
  275. mindspore/pgodb140.dll +0 -0
  276. mindspore/pgort140.dll +0 -0
  277. mindspore/profiler/__init__.py +2 -1
  278. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  279. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  280. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
  281. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  282. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  283. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  284. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  285. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  286. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  287. mindspore/profiler/analysis/task_manager.py +1 -1
  288. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  289. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  290. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  291. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  292. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  293. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  294. mindspore/profiler/common/constant.py +16 -0
  295. mindspore/profiler/common/profiler_context.py +25 -27
  296. mindspore/profiler/common/profiler_info.py +0 -16
  297. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  298. mindspore/profiler/common/profiler_output_path.py +23 -8
  299. mindspore/profiler/common/profiler_parameters.py +128 -35
  300. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  301. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  302. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  303. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  304. mindspore/profiler/dynamic_profiler.py +305 -314
  305. mindspore/profiler/envprofiler.py +12 -7
  306. mindspore/profiler/experimental_config.py +96 -6
  307. mindspore/profiler/mstx.py +33 -12
  308. mindspore/profiler/platform/__init__.py +2 -3
  309. mindspore/profiler/platform/npu_profiler.py +29 -19
  310. mindspore/profiler/profiler.py +35 -19
  311. mindspore/profiler/profiler_action_controller.py +64 -76
  312. mindspore/profiler/schedule.py +10 -4
  313. mindspore/rewrite/common/config.py +1 -0
  314. mindspore/rewrite/common/namer.py +1 -0
  315. mindspore/rewrite/common/namespace.py +1 -0
  316. mindspore/rewrite/node/node.py +31 -11
  317. mindspore/rewrite/parsers/assign_parser.py +1 -1
  318. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  319. mindspore/run_check/_check_version.py +7 -10
  320. mindspore/runtime/__init__.py +5 -5
  321. mindspore/runtime/event.py +10 -4
  322. mindspore/runtime/executor.py +60 -45
  323. mindspore/runtime/memory.py +30 -32
  324. mindspore/runtime/thread_bind_core.py +298 -164
  325. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  326. mindspore/swresample-4.dll +0 -0
  327. mindspore/swscale-6.dll +0 -0
  328. mindspore/tbbmalloc.dll +0 -0
  329. mindspore/tinyxml2.dll +0 -0
  330. mindspore/train/_utils.py +14 -4
  331. mindspore/train/amp.py +43 -20
  332. mindspore/train/callback/__init__.py +5 -5
  333. mindspore/train/callback/_checkpoint.py +3 -6
  334. mindspore/train/callback/_flops_collector.py +1 -1
  335. mindspore/train/callback/_landscape.py +0 -1
  336. mindspore/train/callback/_train_fault_tolerance.py +97 -16
  337. mindspore/train/data_sink.py +11 -2
  338. mindspore/train/dataset_helper.py +9 -0
  339. mindspore/train/model.py +135 -55
  340. mindspore/train/serialization.py +133 -111
  341. mindspore/train/summary/summary_record.py +13 -2
  342. mindspore/turbojpeg.dll +0 -0
  343. mindspore/utils/__init__.py +3 -2
  344. mindspore/utils/dryrun.py +0 -6
  345. mindspore/utils/runtime_execution_order_check.py +163 -77
  346. mindspore/utils/sdc_detect.py +68 -0
  347. mindspore/utils/utils.py +6 -9
  348. mindspore/vcmeta.dll +0 -0
  349. mindspore/vcruntime140.dll +0 -0
  350. mindspore/vcruntime140_1.dll +0 -0
  351. mindspore/version.py +1 -1
  352. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  353. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +356 -394
  354. mindspore/_deprecated/jit.py +0 -198
  355. mindspore/experimental/es/__init__.py +0 -22
  356. mindspore/experimental/es/embedding_service.py +0 -891
  357. mindspore/experimental/es/embedding_service_layer.py +0 -581
  358. mindspore/profiler/parser/__init__.py +0 -14
  359. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  360. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  361. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  362. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  363. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  364. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  365. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  366. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  367. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  368. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  369. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  370. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  371. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  372. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  373. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  374. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  375. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  376. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  377. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  378. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  379. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  380. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  381. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  382. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  383. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  384. mindspore/profiler/parser/container.py +0 -229
  385. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  386. mindspore/profiler/parser/flops_parser.py +0 -531
  387. mindspore/profiler/parser/framework_enum.py +0 -111
  388. mindspore/profiler/parser/framework_parser.py +0 -464
  389. mindspore/profiler/parser/framework_struct.py +0 -61
  390. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  391. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  392. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  393. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  394. mindspore/profiler/parser/hccl_parser.py +0 -573
  395. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  396. mindspore/profiler/parser/integrator.py +0 -526
  397. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  398. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  399. mindspore/profiler/parser/minddata_parser.py +0 -186
  400. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  401. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  402. mindspore/profiler/parser/optime_parser.py +0 -250
  403. mindspore/profiler/parser/profiler_info.py +0 -213
  404. mindspore/profiler/parser/step_trace_parser.py +0 -666
  405. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  406. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  407. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/nn/learning_rate_schedule.py CHANGED
@@ -19,7 +19,7 @@ from __future__ import division
  import math

  from mindspore.common import dtype as mstype
- from mindspore.ops import operations as P
+ from mindspore import ops
  from mindspore.nn.cell import Cell
  from mindspore import _checkparam as validator

@@ -119,13 +119,13 @@ class ExponentialDecayLR(LearningRateSchedule):
  self.decay_rate = decay_rate
  self.decay_steps = decay_steps
  self.is_stair = is_stair
- self.pow = P.Pow()
- self.cast = P.Cast()
+ self.pow = ops.Pow()
+ self.cast = ops.Cast()

  def construct(self, global_step):
  p = self.cast(global_step, mstype.float32) / self.decay_steps
  if self.is_stair:
- p = P.Floor()(p)
+ p = ops.Floor()(p)
  return self.learning_rate * self.pow(self.decay_rate, p)


@@ -191,13 +191,13 @@ class NaturalExpDecayLR(LearningRateSchedule):
  self.decay_steps = decay_steps
  self.is_stair = is_stair
  self.math_e = math.e
- self.pow = P.Pow()
- self.cast = P.Cast()
+ self.pow = ops.Pow()
+ self.cast = ops.Cast()

  def construct(self, global_step):
  p = self.cast(global_step, mstype.float32)
  if self.is_stair:
- p = P.FloorDiv()(p, self.decay_steps) * self.decay_steps
+ p = ops.FloorDiv()(p, self.decay_steps) * self.decay_steps
  return self.learning_rate * self.pow(self.math_e, -self.decay_rate * p)


@@ -264,12 +264,12 @@ class InverseDecayLR(LearningRateSchedule):
  self.decay_rate = decay_rate
  self.decay_steps = decay_steps
  self.is_stair = is_stair
- self.cast = P.Cast()
+ self.cast = ops.Cast()

  def construct(self, global_step):
  p = self.cast(global_step, mstype.float32) / self.decay_steps
  if self.is_stair:
- p = P.Floor()(p)
+ p = ops.Floor()(p)
  return self.learning_rate / (1 + self.decay_rate * p)


@@ -334,10 +334,10 @@ class CosineDecayLR(LearningRateSchedule):
  self.decay_steps = decay_steps
  self.math_pi = math.pi
  self.delta = 0.5 * (max_lr - min_lr)
- self.cos = P.Cos()
- self.sin = P.Sin()
- self.min = P.Minimum()
- self.cast = P.Cast()
+ self.cos = ops.Cos()
+ self.sin = ops.Sin()
+ self.min = ops.Minimum()
+ self.cast = ops.Cast()

  def construct(self, global_step):
  p = self.cast(self.min(global_step, self.decay_steps), mstype.float32)
@@ -425,13 +425,13 @@ class PolynomialDecayLR(LearningRateSchedule):
  self.diff_learning_rate = learning_rate - end_learning_rate
  self.power = power
  self.update_decay_steps = update_decay_steps
- self.pow = P.Pow()
- self.ceil = P.Ceil()
- self.min = P.Minimum()
- self.max = P.Maximum()
+ self.pow = ops.Pow()
+ self.ceil = ops.Ceil()
+ self.min = ops.Minimum()
+ self.max = ops.Maximum()

  def construct(self, global_step):
- tmp_global_step = P.Cast()(global_step, mstype.float32)
+ tmp_global_step = ops.Cast()(global_step, mstype.float32)
  tmp_decay_step = self.decay_steps
  if self.update_decay_steps:
  tmp_decay_step = tmp_decay_step * self.max(self.ceil(tmp_global_step / tmp_decay_step), 1)
@@ -498,8 +498,8 @@ class WarmUpLR(LearningRateSchedule):
  validator.check_positive_int(warmup_steps, 'warmup_steps', self.cls_name)
  self.warmup_steps = warmup_steps
  self.learning_rate = learning_rate
- self.min = P.Minimum()
- self.cast = P.Cast()
+ self.min = ops.Minimum()
+ self.cast = ops.Cast()

  def construct(self, global_step):
  warmup_percent = self.cast(self.min(global_step, self.warmup_steps), mstype.float32) / self.warmup_steps
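The dominant pattern in the learning-rate-schedule hunks above (and in most of the Python diffs below) is a namespace migration: primitive classes previously imported via `from mindspore.ops import operations as P` (and functional helpers via the `F` alias) are now referenced through the top-level `mindspore.ops` package. A minimal before/after sketch of the pattern, using `Pow` as in the hunks above; the snippet is illustrative only and is not part of the diff:

    # 2.6.0rc1 style: primitive pulled from the operations submodule
    from mindspore.ops import operations as P
    pow_op = P.Pow()

    # 2.7.0rc1 style: the same primitive referenced from the top-level ops package
    from mindspore import ops
    pow_op = ops.Pow()

Both spellings construct the same Pow primitive; only the import path changes, so the rewrite is behavior-preserving.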
mindspore/nn/loss/loss.py CHANGED
@@ -22,9 +22,7 @@ import mindspore.ops as ops
  from mindspore import log
  from mindspore.common.tensor import Tensor
  from mindspore.common.parameter import Parameter
- from mindspore.ops import operations as P
  from mindspore.ops.operations import _inner_ops as inner
- from mindspore.ops import functional as F
  from mindspore import nn
  from mindspore.ops.primitive import constexpr, _primexpr
  from mindspore.nn.cell import Cell
@@ -103,10 +101,10 @@ class LossBase(Cell):
  if reduction == 'none':
  self.reduce = False

- self.reduce_mean = P.ReduceMean()
- self.reduce_sum = P.ReduceSum()
- self.mul = P.Mul()
- self.cast = P.Cast()
+ self.reduce_mean = ops.ReduceMean()
+ self.reduce_sum = ops.ReduceSum()
+ self.mul = ops.Mul()
+ self.cast = ops.Cast()

  def get_axis(self, x):
  """
@@ -115,9 +113,9 @@ class LossBase(Cell):
  Args:
  x (Tensor): Tensor of any shape.
  """
- shape = F.shape(x)
- length = F.tuple_len(shape)
- perm = F.make_range(0, length)
+ shape = ops.shape(x)
+ length = ops.tuple_len(shape)
+ perm = ops.make_range(0, length)
  return perm

  def get_loss(self, x, weights=1.0):
@@ -168,9 +166,9 @@ class _Loss(LossBase):
  @constexpr(check=False)
  def _check_is_tensor(param_name, input_data, cls_name):
  """Internal function, used to check whether the input data is Tensor."""
- if input_data is not None and not isinstance(F.typeof(input_data), mstype.TensorType):
+ if input_data is not None and not isinstance(ops.typeof(input_data), mstype.TensorType):
  raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.TensorType}', "
- f"but got '{F.typeof(input_data)}'")
+ f"but got '{ops.typeof(input_data)}'")


  class L1Loss(LossBase):
@@ -243,7 +241,7 @@ class L1Loss(LossBase):
  self.reduction = reduction

  def construct(self, logits, labels):
- return F.l1_loss(logits, labels, self.reduction)
+ return ops.l1_loss(logits, labels, self.reduction)


  class L1LossExt(LossBase):
@@ -389,7 +387,7 @@ class MSELoss(LossBase):
  def construct(self, logits, labels):
  _check_is_tensor('logits', logits, self.cls_name)
  _check_is_tensor('labels', labels, self.cls_name)
- x = F.square(logits - labels)
+ x = ops.square(logits - labels)
  return self.get_loss(x)


@@ -448,7 +446,7 @@ class RMSELoss(LossBase):
  def __init__(self):
  """Initialize RMSELoss."""
  super(RMSELoss, self).__init__()
- self.dtype = P.DType()
+ self.dtype = ops.DType()
  self.MSELoss = MSELoss()

  def construct(self, logits, label):
@@ -458,7 +456,7 @@ class RMSELoss(LossBase):
  _check_rmseloss_dtype(logits_dtype, not_supported_dtype, 'RMSELoss')
  _check_rmseloss_dtype(label_dtype, not_supported_dtype, "RMSELoss")

- rmse_loss = F.sqrt(self.MSELoss(logits, label))
+ rmse_loss = ops.sqrt(self.MSELoss(logits, label))

  return rmse_loss
@@ -532,7 +530,7 @@ class MAELoss(LossBase):
  def __init__(self, reduction='mean'):
  """Initialize MAELoss."""
  super(MAELoss, self).__init__(reduction)
- self.abs = P.Abs()
+ self.abs = ops.Abs()

  def construct(self, logits, label):
  _check_is_tensor('logits', logits, self.cls_name)
@@ -708,7 +706,7 @@ class SmoothL1Loss(LossBase):
  super(SmoothL1Loss, self).__init__(reduction)
  self.beta = beta
  self.reduction = reduction
- self.smooth_l1_loss = P.SmoothL1Loss(self.beta, self.reduction)
+ self.smooth_l1_loss = ops.SmoothL1Loss(self.beta, self.reduction)

  def construct(self, logits, labels):
  return self.smooth_l1_loss(logits, labels)
@@ -772,7 +770,7 @@ class SoftMarginLoss(LossBase):
  self.reduction = reduction

  def construct(self, logits, labels):
- return F.soft_margin_loss(logits, labels, self.reduction)
+ return ops.soft_margin_loss(logits, labels, self.reduction)


  class SoftmaxCrossEntropyWithLogits(LossBase):
@@ -856,12 +854,12 @@ class SoftmaxCrossEntropyWithLogits(LossBase):
  super(SoftmaxCrossEntropyWithLogits, self).__init__(reduction)
  self.sparse = validator.check_bool(sparse, "sparse", self.cls_name)
  self.reduction = reduction
- self.softmax_cross_entropy = P.SoftmaxCrossEntropyWithLogits()
- self.one_hot = P.OneHot()
+ self.softmax_cross_entropy = ops.SoftmaxCrossEntropyWithLogits()
+ self.one_hot = ops.OneHot()
  self.on_value = Tensor(1.0, mstype.float32)
  self.off_value = Tensor(0., mstype.float32)
  self.is_cpugpu = context.get_context('device_target') in ["CPU", "GPU"]
- self.sparse_softmax_cross_entropy = P.SparseSoftmaxCrossEntropyWithLogits()
+ self.sparse_softmax_cross_entropy = ops.SparseSoftmaxCrossEntropyWithLogits()

  def construct(self, logits, labels):
  _check_is_tensor('logits', logits, self.cls_name)
@@ -870,7 +868,7 @@ class SoftmaxCrossEntropyWithLogits(LossBase):
  if self.reduction == 'mean':
  x = self.sparse_softmax_cross_entropy(logits, labels)
  return x
- labels = self.one_hot(labels, F.shape(logits)[-1], self.on_value, self.off_value)
+ labels = self.one_hot(labels, ops.shape(logits)[-1], self.on_value, self.off_value)
  x = self.softmax_cross_entropy(logits, labels)[0]
  return self.get_loss(x)

@@ -934,7 +932,7 @@ class DiceLoss(LossBase):
  """Initialize DiceLoss."""
  super(DiceLoss, self).__init__()
  self.smooth = validator.check_positive_float(smooth, "smooth")
- self.reshape = P.Reshape()
+ self.reshape = ops.Reshape()

  def construct(self, logits, label):
  _check_is_tensor('logits', logits, self.cls_name)
@@ -1049,7 +1047,7 @@ class MultiClassDiceLoss(LossBase):
  if self.activation is not None and not isinstance(self.activation, Cell):
  raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell, "
  f"but got {type(self.activation)}.")
- self.reshape = P.Reshape()
+ self.reshape = ops.Reshape()

  def construct(self, logits, label):
  _check_is_tensor('logits', logits, self.cls_name)
@@ -1161,31 +1159,31 @@ class SampledSoftmaxLoss(LossBase):
  self.sampled_values = sampled_values
  self.remove_accidental_hits = remove_accidental_hits
  self.seed = seed
- self.sampler = P.UniformCandidateSampler(
+ self.sampler = ops.UniformCandidateSampler(
  num_true,
  num_sampled,
  True,
  num_classes,
  seed,
  remove_accidental_hits)
- self.cast = P.Cast()
- self.reshape = P.Reshape()
- self.shape = P.Shape()
- self.exp = P.Exp()
- self.log = P.Log()
- self.slice_op = P.Slice()
- self.matmul = P.MatMul(False, True)
- self.gather_v2 = P.Gather()
- self.reduce_max_true = P.ReduceMax(True)
- self.reduce_sum = P.ReduceSum()
- self.reduce_sum_true = P.ReduceSum(True)
- self.concat_dim0 = P.Concat(0)
- self.concat_dim1 = P.Concat(1)
- self.ones_like = P.OnesLike()
- self.zeros_like = P.ZerosLike()
- self.mul = P.Mul()
- self.expand_dims = P.ExpandDims()
- self.dtype = P.DType()
+ self.cast = ops.Cast()
+ self.reshape = ops.Reshape()
+ self.shape = ops.Shape()
+ self.exp = ops.Exp()
+ self.log = ops.Log()
+ self.slice_op = ops.Slice()
+ self.matmul = ops.MatMul(False, True)
+ self.gather_v2 = ops.Gather()
+ self.reduce_max_true = ops.ReduceMax(True)
+ self.reduce_sum = ops.ReduceSum()
+ self.reduce_sum_true = ops.ReduceSum(True)
+ self.concat_dim0 = ops.Concat(0)
+ self.concat_dim1 = ops.Concat(1)
+ self.ones_like = ops.OnesLike()
+ self.zeros_like = ops.ZerosLike()
+ self.mul = ops.Mul()
+ self.expand_dims = ops.ExpandDims()
+ self.dtype = ops.DType()

  def construct(self, weights, biases, labels, logits):
  _check_is_tensor('weights', weights, self.cls_name)
@@ -1393,7 +1391,7 @@ class TripletMarginWithDistanceLoss(LossBase):
  "'ndim' of the input must be positive, "
  f"but got {d.ndim}"
  )
- return P.LpNorm(axis=1, p=2)(d)
+ return ops.LpNorm(axis=1, p=2)(d)

  self.distance_function = pairwise_distance
  else:
@@ -1401,8 +1399,8 @@ class TripletMarginWithDistanceLoss(LossBase):
  self.swap = swap
  self.reduction = reduction
  self.margin = margin
- self.minimum = P.Minimum()
- self.maximum = P.Maximum()
+ self.minimum = ops.Minimum()
+ self.maximum = ops.Maximum()

  def construct(self, x, positive, negative):
  _check_is_tensor("x", x, self.cls_name)
@@ -1486,8 +1484,8 @@ class PoissonNLLLoss(LossBase):
  self.log_input = log_input
  self.full = full
  self.eps = eps
- self.maximum = P.Maximum()
- self.cast = P.Cast()
+ self.maximum = ops.Maximum()
+ self.cast = ops.Cast()

  def construct(self, input, target):
  _check_is_tensor('input', input, self.cls_name)
@@ -1500,7 +1498,7 @@ class PoissonNLLLoss(LossBase):
  if self.full:
  target = self.maximum(target, self.eps)
  stirling_term = (target > 1) * ((target + 0.5) * target.log() - target + get_half_ln_2_pi())
- loss += F.masked_fill(stirling_term, target <= 1, F.cast(0, stirling_term.dtype))
+ loss += ops.masked_fill(stirling_term, target <= 1, ops.cast(0, stirling_term.dtype))
  out = self.get_loss(loss)
  return out

@@ -1570,7 +1568,7 @@ class MultiLabelSoftMarginLoss(LossBase):
  self.reduction = reduction

  def construct(self, x, target):
- return F.multilabel_soft_margin_loss(x, target, self.weight, self.reduction)
+ return ops.multilabel_soft_margin_loss(x, target, self.weight, self.reduction)


  class MultiMarginLoss(LossBase):
@@ -1655,7 +1653,7 @@ class MultiMarginLoss(LossBase):
  if not weight_one:
  _check_is_tensor('weight', weight, self.cls_name)
  else:
- weight = F.fill(x.dtype, x.astype('float32')[0].shape, 1)
+ weight = ops.fill(x.dtype, x.astype('float32')[0].shape, 1)
  loss = self.multi_margin_loss(x, target, weight)
  return loss

@@ -1734,7 +1732,7 @@ class BCELoss(LossBase):
  self.weight = weight

  def construct(self, logits, labels):
- return F.binary_cross_entropy(logits, labels, self.weight, self.reduction)
+ return ops.binary_cross_entropy(logits, labels, self.weight, self.reduction)


  class CosineEmbeddingLoss(LossBase):
@@ -1793,8 +1791,8 @@ class CosineEmbeddingLoss(LossBase):
  def __init__(self, margin=0.0, reduction="mean"):
  """Initialize CosineEmbeddingLoss."""
  super(CosineEmbeddingLoss, self).__init__(reduction)
- self.reduce_sum = P.ReduceSum()
- self.maximum = P.Maximum()
+ self.reduce_sum = ops.ReduceSum()
+ self.maximum = ops.Maximum()
  validator.check_value_type("margin", margin, [float], self.cls_name)
  self.margin = validator.check_float_range(margin, -1.0, 1.0, validator.INC_BOTH, "margin", self.cls_name)

@@ -1806,16 +1804,16 @@ class CosineEmbeddingLoss(LossBase):
  # if labels > 0, 1-cosine(logits_x1, logits_x2)
  # else, max(0, cosine(logits_x1, logits_x2)-margin)
  prod_sum = self.reduce_sum(logits_x1 * logits_x2, (1,))
- square1 = self.reduce_sum(F.square(logits_x1), (1,))
- square2 = self.reduce_sum(F.square(logits_x2), (1,))
- denom = F.sqrt(square1) * F.sqrt(square2)
+ square1 = self.reduce_sum(ops.square(logits_x1), (1,))
+ square2 = self.reduce_sum(ops.square(logits_x2), (1,))
+ denom = ops.sqrt(square1) * ops.sqrt(square2)
  cosine = prod_sum / denom

  pos_value = 1.0 - cosine
  neg_value = self.maximum(cosine - self.margin, 0.0)
- zeros = F.zeros_like(cosine)
- pos_part = F.select(labels == 1, pos_value, zeros)
- neg_part = F.select(labels == -1, neg_value, zeros)
+ zeros = ops.zeros_like(cosine)
+ pos_part = ops.select(labels == 1, pos_value, zeros)
+ neg_part = ops.select(labels == -1, neg_value, zeros)
  output_unreduced = pos_part + neg_part

  return self.get_loss(output_unreduced)
@@ -2089,12 +2087,12 @@ class FocalLoss(LossBase):
  if isinstance(weight, Tensor) and weight.ndim != 1:
  raise ValueError(f"For '{self.cls_name}', the dimension of 'weight' must be 1, but got {weight.ndim}.")
  self.weight = weight
- self.expand_dims = P.ExpandDims()
- self.gather_d = P.GatherD()
- self.squeeze = P.Squeeze(axis=1)
- self.tile = P.Tile()
- self.cast = P.Cast()
- self.dtype = P.DType()
+ self.expand_dims = ops.ExpandDims()
+ self.gather_d = ops.GatherD()
+ self.squeeze = ops.Squeeze(axis=1)
+ self.tile = ops.Tile()
+ self.cast = ops.Cast()
+ self.dtype = ops.DType()
  self.logsoftmax = nn.LogSoftmax(1)

  def construct(self, logits, labels):
@@ -2118,7 +2116,7 @@ class FocalLoss(LossBase):
  log_probability = self.gather_d(log_probability, 1, self.cast(labelss, mindspore.int32))
  log_probability = self.squeeze(log_probability)

- probability = F.exp(log_probability)
+ probability = ops.exp(log_probability)

  if self.weight is not None:
  convert_weight = self.weight[None, :, None]
@@ -2128,7 +2126,7 @@ class FocalLoss(LossBase):
  convert_weight = self.squeeze(convert_weight)
  log_probability = log_probability * convert_weight

- weight = F.pows(-1 * probability + 1.0, self.gamma)
+ weight = ops.pows(-1 * probability + 1.0, self.gamma)
  if labels.shape[1] == 1:
  loss = (-1 * weight * log_probability).mean(axis=1)
  else:
@@ -2225,7 +2223,7 @@ class HuberLoss(LossBase):
  self.delta = delta

  def construct(self, logits, labels):
- return F.huber_loss(logits, labels, self.reduction, self.delta)
+ return ops.huber_loss(logits, labels, self.reduction, self.delta)


  class TripletMarginLoss(LossBase):
@@ -2323,8 +2321,8 @@ class TripletMarginLoss(LossBase):
  def construct(self, x, positive, negative, margin=1.):
  if self.margin != 1.0:
  margin = self.margin
- return F.triplet_margin_loss(x, positive, negative, margin=margin, p=self.p,
- eps=self.eps, swap=self.swap, reduction=self.reduction)
+ return ops.triplet_margin_loss(x, positive, negative, margin=margin, p=self.p,
+ eps=self.eps, swap=self.swap, reduction=self.reduction)


  class NLLLoss(LossBase):
@@ -2352,9 +2350,9 @@ class NLLLoss(LossBase):
  \end{array}\right.

  Args:
- weight (Tensor): The rescaling weight to each class. If the value is not None, the shape is :math:`(C,)`.
- The data type only supports float32 or float16. Default: ``None`` .
- ignore_index (int): Specifies a target value that is ignored (typically for padding value)
+ weight (Tensor, optional): The rescaling weight to each class. If the value is not None,
+ the shape is :math:`(C,)`. The data type only supports float32 or float16. Default: ``None`` .
+ ignore_index (int, optional): Specifies a target value that is ignored (typically for padding value)
  and does not contribute to the gradient. Default: ``-100`` .
  reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
  ``'sum'`` . Default: ``'mean'`` .
@@ -2406,7 +2404,7 @@ class NLLLoss(LossBase):
  self.reduction = reduction

  def construct(self, logits, labels):
- return F.nll_loss(logits, labels, self.weight, self.ignore_index, self.reduction)
+ return ops.nll_loss(logits, labels, self.weight, self.ignore_index, self.reduction)


  @constexpr
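The rewrapped NLLLoss docstring above documents the constructor arguments (`weight`, `ignore_index`, `reduction`). A small usage sketch consistent with that documented signature, using made-up toy values purely for illustration:

    import mindspore as ms
    from mindspore import nn, ops

    # Toy log-probabilities for 3 samples over 4 classes (hypothetical values)
    logits = ops.log_softmax(ms.Tensor([[2.0, 0.5, 0.3, 0.1],
                                        [0.1, 1.5, 0.2, 0.2],
                                        [0.3, 0.2, 0.1, 2.5]], ms.float32), axis=1)
    labels = ms.Tensor([0, 1, 3], ms.int32)
    loss_fn = nn.NLLLoss(weight=None, ignore_index=-100, reduction='mean')
    loss = loss_fn(logits, labels)  # scalar Tensor: mean negative log-likelihood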
@@ -2568,7 +2566,7 @@ class CrossEntropyLoss(LossBase):
  logits.dtype, labels.dtype,
  self.cls_name)

- return F.cross_entropy(logits, labels, self.weight, self.ignore_index, self.reduction, self.label_smoothing)
+ return ops.cross_entropy(logits, labels, self.weight, self.ignore_index, self.reduction, self.label_smoothing)


  class KLDivLoss(LossBase):
@@ -2644,7 +2642,7 @@ class KLDivLoss(LossBase):
  def construct(self, logits, labels):
  _check_is_tensor('logits', logits, self.cls_name)
  _check_is_tensor('labels', labels, self.cls_name)
- return F.kl_div(logits, labels, self.reduction)
+ return ops.kl_div(logits, labels, self.reduction)


  @_primexpr
@@ -2753,11 +2751,11 @@ class CTCLoss(LossBase):
  if targets.ndim == 1:
  targets = targets.expand_dims(0)
  log_probs = log_probs.expand_dims(-2)
- neg_log_hood, _ = F.ctc_loss(log_probs, targets, input_lengths, target_lengths, self.blank, self.reduction,
- self.zero_infinity)
+ neg_log_hood, _ = ops.ctc_loss(log_probs, targets, input_lengths, target_lengths, self.blank,
+ self.reduction, self.zero_infinity)
  return neg_log_hood.squeeze(axis=0)
- neg_log_hood, _ = F.ctc_loss(log_probs, targets, input_lengths, target_lengths, self.blank, self.reduction,
- self.zero_infinity)
+ neg_log_hood, _ = ops.ctc_loss(log_probs, targets, input_lengths, target_lengths, self.blank, self.reduction,
+ self.zero_infinity)
  return neg_log_hood
mindspore/nn/optim/adam.py CHANGED
@@ -537,7 +537,7 @@ class Adam(Optimizer):

  When using Adam with use_offload=True:

- This optimizer only supports `GRAPH_MODE`.
+ This optimizer only supports `GRAPH_MODE` and don't support GE backend.

  Args:
  params (Union[list[Parameter], list[dict]]): Must be list of `Parameter` or list of `dict`. When the
@@ -1014,7 +1014,7 @@ class AdamWeightDecay(Optimizer):
  >>>
  >>> loss = nn.SoftmaxCrossEntropyWithLogits()
  >>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
- """
+ """
  _support_parallel_optimizer = True

  def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0):
@@ -1111,7 +1111,7 @@ class AdamOffload(Optimizer):
  :math:`\epsilon` represents `eps`.

  Note:
- This optimizer only supports `GRAPH_MODE` currently.
+ This optimizer only supports `GRAPH_MODE` currently and don't support GE backend.

  If parameters are not grouped, the `weight_decay` in optimizer will be applied on the network parameters without
  'beta' or 'gamma' in their names. Users can group parameters to change the strategy of decaying weight. When
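The notes added above for `Adam(use_offload=True)` and `AdamOffload` (and for `ASGD` further below) state that these optimizers require graph mode and do not support the GE backend. A hedged sketch of what that means in user code, assuming the usual `set_context` API; the mapping of the GE backend to the `O2` jit level on Ascend is an assumption to verify against the MindSpore docs for your version:

    import mindspore as ms
    from mindspore import nn

    # GRAPH_MODE is required; keep a non-GE jit level (assumption: "O2" selects the GE backend)
    ms.set_context(mode=ms.GRAPH_MODE, jit_config={"jit_level": "O0"})
    # optim = nn.AdamOffload(net.trainable_params(), learning_rate=1e-3)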
mindspore/nn/optim/adasum.py CHANGED
@@ -202,7 +202,7 @@ class _AdaSum(Cell):
  @staticmethod
  def _hash(step, target, weights_index):
  target = "tag" + str(step) + str(target) + str(weights_index)
- target_hash = hashlib.sha1(target.encode()).hexdigest()
+ target_hash = hashlib.sha256(target.encode()).hexdigest()
  hash_res = int(int(target_hash, 16) % MAX_NUM_HASH)
  return hash_res

@@ -430,7 +430,7 @@ class AdaSumByGradWrapCell(Cell):
  requires only one input.

  Inputs:
- - **grads** (Tuple[Tensor]) - Tuple of gradients, same with the input of passed optimizer.
+ - **grads** (tuple[Tensor]) - Tuple of gradients, same with the input of passed optimizer.

  Raises:
  RuntimeError: If `parallel_mode` uses `stand_alone` mode, AdaSum only supports use in distributed scenarios.
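The `_AdaSum._hash` change above only swaps the digest algorithm (SHA-1 to SHA-256); the tag string is still hex-decoded and reduced modulo `MAX_NUM_HASH`, so a given tag will generally land in a different bucket after the upgrade. A standalone sketch of the computation, with an illustrative `MAX_NUM_HASH` value (the real constant lives in adasum.py):

    import hashlib

    MAX_NUM_HASH = 2 ** 31  # illustrative placeholder, not the library's actual value
    tag = "tag" + str(1) + str("allreduce") + str(0)
    old_bucket = int(hashlib.sha1(tag.encode()).hexdigest(), 16) % MAX_NUM_HASH
    new_bucket = int(hashlib.sha256(tag.encode()).hexdigest(), 16) % MAX_NUM_HASH
    # old_bucket != new_bucket in general: the tag-to-bucket mapping changes with the algorithm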
mindspore/nn/optim/asgd.py CHANGED
@@ -50,6 +50,8 @@ class ASGD(Optimizer):
  :math:`w` represents `params`.

  Note:
+ This optimizer don't support GE backend.
+
  If parameters are not grouped, the `weight_decay` in optimizer will be applied on the parameters without 'beta'
  or 'gamma' in their names. Users can group parameters to change the strategy of decaying weight. When parameters
  are grouped, each group can set `weight_decay`, if not, the `weight_decay` in optimizer will be applied.
mindspore/nn/optim/optimizer.py CHANGED
@@ -514,7 +514,7 @@ class Optimizer(Cell):
  raise ValueError("For 'Optimizer', the property 'target' cannot be set to 'Ascend' "
  "in the 'GPU' environment.")

- self._is_device = (value != 'CPU')
+ self._is_device = value != 'CPU'
  self._target = value

  def _grad_sparse_indices_deduplicate(self, gradients):
mindspore/nn/optim/thor.py CHANGED
@@ -445,7 +445,7 @@ class ThorGpu(Optimizer):
  def _define_gpu_reducer(self, split_indices):
  """define gpu reducer"""
  self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
- self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
+ self.is_distributed = self.parallel_mode != ParallelMode.STAND_ALONE
  if self.is_distributed:
  mean = _get_gradients_mean()
  degree = _get_device_num()
@@ -764,7 +764,7 @@ class ThorAscend(Optimizer):
  def _define_ascend_reducer(self, split_indices):
  """define ascend reducer"""
  self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
- self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
+ self.is_distributed = self.parallel_mode != ParallelMode.STAND_ALONE
  if self.is_distributed:
  mean = _get_gradients_mean()
  degree = _get_device_num()
mindspore/nn/probability/distribution/exponential.py CHANGED
@@ -35,7 +35,8 @@ class Exponential(Distribution):
  where :math:`\lambda` is the rate of the distribution.

  Args:
- rate (int, float, list, numpy.ndarray, Tensor, optional): The inverse scale. :math:`\lambda` in the formula. Default: ``None`` .
+ rate (int, float, list, numpy.ndarray, Tensor, optional): The inverse scale.
+ :math:`\lambda` in the formula. Default: ``None`` .
  seed (int, optional): The seed used in sampling. The global seed is used if it is None. Default: ``None`` .
  dtype (mindspore.dtype, optional): The type of the event samples. Default: ``mstype.float32`` .
  name (str, optional): The name of the distribution. Default: ``'Exponential'`` .
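The rewrapped `rate` description above is the constructor's main argument. A brief usage sketch consistent with the documented signature (the values are arbitrary):

    import mindspore as ms
    import mindspore.nn.probability.distribution as msd

    # rate is the inverse scale (lambda); here it is fixed at construction time
    expon = msd.Exponential(rate=0.5, seed=0, dtype=ms.float32)
    samples = expon.sample((2, 3))                            # draw a 2x3 batch of samples
    log_p = expon.log_prob(ms.Tensor([0.1, 1.0], ms.float32))  # log-density at two points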
mindspore/nn/probability/distribution/poisson.py CHANGED
@@ -36,7 +36,8 @@ class Poisson(Distribution):
  where :math:`\lambda` is the rate of the distribution.

  Args:
- rate (list, numpy.ndarray, Tensor): The rate of the Poisson distribution. :math:`\lambda` in the formula. Default: ``None`` .
+ rate (list, numpy.ndarray, Tensor): The rate of the Poisson distribution.
+ :math:`\lambda` in the formula. Default: ``None`` .
  seed (int): The seed used in sampling. The global seed is used if it is ``None`` . Default: ``None`` .
  dtype (mindspore.dtype): The type of the event samples. Default: ``mstype.float32`` .
  name (str): The name of the distribution. Default: ``'Poisson'`` .
mindspore/nn/sparse/sparse.py CHANGED
@@ -15,8 +15,8 @@
  """Sparse related tools."""
  from __future__ import absolute_import

+ from mindspore import ops
  from mindspore import log as logger
- from mindspore.ops import operations as P
  from mindspore.nn.cell import Cell


@@ -76,7 +76,7 @@ class SparseToDense(Cell):
  logger.warning("'nn.SparseToDense' is deprecated from version 2.0 and will be removed in a future version. " +
  "Please use 'COOTensor.to_dense()' instead.")
  super(SparseToDense, self).__init__()
- self.sparse_to_dense = P.SparseToDense()
+ self.sparse_to_dense = ops.SparseToDense()

  def construct(self, sparse_tensor):
  return self.sparse_to_dense(sparse_tensor.indices,
@@ -141,7 +141,7 @@ class SparseTensorDenseMatmul(Cell):
  super(SparseTensorDenseMatmul, self).__init__()
  self.adj_st = adjoint_st
  self.adj_dt = adjoint_dt
- self.sparse_dense_matmul = P.SparseTensorDenseMatmul(adjoint_st=self.adj_st, adjoint_dt=self.adj_dt)
+ self.sparse_dense_matmul = ops.SparseTensorDenseMatmul(adjoint_st=self.adj_st, adjoint_dt=self.adj_dt)

  def construct(self, indices, values, sparse_shape, dense):
  return self.sparse_dense_matmul(indices, values, sparse_shape, dense)
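The deprecation warning quoted in the SparseToDense hunk above points users at `COOTensor.to_dense()`. A minimal sketch of that replacement, with toy values and assuming the current `mindspore.COOTensor` API:

    import mindspore as ms
    from mindspore import Tensor, COOTensor

    # Two nonzero entries of a 3x4 matrix: (0, 1) -> 1.0 and (2, 3) -> 2.0
    indices = Tensor([[0, 1], [2, 3]], ms.int32)
    values = Tensor([1.0, 2.0], ms.float32)
    dense = COOTensor(indices, values, (3, 4)).to_dense()  # plain dense Tensor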