mindspore 2.6.0rc1__cp311-cp311-win_amd64.whl → 2.7.0__cp311-cp311-win_amd64.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry, and is provided for informational purposes only.

This version of mindspore has been flagged as potentially problematic; see the registry listing for details.

Files changed (458)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +2 -2
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +42 -11
  9. mindspore/_extends/builtin_operations.py +3 -3
  10. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  11. mindspore/_extends/optimize/cell_utils.py +96 -0
  12. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +3 -3
  15. mindspore/_extends/parse/compile_config.py +44 -22
  16. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
  17. mindspore/_extends/parse/parser.py +65 -84
  18. mindspore/_extends/parse/resources.py +39 -0
  19. mindspore/_extends/parse/standard_method.py +58 -14
  20. mindspore/_extends/parse/trope.py +8 -1
  21. mindspore/_extends/pijit/__init__.py +1 -2
  22. mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
  23. mindspore/amp.py +4 -22
  24. mindspore/atlprov.dll +0 -0
  25. mindspore/avcodec-59.dll +0 -0
  26. mindspore/avdevice-59.dll +0 -0
  27. mindspore/avfilter-8.dll +0 -0
  28. mindspore/avformat-59.dll +0 -0
  29. mindspore/avutil-57.dll +0 -0
  30. mindspore/boost/adasum.py +1 -1
  31. mindspore/boost/boost_cell_wrapper.py +4 -4
  32. mindspore/c1.dll +0 -0
  33. mindspore/c1xx.dll +0 -0
  34. mindspore/c2.dll +0 -0
  35. mindspore/common/__init__.py +43 -12
  36. mindspore/common/_grad_function.py +2 -1
  37. mindspore/common/_pijit_context.py +28 -7
  38. mindspore/common/_stub_tensor.py +1 -209
  39. mindspore/common/_tensor_cpp_method.py +1 -1
  40. mindspore/common/_tensor_docs.py +178 -53
  41. mindspore/common/_utils.py +9 -1
  42. mindspore/common/api.py +377 -203
  43. mindspore/common/dtype.py +108 -57
  44. mindspore/common/dump.py +11 -16
  45. mindspore/common/dynamic_shape/__init__.py +0 -0
  46. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
  47. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  48. mindspore/common/file_system.py +59 -9
  49. mindspore/common/generator.py +5 -3
  50. mindspore/common/hook_handle.py +33 -5
  51. mindspore/common/jit_config.py +1 -1
  52. mindspore/common/jit_trace.py +84 -105
  53. mindspore/common/np_dtype.py +3 -3
  54. mindspore/common/parameter.py +27 -29
  55. mindspore/common/recompute.py +5 -7
  56. mindspore/common/sparse_tensor.py +0 -3
  57. mindspore/common/symbol.py +0 -1
  58. mindspore/common/tensor.py +117 -131
  59. mindspore/communication/_comm_helper.py +46 -4
  60. mindspore/communication/management.py +79 -7
  61. mindspore/context.py +67 -55
  62. mindspore/dataset/__init__.py +1 -1
  63. mindspore/dataset/audio/transforms.py +1 -1
  64. mindspore/dataset/core/config.py +38 -4
  65. mindspore/dataset/engine/datasets.py +350 -322
  66. mindspore/dataset/engine/datasets_user_defined.py +70 -24
  67. mindspore/dataset/engine/iterators.py +2 -2
  68. mindspore/dataset/engine/obs/config_loader.py +2 -2
  69. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  70. mindspore/dataset/transforms/c_transforms.py +2 -2
  71. mindspore/dataset/transforms/py_transforms.py +7 -3
  72. mindspore/dataset/transforms/transforms.py +10 -6
  73. mindspore/dataset/vision/__init__.py +1 -1
  74. mindspore/dataset/vision/py_transforms.py +8 -8
  75. mindspore/dataset/vision/transforms.py +17 -5
  76. mindspore/dataset/vision/utils.py +632 -21
  77. mindspore/dataset/vision/validators.py +1 -0
  78. mindspore/device_context/ascend/device.py +1 -1
  79. mindspore/device_context/ascend/op_tuning.py +35 -1
  80. mindspore/device_context/gpu/__init__.py +2 -2
  81. mindspore/device_context/gpu/device.py +1 -1
  82. mindspore/device_context/gpu/op_precision.py +4 -2
  83. mindspore/device_context/gpu/op_tuning.py +6 -3
  84. mindspore/device_manager.py +16 -9
  85. mindspore/dnnl.dll +0 -0
  86. mindspore/dpcmi.dll +0 -0
  87. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -4
  88. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  89. mindspore/experimental/optim/adadelta.py +13 -20
  90. mindspore/experimental/optim/adagrad.py +15 -22
  91. mindspore/experimental/optim/adam.py +17 -24
  92. mindspore/experimental/optim/adamax.py +14 -22
  93. mindspore/experimental/optim/adamw.py +28 -34
  94. mindspore/experimental/optim/asgd.py +15 -25
  95. mindspore/experimental/optim/lr_scheduler.py +27 -45
  96. mindspore/experimental/optim/nadam.py +14 -24
  97. mindspore/experimental/optim/optimizer.py +13 -23
  98. mindspore/experimental/optim/radam.py +18 -24
  99. mindspore/experimental/optim/rmsprop.py +14 -25
  100. mindspore/experimental/optim/rprop.py +15 -26
  101. mindspore/experimental/optim/sgd.py +9 -19
  102. mindspore/hal/__init__.py +4 -4
  103. mindspore/hal/contiguous_tensors_handle.py +2 -2
  104. mindspore/hal/memory.py +27 -7
  105. mindspore/include/api/cell.h +65 -5
  106. mindspore/include/api/cfg.h +24 -7
  107. mindspore/include/api/context.h +1 -0
  108. mindspore/include/api/delegate.h +10 -2
  109. mindspore/include/api/dual_abi_helper.h +100 -19
  110. mindspore/include/api/graph.h +14 -1
  111. mindspore/include/api/kernel.h +16 -3
  112. mindspore/include/api/kernel_api.h +9 -1
  113. mindspore/include/api/metrics/accuracy.h +9 -0
  114. mindspore/include/api/model.h +8 -1
  115. mindspore/include/api/model_group.h +4 -0
  116. mindspore/include/api/model_parallel_runner.h +2 -0
  117. mindspore/include/api/status.h +48 -10
  118. mindspore/include/api/types.h +8 -3
  119. mindspore/include/c_api/model_c.h +0 -58
  120. mindspore/include/c_api/tensor_c.h +0 -26
  121. mindspore/include/dataset/constants.h +9 -0
  122. mindspore/include/dataset/vision_ascend.h +1 -1
  123. mindspore/jpeg62.dll +0 -0
  124. mindspore/mindrecord/tools/cifar10.py +61 -11
  125. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  126. mindspore/mindspore_backend_common.dll +0 -0
  127. mindspore/mindspore_backend_manager.dll +0 -0
  128. mindspore/mindspore_common.dll +0 -0
  129. mindspore/mindspore_core.dll +0 -0
  130. mindspore/mindspore_cpu_res_manager.dll +0 -0
  131. mindspore/mindspore_dump.dll +0 -0
  132. mindspore/mindspore_frontend.dll +0 -0
  133. mindspore/mindspore_glog.dll +0 -0
  134. mindspore/mindspore_memory_pool.dll +0 -0
  135. mindspore/mindspore_ms_backend.dll +0 -0
  136. mindspore/mindspore_ops.dll +0 -0
  137. mindspore/mindspore_ops_host.dll +0 -0
  138. mindspore/mindspore_ops_kernel_common.dll +0 -0
  139. mindspore/mindspore_profiler.dll +0 -0
  140. mindspore/mindspore_pyboost.dll +0 -0
  141. mindspore/mindspore_pynative.dll +0 -0
  142. mindspore/mindspore_res_manager.dll +0 -0
  143. mindspore/mindspore_runtime_pipeline.dll +0 -0
  144. mindspore/mint/__init__.py +6 -46
  145. mindspore/mint/distributed/__init__.py +5 -0
  146. mindspore/mint/distributed/distributed.py +429 -23
  147. mindspore/mint/nn/__init__.py +1 -1
  148. mindspore/mint/nn/functional.py +53 -6
  149. mindspore/mint/nn/layer/_functions.py +163 -294
  150. mindspore/mint/nn/layer/activation.py +8 -6
  151. mindspore/mint/nn/layer/conv.py +140 -104
  152. mindspore/mint/nn/layer/normalization.py +11 -25
  153. mindspore/mint/optim/adam.py +19 -18
  154. mindspore/mint/optim/adamw.py +14 -8
  155. mindspore/mint/optim/sgd.py +5 -5
  156. mindspore/msobj140.dll +0 -0
  157. mindspore/mspdb140.dll +0 -0
  158. mindspore/mspdbcore.dll +0 -0
  159. mindspore/mspdbst.dll +0 -0
  160. mindspore/mspft140.dll +0 -0
  161. mindspore/msvcdis140.dll +0 -0
  162. mindspore/msvcp140_1.dll +0 -0
  163. mindspore/msvcp140_2.dll +0 -0
  164. mindspore/msvcp140_atomic_wait.dll +0 -0
  165. mindspore/msvcp140_codecvt_ids.dll +0 -0
  166. mindspore/nn/cell.py +491 -623
  167. mindspore/nn/grad/cell_grad.py +11 -12
  168. mindspore/nn/layer/activation.py +36 -36
  169. mindspore/nn/layer/basic.py +74 -77
  170. mindspore/nn/layer/channel_shuffle.py +4 -4
  171. mindspore/nn/layer/combined.py +4 -2
  172. mindspore/nn/layer/conv.py +117 -110
  173. mindspore/nn/layer/dense.py +9 -7
  174. mindspore/nn/layer/embedding.py +50 -52
  175. mindspore/nn/layer/image.py +38 -40
  176. mindspore/nn/layer/math.py +111 -112
  177. mindspore/nn/layer/normalization.py +56 -44
  178. mindspore/nn/layer/pooling.py +58 -63
  179. mindspore/nn/layer/rnn_cells.py +33 -33
  180. mindspore/nn/layer/rnns.py +56 -56
  181. mindspore/nn/layer/thor_layer.py +74 -73
  182. mindspore/nn/layer/transformer.py +11 -1
  183. mindspore/nn/learning_rate_schedule.py +20 -20
  184. mindspore/nn/loss/loss.py +79 -81
  185. mindspore/nn/optim/adam.py +4 -6
  186. mindspore/nn/optim/adasum.py +2 -2
  187. mindspore/nn/optim/asgd.py +2 -0
  188. mindspore/nn/optim/lamb.py +1 -3
  189. mindspore/nn/optim/optimizer.py +1 -1
  190. mindspore/nn/optim/tft_wrapper.py +2 -3
  191. mindspore/nn/optim/thor.py +2 -2
  192. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  193. mindspore/nn/probability/distribution/exponential.py +2 -1
  194. mindspore/nn/probability/distribution/poisson.py +2 -1
  195. mindspore/nn/sparse/sparse.py +3 -3
  196. mindspore/nn/wrap/cell_wrapper.py +73 -42
  197. mindspore/nn/wrap/grad_reducer.py +37 -52
  198. mindspore/nn/wrap/loss_scale.py +72 -74
  199. mindspore/numpy/array_creations.py +7 -7
  200. mindspore/numpy/fft.py +1 -1
  201. mindspore/numpy/math_ops.py +5 -5
  202. mindspore/numpy/utils_const.py +1 -1
  203. mindspore/opencv_core452.dll +0 -0
  204. mindspore/opencv_imgcodecs452.dll +0 -0
  205. mindspore/opencv_imgproc452.dll +0 -0
  206. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  207. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  208. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  209. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  210. mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
  211. mindspore/ops/_vmap/vmap_array_ops.py +31 -13
  212. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  213. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +54 -13
  214. mindspore/ops/auto_generate/gen_extend_func.py +27 -145
  215. mindspore/ops/auto_generate/gen_ops_def.py +1027 -347
  216. mindspore/ops/auto_generate/gen_ops_prim.py +2341 -1117
  217. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  218. mindspore/ops/composite/__init__.py +10 -0
  219. mindspore/ops/composite/base.py +9 -5
  220. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  221. mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
  222. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  223. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  224. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  225. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  226. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  227. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  228. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  229. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  230. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  231. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  232. mindspore/ops/function/__init__.py +4 -1
  233. mindspore/ops/function/_add_attr_func.py +11 -6
  234. mindspore/ops/function/array_func.py +19 -102
  235. mindspore/ops/function/debug_func.py +8 -5
  236. mindspore/ops/function/grad/grad_func.py +5 -13
  237. mindspore/ops/function/math_func.py +77 -572
  238. mindspore/ops/function/nn_func.py +46 -94
  239. mindspore/ops/function/other_func.py +4 -1
  240. mindspore/ops/function/random_func.py +44 -5
  241. mindspore/ops/function/vmap_func.py +2 -1
  242. mindspore/ops/functional.py +4 -4
  243. mindspore/ops/functional_overload.py +594 -18
  244. mindspore/ops/op_info_register.py +21 -0
  245. mindspore/ops/operations/__init__.py +16 -11
  246. mindspore/ops/operations/_custom_ops_utils.py +689 -34
  247. mindspore/ops/operations/_inner_ops.py +14 -18
  248. mindspore/ops/operations/_sequence_ops.py +1 -1
  249. mindspore/ops/operations/array_ops.py +5 -51
  250. mindspore/ops/operations/comm_ops.py +186 -41
  251. mindspore/ops/operations/custom_ops.py +303 -177
  252. mindspore/ops/operations/debug_ops.py +59 -4
  253. mindspore/ops/operations/image_ops.py +13 -13
  254. mindspore/ops/operations/manually_defined/ops_def.py +27 -28
  255. mindspore/ops/operations/math_ops.py +8 -9
  256. mindspore/ops/operations/nn_ops.py +8 -40
  257. mindspore/ops/primitive.py +9 -20
  258. mindspore/ops/tensor_method.py +63 -15
  259. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  260. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  261. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  262. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  263. mindspore/ops_generate/common/base_generator.py +14 -0
  264. mindspore/ops_generate/common/gen_constants.py +8 -3
  265. mindspore/ops_generate/common/gen_utils.py +0 -19
  266. mindspore/ops_generate/common/op_proto.py +11 -4
  267. mindspore/ops_generate/common/template.py +88 -11
  268. mindspore/ops_generate/gen_ops.py +1 -1
  269. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  270. mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
  271. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  272. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  273. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  274. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  275. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  276. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
  277. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  278. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  279. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  280. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  281. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  282. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  283. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  284. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  285. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  286. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  287. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  288. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  289. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  290. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  291. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  292. mindspore/parallel/_auto_parallel_context.py +16 -23
  293. mindspore/parallel/_cell_wrapper.py +113 -45
  294. mindspore/parallel/_parallel_serialization.py +4 -3
  295. mindspore/parallel/_ps_context.py +4 -6
  296. mindspore/parallel/_tensor.py +167 -12
  297. mindspore/parallel/_transformer/moe.py +1 -1
  298. mindspore/parallel/_transformer/transformer.py +17 -12
  299. mindspore/parallel/_utils.py +5 -11
  300. mindspore/parallel/auto_parallel.py +35 -14
  301. mindspore/parallel/checkpoint_convert.py +3 -3
  302. mindspore/parallel/checkpoint_transform.py +13 -7
  303. mindspore/parallel/cluster/process_entity/_api.py +88 -49
  304. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  305. mindspore/parallel/cluster/run.py +48 -7
  306. mindspore/parallel/function/__init__.py +8 -1
  307. mindspore/parallel/function/reshard_func.py +12 -12
  308. mindspore/parallel/nn/__init__.py +15 -2
  309. mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
  310. mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
  311. mindspore/parallel/shard.py +10 -25
  312. mindspore/parallel/transform_safetensors.py +469 -174
  313. mindspore/pgodb140.dll +0 -0
  314. mindspore/pgort140.dll +0 -0
  315. mindspore/profiler/__init__.py +2 -1
  316. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  317. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  318. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
  319. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  320. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  321. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  322. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  323. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  324. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  325. mindspore/profiler/analysis/task_manager.py +1 -1
  326. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  327. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  328. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
  329. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
  330. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  331. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  332. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  333. mindspore/profiler/common/constant.py +16 -0
  334. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  335. mindspore/profiler/common/path_manager.py +9 -0
  336. mindspore/profiler/common/profiler_context.py +50 -29
  337. mindspore/profiler/common/profiler_info.py +0 -16
  338. mindspore/profiler/common/profiler_meta_data.py +1 -0
  339. mindspore/profiler/common/profiler_op_analyse.py +239 -0
  340. mindspore/profiler/common/profiler_output_path.py +23 -8
  341. mindspore/profiler/common/profiler_parameters.py +128 -35
  342. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  343. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  344. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  345. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  346. mindspore/profiler/dynamic_profiler.py +374 -338
  347. mindspore/profiler/envprofiler.py +42 -12
  348. mindspore/profiler/experimental_config.py +112 -7
  349. mindspore/profiler/mstx.py +33 -12
  350. mindspore/profiler/platform/__init__.py +2 -3
  351. mindspore/profiler/platform/cpu_profiler.py +10 -4
  352. mindspore/profiler/platform/npu_profiler.py +30 -20
  353. mindspore/profiler/profiler.py +218 -154
  354. mindspore/profiler/profiler_action_controller.py +65 -77
  355. mindspore/profiler/profiler_interface.py +2 -2
  356. mindspore/profiler/schedule.py +10 -4
  357. mindspore/rewrite/common/config.py +1 -0
  358. mindspore/rewrite/common/namer.py +1 -0
  359. mindspore/rewrite/common/namespace.py +1 -0
  360. mindspore/rewrite/node/node.py +31 -11
  361. mindspore/rewrite/parsers/assign_parser.py +1 -1
  362. mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
  363. mindspore/run_check/_check_version.py +7 -10
  364. mindspore/runtime/__init__.py +8 -6
  365. mindspore/runtime/event.py +10 -4
  366. mindspore/runtime/executor.py +87 -45
  367. mindspore/runtime/memory.py +31 -32
  368. mindspore/runtime/thread_bind_core.py +299 -165
  369. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  370. mindspore/swresample-4.dll +0 -0
  371. mindspore/swscale-6.dll +0 -0
  372. mindspore/tbbmalloc.dll +0 -0
  373. mindspore/tinyxml2.dll +0 -0
  374. mindspore/train/_utils.py +17 -7
  375. mindspore/train/amp.py +43 -23
  376. mindspore/train/callback/__init__.py +5 -5
  377. mindspore/train/callback/_callback.py +2 -1
  378. mindspore/train/callback/_checkpoint.py +4 -14
  379. mindspore/train/callback/_flops_collector.py +11 -7
  380. mindspore/train/callback/_landscape.py +0 -1
  381. mindspore/train/callback/_train_fault_tolerance.py +98 -21
  382. mindspore/train/data_sink.py +15 -6
  383. mindspore/train/dataset_helper.py +14 -5
  384. mindspore/train/model.py +133 -69
  385. mindspore/train/serialization.py +168 -126
  386. mindspore/train/summary/summary_record.py +13 -2
  387. mindspore/train/train_thor/model_thor.py +2 -2
  388. mindspore/turbojpeg.dll +0 -0
  389. mindspore/utils/__init__.py +3 -2
  390. mindspore/utils/dryrun.py +0 -6
  391. mindspore/utils/runtime_execution_order_check.py +163 -77
  392. mindspore/utils/sdc_detect.py +68 -0
  393. mindspore/utils/utils.py +14 -17
  394. mindspore/vcmeta.dll +0 -0
  395. mindspore/vcruntime140.dll +0 -0
  396. mindspore/vcruntime140_1.dll +0 -0
  397. mindspore/version.py +1 -1
  398. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
  399. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/RECORD +403 -442
  400. mindspore/_deprecated/jit.py +0 -198
  401. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  402. mindspore/communication/_hccl_management.py +0 -297
  403. mindspore/experimental/es/embedding_service.py +0 -891
  404. mindspore/experimental/es/embedding_service_layer.py +0 -581
  405. mindspore/profiler/common/validator/__init__.py +0 -14
  406. mindspore/profiler/common/validator/validate_path.py +0 -84
  407. mindspore/profiler/parser/__init__.py +0 -14
  408. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  409. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  410. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  411. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  412. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  413. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  414. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  415. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  416. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  417. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  418. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  419. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  420. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  421. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  422. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  423. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  424. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  425. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  426. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  427. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  428. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  429. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  430. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  431. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  432. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  433. mindspore/profiler/parser/container.py +0 -229
  434. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  435. mindspore/profiler/parser/flops_parser.py +0 -531
  436. mindspore/profiler/parser/framework_enum.py +0 -111
  437. mindspore/profiler/parser/framework_parser.py +0 -464
  438. mindspore/profiler/parser/framework_struct.py +0 -61
  439. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  440. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  441. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  442. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  443. mindspore/profiler/parser/hccl_parser.py +0 -573
  444. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  445. mindspore/profiler/parser/integrator.py +0 -526
  446. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  447. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  448. mindspore/profiler/parser/minddata_parser.py +0 -186
  449. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  450. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  451. mindspore/profiler/parser/optime_parser.py +0 -250
  452. mindspore/profiler/parser/profiler_info.py +0 -213
  453. mindspore/profiler/parser/step_trace_parser.py +0 -666
  454. mindspore/utils/hooks.py +0 -81
  455. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  456. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
  457. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
  458. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
@@ -18,15 +18,15 @@ from collections import OrderedDict
  from types import MethodType
  from mindspore import log as logger
  from mindspore.nn.cell import Cell
- from mindspore import context
  from mindspore.common.tensor import Tensor
  from mindspore import ops
  from mindspore.ops.composite import GradOperation
  from mindspore.common._register_for_recompute import recompute_registry
  from mindspore.common.api import _pynative_executor, _no_grad
  from mindspore.common.generator import get_rng_state, set_rng_state
- from mindspore.train.amp import amp_decorator
+ from mindspore.train.amp import AmpDecorator
  from mindspore._c_expression.amp import get_curr_amp_strategy
+ from mindspore._check_jit_forbidden_api import jit_forbidden_register


  class _WrapCell(Cell):
@@ -104,8 +104,8 @@ class _RecomputeCell(Cell):
  set_rng_state(self.cpu_rng_state)
  _pynative_executor.set_is_run_recompute(True)
  if self.amp_strategy:
- with amp_decorator(self.amp_strategy.get_amp_level(), self.amp_strategy.get_amp_dtype(),
- self.amp_strategy.get_white_list(), self.amp_strategy.get_black_list()):
+ with AmpDecorator(self.amp_strategy.get_amp_level(), self.amp_strategy.get_amp_dtype(),
+ self.amp_strategy.get_white_list(), self.amp_strategy.get_black_list()):
  grads = self.grad(self.net, self.internal_params)(*input_args, **kwargs)
  else:
  grads = self.grad(self.net, self.internal_params)(*input_args, **kwargs)
@@ -211,14 +211,12 @@ def _detach_input(input_arg):
  def _check_validation(block):
  if not isinstance(block, Cell):
  raise TypeError("Recompute function now only support block which inherited from Cell!")
- if context.get_context("mode") != context.PYNATIVE_MODE:
- raise AssertionError("Recompute function now only support pynative mode, you can use "
- "Cell.recompute() in graph mode.")
  if block.construct.__code__.co_name == "staging_specialize":
  logger.warning('Block\'s construct method decorated by @jit that recompute '
  'function will not come into effect.')


+ @jit_forbidden_register
  def recompute(block, *args, **kwargs):
  r"""
  This function is used to reduce memory, when run block, rather than
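
The hunks above drop the PyNative-only check from the functional recompute helper, register it as forbidden under @jit, and switch it to the new AmpDecorator context manager. A minimal usage sketch of the public mindspore.recompute entry point (the block, layer sizes, and input below are illustrative, not taken from this diff):

import mindspore as ms
from mindspore import nn, ops

class Block(nn.Cell):
    """A small block whose activations are recomputed during backprop."""
    def __init__(self):
        super().__init__()
        self.dense = nn.Dense(4, 4)

    def construct(self, x):
        return ops.relu(self.dense(x))

class Net(nn.Cell):
    def __init__(self):
        super().__init__()
        self.block = Block()

    def construct(self, x):
        # Forward activations of `block` are discarded and recomputed when
        # gradients are taken, trading extra compute for lower memory.
        return ms.recompute(self.block, x)

x = ms.Tensor([[1.0, 2.0, 3.0, 4.0]])
grad_fn = ms.grad(Net(), grad_position=0)
print(grad_fn(x).shape)  # (1, 4)
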
@@ -98,7 +98,6 @@ class RowTensor(RowTensorInner):

  .. warning::
  - This is an experimental API that is subjected to change or deletion.
- - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".

  Args:
  indices (Tensor): A 1-D integer Tensor of shape :math:`(d_0)` . Default: ``None``.
@@ -232,7 +231,6 @@ class COOTensor(COOTensor_):

  .. warning::
  - This is an experimental API that is subject to change or deletion.
- - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
  - Currently, duplicate coordinates in the indices will not be coalesced.
  If the indices contain out-of-bound values, the result will be undefined.

@@ -681,7 +679,6 @@ class CSRTensor(CSRTensor_):

  .. warning::
  - This is an experimental API that is subjected to change.
- - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
  - If the values given by `indptr` or `indices` are invalid, the results may be undefined. Invalid values include
  when the length of `values` or `indices` exceeds the range indicated by `indptr`, and when the columns
  indicated by `indices` are repeated on the same row.
@@ -104,7 +104,6 @@ class Symbol:
  if not isinstance(unique, bool):
  raise TypeError(f"For 'Symbol', the argument 'unique' must be bool, but got {type(unique)}")

- # pylint: disable=missing-docstring
  def to_dict(self):
  # Convert the symbolic info to dictionary.
  # This method is not necessary to show in public api document, use comment instead of docstring.
@@ -32,11 +32,12 @@ from mindspore.common.hook_handle import _TensorHookHandle
  from mindspore.common._utils import get_slice_num
  from mindspore.common._register_for_tensor import tensor_operator_registry
  from mindspore._c_expression import TensorPy as TensorPy_
+ from mindspore._c_expression import _rmod_instance
  from mindspore import _checkparam as validator
  from mindspore._checkparam import is_stub_tensor, check_hook_fn
  from mindspore._check_jit_forbidden_api import jit_forbidden_register
  from mindspore.common.symbol import Symbol
-
+ from mindspore._c_expression import is_reboot_node

  np_types = (np.int8, np.int16, np.int32, np.int64,
  np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
@@ -93,13 +94,6 @@ def _set_symbolic_shape(shape):
  return shape, symbolic_shape


- def _convert_stub_tensor(input_data):
- """Convert input to stub tensor"""
- if not is_stub_tensor(input_data):
- return input_data
- return input_data.stub_sync()
-
-
  def _convert_numpy_array(input_data):
  """Convert inpyt to numpy array"""
  if not isinstance(input_data, np_types):
@@ -144,8 +138,6 @@ def _init(input_data=None, dtype=None, shape=None, init=None, const_arg=False, d
  _cast = tensor_operator_registry.get("cast")
  input_data = _cast(input_data, dtype)

- input_data = _convert_stub_tensor(input_data)
-
  if input_data is None and shape is None and init is None and dtype is not None:
  validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
  logger.warning(f"For 'Tensor', if 'dtype' is not None, 'input_data', 'shape' or 'init' must not be None.")
@@ -325,7 +317,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  return True
  return NotImplemented

-
  def __deepcopy__(self, memodict):
  new_obj = Tensor(self)
  new_obj.init = self.init
@@ -379,9 +370,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  try:
  data = self._item()
  return int(data)
- except ValueError:
- raise ValueError("Only one element tensors can be converted to Python scalars")
-
+ except ValueError as e:
+ raise ValueError("Only one element tensors can be converted to Python scalars") from e

  def __float__(self):
  try:
@@ -396,8 +386,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  if not isinstance(data, (int, bool)):
  raise ValueError
  return int(data)
- except ValueError:
- raise ValueError("Only integer tensors of a single element can be converted to an index.")
+ except ValueError as e:
+ raise ValueError("Only integer tensors of a single element can be converted to an index.") from e

  def __pos__(self):
  return self
@@ -426,14 +416,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  def __rtruediv__(self, other):
  return tensor_operator_registry.get('__truediv__')(other, self)

- def __mod__(self, other):
- return tensor_operator_registry.get('__mod__')(self, other)
-
  def __rmod__(self, other):
- return tensor_operator_registry.get('__mod__')(other, self)
-
- def __imod__(self, other):
- return self.__mod__(other)
+ return _rmod_instance(other, self)

  def __rpow__(self, other):
  return tensor_operator_registry.get('__rpow__')(self, other)
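
With __mod__ and __imod__ removed from the Python-level Tensor class and __rmod__ now delegating to the pybind-exposed _rmod_instance, both orientations of the % operator are expected to keep working. A quick sanity sketch, assuming the operator semantics are unchanged by the dispatch move:

import mindspore as ms

t = ms.Tensor([5.0, 7.0, 9.0])
print(t % 4)   # tensor % scalar: no longer routed through a Python-level __mod__
print(16 % t)  # scalar % tensor: reflected form, handled by __rmod__ above
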
@@ -1163,7 +1147,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  which may be modified by returning a new output gradient.
  - The `hook` should have the following signature:
  hook(grad) -> New output gradient, but can not return None or not set return value.
- - Higher-order differentiation does not support tensor `register_hook`.
  - The following constraints must be met under graph mode:

  - The `hook` must satisfy the syntax constraints of the graph mode.
@@ -1868,6 +1851,10 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):

  self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2

+ .. warning::
+ When deterministic computation is enabled, `index` can not be a non-contiguous Tensor; otherwise,
+ deterministic results can not be guaranteed.
+
  Args:
  dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`self`).
  index (Tensor): The index of `self` to do scatter operation whose data type must
@@ -2110,11 +2097,13 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):

  from mindspore.common.initializer import Zero as ZeroInitializer

+ is_qint4x2 = self.dtype == mstype.qint4x2
  try:
+ dtype_ = mstype.int8 if is_qint4x2 else self.dtype
  if isinstance(self.init, ZeroInitializer):
- data = np.zeros(data_shape, dtype=mstype.dtype_to_nptype(self.dtype))
+ data = np.zeros(data_shape, dtype=mstype._dtype_to_nptype(dtype_)) # pylint:disable=protected-access
  else:
- data = np.ndarray(data_shape, dtype=mstype.dtype_to_nptype(self.dtype))
+ data = np.ndarray(data_shape, dtype=mstype._dtype_to_nptype(dtype_)) # pylint:disable=protected-access
  except ValueError as e:
  msg = "Error shape={}".format(shape)
  logger.critical(msg)
@@ -2127,7 +2116,7 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  self.init = init
  global_seed = get_seed()
  self._np_seed = np.random.get_state()[1][0]
- self.need_set_seed = (slice_index is not None)
+ self.need_set_seed = slice_index is not None
  self._global_seed = global_seed
  self._seed_offset = 1
  if self.need_set_seed:
@@ -2150,7 +2139,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  self.init.seed, _ = self.seed

  with seed_context(self.init):
- if not isinstance(self.init, ZeroInitializer) and slice_num_of_persistent_data == 1:
+ if (not isinstance(self.init, ZeroInitializer) and slice_num_of_persistent_data == 1) \
+ and not is_reboot_node():
  self.init(data)
  self.init = None

@@ -2159,6 +2149,10 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  self.assign_value(TensorPy_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
  else:
  self.assign_value(TensorPy_.from_numpy(data))
+
+ if is_qint4x2:
+ self.set_dtype(mstype.qint4x2)
+
  return self

  def resize(self, *new_shape):
@@ -2495,6 +2489,39 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('uniform_')(self, from_=from_, to=to, generator=generator)

+ def exponential_(self, lambd=1, *, generator=None):
+ r"""
+ Fills `self` tensor with elements drawn from the exponential distribution:
+
+ .. math::
+ f(x) = \lambda \exp(-\lambda x)
+
+ .. warning::
+ - It is only supported on Atlas A2 Training Series Products.
+ - This is an experimental API that is subject to change or deletion.
+
+ Args:
+ lambd (float, optional): Parameters of exponential distribution. Default: ``1``.
+
+ Keyword Args:
+ generator (Generator, optional): a pseudorandom number generator.
+ Default: ``None`` .
+
+ Returns:
+ Tensor, with same shape and same data type with input.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> x = mindspore.Tensor([1, 2, 3.0])
+ >>> out = x.exponential_(2)
+ >>> print(out.shape)
+ (3,)
+ """
+ return tensor_operator_registry.get('exponential_')(self, lambd=lambd, generator=generator)
+
  def sum_to_size(self, *size):
  r"""
  Sum self Tensor to the `size`. `size` must be expandable to the Tensor size.
@@ -2526,17 +2553,19 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  shape_x = x.shape
  if len(size) > x.ndim:
  raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
+ pre_len = 0
+ pre_axis = []
  if len(size) < x.ndim:
- pre_axis = tuple([axis for axis in range(x.ndim - len(size))])
- x = x.sum(pre_axis)
- axes = []
+ pre_len = x.ndim - len(size)
+ pre_axis = [axis for axis in range(pre_len)]
+ axes = pre_axis
  for i, element in enumerate(size):
- if element != x.shape[i] and element == 1:
- axes.append(i)
- elif element != x.shape[i]:
+ if element != x.shape[i + pre_len] and element == 1:
+ axes.append(i + pre_len)
+ elif element != x.shape[i + pre_len]:
  raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
  if axes:
- return x.sum(tuple(axes), keepdims=True)
+ return x.sum(tuple(axes), keepdims=True).reshape(size)
  return x

  def nanmean(self, axis=None, keepdims=False, *, dtype=None):
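
The rewritten sum_to_size above no longer collapses the leading axes up front; it offsets the axis indices by pre_len and reshapes the keepdims result back to `size`, so the output shape now matches `size` exactly even when `size` has fewer dimensions than the tensor. A worked example of that behavior (shapes and values chosen for illustration only):

import numpy as np
import mindspore as ms

x = ms.Tensor(np.ones((2, 3, 4), np.float32))
y = x.sum_to_size(3, 1)
# Leading axis 0 and trailing axis 2 are summed with keepdims, then the result
# is reshaped to the requested size, so y has shape (3, 1) with every entry 2*4.
print(y.shape)   # (3, 1)
print(y[0, 0])   # 8.0
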
@@ -2557,6 +2586,34 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('bernoulli')(self, generator=generator)

+ def bernoulli_(self, p=0.5, *, generator=None):
+ r"""
+ Fills each location of self with an independent sample from Bernoulli(p).
+
+ Args:
+ p (Union[number.Number, Tensor], optional): `p` should either be a scalar or tensor containing
+ probabilities to be used for drawing the binary random number, between ``0`` and ``1`` .
+ If it is a tensor, `p` must be floating point. Default: ``0.5`` .
+
+ Keyword Args:
+ generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+ Default: ``None`` , uses the default pseudorandom number generator.
+
+ Returns:
+ The input tensor.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> x = Tensor([[2, 3, 4], [1, 2, 3]])
+ >>> p = 0.1
+ >>> print(x.bernoulli_(p).shape)
+ (2, 3)
+ """
+ return tensor_operator_registry.get('bernoulli_')(self, p, generator=generator)
+
  def random_(self, from_=0, to=None, *, generator=None):
  r"""
  Fill the tensor with numbers sampled from a discrete uniform distribution over an
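
exponential_ and bernoulli_ join the existing family of in-place random fills (uniform_, random_, normal_), all of which accept an optional mindspore.Generator. A small sketch of seeding them for reproducibility; since both methods are documented above as Ascend-only, this assumes an Ascend device is available:

import mindspore as ms

ms.set_device("Ascend")              # the in-place fills above list only Ascend support

gen = ms.Generator()
gen.manual_seed(42)                  # fixed seed so repeated runs fill identical values

x = ms.Tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
x.exponential_(2.0, generator=gen)   # overwrite x with Exp(lambda=2) samples
x.bernoulli_(0.3, generator=gen)     # then overwrite with Bernoulli(0.3) samples
print(x.shape)                       # (2, 3)
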
@@ -2619,7 +2676,7 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  Note:
  The rank of `self`.

- - Ascend: its rank can be equal to 0 except O2 mode.
+ - Ascend: its rank can be equal to 0 except GE backend.
  - CPU/GPU: its rank should be greater than or eaqual to 1.

  Keyword Args:
@@ -2638,7 +2695,7 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  Raises:
  TypeError: If `self` is not Tensor.
  TypeError: If `as_tuple` is not bool.
- RuntimeError: On GPU or CPU or Ascend O2 mode, if dim of `input` equals to 0.
+ RuntimeError: On GPU or CPU or Ascend GE backend, if dim of `input` equals to 0.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -2801,7 +2858,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  return []
  return self._tolist()

-
  def unsorted_segment_min(self, segment_ids, num_segments):
  r"""
  For details, please refer to :func:`mindspore.ops.unsorted_segment_min`.
@@ -2824,7 +2880,7 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  """
  For details, please refer to :func:`mindspore.ops.unique_consecutive`.
  """
- output, idx, counts =\
+ output, idx, counts = \
  tensor_operator_registry.get("unique_consecutive")(return_inverse, return_counts, dim)(self)
  if return_inverse and return_counts:
  return output, idx, counts
@@ -2911,7 +2967,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.

  Args:
- dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
+ dtype (dtype.Number, bool): The valid data type of the output tensor. Only constant value is allowed.
+ Only Support type bool in PyNative mode.

  Returns:
  Tensor, converted to the specified `dtype`.
@@ -2963,7 +3020,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  return str(self.dtype)
  return self.astype(dtype)

-
  def type_as(self, other):
  r"""
  Returns self tensor cast to the type of the with the input other tensor.
@@ -3006,7 +3062,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  return self
  return TensorPy_.type_as(self, other)

-
  def bool(self):
  r"""
  Converts input tensor dtype to `bool`.
@@ -3502,49 +3557,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('zero_')(self)

- def new_empty(self, size, *, dtype=None, device=None):
- r"""
- Returns an uninitialized Tensor of `size`. Its dtype is specified by `dtype` and its
- device is specified by `device`.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Args:
- size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Only positive integer or
- tuple or list containing positive integers are allowed.
-
- Keyword Args:
- dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`,
- the tensor will have the same dtype as `self`. Default ``None``.
- device (string, optional): The specified device of the output tensor. Support ``CPU`` and ``Ascend``. If
- `device = None`, the tensor will have the same device as `self` and if the device of `self` is not
- defined, the value set by :func:`mindspore.set_device` will be used. Default ``None``.
-
- Returns:
- Tensor, the shape, dtype and device is defined above but with uninitialized data (May be a random value).
-
- Raises:
- TypeError: If `size` is neither an int nor a tuple or list of int.
-
- Supported Platforms:
- ``Ascend``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import Tensor
- >>> x = Tensor([[1, 2, 3], [4, 5, 6]])
- >>> output1 = x.new_empty((2, 3))
- >>> print(output1)
- [[0 0 0]
- [0 0 0]]
- >>> output2 = x.new_empty((2, 3), dtype=mindspore.float64)
- >>> print(output2)
- [[0. 0. 0.]
- [0. 0. 0.]]
- """
- return tensor_operator_registry.get('new_empty')(self, size, dtype, device)
-
  def sign(self):
  r"""
  For details, please refer to :func:`mindspore.ops.sign`.
@@ -3616,46 +3628,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)

- def masked_scatter(self, mask, x):
- r"""
- Updates the value in the "self Tensor" with the `tensor` value according to the mask, and returns a Tensor.
- The shape of `mask` and the "self Tensor" must be the same or `mask` is broadcastable.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Args:
- mask (Tensor[bool]): A bool tensor with a shape broadcastable to the "self Tensor".
- x (Tensor): A tensor with the same data type as the "self Tensor". The number
- of elements must be greater than or equal to the number of True's in `mask`.
-
- Returns:
- Tensor, with the same type and shape as the "self Tensor".
-
- Raises:
- TypeError: If `mask` or `x` is not a Tensor.
- TypeError: If data type of the "self Tensor" is not be supported.
- TypeError: If dtype of `mask` is not bool.
- TypeError: If the dim of the "self Tensor" less than the dim of `mask`.
- ValueError: If `mask` can not be broadcastable to the "self Tensor".
- ValueError: If the number of elements in `x` is less than the number required for the updates.
-
- Supported Platforms:
- ``Ascend`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> import mindspore
- >>> from mindspore import Tensor
- >>> x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
- >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
- >>> tensor = Tensor(np.array([5., 6., 7.]), mindspore.float32)
- >>> output = x.masked_scatter(mask, tensor)
- >>> print(output)
- [5. 6. 3. 7.]
- """
- return tensor_operator_registry.get('masked_scatter')()(self, mask, x)
-
  def index_put(self, indices, values, accumulate=False):
  r"""
  Based on the indices in `indices`, replace the corresponding elements in Tensor `self`
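
Both new_empty and masked_scatter lose their Python-level definitions here; given the large additions to the auto-generated bindings elsewhere in the file list (gen_ops_prim.py, functional_overload.py), they are presumably still reachable as Tensor methods. A quick, non-authoritative check one could run against the 2.7.0 wheel:

import mindspore as ms

x = ms.Tensor([[1.0, 2.0], [3.0, 4.0]])
# The Python defs were removed in this diff, so the attributes should now
# resolve through the generated/overloaded bindings if they remain supported.
print(hasattr(x, "new_empty"), hasattr(x, "masked_scatter"))
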
@@ -3796,9 +3768,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  raise ValueError(f"The type of 'blocking' must be bool, but got {blocking}")
  if to not in ("Ascend", "GPU", "CPU"):
  raise ValueError(f"The value of 'to' must be one of ['Ascend', 'GPU', 'CPU'], but got {to}")
- mode = context.get_context("mode")
- if mode != context.PYNATIVE_MODE:
- raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
  return TensorPy_.move_to(self, to, blocking)

  def _offload(self):
@@ -3833,6 +3802,23 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  """
  return TensorPy_._data_ptr(self)

+ def data_ptr(self):
+ r"""
+ Get the data ptr address of tensor, for CPU is host address, GPU/NPU is device address.
+ User should know how to use the data ptr address.
+ Note: this api is an experimental api, users need understatnd it before use.
+
+ Supported Platforms:
+ ``CPU/GPU/Ascend``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> x = ms.Tensor([1, 2, 3], ms.int64)
+ >>> data_ptr = x.data_ptr()
+ """
+ return TensorPy_._data_ptr(self)
+
  def normal_(self, mean=0, std=1, *, generator=None):
  r"""
  Update the `self` tensor in place by generating random numbers sampled from the normal
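
The new public data_ptr wrapper above simply forwards to the existing private _data_ptr. A minimal sketch of how the returned address might be used, assuming it comes back as a plain Python int as the docstring example suggests:

import mindspore as ms

x = ms.Tensor([1, 2, 3], ms.int64)
ptr = x.data_ptr()      # host address for CPU tensors, device address on GPU/NPU
print(hex(ptr))

# The address is only meaningful while the tensor's storage is alive, so keep a
# reference to `x` for as long as any external code dereferences the pointer.
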
@@ -3872,13 +3858,13 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('normal_')(self, mean=mean, std=std, generator=generator)

-
  def triangular_solve(self, A, upper=True, transpose=False, unitriangular=False):
  r"""
  For details, please refer to :func:`mindspore.mint.triangular_solve`.
  """
  return tensor_operator_registry.get('triangular_solve')(self, A, upper, transpose, unitriangular)

+
  def _vm_compare(*args):
  """Implement `vm_compare` for tensor."""
  if args:
@@ -3951,9 +3937,9 @@ def _check_astype_and_convert(dtype):
  if dtype.lower() not in all_types:
  raise TypeError(f"For Tensor.astype, the string input type must be one of {all_types}, "
  f"but got '{dtype}'.")
- dtype = mstype.pytype_to_dtype(np.dtype(dtype.lower()))
+ dtype = mstype._pytype_to_dtype(np.dtype(dtype.lower())) # pylint:disable=protected-access
  elif isinstance(dtype, type):
- dtype = mstype.pytype_to_dtype(dtype)
+ dtype = mstype._pytype_to_dtype(dtype) # pylint:disable=protected-access
  elif dtype not in mstype.number_type + (mstype.bool_,):
  raise TypeError(
  f"For Tensor.astype, the input type must be one of {list(mstype.number_type + (mstype.bool_,) + np_types)},"
@@ -25,7 +25,7 @@ from mindspore import context
  from mindspore.parallel._ps_context import _is_role_sched, _is_ps_mode,\
  _get_ps_context
  from mindspore import log as logger
- from mindspore._c_expression import CollectiveManager, set_cluster_exit_with_exception, MSContext
+ from mindspore._c_expression import CollectiveManager, set_cluster_exit_with_exception, MSContext, GroupOptions
  from mindspore.common._utils import load_lib

  HCCL_LIB = 'libhccl_plugin.so'
@@ -470,14 +470,25 @@ def _get_group_ranks(group):


  @check_parameter_available
- def _create_group_helper(group, rank_ids):
+ def _create_group_helper(group, rank_ids, options=None):
  """
  The Helper to do create_group.

  Args:
  group (str): The communication group.
  rank_ids (list): Rank ids in the group.
- backend (str): The backend, like "hccl".
+ options (GroupOptions, optional): Additional communication group configuration parameters.
+ The backend will automatically select supported parameters and apply them during group
+ initialization. i.e. for the ``HCCL`` backend, ``hccl_config`` can be specified so that
+ group initialization configurations can be applied. Default is ``None``.
+
+ `GroupOptions` is defined as a class that can be instantiated as a python object.
+
+ .. code-block::
+
+ GroupOptions {
+ hccl_config(dict)
+ }

  Raises:
  TypeError: If rank_ids is not a list.
@@ -499,10 +510,15 @@ def _create_group_helper(group, rank_ids):
  "but got 'rank_ids' size : {}.".format(len(rank_ids)))
  if len(rank_ids) - len(list(set(rank_ids))) > 0:
  raise ValueError("List rank_ids in Group {} has duplicate data!".format(group))
+ if options is None:
+ options = GroupOptions()
+ if not isinstance(options, GroupOptions):
+ raise TypeError("For 'create_group', the argument 'options' must be type of GroupOptions, "
+ "but got 'options' type : {}.".format(type(options)))
  if _hccl_test():
  hccl.create_group(group, rank_size, rank_ids)
  else:
- result = CollectiveManager.get_instance().create_group(group, rank_ids)
+ result = CollectiveManager.get_instance().create_group(group, rank_ids, options)
  if not result:
  raise RuntimeError("Failed to create communication group for {} with rank ids {}. "
  "If NCCL is used, 'export NCCL_DEBUG=INFO' "
@@ -554,3 +570,29 @@ def _get_group_map():
  def _wait_all_comm_init():
  """Wait for all communicators to be initialized."""
  return CollectiveManager.get_instance().wait_all_comm_init()
+
+
+ def _remove_group_info(group_name):
+ """
+ Remove group info after destroy group by user when using arf.
+
+ Args:
+ group_name (str): The user communication group name.
+
+ """
+ CollectiveManager.get_instance().remove_group_info(group_name)
+
+
+ def _comm_switch_nic_helper(global_ranks: list, use_backup: list) -> bool:
+ """Switch network interface card between the primary and the secondary NIC.
+
+ Args:
+ global_ranks (list[int], tuple[int]): list of integers. The global rank ids that need switch network interface .
+ use_backup (list[bool], tuple[int]): list of bool. For each rank id in global_ranks, determine whether to use
+ the backup network interface card. True means use, False means not use.
+
+ Returns:
+ bool, whether the network card switch is successful.
+ If one fails, return False. If all are successful, return True.
+ """
+ return CollectiveManager.get_instance().comm_switch_nic(global_ranks, use_backup)