mindspore 2.6.0__cp310-cp310-win_amd64.whl → 2.7.0__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (455)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +2 -2
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +42 -11
  9. mindspore/_extends/builtin_operations.py +3 -3
  10. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  11. mindspore/_extends/optimize/cell_utils.py +96 -0
  12. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +3 -3
  15. mindspore/_extends/parse/compile_config.py +44 -22
  16. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
  17. mindspore/_extends/parse/parser.py +64 -83
  18. mindspore/_extends/parse/resources.py +39 -0
  19. mindspore/_extends/parse/standard_method.py +47 -14
  20. mindspore/_extends/parse/trope.py +8 -1
  21. mindspore/_extends/pijit/__init__.py +1 -2
  22. mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
  23. mindspore/amp.py +4 -22
  24. mindspore/atlprov.dll +0 -0
  25. mindspore/avcodec-59.dll +0 -0
  26. mindspore/avdevice-59.dll +0 -0
  27. mindspore/avfilter-8.dll +0 -0
  28. mindspore/avformat-59.dll +0 -0
  29. mindspore/avutil-57.dll +0 -0
  30. mindspore/boost/adasum.py +1 -1
  31. mindspore/boost/boost_cell_wrapper.py +4 -4
  32. mindspore/c1.dll +0 -0
  33. mindspore/c1xx.dll +0 -0
  34. mindspore/c2.dll +0 -0
  35. mindspore/common/__init__.py +43 -12
  36. mindspore/common/_grad_function.py +2 -1
  37. mindspore/common/_pijit_context.py +28 -7
  38. mindspore/common/_stub_tensor.py +1 -209
  39. mindspore/common/_tensor_cpp_method.py +1 -1
  40. mindspore/common/_tensor_docs.py +177 -52
  41. mindspore/common/_utils.py +9 -1
  42. mindspore/common/api.py +338 -208
  43. mindspore/common/dtype.py +108 -57
  44. mindspore/common/dump.py +11 -16
  45. mindspore/common/dynamic_shape/__init__.py +0 -0
  46. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
  47. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  48. mindspore/common/file_system.py +59 -9
  49. mindspore/common/generator.py +2 -3
  50. mindspore/common/hook_handle.py +33 -5
  51. mindspore/common/jit_config.py +1 -1
  52. mindspore/common/jit_trace.py +84 -105
  53. mindspore/common/np_dtype.py +3 -3
  54. mindspore/common/parameter.py +27 -29
  55. mindspore/common/recompute.py +5 -7
  56. mindspore/common/sparse_tensor.py +0 -3
  57. mindspore/common/symbol.py +0 -1
  58. mindspore/common/tensor.py +84 -133
  59. mindspore/communication/_comm_helper.py +46 -4
  60. mindspore/communication/management.py +79 -7
  61. mindspore/context.py +47 -38
  62. mindspore/dataset/__init__.py +1 -1
  63. mindspore/dataset/audio/transforms.py +1 -1
  64. mindspore/dataset/core/config.py +38 -4
  65. mindspore/dataset/engine/datasets.py +350 -322
  66. mindspore/dataset/engine/datasets_user_defined.py +69 -23
  67. mindspore/dataset/engine/iterators.py +2 -2
  68. mindspore/dataset/engine/obs/config_loader.py +2 -2
  69. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  70. mindspore/dataset/transforms/c_transforms.py +2 -2
  71. mindspore/dataset/transforms/py_transforms.py +7 -3
  72. mindspore/dataset/transforms/transforms.py +10 -6
  73. mindspore/dataset/vision/__init__.py +1 -1
  74. mindspore/dataset/vision/py_transforms.py +8 -8
  75. mindspore/dataset/vision/transforms.py +17 -5
  76. mindspore/dataset/vision/utils.py +632 -21
  77. mindspore/dataset/vision/validators.py +1 -0
  78. mindspore/device_context/ascend/device.py +1 -1
  79. mindspore/device_context/ascend/op_tuning.py +35 -1
  80. mindspore/device_context/gpu/__init__.py +2 -2
  81. mindspore/device_context/gpu/device.py +1 -1
  82. mindspore/device_context/gpu/op_precision.py +4 -2
  83. mindspore/device_context/gpu/op_tuning.py +6 -3
  84. mindspore/device_manager.py +16 -9
  85. mindspore/dnnl.dll +0 -0
  86. mindspore/dpcmi.dll +0 -0
  87. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +5 -4
  88. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  89. mindspore/experimental/optim/adadelta.py +13 -20
  90. mindspore/experimental/optim/adagrad.py +15 -22
  91. mindspore/experimental/optim/adam.py +17 -24
  92. mindspore/experimental/optim/adamax.py +14 -22
  93. mindspore/experimental/optim/adamw.py +28 -34
  94. mindspore/experimental/optim/asgd.py +15 -25
  95. mindspore/experimental/optim/lr_scheduler.py +27 -45
  96. mindspore/experimental/optim/nadam.py +14 -24
  97. mindspore/experimental/optim/optimizer.py +13 -23
  98. mindspore/experimental/optim/radam.py +18 -24
  99. mindspore/experimental/optim/rmsprop.py +14 -25
  100. mindspore/experimental/optim/rprop.py +15 -26
  101. mindspore/experimental/optim/sgd.py +9 -19
  102. mindspore/hal/__init__.py +4 -4
  103. mindspore/hal/contiguous_tensors_handle.py +2 -2
  104. mindspore/hal/memory.py +1 -0
  105. mindspore/include/api/cell.h +65 -5
  106. mindspore/include/api/cfg.h +24 -7
  107. mindspore/include/api/context.h +1 -0
  108. mindspore/include/api/delegate.h +10 -2
  109. mindspore/include/api/dual_abi_helper.h +100 -19
  110. mindspore/include/api/graph.h +14 -1
  111. mindspore/include/api/kernel.h +16 -3
  112. mindspore/include/api/kernel_api.h +9 -1
  113. mindspore/include/api/metrics/accuracy.h +9 -0
  114. mindspore/include/api/model.h +8 -1
  115. mindspore/include/api/model_group.h +4 -0
  116. mindspore/include/api/model_parallel_runner.h +2 -0
  117. mindspore/include/api/status.h +48 -10
  118. mindspore/include/api/types.h +8 -3
  119. mindspore/include/c_api/model_c.h +0 -58
  120. mindspore/include/c_api/tensor_c.h +0 -26
  121. mindspore/include/dataset/constants.h +9 -0
  122. mindspore/include/dataset/vision_ascend.h +1 -1
  123. mindspore/jpeg62.dll +0 -0
  124. mindspore/mindrecord/tools/cifar10.py +61 -11
  125. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  126. mindspore/mindspore_backend_common.dll +0 -0
  127. mindspore/mindspore_backend_manager.dll +0 -0
  128. mindspore/mindspore_common.dll +0 -0
  129. mindspore/mindspore_core.dll +0 -0
  130. mindspore/mindspore_cpu_res_manager.dll +0 -0
  131. mindspore/mindspore_dump.dll +0 -0
  132. mindspore/mindspore_frontend.dll +0 -0
  133. mindspore/mindspore_glog.dll +0 -0
  134. mindspore/mindspore_memory_pool.dll +0 -0
  135. mindspore/mindspore_ms_backend.dll +0 -0
  136. mindspore/mindspore_ops.dll +0 -0
  137. mindspore/mindspore_ops_host.dll +0 -0
  138. mindspore/mindspore_ops_kernel_common.dll +0 -0
  139. mindspore/mindspore_profiler.dll +0 -0
  140. mindspore/mindspore_pyboost.dll +0 -0
  141. mindspore/mindspore_pynative.dll +0 -0
  142. mindspore/mindspore_res_manager.dll +0 -0
  143. mindspore/mindspore_runtime_pipeline.dll +0 -0
  144. mindspore/mint/__init__.py +4 -44
  145. mindspore/mint/distributed/__init__.py +5 -0
  146. mindspore/mint/distributed/distributed.py +425 -19
  147. mindspore/mint/nn/__init__.py +1 -1
  148. mindspore/mint/nn/functional.py +53 -6
  149. mindspore/mint/nn/layer/_functions.py +163 -294
  150. mindspore/mint/nn/layer/activation.py +8 -6
  151. mindspore/mint/nn/layer/conv.py +125 -101
  152. mindspore/mint/nn/layer/normalization.py +11 -25
  153. mindspore/mint/optim/adam.py +19 -18
  154. mindspore/mint/optim/adamw.py +14 -8
  155. mindspore/mint/optim/sgd.py +5 -5
  156. mindspore/msobj140.dll +0 -0
  157. mindspore/mspdb140.dll +0 -0
  158. mindspore/mspdbcore.dll +0 -0
  159. mindspore/mspdbst.dll +0 -0
  160. mindspore/mspft140.dll +0 -0
  161. mindspore/msvcdis140.dll +0 -0
  162. mindspore/msvcp140_1.dll +0 -0
  163. mindspore/msvcp140_2.dll +0 -0
  164. mindspore/msvcp140_atomic_wait.dll +0 -0
  165. mindspore/msvcp140_codecvt_ids.dll +0 -0
  166. mindspore/nn/cell.py +488 -620
  167. mindspore/nn/grad/cell_grad.py +11 -12
  168. mindspore/nn/layer/activation.py +36 -36
  169. mindspore/nn/layer/basic.py +74 -77
  170. mindspore/nn/layer/channel_shuffle.py +4 -4
  171. mindspore/nn/layer/combined.py +4 -2
  172. mindspore/nn/layer/conv.py +86 -85
  173. mindspore/nn/layer/dense.py +9 -7
  174. mindspore/nn/layer/embedding.py +50 -52
  175. mindspore/nn/layer/image.py +38 -40
  176. mindspore/nn/layer/math.py +111 -112
  177. mindspore/nn/layer/normalization.py +56 -44
  178. mindspore/nn/layer/pooling.py +58 -63
  179. mindspore/nn/layer/rnn_cells.py +33 -33
  180. mindspore/nn/layer/rnns.py +56 -56
  181. mindspore/nn/layer/thor_layer.py +74 -73
  182. mindspore/nn/layer/transformer.py +11 -1
  183. mindspore/nn/learning_rate_schedule.py +20 -20
  184. mindspore/nn/loss/loss.py +79 -81
  185. mindspore/nn/optim/adam.py +2 -4
  186. mindspore/nn/optim/adasum.py +2 -2
  187. mindspore/nn/optim/lamb.py +1 -3
  188. mindspore/nn/optim/optimizer.py +1 -1
  189. mindspore/nn/optim/tft_wrapper.py +2 -3
  190. mindspore/nn/optim/thor.py +2 -2
  191. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  192. mindspore/nn/probability/distribution/exponential.py +2 -1
  193. mindspore/nn/probability/distribution/poisson.py +2 -1
  194. mindspore/nn/sparse/sparse.py +3 -3
  195. mindspore/nn/wrap/cell_wrapper.py +73 -42
  196. mindspore/nn/wrap/grad_reducer.py +37 -52
  197. mindspore/nn/wrap/loss_scale.py +72 -74
  198. mindspore/numpy/array_creations.py +7 -7
  199. mindspore/numpy/fft.py +1 -1
  200. mindspore/numpy/math_ops.py +1 -1
  201. mindspore/numpy/utils_const.py +1 -1
  202. mindspore/opencv_core452.dll +0 -0
  203. mindspore/opencv_imgcodecs452.dll +0 -0
  204. mindspore/opencv_imgproc452.dll +0 -0
  205. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  206. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  207. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  208. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  209. mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
  210. mindspore/ops/_vmap/vmap_array_ops.py +6 -13
  211. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  212. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +29 -10
  213. mindspore/ops/auto_generate/gen_extend_func.py +5 -55
  214. mindspore/ops/auto_generate/gen_ops_def.py +753 -273
  215. mindspore/ops/auto_generate/gen_ops_prim.py +1687 -958
  216. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  217. mindspore/ops/composite/__init__.py +10 -0
  218. mindspore/ops/composite/base.py +9 -5
  219. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  220. mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
  221. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  222. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  223. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  224. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  225. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  226. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  227. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  228. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  229. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  230. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  231. mindspore/ops/function/__init__.py +4 -1
  232. mindspore/ops/function/_add_attr_func.py +11 -6
  233. mindspore/ops/function/array_func.py +17 -100
  234. mindspore/ops/function/debug_func.py +8 -5
  235. mindspore/ops/function/grad/grad_func.py +5 -13
  236. mindspore/ops/function/math_func.py +65 -399
  237. mindspore/ops/function/nn_func.py +44 -61
  238. mindspore/ops/function/other_func.py +4 -1
  239. mindspore/ops/function/random_func.py +31 -4
  240. mindspore/ops/functional.py +2 -3
  241. mindspore/ops/functional_overload.py +486 -18
  242. mindspore/ops/op_info_register.py +21 -0
  243. mindspore/ops/operations/__init__.py +5 -2
  244. mindspore/ops/operations/_custom_ops_utils.py +675 -8
  245. mindspore/ops/operations/_inner_ops.py +14 -18
  246. mindspore/ops/operations/_sequence_ops.py +1 -1
  247. mindspore/ops/operations/array_ops.py +4 -50
  248. mindspore/ops/operations/comm_ops.py +186 -41
  249. mindspore/ops/operations/custom_ops.py +244 -175
  250. mindspore/ops/operations/debug_ops.py +55 -4
  251. mindspore/ops/operations/image_ops.py +13 -13
  252. mindspore/ops/operations/manually_defined/ops_def.py +27 -28
  253. mindspore/ops/operations/math_ops.py +8 -9
  254. mindspore/ops/operations/nn_ops.py +6 -7
  255. mindspore/ops/primitive.py +9 -20
  256. mindspore/ops/tensor_method.py +52 -11
  257. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  258. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  259. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  260. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  261. mindspore/ops_generate/common/base_generator.py +14 -0
  262. mindspore/ops_generate/common/gen_constants.py +7 -2
  263. mindspore/ops_generate/common/gen_utils.py +0 -19
  264. mindspore/ops_generate/common/op_proto.py +11 -4
  265. mindspore/ops_generate/common/template.py +88 -11
  266. mindspore/ops_generate/gen_ops.py +1 -1
  267. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  268. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  269. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  270. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  271. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  272. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  273. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
  274. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  275. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  276. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  277. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  278. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  279. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  280. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  281. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  282. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  283. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  284. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  285. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  286. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  287. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  288. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  289. mindspore/parallel/_auto_parallel_context.py +9 -17
  290. mindspore/parallel/_cell_wrapper.py +106 -40
  291. mindspore/parallel/_parallel_serialization.py +4 -3
  292. mindspore/parallel/_ps_context.py +4 -6
  293. mindspore/parallel/_tensor.py +167 -12
  294. mindspore/parallel/_transformer/moe.py +1 -1
  295. mindspore/parallel/_transformer/transformer.py +17 -12
  296. mindspore/parallel/_utils.py +5 -11
  297. mindspore/parallel/auto_parallel.py +33 -12
  298. mindspore/parallel/checkpoint_convert.py +3 -3
  299. mindspore/parallel/checkpoint_transform.py +5 -1
  300. mindspore/parallel/cluster/process_entity/_api.py +88 -49
  301. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  302. mindspore/parallel/cluster/run.py +48 -7
  303. mindspore/parallel/function/__init__.py +8 -1
  304. mindspore/parallel/function/reshard_func.py +7 -6
  305. mindspore/parallel/nn/__init__.py +15 -2
  306. mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
  307. mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
  308. mindspore/parallel/shard.py +9 -23
  309. mindspore/parallel/transform_safetensors.py +468 -174
  310. mindspore/pgodb140.dll +0 -0
  311. mindspore/pgort140.dll +0 -0
  312. mindspore/profiler/__init__.py +2 -1
  313. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  314. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  315. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
  316. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  317. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  318. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  319. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  320. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  321. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  322. mindspore/profiler/analysis/task_manager.py +1 -1
  323. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  324. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  325. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
  326. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
  327. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  328. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  329. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  330. mindspore/profiler/common/constant.py +16 -0
  331. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  332. mindspore/profiler/common/path_manager.py +9 -0
  333. mindspore/profiler/common/profiler_context.py +50 -29
  334. mindspore/profiler/common/profiler_info.py +0 -16
  335. mindspore/profiler/common/profiler_meta_data.py +1 -0
  336. mindspore/profiler/common/profiler_op_analyse.py +239 -0
  337. mindspore/profiler/common/profiler_output_path.py +23 -8
  338. mindspore/profiler/common/profiler_parameters.py +128 -35
  339. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  340. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  341. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  342. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  343. mindspore/profiler/dynamic_profiler.py +374 -338
  344. mindspore/profiler/envprofiler.py +42 -12
  345. mindspore/profiler/experimental_config.py +112 -7
  346. mindspore/profiler/mstx.py +33 -12
  347. mindspore/profiler/platform/__init__.py +2 -3
  348. mindspore/profiler/platform/cpu_profiler.py +10 -4
  349. mindspore/profiler/platform/npu_profiler.py +30 -20
  350. mindspore/profiler/profiler.py +218 -154
  351. mindspore/profiler/profiler_action_controller.py +65 -77
  352. mindspore/profiler/profiler_interface.py +2 -2
  353. mindspore/profiler/schedule.py +10 -4
  354. mindspore/rewrite/common/config.py +1 -0
  355. mindspore/rewrite/common/namer.py +1 -0
  356. mindspore/rewrite/common/namespace.py +1 -0
  357. mindspore/rewrite/node/node.py +31 -11
  358. mindspore/rewrite/parsers/assign_parser.py +1 -1
  359. mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
  360. mindspore/run_check/_check_version.py +7 -10
  361. mindspore/runtime/__init__.py +8 -6
  362. mindspore/runtime/event.py +10 -4
  363. mindspore/runtime/executor.py +87 -45
  364. mindspore/runtime/memory.py +22 -30
  365. mindspore/runtime/thread_bind_core.py +299 -165
  366. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  367. mindspore/swresample-4.dll +0 -0
  368. mindspore/swscale-6.dll +0 -0
  369. mindspore/tbbmalloc.dll +0 -0
  370. mindspore/tinyxml2.dll +0 -0
  371. mindspore/train/_utils.py +9 -5
  372. mindspore/train/amp.py +43 -23
  373. mindspore/train/callback/__init__.py +5 -5
  374. mindspore/train/callback/_callback.py +2 -1
  375. mindspore/train/callback/_checkpoint.py +4 -14
  376. mindspore/train/callback/_flops_collector.py +11 -7
  377. mindspore/train/callback/_landscape.py +0 -1
  378. mindspore/train/callback/_train_fault_tolerance.py +72 -18
  379. mindspore/train/data_sink.py +15 -6
  380. mindspore/train/dataset_helper.py +14 -5
  381. mindspore/train/model.py +49 -47
  382. mindspore/train/serialization.py +168 -126
  383. mindspore/train/summary/summary_record.py +13 -2
  384. mindspore/train/train_thor/model_thor.py +2 -2
  385. mindspore/turbojpeg.dll +0 -0
  386. mindspore/utils/__init__.py +3 -2
  387. mindspore/utils/dryrun.py +0 -6
  388. mindspore/utils/runtime_execution_order_check.py +162 -78
  389. mindspore/utils/sdc_detect.py +68 -0
  390. mindspore/utils/utils.py +14 -17
  391. mindspore/vcmeta.dll +0 -0
  392. mindspore/vcruntime140.dll +0 -0
  393. mindspore/vcruntime140_1.dll +0 -0
  394. mindspore/version.py +1 -1
  395. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
  396. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/RECORD +400 -439
  397. mindspore/_deprecated/jit.py +0 -198
  398. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  399. mindspore/communication/_hccl_management.py +0 -297
  400. mindspore/experimental/es/embedding_service.py +0 -891
  401. mindspore/experimental/es/embedding_service_layer.py +0 -581
  402. mindspore/profiler/common/validator/__init__.py +0 -14
  403. mindspore/profiler/common/validator/validate_path.py +0 -84
  404. mindspore/profiler/parser/__init__.py +0 -14
  405. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  406. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  407. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  408. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  409. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  410. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  411. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  412. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  413. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  414. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  415. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  416. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  417. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  418. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  419. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  420. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  421. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  422. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  423. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  424. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  425. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  426. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  427. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  428. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  429. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  430. mindspore/profiler/parser/container.py +0 -229
  431. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  432. mindspore/profiler/parser/flops_parser.py +0 -531
  433. mindspore/profiler/parser/framework_enum.py +0 -111
  434. mindspore/profiler/parser/framework_parser.py +0 -464
  435. mindspore/profiler/parser/framework_struct.py +0 -61
  436. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  437. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  438. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  439. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  440. mindspore/profiler/parser/hccl_parser.py +0 -573
  441. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  442. mindspore/profiler/parser/integrator.py +0 -526
  443. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  444. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  445. mindspore/profiler/parser/minddata_parser.py +0 -186
  446. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  447. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  448. mindspore/profiler/parser/optime_parser.py +0 -250
  449. mindspore/profiler/parser/profiler_info.py +0 -213
  450. mindspore/profiler/parser/step_trace_parser.py +0 -666
  451. mindspore/utils/hooks.py +0 -81
  452. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  453. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
  454. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
  455. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
@@ -98,7 +98,6 @@ class RowTensor(RowTensorInner):
 
     .. warning::
         - This is an experimental API that is subjected to change or deletion.
-        - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
 
     Args:
         indices (Tensor): A 1-D integer Tensor of shape :math:`(d_0)` . Default: ``None``.
@@ -232,7 +231,6 @@ class COOTensor(COOTensor_):
 
     .. warning::
         - This is an experimental API that is subject to change or deletion.
-        - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
         - Currently, duplicate coordinates in the indices will not be coalesced.
           If the indices contain out-of-bound values, the result will be undefined.
 
@@ -681,7 +679,6 @@ class CSRTensor(CSRTensor_):
 
     .. warning::
         - This is an experimental API that is subjected to change.
-        - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
         - If the values given by `indptr` or `indices` are invalid, the results may be undefined. Invalid values include
           when the length of `values` or `indices` exceeds the range indicated by `indptr`, and when the columns
           indicated by `indices` are repeated on the same row.
@@ -104,7 +104,6 @@ class Symbol:
         if not isinstance(unique, bool):
             raise TypeError(f"For 'Symbol', the argument 'unique' must be bool, but got {type(unique)}")
 
-    # pylint: disable=missing-docstring
     def to_dict(self):
         # Convert the symbolic info to dictionary.
         # This method is not necessary to show in public api document, use comment instead of docstring.
@@ -32,11 +32,12 @@ from mindspore.common.hook_handle import _TensorHookHandle
 from mindspore.common._utils import get_slice_num
 from mindspore.common._register_for_tensor import tensor_operator_registry
 from mindspore._c_expression import TensorPy as TensorPy_
+from mindspore._c_expression import _rmod_instance
 from mindspore import _checkparam as validator
 from mindspore._checkparam import is_stub_tensor, check_hook_fn
 from mindspore._check_jit_forbidden_api import jit_forbidden_register
 from mindspore.common.symbol import Symbol
-
+from mindspore._c_expression import is_reboot_node
 
 np_types = (np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
@@ -93,13 +94,6 @@ def _set_symbolic_shape(shape):
     return shape, symbolic_shape
 
 
-def _convert_stub_tensor(input_data):
-    """Convert input to stub tensor"""
-    if not is_stub_tensor(input_data):
-        return input_data
-    return input_data.stub_sync()
-
-
 def _convert_numpy_array(input_data):
     """Convert inpyt to numpy array"""
     if not isinstance(input_data, np_types):
@@ -144,8 +138,6 @@ def _init(input_data=None, dtype=None, shape=None, init=None, const_arg=False, d
         _cast = tensor_operator_registry.get("cast")
         input_data = _cast(input_data, dtype)
 
-    input_data = _convert_stub_tensor(input_data)
-
     if input_data is None and shape is None and init is None and dtype is not None:
         validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
         logger.warning(f"For 'Tensor', if 'dtype' is not None, 'input_data', 'shape' or 'init' must not be None.")
@@ -325,7 +317,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
             return True
         return NotImplemented
 
-
     def __deepcopy__(self, memodict):
         new_obj = Tensor(self)
         new_obj.init = self.init
@@ -379,9 +370,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         try:
             data = self._item()
             return int(data)
-        except ValueError:
-            raise ValueError("Only one element tensors can be converted to Python scalars")
-
+        except ValueError as e:
+            raise ValueError("Only one element tensors can be converted to Python scalars") from e
 
     def __float__(self):
         try:
@@ -396,8 +386,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
             if not isinstance(data, (int, bool)):
                 raise ValueError
             return int(data)
-        except ValueError:
-            raise ValueError("Only integer tensors of a single element can be converted to an index.")
+        except ValueError as e:
+            raise ValueError("Only integer tensors of a single element can be converted to an index.") from e
 
     def __pos__(self):
         return self
@@ -426,14 +416,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
     def __rtruediv__(self, other):
         return tensor_operator_registry.get('__truediv__')(other, self)
 
-    def __mod__(self, other):
-        return tensor_operator_registry.get('__mod__')(self, other)
-
     def __rmod__(self, other):
-        return tensor_operator_registry.get('__mod__')(other, self)
-
-    def __imod__(self, other):
-        return self.__mod__(other)
+        return _rmod_instance(other, self)
 
     def __rpow__(self, other):
         return tensor_operator_registry.get('__rpow__')(self, other)
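The reflected-modulo path above is the one Python falls back to when the left operand is not a Tensor; a minimal sketch of what it covers (illustrative, assuming a MindSpore 2.7.0 runtime):

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([2, 3, 4], ms.int32)
    # int.__mod__ cannot handle a Tensor, so Python dispatches to Tensor.__rmod__,
    # which 2.7.0 routes through the _rmod_instance binding shown in the hunk above.
    y = 7 % x
    print(y)  # expected: [1 1 3]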
@@ -1163,7 +1147,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
               which may be modified by returning a new output gradient.
             - The `hook` should have the following signature:
               hook(grad) -> New output gradient, but can not return None or not set return value.
-            - Higher-order differentiation does not support tensor `register_hook`.
             - The following constraints must be met under graph mode:
 
                 - The `hook` must satisfy the syntax constraints of the graph mode.
@@ -1868,6 +1851,10 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
 
             self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
 
+        .. warning::
+            When deterministic computation is enabled, `index` can not be a non-contiguous Tensor; otherwise,
+            deterministic results can not be guaranteed.
+
         Args:
             dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`self`).
             index (Tensor): The index of `self` to do scatter operation whose data type must
@@ -2110,11 +2097,13 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
 
         from mindspore.common.initializer import Zero as ZeroInitializer
 
+        is_qint4x2 = self.dtype == mstype.qint4x2
         try:
+            dtype_ = mstype.int8 if is_qint4x2 else self.dtype
             if isinstance(self.init, ZeroInitializer):
-                data = np.zeros(data_shape, dtype=mstype.dtype_to_nptype(self.dtype))
+                data = np.zeros(data_shape, dtype=mstype._dtype_to_nptype(dtype_)) # pylint:disable=protected-access
             else:
-                data = np.ndarray(data_shape, dtype=mstype.dtype_to_nptype(self.dtype))
+                data = np.ndarray(data_shape, dtype=mstype._dtype_to_nptype(dtype_)) # pylint:disable=protected-access
         except ValueError as e:
             msg = "Error shape={}".format(shape)
             logger.critical(msg)
@@ -2127,7 +2116,7 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         self.init = init
         global_seed = get_seed()
         self._np_seed = np.random.get_state()[1][0]
-        self.need_set_seed = (slice_index is not None)
+        self.need_set_seed = slice_index is not None
         self._global_seed = global_seed
         self._seed_offset = 1
         if self.need_set_seed:
@@ -2150,7 +2139,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
                 self.init.seed, _ = self.seed
 
             with seed_context(self.init):
-                if not isinstance(self.init, ZeroInitializer) and slice_num_of_persistent_data == 1:
+                if (not isinstance(self.init, ZeroInitializer) and slice_num_of_persistent_data == 1) \
+                        and not is_reboot_node():
                     self.init(data)
             self.init = None
 
@@ -2159,6 +2149,10 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
             self.assign_value(TensorPy_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
         else:
             self.assign_value(TensorPy_.from_numpy(data))
+
+        if is_qint4x2:
+            self.set_dtype(mstype.qint4x2)
+
         return self
 
     def resize(self, *new_shape):
@@ -2495,7 +2489,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('uniform_')(self, from_=from_, to=to, generator=generator)
 
-
     def exponential_(self, lambd=1, *, generator=None):
         r"""
         Fills `self` tensor with elements drawn from the exponential distribution:
@@ -2529,7 +2522,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('exponential_')(self, lambd=lambd, generator=generator)
 
-
     def sum_to_size(self, *size):
         r"""
         Sum self Tensor to the `size`. `size` must be expandable to the Tensor size.
@@ -2561,17 +2553,19 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         shape_x = x.shape
         if len(size) > x.ndim:
             raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
+        pre_len = 0
+        pre_axis = []
         if len(size) < x.ndim:
-            pre_axis = tuple([axis for axis in range(x.ndim - len(size))])
-            x = x.sum(pre_axis)
-        axes = []
+            pre_len = x.ndim - len(size)
+            pre_axis = [axis for axis in range(pre_len)]
+        axes = pre_axis
         for i, element in enumerate(size):
-            if element != x.shape[i] and element == 1:
-                axes.append(i)
-            elif element != x.shape[i]:
+            if element != x.shape[i + pre_len] and element == 1:
+                axes.append(i + pre_len)
+            elif element != x.shape[i + pre_len]:
                 raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
         if axes:
-            return x.sum(tuple(axes), keepdims=True)
+            return x.sum(tuple(axes), keepdims=True).reshape(size)
         return x
 
     def nanmean(self, axis=None, keepdims=False, *, dtype=None):
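To make the effect of the reworked reduction concrete, a small sketch of `sum_to_size` on a rank-3 input (illustrative; the trailing `reshape(size)` added above is what makes the output shape match the requested `size`):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    x = Tensor(np.arange(24, dtype=np.float32).reshape(2, 3, 4))
    # (2, 3, 4) summed down to (3, 1): the leading axis and the size-1 axis are
    # reduced, then the result is reshaped to exactly (3, 1).
    y = x.sum_to_size(3, 1)
    print(y.shape)  # expected: (3, 1)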
@@ -2592,6 +2586,34 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('bernoulli')(self, generator=generator)
 
+    def bernoulli_(self, p=0.5, *, generator=None):
+        r"""
+        Fills each location of self with an independent sample from Bernoulli(p).
+
+        Args:
+            p (Union[number.Number, Tensor], optional): `p` should either be a scalar or tensor containing
+                probabilities to be used for drawing the binary random number, between ``0`` and ``1`` .
+                If it is a tensor, `p` must be floating point. Default: ``0.5`` .
+
+        Keyword Args:
+            generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+                Default: ``None`` , uses the default pseudorandom number generator.
+
+        Returns:
+            The input tensor.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> x = Tensor([[2, 3, 4], [1, 2, 3]])
+            >>> p = 0.1
+            >>> print(x.bernoulli_(p).shape)
+            (2, 3)
+        """
+        return tensor_operator_registry.get('bernoulli_')(self, p, generator=generator)
+
     def random_(self, from_=0, to=None, *, generator=None):
         r"""
         Fill the tensor with numbers sampled from a discrete uniform distribution over an
@@ -2654,7 +2676,7 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         Note:
             The rank of `self`.
 
-            - Ascend: its rank can be equal to 0 except O2 mode.
+            - Ascend: its rank can be equal to 0 except GE backend.
             - CPU/GPU: its rank should be greater than or eaqual to 1.
 
         Keyword Args:
@@ -2673,7 +2695,7 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         Raises:
             TypeError: If `self` is not Tensor.
             TypeError: If `as_tuple` is not bool.
-            RuntimeError: On GPU or CPU or Ascend O2 mode, if dim of `input` equals to 0.
+            RuntimeError: On GPU or CPU or Ascend GE backend, if dim of `input` equals to 0.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
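Only the backend wording changed in the two hunks above; for readers checking the 0-rank constraint, a small usage sketch of `Tensor.nonzero` (illustrative; printed layout may vary by backend):

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([1, 0, 2, 0], ms.int32)
    print(x.nonzero())               # expected: indices [[0], [2]], one row per non-zero element
    print(x.nonzero(as_tuple=True))  # expected: a tuple holding one 1-D index tensor per dimension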
@@ -2836,7 +2858,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
             return []
         return self._tolist()
 
-
     def unsorted_segment_min(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_min`.
@@ -2859,7 +2880,7 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.unique_consecutive`.
         """
-        output, idx, counts =\
+        output, idx, counts = \
             tensor_operator_registry.get("unique_consecutive")(return_inverse, return_counts, dim)(self)
         if return_inverse and return_counts:
             return output, idx, counts
@@ -2946,7 +2967,8 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
           taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
 
         Args:
-            dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
+            dtype (dtype.Number, bool): The valid data type of the output tensor. Only constant value is allowed.
+                Only Support type bool in PyNative mode.
 
         Returns:
             Tensor, converted to the specified `dtype`.
@@ -2998,7 +3020,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
             return str(self.dtype)
         return self.astype(dtype)
 
-
     def type_as(self, other):
         r"""
         Returns self tensor cast to the type of the with the input other tensor.
@@ -3041,7 +3062,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
             return self
         return TensorPy_.type_as(self, other)
 
-
     def bool(self):
         r"""
         Converts input tensor dtype to `bool`.
@@ -3537,49 +3557,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('zero_')(self)
 
-    def new_empty(self, size, *, dtype=None, device=None):
-        r"""
-        Returns an uninitialized Tensor of `size`. Its dtype is specified by `dtype` and its
-        device is specified by `device`.
-
-        .. warning::
-            This is an experimental API that is subject to change or deletion.
-
-        Args:
-            size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Only positive integer or
-                tuple or list containing positive integers are allowed.
-
-        Keyword Args:
-            dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`,
-                the tensor will have the same dtype as `self`. Default ``None``.
-            device (string, optional): The specified device of the output tensor. Support ``CPU`` and ``Ascend``. If
-                `device = None`, the tensor will have the same device as `self` and if the device of `self` is not
-                defined, the value set by :func:`mindspore.set_device` will be used. Default ``None``.
-
-        Returns:
-            Tensor, the shape, dtype and device is defined above but with uninitialized data (May be a random value).
-
-        Raises:
-            TypeError: If `size` is neither an int nor a tuple or list of int.
-
-        Supported Platforms:
-            ``Ascend``
-
-        Examples:
-            >>> import mindspore
-            >>> from mindspore import Tensor
-            >>> x = Tensor([[1, 2, 3], [4, 5, 6]])
-            >>> output1 = x.new_empty((2, 3))
-            >>> print(output1)
-            [[0 0 0]
-             [0 0 0]]
-            >>> output2 = x.new_empty((2, 3), dtype=mindspore.float64)
-            >>> print(output2)
-            [[0. 0. 0.]
-             [0. 0. 0.]]
-        """
-        return tensor_operator_registry.get('new_empty')(self, size, dtype, device)
-
     def sign(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sign`.
@@ -3651,46 +3628,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)
 
-    def masked_scatter(self, mask, x):
-        r"""
-        Updates the value in the "self Tensor" with the `tensor` value according to the mask, and returns a Tensor.
-        The shape of `mask` and the "self Tensor" must be the same or `mask` is broadcastable.
-
-        .. warning::
-            This is an experimental API that is subject to change or deletion.
-
-        Args:
-            mask (Tensor[bool]): A bool tensor with a shape broadcastable to the "self Tensor".
-            x (Tensor): A tensor with the same data type as the "self Tensor". The number
-                of elements must be greater than or equal to the number of True's in `mask`.
-
-        Returns:
-            Tensor, with the same type and shape as the "self Tensor".
-
-        Raises:
-            TypeError: If `mask` or `x` is not a Tensor.
-            TypeError: If data type of the "self Tensor" is not be supported.
-            TypeError: If dtype of `mask` is not bool.
-            TypeError: If the dim of the "self Tensor" less than the dim of `mask`.
-            ValueError: If `mask` can not be broadcastable to the "self Tensor".
-            ValueError: If the number of elements in `x` is less than the number required for the updates.
-
-        Supported Platforms:
-            ``Ascend`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
-            >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
-            >>> tensor = Tensor(np.array([5., 6., 7.]), mindspore.float32)
-            >>> output = x.masked_scatter(mask, tensor)
-            >>> print(output)
-            [5. 6. 3. 7.]
-        """
-        return tensor_operator_registry.get('masked_scatter')()(self, mask, x)
-
     def index_put(self, indices, values, accumulate=False):
         r"""
         Based on the indices in `indices`, replace the corresponding elements in Tensor `self`
@@ -3831,9 +3768,6 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
             raise ValueError(f"The type of 'blocking' must be bool, but got {blocking}")
         if to not in ("Ascend", "GPU", "CPU"):
             raise ValueError(f"The value of 'to' must be one of ['Ascend', 'GPU', 'CPU'], but got {to}")
-        mode = context.get_context("mode")
-        if mode != context.PYNATIVE_MODE:
-            raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
         return TensorPy_.move_to(self, to, blocking)
 
     def _offload(self):
@@ -3868,6 +3802,23 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         """
         return TensorPy_._data_ptr(self)
 
+    def data_ptr(self):
+        r"""
+        Get the data ptr address of tensor, for CPU is host address, GPU/NPU is device address.
+        User should know how to use the data ptr address.
+        Note: this api is an experimental api, users need understatnd it before use.
+
+        Supported Platforms:
+            ``CPU/GPU/Ascend``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> x = ms.Tensor([1, 2, 3], ms.int64)
+            >>> data_ptr = x.data_ptr()
+        """
+        return TensorPy_._data_ptr(self)
+
     def normal_(self, mean=0, std=1, *, generator=None):
         r"""
         Update the `self` tensor in place by generating random numbers sampled from the normal
@@ -3907,13 +3858,13 @@ class Tensor(TensorPy_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('normal_')(self, mean=mean, std=std, generator=generator)
 
-
     def triangular_solve(self, A, upper=True, transpose=False, unitriangular=False):
         r"""
         For details, please refer to :func:`mindspore.mint.triangular_solve`.
         """
         return tensor_operator_registry.get('triangular_solve')(self, A, upper, transpose, unitriangular)
 
+
 def _vm_compare(*args):
     """Implement `vm_compare` for tensor."""
     if args:
@@ -3986,9 +3937,9 @@ def _check_astype_and_convert(dtype):
         if dtype.lower() not in all_types:
             raise TypeError(f"For Tensor.astype, the string input type must be one of {all_types}, "
                             f"but got '{dtype}'.")
-        dtype = mstype.pytype_to_dtype(np.dtype(dtype.lower()))
+        dtype = mstype._pytype_to_dtype(np.dtype(dtype.lower())) # pylint:disable=protected-access
    elif isinstance(dtype, type):
-        dtype = mstype.pytype_to_dtype(dtype)
+        dtype = mstype._pytype_to_dtype(dtype) # pylint:disable=protected-access
    elif dtype not in mstype.number_type + (mstype.bool_,):
        raise TypeError(
            f"For Tensor.astype, the input type must be one of {list(mstype.number_type + (mstype.bool_,) + np_types)},"
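`_check_astype_and_convert` is the normalization step behind `Tensor.astype` when a string or Python type is passed; a short sketch of that public entry point (illustrative):

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([1, 2, 3], ms.int32)
    y = x.astype("float32")  # string dtypes go through the renamed mstype._pytype_to_dtype helper
    z = x.astype(bool)       # Python types take the isinstance(dtype, type) branch
    print(y.dtype, z.dtype)  # expected: Float32 Bool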
@@ -25,7 +25,7 @@ from mindspore import context
 from mindspore.parallel._ps_context import _is_role_sched, _is_ps_mode,\
     _get_ps_context
 from mindspore import log as logger
-from mindspore._c_expression import CollectiveManager, set_cluster_exit_with_exception, MSContext
+from mindspore._c_expression import CollectiveManager, set_cluster_exit_with_exception, MSContext, GroupOptions
 from mindspore.common._utils import load_lib
 
 HCCL_LIB = 'libhccl_plugin.so'
@@ -470,14 +470,25 @@ def _get_group_ranks(group):
 
 
 @check_parameter_available
-def _create_group_helper(group, rank_ids):
+def _create_group_helper(group, rank_ids, options=None):
     """
     The Helper to do create_group.
 
     Args:
         group (str): The communication group.
        rank_ids (list): Rank ids in the group.
-        backend (str): The backend, like "hccl".
+        options (GroupOptions, optional): Additional communication group configuration parameters.
+            The backend will automatically select supported parameters and apply them during group
+            initialization. i.e. for the ``HCCL`` backend, ``hccl_config`` can be specified so that
+            group initialization configurations can be applied. Default is ``None``.
+
+            `GroupOptions` is defined as a class that can be instantiated as a python object.
+
+            .. code-block::
+
+                GroupOptions {
+                    hccl_config(dict)
+                }
 
     Raises:
         TypeError: If rank_ids is not a list.
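A minimal sketch of how the new `options` argument might be exercised, assuming the public `mindspore.communication.create_group` wrapper in 2.7.0 forwards `options` to this helper (the `hccl_config` key shown is a hypothetical placeholder, not a documented value):

    from mindspore.communication import init, create_group
    from mindspore._c_expression import GroupOptions  # binding added by the import hunk above

    init()  # set up distributed communication first (e.g. HCCL on Ascend)

    opts = GroupOptions()
    opts.hccl_config = {"group_name_hint": "demo"}  # hypothetical key; consult HCCL docs for real ones
    create_group("demo_group", [0, 1, 2, 3], options=opts)  # assumes the wrapper accepts 'options'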
@@ -499,10 +510,15 @@ def _create_group_helper(group, rank_ids):
                          "but got 'rank_ids' size : {}.".format(len(rank_ids)))
     if len(rank_ids) - len(list(set(rank_ids))) > 0:
         raise ValueError("List rank_ids in Group {} has duplicate data!".format(group))
+    if options is None:
+        options = GroupOptions()
+    if not isinstance(options, GroupOptions):
+        raise TypeError("For 'create_group', the argument 'options' must be type of GroupOptions, "
+                        "but got 'options' type : {}.".format(type(options)))
     if _hccl_test():
         hccl.create_group(group, rank_size, rank_ids)
     else:
-        result = CollectiveManager.get_instance().create_group(group, rank_ids)
+        result = CollectiveManager.get_instance().create_group(group, rank_ids, options)
         if not result:
             raise RuntimeError("Failed to create communication group for {} with rank ids {}. "
                                "If NCCL is used, 'export NCCL_DEBUG=INFO' "
@@ -554,3 +570,29 @@ def _get_group_map():
 def _wait_all_comm_init():
     """Wait for all communicators to be initialized."""
     return CollectiveManager.get_instance().wait_all_comm_init()
+
+
+def _remove_group_info(group_name):
+    """
+    Remove group info after destroy group by user when using arf.
+
+    Args:
+        group_name (str): The user communication group name.
+
+    """
+    CollectiveManager.get_instance().remove_group_info(group_name)
+
+
+def _comm_switch_nic_helper(global_ranks: list, use_backup: list) -> bool:
+    """Switch network interface card between the primary and the secondary NIC.
+
+    Args:
+        global_ranks (list[int], tuple[int]): list of integers. The global rank ids that need switch network interface .
+        use_backup (list[bool], tuple[int]): list of bool. For each rank id in global_ranks, determine whether to use
+            the backup network interface card. True means use, False means not use.
+
+    Returns:
+        bool, whether the network card switch is successful.
+        If one fails, return False. If all are successful, return True.
+    """
+    return CollectiveManager.get_instance().comm_switch_nic(global_ranks, use_backup)
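A short hedged sketch of calling the new private NIC-switch helper, with the semantics taken only from its docstring above (internal API, not part of the public surface):

    from mindspore.communication._comm_helper import _comm_switch_nic_helper

    # Move ranks 0 and 2 to their backup network interface card, keep rank 1 on the primary one.
    ok = _comm_switch_nic_helper([0, 1, 2], [True, False, True])
    print(ok)  # True only if every requested switch succeeded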