mindspore-2.7.0rc1-cp310-cp310-win_amd64.whl → mindspore-2.7.1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (370)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +5 -2
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +2 -2
  7. mindspore/_extends/builtin_operations.py +3 -3
  8. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  9. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  10. mindspore/_extends/parse/__init__.py +3 -3
  11. mindspore/_extends/parse/compile_config.py +24 -1
  12. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -3
  13. mindspore/_extends/parse/parser.py +28 -22
  14. mindspore/_extends/parse/resources.py +1 -1
  15. mindspore/_extends/parse/standard_method.py +23 -2
  16. mindspore/_extends/parse/trope.py +2 -1
  17. mindspore/_extends/pijit/pijit_func_white_list.py +9 -27
  18. mindspore/amp.py +0 -18
  19. mindspore/avcodec-59.dll +0 -0
  20. mindspore/avdevice-59.dll +0 -0
  21. mindspore/avfilter-8.dll +0 -0
  22. mindspore/avformat-59.dll +0 -0
  23. mindspore/avutil-57.dll +0 -0
  24. mindspore/boost/base.py +29 -2
  25. mindspore/common/__init__.py +18 -12
  26. mindspore/common/_decorator.py +3 -2
  27. mindspore/common/_grad_function.py +3 -1
  28. mindspore/common/_tensor_cpp_method.py +1 -1
  29. mindspore/common/_tensor_docs.py +371 -96
  30. mindspore/common/_utils.py +7 -43
  31. mindspore/common/api.py +434 -135
  32. mindspore/common/dtype.py +98 -57
  33. mindspore/common/dump.py +7 -108
  34. mindspore/common/dynamic_shape/__init__.py +0 -0
  35. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +15 -23
  36. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  37. mindspore/common/file_system.py +59 -9
  38. mindspore/common/hook_handle.py +82 -3
  39. mindspore/common/jit_config.py +5 -1
  40. mindspore/common/jit_trace.py +27 -12
  41. mindspore/common/lazy_inline.py +5 -3
  42. mindspore/common/np_dtype.py +3 -3
  43. mindspore/common/parameter.py +17 -127
  44. mindspore/common/recompute.py +4 -13
  45. mindspore/common/tensor.py +50 -217
  46. mindspore/communication/_comm_helper.py +11 -1
  47. mindspore/communication/comm_func.py +138 -4
  48. mindspore/communication/management.py +85 -1
  49. mindspore/config/op_info.config +0 -15
  50. mindspore/context.py +20 -106
  51. mindspore/dataset/__init__.py +1 -1
  52. mindspore/dataset/audio/transforms.py +1 -1
  53. mindspore/dataset/core/config.py +35 -1
  54. mindspore/dataset/engine/datasets.py +338 -319
  55. mindspore/dataset/engine/datasets_user_defined.py +38 -22
  56. mindspore/dataset/engine/datasets_vision.py +1 -1
  57. mindspore/dataset/engine/validators.py +1 -15
  58. mindspore/dataset/transforms/c_transforms.py +2 -2
  59. mindspore/dataset/transforms/transforms.py +3 -3
  60. mindspore/dataset/vision/__init__.py +1 -1
  61. mindspore/dataset/vision/py_transforms.py +8 -8
  62. mindspore/dataset/vision/transforms.py +17 -5
  63. mindspore/dataset/vision/utils.py +632 -21
  64. mindspore/device_context/ascend/op_tuning.py +35 -1
  65. mindspore/dnnl.dll +0 -0
  66. mindspore/{profiler/common/validator → graph}/__init__.py +9 -1
  67. mindspore/graph/custom_pass.py +55 -0
  68. mindspore/include/api/cell.h +28 -4
  69. mindspore/include/api/cfg.h +24 -7
  70. mindspore/include/api/context.h +1 -0
  71. mindspore/include/api/delegate.h +0 -2
  72. mindspore/include/api/dual_abi_helper.h +100 -19
  73. mindspore/include/api/graph.h +14 -1
  74. mindspore/include/api/kernel.h +16 -3
  75. mindspore/include/api/kernel_api.h +9 -1
  76. mindspore/include/api/metrics/accuracy.h +9 -0
  77. mindspore/include/api/model.h +5 -1
  78. mindspore/include/api/model_group.h +4 -0
  79. mindspore/include/api/model_parallel_runner.h +2 -0
  80. mindspore/include/api/status.h +48 -10
  81. mindspore/include/api/types.h +6 -1
  82. mindspore/include/dataset/constants.h +9 -0
  83. mindspore/include/dataset/execute.h +2 -2
  84. mindspore/jpeg62.dll +0 -0
  85. mindspore/mindrecord/__init__.py +3 -3
  86. mindspore/mindrecord/common/exceptions.py +1 -0
  87. mindspore/mindrecord/config.py +1 -1
  88. mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
  89. mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
  90. mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
  91. mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
  92. mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
  93. mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
  94. mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
  95. mindspore/mindrecord/filereader.py +4 -4
  96. mindspore/mindrecord/filewriter.py +5 -5
  97. mindspore/mindrecord/mindpage.py +2 -2
  98. mindspore/mindrecord/tools/cifar10.py +4 -3
  99. mindspore/mindrecord/tools/cifar100.py +1 -1
  100. mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
  101. mindspore/mindrecord/tools/cifar10_to_mr.py +6 -6
  102. mindspore/mindrecord/tools/csv_to_mr.py +1 -1
  103. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  104. mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
  105. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
  106. mindspore/mindspore_backend_common.dll +0 -0
  107. mindspore/mindspore_backend_manager.dll +0 -0
  108. mindspore/mindspore_cluster.dll +0 -0
  109. mindspore/mindspore_common.dll +0 -0
  110. mindspore/mindspore_core.dll +0 -0
  111. mindspore/mindspore_cpu.dll +0 -0
  112. mindspore/mindspore_dump.dll +0 -0
  113. mindspore/mindspore_frontend.dll +0 -0
  114. mindspore/mindspore_glog.dll +0 -0
  115. mindspore/mindspore_hardware_abstract.dll +0 -0
  116. mindspore/mindspore_memory_pool.dll +0 -0
  117. mindspore/mindspore_ms_backend.dll +0 -0
  118. mindspore/mindspore_ops.dll +0 -0
  119. mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
  120. mindspore/mindspore_profiler.dll +0 -0
  121. mindspore/mindspore_pyboost.dll +0 -0
  122. mindspore/mindspore_pynative.dll +0 -0
  123. mindspore/mindspore_runtime_pipeline.dll +0 -0
  124. mindspore/mindspore_runtime_utils.dll +0 -0
  125. mindspore/mindspore_tools.dll +0 -0
  126. mindspore/mint/__init__.py +15 -10
  127. mindspore/mint/distributed/__init__.py +4 -0
  128. mindspore/mint/distributed/distributed.py +392 -69
  129. mindspore/mint/nn/__init__.py +2 -16
  130. mindspore/mint/nn/functional.py +4 -110
  131. mindspore/mint/nn/layer/__init__.py +0 -2
  132. mindspore/mint/nn/layer/_functions.py +1 -2
  133. mindspore/mint/nn/layer/activation.py +0 -6
  134. mindspore/mint/nn/layer/basic.py +0 -47
  135. mindspore/mint/nn/layer/conv.py +10 -10
  136. mindspore/mint/nn/layer/normalization.py +11 -16
  137. mindspore/mint/nn/layer/pooling.py +0 -4
  138. mindspore/nn/__init__.py +1 -3
  139. mindspore/nn/cell.py +231 -239
  140. mindspore/nn/layer/activation.py +4 -2
  141. mindspore/nn/layer/basic.py +56 -14
  142. mindspore/nn/layer/container.py +16 -0
  143. mindspore/nn/layer/embedding.py +4 -169
  144. mindspore/nn/layer/image.py +1 -1
  145. mindspore/nn/layer/normalization.py +2 -1
  146. mindspore/nn/layer/thor_layer.py +4 -85
  147. mindspore/nn/optim/ada_grad.py +0 -1
  148. mindspore/nn/optim/adafactor.py +0 -1
  149. mindspore/nn/optim/adam.py +32 -127
  150. mindspore/nn/optim/adamax.py +0 -1
  151. mindspore/nn/optim/asgd.py +0 -1
  152. mindspore/nn/optim/ftrl.py +8 -102
  153. mindspore/nn/optim/lamb.py +1 -4
  154. mindspore/nn/optim/lars.py +0 -3
  155. mindspore/nn/optim/lazyadam.py +25 -218
  156. mindspore/nn/optim/momentum.py +5 -43
  157. mindspore/nn/optim/optimizer.py +6 -55
  158. mindspore/nn/optim/proximal_ada_grad.py +0 -1
  159. mindspore/nn/optim/rmsprop.py +0 -1
  160. mindspore/nn/optim/rprop.py +0 -1
  161. mindspore/nn/optim/sgd.py +0 -1
  162. mindspore/nn/optim/tft_wrapper.py +2 -4
  163. mindspore/nn/optim/thor.py +0 -2
  164. mindspore/nn/probability/bijector/bijector.py +7 -8
  165. mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
  166. mindspore/nn/probability/bijector/power_transform.py +20 -21
  167. mindspore/nn/probability/bijector/scalar_affine.py +5 -5
  168. mindspore/nn/probability/bijector/softplus.py +13 -14
  169. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  170. mindspore/nn/wrap/cell_wrapper.py +39 -5
  171. mindspore/nn/wrap/grad_reducer.py +4 -89
  172. mindspore/numpy/array_creations.py +4 -4
  173. mindspore/numpy/fft.py +9 -9
  174. mindspore/numpy/utils_const.py +1 -1
  175. mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
  176. mindspore/onnx/onnx_export.py +137 -0
  177. mindspore/opencv_core4110.dll +0 -0
  178. mindspore/opencv_imgcodecs4110.dll +0 -0
  179. mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
  180. mindspore/ops/__init__.py +2 -0
  181. mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
  182. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  183. mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
  184. mindspore/ops/_op_impl/cpu/__init__.py +1 -5
  185. mindspore/ops/_op_impl/cpu/{buffer_append.py → joinedstr_op.py} +8 -8
  186. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +28 -24
  187. mindspore/ops/auto_generate/gen_extend_func.py +6 -11
  188. mindspore/ops/auto_generate/gen_ops_def.py +385 -154
  189. mindspore/ops/auto_generate/gen_ops_prim.py +5676 -5167
  190. mindspore/ops/communication.py +97 -0
  191. mindspore/ops/composite/__init__.py +5 -2
  192. mindspore/ops/composite/base.py +16 -2
  193. mindspore/ops/composite/multitype_ops/__init__.py +3 -1
  194. mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
  195. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  196. mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
  197. mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
  198. mindspore/ops/function/__init__.py +2 -0
  199. mindspore/ops/function/array_func.py +24 -18
  200. mindspore/ops/function/comm_func.py +3883 -0
  201. mindspore/ops/function/debug_func.py +7 -6
  202. mindspore/ops/function/grad/grad_func.py +4 -12
  203. mindspore/ops/function/math_func.py +89 -86
  204. mindspore/ops/function/nn_func.py +92 -313
  205. mindspore/ops/function/random_func.py +9 -18
  206. mindspore/ops/functional.py +4 -1
  207. mindspore/ops/functional_overload.py +377 -30
  208. mindspore/ops/operations/__init__.py +2 -5
  209. mindspore/ops/operations/_custom_ops_utils.py +7 -9
  210. mindspore/ops/operations/_inner_ops.py +12 -50
  211. mindspore/ops/operations/_rl_inner_ops.py +0 -933
  212. mindspore/ops/operations/array_ops.py +5 -50
  213. mindspore/ops/operations/comm_ops.py +95 -17
  214. mindspore/ops/operations/custom_ops.py +237 -22
  215. mindspore/ops/operations/debug_ops.py +33 -35
  216. mindspore/ops/operations/manually_defined/ops_def.py +39 -318
  217. mindspore/ops/operations/math_ops.py +5 -5
  218. mindspore/ops/operations/nn_ops.py +3 -3
  219. mindspore/ops/operations/sparse_ops.py +0 -83
  220. mindspore/ops/primitive.py +4 -27
  221. mindspore/ops/tensor_method.py +88 -10
  222. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
  223. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
  224. mindspore/ops_generate/api/functions_cc_generator.py +53 -4
  225. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
  226. mindspore/ops_generate/common/gen_constants.py +11 -10
  227. mindspore/ops_generate/common/op_proto.py +18 -1
  228. mindspore/ops_generate/common/template.py +102 -245
  229. mindspore/ops_generate/common/template_utils.py +212 -0
  230. mindspore/ops_generate/gen_custom_ops.py +69 -0
  231. mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
  232. mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
  233. mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
  234. mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
  235. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
  236. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
  237. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
  238. mindspore/ops_generate/pyboost/gen_pyboost_func.py +0 -16
  239. mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
  240. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
  241. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
  242. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
  243. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
  244. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
  245. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
  246. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
  247. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
  248. mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
  249. mindspore/ops_generate/resources/yaml_loader.py +13 -0
  250. mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
  251. mindspore/parallel/_auto_parallel_context.py +5 -15
  252. mindspore/parallel/_cell_wrapper.py +1 -1
  253. mindspore/parallel/_parallel_serialization.py +4 -6
  254. mindspore/parallel/_ps_context.py +2 -2
  255. mindspore/parallel/_utils.py +34 -17
  256. mindspore/parallel/auto_parallel.py +23 -9
  257. mindspore/parallel/checkpoint_transform.py +20 -2
  258. mindspore/parallel/cluster/process_entity/_api.py +28 -33
  259. mindspore/parallel/cluster/process_entity/_utils.py +9 -5
  260. mindspore/parallel/cluster/run.py +5 -3
  261. mindspore/{experimental/llm_boost/ascend_native → parallel/distributed}/__init__.py +21 -22
  262. mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
  263. mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
  264. mindspore/parallel/function/reshard_func.py +6 -5
  265. mindspore/parallel/nn/parallel_cell_wrapper.py +40 -3
  266. mindspore/parallel/nn/parallel_grad_reducer.py +0 -8
  267. mindspore/parallel/shard.py +7 -21
  268. mindspore/parallel/strategy.py +336 -0
  269. mindspore/parallel/transform_safetensors.py +127 -20
  270. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +13 -9
  271. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
  272. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
  273. mindspore/profiler/common/constant.py +5 -0
  274. mindspore/profiler/common/file_manager.py +9 -0
  275. mindspore/profiler/common/msprof_cmd_tool.py +40 -4
  276. mindspore/profiler/common/path_manager.py +65 -24
  277. mindspore/profiler/common/profiler_context.py +27 -14
  278. mindspore/profiler/common/profiler_info.py +3 -3
  279. mindspore/profiler/common/profiler_meta_data.py +1 -0
  280. mindspore/profiler/common/profiler_op_analyse.py +10 -6
  281. mindspore/profiler/common/profiler_path_manager.py +13 -0
  282. mindspore/profiler/common/util.py +30 -3
  283. mindspore/profiler/dynamic_profiler.py +91 -46
  284. mindspore/profiler/envprofiler.py +30 -5
  285. mindspore/profiler/experimental_config.py +18 -2
  286. mindspore/profiler/platform/cpu_profiler.py +10 -4
  287. mindspore/profiler/platform/npu_profiler.py +34 -7
  288. mindspore/profiler/profiler.py +193 -145
  289. mindspore/profiler/profiler_action_controller.py +1 -1
  290. mindspore/profiler/profiler_interface.py +2 -2
  291. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  292. mindspore/run_check/_check_version.py +108 -24
  293. mindspore/runtime/__init__.py +9 -6
  294. mindspore/runtime/executor.py +35 -0
  295. mindspore/runtime/memory.py +113 -0
  296. mindspore/runtime/thread_bind_core.py +1 -1
  297. mindspore/swresample-4.dll +0 -0
  298. mindspore/swscale-6.dll +0 -0
  299. mindspore/tinyxml2.dll +0 -0
  300. mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
  301. mindspore/tools/data_dump.py +130 -0
  302. mindspore/tools/sdc_detect.py +91 -0
  303. mindspore/tools/stress_detect.py +63 -0
  304. mindspore/train/__init__.py +6 -6
  305. mindspore/train/_utils.py +8 -21
  306. mindspore/train/amp.py +6 -7
  307. mindspore/train/callback/_callback.py +2 -1
  308. mindspore/train/callback/_checkpoint.py +1 -17
  309. mindspore/train/callback/_flops_collector.py +10 -6
  310. mindspore/train/callback/_train_fault_tolerance.py +72 -25
  311. mindspore/train/data_sink.py +5 -9
  312. mindspore/train/dataset_helper.py +5 -5
  313. mindspore/train/model.py +41 -230
  314. mindspore/train/serialization.py +160 -401
  315. mindspore/train/train_thor/model_thor.py +2 -2
  316. mindspore/turbojpeg.dll +0 -0
  317. mindspore/utils/__init__.py +6 -3
  318. mindspore/utils/dlpack.py +92 -0
  319. mindspore/utils/dryrun.py +1 -1
  320. mindspore/utils/runtime_execution_order_check.py +10 -0
  321. mindspore/utils/sdc_detect.py +14 -12
  322. mindspore/utils/stress_detect.py +43 -0
  323. mindspore/utils/utils.py +152 -16
  324. mindspore/version.py +1 -1
  325. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
  326. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/RECORD +330 -344
  327. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  328. mindspore/communication/_hccl_management.py +0 -297
  329. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -207
  330. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
  331. mindspore/experimental/llm_boost/atb/__init__.py +0 -23
  332. mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
  333. mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
  334. mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
  335. mindspore/experimental/llm_boost/register.py +0 -130
  336. mindspore/experimental/llm_boost/utils.py +0 -31
  337. mindspore/include/OWNERS +0 -7
  338. mindspore/mindspore_cpu_res_manager.dll +0 -0
  339. mindspore/mindspore_ops_kernel_common.dll +0 -0
  340. mindspore/mindspore_res_manager.dll +0 -0
  341. mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
  342. mindspore/nn/reinforcement/_batch_read_write.py +0 -142
  343. mindspore/nn/reinforcement/_tensors_queue.py +0 -152
  344. mindspore/nn/reinforcement/tensor_array.py +0 -145
  345. mindspore/opencv_core452.dll +0 -0
  346. mindspore/opencv_imgcodecs452.dll +0 -0
  347. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
  348. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
  349. mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
  350. mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
  351. mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
  352. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
  353. mindspore/ops/operations/_tensor_array.py +0 -359
  354. mindspore/ops/operations/rl_ops.py +0 -288
  355. mindspore/parallel/_offload_context.py +0 -275
  356. mindspore/parallel/_recovery_context.py +0 -115
  357. mindspore/parallel/_transformer/__init__.py +0 -35
  358. mindspore/parallel/_transformer/layers.py +0 -765
  359. mindspore/parallel/_transformer/loss.py +0 -251
  360. mindspore/parallel/_transformer/moe.py +0 -693
  361. mindspore/parallel/_transformer/op_parallel_config.py +0 -222
  362. mindspore/parallel/_transformer/transformer.py +0 -3124
  363. mindspore/parallel/mpi/_mpi_config.py +0 -116
  364. mindspore/profiler/common/validator/validate_path.py +0 -84
  365. mindspore/train/memory_profiling_pb2.py +0 -298
  366. mindspore/utils/hooks.py +0 -81
  367. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  368. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
  369. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
  370. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
mindspore/common/_tensor_docs.py
@@ -28,7 +28,7 @@ def attach_docstr(method, docstr):

  attach_docstr("absolute", r"""absolute() -> Tensor

- Alias for :func:`Tensor.abs`.
+ Alias for :func:`mindspore.Tensor.abs`.
  """)
  attach_docstr("abs", r"""abs() -> Tensor

@@ -72,7 +72,7 @@ Note:
  - When `self` and `other` have different shapes,
  they must be able to broadcast to a common shape.
  - `self` and `other` can not be bool type at the same time,
- [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
+ [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.
  - `self` and `other` comply with the implicit type conversion rules to make the data types
  consistent.
  - The dimension of `self` should be greater than or equal to 1.
@@ -80,7 +80,7 @@ Note:
  Args:
  other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
  `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

  Returns:
  Tensor with a shape that is the same as the broadcasted shape of `self` and `other`,
@@ -131,7 +131,7 @@ Note:
  Args:
  other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
  `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

  Keyword Args:
  alpha (number.Number): A scaling factor applied to `other`, default 1.
@@ -319,29 +319,7 @@ Examples:
  .. method:: Tensor.any(dim=None, keepdim=False) -> Tensor
  :noindex:

- Tests if any element in tensor evaluates to `True` along the given axes.
-
- Args:
- dim (int, optional): The dimensions to reduce. If ``None`` , all dimensions are reduced. Default ``None`` .
- keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
-
- Returns:
- Tensor
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> x = mindspore.tensor([[True, False], [True, True]])
- >>>
- >>> # case 1: By default, mindspore.Tensor.any tests along all the axes.
- >>> x.any()
- Tensor(shape=[], dtype=Bool, value= True)
- >>>
- >>> # case 2: Reduces a dimension along dim 1, with keepdim False.
- >>> x.any(dim=1)
- Tensor(shape=[2], dtype=Bool, value= [ True, True])
+ For details, please refer to :func:`mindspore.mint.any`.
  """)
  attach_docstr("arccosh", r"""arccosh() -> Tensor

@@ -361,7 +339,7 @@ Alias for :func:`mindspore.Tensor.asin`.
  """)
  attach_docstr("arctan2", r"""arctan2(other) -> Tensor

- Alias for :func:`Tensor.atan2`.
+ Alias for :func:`mindspore.Tensor.atan2`.
  """)
  attach_docstr("arctanh", r"""arctanh() -> Tensor

@@ -705,6 +683,9 @@ Examples:
  >>> print(output)
  [ 0 1 0 0 -2 3 2]
  """)
+ attach_docstr("broadcast_to", r"""broadcast_to(*shape) -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.broadcast_to`.""")
  attach_docstr("ceil", r"""ceil() -> Tensor

  For details, please refer to :func:`mindspore.ops.ceil`.""")
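For orientation only (not part of the diff): the newly documented `Tensor.broadcast_to` repeats a tensor along broadcastable dimensions to the requested shape. A minimal sketch, assuming the varargs signature shown above and a standard mindspore 2.7.1 install:

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.array([1, 2, 3], dtype=np.float32))  # shape (3,)
    y = x.broadcast_to(2, 3)                           # expand along a new leading axis
    print(y.shape)                                     # expected (2, 3)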
@@ -790,9 +771,6 @@ attach_docstr("clone", r"""clone() -> Tensor

  Returns a copy of self.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Note:
  This function is differentiable, and gradients will flow back directly from the calculation
  result of the function to the `self`.
@@ -818,12 +796,16 @@ attach_docstr("copy_", r"""copy_(src, non_blocking=False) -> Tensor
  Copies the elements from `src` into `self` tensor and returns `self`.

  .. warning::
- This is an experimental API that is subject to change or deletion.
- The `src` tensor must be broadcastable with the `self` tensor. It may be of a different data type.
+ If Copying is performed between Ascend and Ascend, the `src` tensor must be broadcastable with the `self` tensor,
+ and they can be of different data types.
+ Copying is performed between CPU and Ascend or CPU and CPU are only supported if `self` and `src` have
+ the same shape and data type and they are all contiguous.

  Args:
  src (Tensor): the source tensor to copy from.
- non_blocking (bool, optional): no effect currently. Default: ``False``.
+ non_blocking (bool, optional): If ``True`` and copying is performed between CPU and Ascend, and `self` and `src`
+ have the same shape and data type and are contiguous. The copy may occur asynchronously with respect to the
+ host. For other cases, this argument has no effect. Default: ``False``.

  Returns:
  Return self Tensor.
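As a hedged illustration of the `copy_` contract described above (same shape, dtype, and contiguity keep the call valid on any backend; broadcasting is documented only for Ascend-to-Ascend copies), a minimal PyNative-mode sketch:

    import numpy as np
    from mindspore import Tensor

    dst = Tensor(np.zeros((2, 3), dtype=np.float32))
    src = Tensor(np.ones((2, 3), dtype=np.float32))  # same shape, dtype, contiguous
    dst.copy_(src)                                   # in-place copy; returns dst
    print(dst)                                       # all ones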
@@ -852,9 +834,6 @@ attach_docstr("count_nonzero", r"""count_nonzero(dim=None) -> Tensor

  Counts the number of non-zero values in the tensor input along the given dim. If no dim is specified then all non-zeros in the tensor are counted.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  dim (Union[None, int, tuple(int), list(int)], optional): The dimension to reduce. Default value: ``None``, which indicates that the number of non-zero elements is calculated. If `dim` is ``None``, all elements in the tensor are summed up.

@@ -901,7 +880,7 @@ Args:
  keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
  If true, keep these reduced dimensions and the length is 1.
  If false, don't keep these dimensions. Default: ``False`` .
- dtype (Union[Number, mindspore.bool\_], optional): The data type of the output tensor.
+ dtype (Union[Number, mindspore.bool], optional): The data type of the output tensor.
  Default: ``None`` .

  Returns:
@@ -1357,9 +1336,6 @@ the scalar could only be a constant.
  where the :math:`floor` indicates the Floor operator. For more details,
  please refer to the :class:`mindspore.mint.floor` operator.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  other (Union[Tensor, Number, bool]): The other input is a number or
  a bool or a tensor whose data type is number or bool.
@@ -1498,11 +1474,11 @@ The following figure shows the calculation process of Gather commonly:
  where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.

  .. note::
- - The value of input_indices must be in the range of `[0, input_param.shape[axis])`.
+ - The value of input_indices must be in the range of :math:`[0, input\_param.shape[axis])`.
  On CPU and GPU, an error is raised if an out of bound indice is found. On Ascend, the results may be
  undefined.
  - The data type of self cannot be
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ on Ascend
+ `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ on Ascend
  platform currently.

  Args:
@@ -1523,7 +1499,7 @@ Raises:
  ValueError: If `axis` is a Tensor and its size is not 1.
  TypeError: If `self` is not a tensor.
  TypeError: If `input_indices` is not a tensor of type int.
- RuntimeError: If `input_indices` is out of range :math:`[0, input_param.shape[axis])` on CPU or GPU.
+ RuntimeError: If `input_indices` is out of range :math:`[0, input\_param.shape[axis])` on CPU or GPU.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1582,7 +1558,7 @@ For details, please refer to :func:`mindspore.ops.greater_equal`.
  """)
  attach_docstr("gt", r"""gt(other) -> Tensor

- For details, please refer to :func:'mindspore.Tensor.greater'.""")
+ For details, please refer to :func:`mindspore.Tensor.greater`.""")
  attach_docstr("hardshrink", r"""hardshrink(lambd=0.5) -> Tensor

  For details, please refer to :func:`mindspore.ops.hardshrink`.""")
@@ -1592,6 +1568,10 @@ For details, please refer to :func:`mindspore.ops.histc`.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``""")
+ attach_docstr("imag", r"""imag() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.imag`.
+ """)
  attach_docstr("index_add", r"""index_add(indices, y, axis, use_lock=True, check_index_bound=True) -> Tensor

  Adds tensor `y` to specified axis and indices of tensor `self`. The axis should be in [-len(self.dim), len(self.dim) - 1], and indices should be in [0, the size of `self` - 1] at the axis dimension.
@@ -1636,6 +1616,87 @@ For details, please refer to :func:`mindspore.ops.index_add`.
  The corresponding relationships between the parameters of `Tensor.index_add` and :func:`mindspore.ops.index_add`
  are as follows: `dim` -> `axis`, `index` -> `indices`, `source * alpha` -> `y`.
  """)
+ attach_docstr("index_copy_", r"""index_copy_(dim, index, tensor) -> Tensor
+
+ Copies the elements of `tensor` into the `self` by selecting the indices in the order given in `index` .
+
+ .. note::
+ The value of `index` must be in the range `[0, self.shape[dim])` , if it is out of range, the result is undefined.
+
+ If value of `index` contains duplicate entries, the result is nondeterministic since it depends on the last copy operation that occurred.
+
+ Args:
+ dim (int): The dimension along which to `index` .
+ index (Tensor): A 1-D Tensor with the indices to access in `self` along the specified `dim` .
+ tensor (Tensor): The tensor containing values to copy.
+
+ Returns:
+ Return `self` Tensor.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, mint
+ >>> x = mint.ones((5, 3), dtype=mindspore.int64)
+ >>> index = Tensor([4, 0, 2])
+ >>> tensor = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=mindspore.int64)
+ >>> x.index_copy_(0, index, tensor)
+ Tensor(shape=[5, 3], dtype=Int64, value=
+ [[4 5 6]
+ [1 1 1]
+ [7 8 9]
+ [1 1 1]
+ [1 2 3]])
+ """)
+ attach_docstr("index_fill_", r"""index_fill_(dim, index, value) -> Tensor
+
+ Fills the elements under the `dim` dimension of the `self` Tensor with the input `value`
+ by selecting the indices in the order given in `index`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ .. note::
+ While calculating the gradient of `value` , the value of `index` must be in the range `[0, self.shape[dim])` ,
+ if it is out of range, the result is undefined.
+
+ Args:
+ dim (int): Dimension along which to fill the `self` Tensor.
+ index (Tensor): Indices of the `self` Tensor to fill in. The `index` must be a 0D or 1D Tensor with dtype int32
+ or int64.
+ value (Union[Tensor, Number, bool]): Value to fill the `self` Tensor. The `value` is a number or a bool or a
+ tensor whose data type is number or bool. If `value` is a Tensor, it must be a 0D Tensor.
+
+ Returns:
+ Tensor, the shape and the data type are the same as those of `self` .
+
+ Raises:
+ TypeError: If the data type of `index` is not int32 or int64.
+ RuntimeError: If `dim` is out of range :math:`[-self.ndim, self.ndim)`.
+ RuntimeError: If the rank of `index` is greater than 1.
+ RuntimeError: If `value` is a Tensor and its rank is not equal to 0.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.int32)
+ >>> dim = 1
+ >>> index = Tensor(np.array([0, 2]), mindspore.int32)
+ >>> value = Tensor(0, mindspore.int32)
+ >>> output = x.index_fill_(dim, index, value)
+ >>> print(output)
+ [[ 0 2 0]
+ [ 0 5 0]]
+ >>> print(x)
+ [[ 0 2 0]
+ [ 0 5 0]]
+ """)
  attach_docstr("index_select", r"""index_select(axis, index) -> Tensor

  Generates a new Tensor that accesses the values of `self` along the specified `axis` dimension
@@ -1923,13 +1984,13 @@ Examples:
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor
- >>> input = Tensor(np.array([True, False, True]), mindspore.bool_)
- >>> other = Tensor(np.array([True, True, False]), mindspore.bool_)
+ >>> input = Tensor(np.array([True, False, True]), mindspore.bool)
+ >>> other = Tensor(np.array([True, True, False]), mindspore.bool)
  >>> output = input.logical_xor(other)
  >>> print(output)
  [ False True True]
- >>> x = Tensor(1, mindspore.bool_)
- >>> other = Tensor(0, mindspore.bool_)
+ >>> x = Tensor(1, mindspore.bool)
+ >>> other = Tensor(0, mindspore.bool)
  >>> output = input.logical_xor(other)
  >>> print(output)
  True
@@ -1963,6 +2024,81 @@ In-place version of :func:`mindspore.Tensor.masked_fill`.
  .. warning::
  This is an experimental API that is subject to change or deletion.
  """)
+ attach_docstr("masked_scatter", r"""masked_scatter(mask, source) -> Tensor
+
+ Returns a Tensor. Updates the value in the "self Tensor" with the `tensor` value according to the mask.
+ The shape of `mask` and the shape of the "self Tensor" must be the same or `mask` is broadcastable.
+
+ Args:
+ mask (Tensor[bool]): A bool tensor with a shape broadcastable to the "self Tensor".
+ source (Tensor): A tensor with the same data type as the "self Tensor". The number
+ of elements must be greater than or equal to the number of True's in `mask`.
+
+ Returns:
+ Tensor, with the same type and shape as the "self Tensor".
+
+ Raises:
+ TypeError: If `mask` or `source` is not a Tensor.
+ TypeError: If data type of the "self Tensor" is not be supported.
+ TypeError: If dtype of `mask` is not bool.
+ TypeError: If the dim of the "self Tensor" is less than the dim of `mask`.
+ ValueError: If `mask` can not be broadcastable to the "self Tensor".
+ ValueError: If the number of elements in `source` is less than the number of elements to be updated in the tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
+ >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
+ >>> source = Tensor(np.array([5., 6., 7.]), mindspore.float32)
+ >>> output = x.masked_scatter(mask, source)
+ >>> print(output)
+ [5. 6. 3. 7.]""")
+ attach_docstr("masked_scatter_", r"""masked_scatter_(mask, source) -> Tensor
+
+ Updates the value in the `self` with the `source` value according to the `mask`, and returns a Tensor.
+ The shape of `mask` and the `self` must be the same or `mask` is broadcastable.
+
+ Note:
+ When the total number of elements in `source` is less than the number of True elements in `mask`,
+ the NPU may not be able to detect this invalid input; therefore,
+ the correctness of the output cannot be guaranteed.
+
+ Args:
+ mask (Tensor[bool]): A bool tensor with a shape broadcastable to the `self`.
+ source (Tensor): A tensor with the same data type as the `self`. The number
+ of elements must be greater than or equal to the number of True elements in `mask`.
+
+ Returns:
+ Tensor, with the same type and shape as the `self`.
+
+ Raises:
+ TypeError: If `mask` or `source` is not a Tensor.
+ TypeError: If data type of the "self Tensor" is not be supported.
+ TypeError: If dtype of `mask` is not bool.
+ TypeError: If the dim of the "self Tensor" is less than the dim of `mask`.
+ ValueError: If `mask` can not be broadcastable to the "self Tensor".
+
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
+ >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
+ >>> tensor = Tensor(np.array([5., 6., 7.]), mindspore.float32)
+ >>> output = x.masked_scatter_(mask, tensor)
+ >>> print(output)
+ [5. 6. 3. 7.]
+ >>> print(x)
+ [5. 6. 3. 7.]""")
  attach_docstr("masked_select", r"""masked_select(mask) -> Tensor

  For details, please refer to :func:`mindspore.ops.masked_select`.""")
@@ -2489,7 +2625,7 @@ Note:
  - When `self` and `other` have different shapes,
  `other` be able to broadcast to a `self`.
  - `self` and `other` can not be bool type at the same time,
- [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
+ [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.

  Args:
  other (Union[Tensor, number.Number, bool]): `other` is a number.Number or
@@ -2659,9 +2795,6 @@ attach_docstr("new_empty", r"""new_empty(size, *, dtype=None, device=None) -> Te
  Returns an uninitialized Tensor. Its shape is specified by `size`, its dtype is specified by `dtype` and its
  device is specified by `device`.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Only positive integer or
  tuple or list containing positive integers are allowed.
@@ -2669,7 +2802,7 @@ Args:
  Keyword Args:
  dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`,
  the tensor will have the same dtype as `self`. Default ``None``.
- device (string, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
+ device (str, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
  ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
  the value set by :func:`mindspore.set_device` will be used. Default ``None``.

@@ -2795,6 +2928,15 @@ attach_docstr("outer", r"""outer(vec2) -> Tensor

  For details, please refer to :func:`mindspore.ops.outer`.
  """)
+ attach_docstr("permute", r"""permute(*dims) -> Tensor
+
+ For details, please refer to :func:`mindspore.mint.permute`.
+
+ .. method:: Tensor.permute(*axis) -> Tensor
+ :noindex:
+
+ For details, please refer to :func:`mindspore.ops.permute`.
+ """)
  attach_docstr("pow", r"""pow(exponent) -> Tensor

  For details, please refer to :func:`mindspore.ops.pow`.
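A small usage sketch (illustrative, not taken from the diff) for the newly documented `Tensor.permute`, assuming the varargs form `permute(*dims)` shown above:

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.arange(6).reshape(2, 3))  # shape (2, 3)
    y = x.permute(1, 0)                     # reorder axes -> shape (3, 2)
    print(y.shape)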
@@ -2915,6 +3057,10 @@ Examples:
  [[4. 9. 5.]
  [10. 7. 8.]]
  """)
+ attach_docstr("real", r"""real() -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.real`.
+ """)
  attach_docstr("reciprocal", r"""reciprocal() -> Tensor

  For details, please refer to :func:`mindspore.ops.reciprocal`.
@@ -2935,11 +3081,11 @@ Supports broadcasting to a common shape and implicit type promotion.

  The dividend `self` is a tensor whose data type is
  `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

  Args:
  other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
- a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
+ a bool or a tensor whose data type is number or bool when the dividend is a tensor.

  Returns:
  Tensor, with dtype promoted and shape broadcasted.
@@ -3007,6 +3153,49 @@ Examples:
  >>> print(output)
  [2. 1. 0.]
  """)
+ attach_docstr("remainder_", r"""remainder_(other) -> Tensor
+
+ Computes the remainder of `self` divided by `other` element-wise. The result has the same sign as the divisor `other`
+ and its absolute value is less than that of `other`.
+
+ .. code-block::
+
+ remainder(self, other) == self - self.div(other, rounding_mode="floor") * other
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Note:
+ - Complex inputs are not supported.
+ - The dividend `self` is a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ - When `self` and `other` have different shapes, `other` should be able to broadcast to a `self`.
+
+ Args:
+ other (Union[Tensor, number, bool]): The divisor is a number or
+ a bool or a tensor whose data type is number or bool.
+
+ Returns:
+ Tensor, the shape and the data type are the same as those of `self` .
+
+ Raises:
+ RuntimeError: If `other` cannot be broadcast to `self`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
+ >>> other = Tensor(np.array([3, -6, -2]), mindspore.int32)
+ >>> output = x.remainder_(other)
+ >>> print(output)
+ [ 2 -2 -1]
+ >>> print(x)
+ [ 2 -2 -1]
+ """)
  attach_docstr("repeat", r"""repeat(*repeats)

  Copy the elements in each dimension of a Tensor based on the specified number of repetition times.
@@ -3023,9 +3212,6 @@ elements in `repeats`.
  repeats_{i} & \text{if } input.{rank} \le i < n \\
  \end{cases}

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  .. note::
  If need to specify the number of repetition times for each element of a single dimension, please
  refer to :func:`mindspore.Tensor.repeat_interleave`.
@@ -3842,6 +4028,12 @@ Examples:
  attach_docstr("sigmoid", r"""sigmoid() -> Tensor

  For details, please refer to :func:`mindspore.ops.sigmoid`.""")
+ attach_docstr("sigmoid_", r"""sigmoid_() -> Tensor
+
+ In-place version of :func:`mindspore.Tensor.sigmoid`.
+
+ .. warning::
+ Only supports Ascend.""")
  attach_docstr("sinc", r"""sinc() -> Tensor

  For details, please refer to :func:`mindspore.ops.sinc`.
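A hedged sketch of the new in-place `Tensor.sigmoid_` (per the warning above it is documented for Ascend only, so this assumes an Ascend device is configured):

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.array([-1.0, 0.0, 1.0], dtype=np.float32))
    x.sigmoid_()   # in-place logistic: x becomes 1 / (1 + exp(-x))
    print(x)       # approximately [0.269, 0.5, 0.731]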
@@ -4022,6 +4214,9 @@ For details, please refer to :func:`mindspore.ops.sqrt`.
  attach_docstr("square", r"""square() -> Tensor

  For details, please refer to :func:`mindspore.ops.square`.""")
+ attach_docstr("squeeze", r"""squeeze(*axis) -> Tensor
+
+ For details, please refer to :func:`mindspore.ops.squeeze`.""")
  attach_docstr("std", r"""std(axis=None, ddof=0, keepdims=False) -> Tensor

  For details, please refer to :func:`mindspore.ops.std`.
@@ -4105,7 +4300,7 @@ Args:
  other (Union[Tensor, number.Number, bool]): The second self, is a number.Number or
  a bool or a tensor whose data type is
  `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
- `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

  Keyword Args:
  alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
@@ -4467,13 +4662,106 @@ Examples:

  For more details, please refer to :func:`mindspore.ops.topk`.
  """)
+ attach_docstr("to", r"""to(dtype=None, non_blocking=False, copy=False) -> Tensor
+
+ Returns a tensor with the new specified data type.
+
+ Note:
+ - When converting complex numbers to boolean type, the imaginary part of the complex number is not
+ taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
+ - `non_blocking` and `copy` do not take effect in GRAPH_MODE or within jit.
+
+ Args:
+ dtype (dtype.Number, optional): The valid data type of the output tensor. Default: ``None``.
+ non_blocking(bool, optional): Data type conversion asynchronously. If ``True`` , convert data type asynchronously. If ``False`` , convert data type synchronously. Default: ``False`` .
+ copy(bool, optional): When copy is set ``True`` , a new Tensor is created even when then Tensor already matches the desired conversion. Default: ``False`` .
+
+ Returns:
+ Tensor, the data type of the tensor is `dtype` .
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
+ >>> input = Tensor(input_np)
+ >>> dtype = mindspore.int32
+ >>> output = input.to(dtype)
+ >>> print(output.dtype)
+ Int32
+ >>> print(output.shape)
+ (2, 3, 4, 5)
+
+ .. method:: Tensor.to(device=None, dtype=None, non_blocking=False, copy=False) -> Tensor
+ :noindex:
+
+ Returns a tensor with the new specified data type and device type.
+
+ Note:
+ `device` , `non_blocking` and `copy` do not take effect in GRAPH_MODE or within jit.
+
+ Args:
+ device(str, optional): The device type of the output tensor. Default: ``None`` .
+ dtype (dtype.Number, optional): The valid data type of the output tensor. Default: ``None`` .
+ non_blocking(bool, optional): Data type conversion asynchronously. If ``True`` , convert data type asynchronously. If ``False`` , convert data type synchronously. Default: ``False`` .
+ copy(bool, optional): When copy is set ``True`` , a new Tensor is created even when then Tensor already matches the desired conversion. Default: ``False`` .
+
+ Returns:
+ Tensor, the specified device type and data type of the tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
+ >>> input = Tensor(input_np)
+ >>> dtype = mindspore.int32
+ >>> output = input.to("Ascend")
+ >>> print(output.device)
+ "Ascend:0"
+
+ .. method:: Tensor.to(other, non_blocking=False, copy=False) -> Tensor
+ :noindex:
+
+ Returns a tensor with same device and dtype as the Tensor `other` .
+
+ Note:
+ `non_blocking` and `copy` do not take effect in GRAPH_MODE or within jit.
+
+ Args:
+ other(Tensor): The returned Tensor has the same device and dtype as `other` .
+ non_blocking(bool, optional): Data type conversion asynchronously. If ``True`` , convert data type asynchronously. If ``False`` , convert data type synchronously. Default: ``False`` .
+ copy(bool, optional): When copy is set ``True`` , a new Tensor is created even when then Tensor already matches the desired conversion. Default: ``False`` .
+
+ Returns:
+ Tensor, same device and dtype as the Tensor `other` .
+
+ Supported Platforms:
+ ``Ascend`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
+ >>> input = Tensor(input_np)
+ >>> other = input.to("Ascend", dtype=mindspore.float16)
+ >> output = input.to(other)
+ >>> print(output.device)
+ "Ascend:0"
+ >>> print(output.dtype)
+ float16
+ """)
  attach_docstr("transpose", r"""transpose(dim0, dim1) -> Tensor

  Interchange two axes of a tensor.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  dim0 (int): Specifies the first dimension to be transposed.
  dim1 (int): Specifies the second dimension to be transposed.
@@ -4743,6 +5031,26 @@ Examples:
  >>> print(output)
  [1. 2. 3. 2. 3. 4.]
  """)
+ attach_docstr("view", r"""view(*shape) -> Tensor
+
+ Reshape the tensor according to the input `shape` .
+
+ Args:
+ shape (Union[tuple(int), int]): Dimension of the output tensor.
+
+ Returns:
+ Tensor, which dimension is the input shape's value.
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> a = Tensor(np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float32))
+ >>> output = a.view((3, 2))
+ >>> print(output)
+ [[1. 2.]
+ [3. 2.]
+ [3. 4.]]
+ """)
  attach_docstr("where", r"""where(condition, y) -> Tensor

  For details, please refer to :func:`mindspore.ops.where`.
@@ -4751,39 +5059,6 @@ attach_docstr("xlogy", r"""xlogy(other) -> Tensor

  For details, please refer to :func:`mindspore.ops.xlogy`.
  """)
- attach_docstr("_to", r"""_to(dtype) -> Tensor
-
- Returns a tensor with the new specified data type.
-
- Note:
- When converting complex numbers to boolean type, the imaginary part of the complex number is not
- taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
-
- Args:
- dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
-
- Returns:
- Tensor, the data type of the tensor is `dtype`.
-
- Raises:
- TypeError: If `dtype` is not a Number.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
- >>> input = Tensor(input_np)
- >>> dtype = mindspore.int32
- >>> output = input._to(dtype)
- >>> print(output.dtype)
- Int32
- >>> print(output.shape)
- (2, 3, 4, 5)
- """)
  attach_docstr("__abs__", r"""__abs__() -> Tensor

  Alias for :func:`Tensor.abs`.