mindspore-2.7.0rc1-cp310-cp310-win_amd64.whl → mindspore-2.7.1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (370)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +5 -2
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +2 -2
  7. mindspore/_extends/builtin_operations.py +3 -3
  8. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  9. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  10. mindspore/_extends/parse/__init__.py +3 -3
  11. mindspore/_extends/parse/compile_config.py +24 -1
  12. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -3
  13. mindspore/_extends/parse/parser.py +28 -22
  14. mindspore/_extends/parse/resources.py +1 -1
  15. mindspore/_extends/parse/standard_method.py +23 -2
  16. mindspore/_extends/parse/trope.py +2 -1
  17. mindspore/_extends/pijit/pijit_func_white_list.py +9 -27
  18. mindspore/amp.py +0 -18
  19. mindspore/avcodec-59.dll +0 -0
  20. mindspore/avdevice-59.dll +0 -0
  21. mindspore/avfilter-8.dll +0 -0
  22. mindspore/avformat-59.dll +0 -0
  23. mindspore/avutil-57.dll +0 -0
  24. mindspore/boost/base.py +29 -2
  25. mindspore/common/__init__.py +18 -12
  26. mindspore/common/_decorator.py +3 -2
  27. mindspore/common/_grad_function.py +3 -1
  28. mindspore/common/_tensor_cpp_method.py +1 -1
  29. mindspore/common/_tensor_docs.py +371 -96
  30. mindspore/common/_utils.py +7 -43
  31. mindspore/common/api.py +434 -135
  32. mindspore/common/dtype.py +98 -57
  33. mindspore/common/dump.py +7 -108
  34. mindspore/common/dynamic_shape/__init__.py +0 -0
  35. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +15 -23
  36. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  37. mindspore/common/file_system.py +59 -9
  38. mindspore/common/hook_handle.py +82 -3
  39. mindspore/common/jit_config.py +5 -1
  40. mindspore/common/jit_trace.py +27 -12
  41. mindspore/common/lazy_inline.py +5 -3
  42. mindspore/common/np_dtype.py +3 -3
  43. mindspore/common/parameter.py +17 -127
  44. mindspore/common/recompute.py +4 -13
  45. mindspore/common/tensor.py +50 -217
  46. mindspore/communication/_comm_helper.py +11 -1
  47. mindspore/communication/comm_func.py +138 -4
  48. mindspore/communication/management.py +85 -1
  49. mindspore/config/op_info.config +0 -15
  50. mindspore/context.py +20 -106
  51. mindspore/dataset/__init__.py +1 -1
  52. mindspore/dataset/audio/transforms.py +1 -1
  53. mindspore/dataset/core/config.py +35 -1
  54. mindspore/dataset/engine/datasets.py +338 -319
  55. mindspore/dataset/engine/datasets_user_defined.py +38 -22
  56. mindspore/dataset/engine/datasets_vision.py +1 -1
  57. mindspore/dataset/engine/validators.py +1 -15
  58. mindspore/dataset/transforms/c_transforms.py +2 -2
  59. mindspore/dataset/transforms/transforms.py +3 -3
  60. mindspore/dataset/vision/__init__.py +1 -1
  61. mindspore/dataset/vision/py_transforms.py +8 -8
  62. mindspore/dataset/vision/transforms.py +17 -5
  63. mindspore/dataset/vision/utils.py +632 -21
  64. mindspore/device_context/ascend/op_tuning.py +35 -1
  65. mindspore/dnnl.dll +0 -0
  66. mindspore/{profiler/common/validator → graph}/__init__.py +9 -1
  67. mindspore/graph/custom_pass.py +55 -0
  68. mindspore/include/api/cell.h +28 -4
  69. mindspore/include/api/cfg.h +24 -7
  70. mindspore/include/api/context.h +1 -0
  71. mindspore/include/api/delegate.h +0 -2
  72. mindspore/include/api/dual_abi_helper.h +100 -19
  73. mindspore/include/api/graph.h +14 -1
  74. mindspore/include/api/kernel.h +16 -3
  75. mindspore/include/api/kernel_api.h +9 -1
  76. mindspore/include/api/metrics/accuracy.h +9 -0
  77. mindspore/include/api/model.h +5 -1
  78. mindspore/include/api/model_group.h +4 -0
  79. mindspore/include/api/model_parallel_runner.h +2 -0
  80. mindspore/include/api/status.h +48 -10
  81. mindspore/include/api/types.h +6 -1
  82. mindspore/include/dataset/constants.h +9 -0
  83. mindspore/include/dataset/execute.h +2 -2
  84. mindspore/jpeg62.dll +0 -0
  85. mindspore/mindrecord/__init__.py +3 -3
  86. mindspore/mindrecord/common/exceptions.py +1 -0
  87. mindspore/mindrecord/config.py +1 -1
  88. mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
  89. mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
  90. mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
  91. mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
  92. mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
  93. mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
  94. mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
  95. mindspore/mindrecord/filereader.py +4 -4
  96. mindspore/mindrecord/filewriter.py +5 -5
  97. mindspore/mindrecord/mindpage.py +2 -2
  98. mindspore/mindrecord/tools/cifar10.py +4 -3
  99. mindspore/mindrecord/tools/cifar100.py +1 -1
  100. mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
  101. mindspore/mindrecord/tools/cifar10_to_mr.py +6 -6
  102. mindspore/mindrecord/tools/csv_to_mr.py +1 -1
  103. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  104. mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
  105. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
  106. mindspore/mindspore_backend_common.dll +0 -0
  107. mindspore/mindspore_backend_manager.dll +0 -0
  108. mindspore/mindspore_cluster.dll +0 -0
  109. mindspore/mindspore_common.dll +0 -0
  110. mindspore/mindspore_core.dll +0 -0
  111. mindspore/mindspore_cpu.dll +0 -0
  112. mindspore/mindspore_dump.dll +0 -0
  113. mindspore/mindspore_frontend.dll +0 -0
  114. mindspore/mindspore_glog.dll +0 -0
  115. mindspore/mindspore_hardware_abstract.dll +0 -0
  116. mindspore/mindspore_memory_pool.dll +0 -0
  117. mindspore/mindspore_ms_backend.dll +0 -0
  118. mindspore/mindspore_ops.dll +0 -0
  119. mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
  120. mindspore/mindspore_profiler.dll +0 -0
  121. mindspore/mindspore_pyboost.dll +0 -0
  122. mindspore/mindspore_pynative.dll +0 -0
  123. mindspore/mindspore_runtime_pipeline.dll +0 -0
  124. mindspore/mindspore_runtime_utils.dll +0 -0
  125. mindspore/mindspore_tools.dll +0 -0
  126. mindspore/mint/__init__.py +15 -10
  127. mindspore/mint/distributed/__init__.py +4 -0
  128. mindspore/mint/distributed/distributed.py +392 -69
  129. mindspore/mint/nn/__init__.py +2 -16
  130. mindspore/mint/nn/functional.py +4 -110
  131. mindspore/mint/nn/layer/__init__.py +0 -2
  132. mindspore/mint/nn/layer/_functions.py +1 -2
  133. mindspore/mint/nn/layer/activation.py +0 -6
  134. mindspore/mint/nn/layer/basic.py +0 -47
  135. mindspore/mint/nn/layer/conv.py +10 -10
  136. mindspore/mint/nn/layer/normalization.py +11 -16
  137. mindspore/mint/nn/layer/pooling.py +0 -4
  138. mindspore/nn/__init__.py +1 -3
  139. mindspore/nn/cell.py +231 -239
  140. mindspore/nn/layer/activation.py +4 -2
  141. mindspore/nn/layer/basic.py +56 -14
  142. mindspore/nn/layer/container.py +16 -0
  143. mindspore/nn/layer/embedding.py +4 -169
  144. mindspore/nn/layer/image.py +1 -1
  145. mindspore/nn/layer/normalization.py +2 -1
  146. mindspore/nn/layer/thor_layer.py +4 -85
  147. mindspore/nn/optim/ada_grad.py +0 -1
  148. mindspore/nn/optim/adafactor.py +0 -1
  149. mindspore/nn/optim/adam.py +32 -127
  150. mindspore/nn/optim/adamax.py +0 -1
  151. mindspore/nn/optim/asgd.py +0 -1
  152. mindspore/nn/optim/ftrl.py +8 -102
  153. mindspore/nn/optim/lamb.py +1 -4
  154. mindspore/nn/optim/lars.py +0 -3
  155. mindspore/nn/optim/lazyadam.py +25 -218
  156. mindspore/nn/optim/momentum.py +5 -43
  157. mindspore/nn/optim/optimizer.py +6 -55
  158. mindspore/nn/optim/proximal_ada_grad.py +0 -1
  159. mindspore/nn/optim/rmsprop.py +0 -1
  160. mindspore/nn/optim/rprop.py +0 -1
  161. mindspore/nn/optim/sgd.py +0 -1
  162. mindspore/nn/optim/tft_wrapper.py +2 -4
  163. mindspore/nn/optim/thor.py +0 -2
  164. mindspore/nn/probability/bijector/bijector.py +7 -8
  165. mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
  166. mindspore/nn/probability/bijector/power_transform.py +20 -21
  167. mindspore/nn/probability/bijector/scalar_affine.py +5 -5
  168. mindspore/nn/probability/bijector/softplus.py +13 -14
  169. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  170. mindspore/nn/wrap/cell_wrapper.py +39 -5
  171. mindspore/nn/wrap/grad_reducer.py +4 -89
  172. mindspore/numpy/array_creations.py +4 -4
  173. mindspore/numpy/fft.py +9 -9
  174. mindspore/numpy/utils_const.py +1 -1
  175. mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
  176. mindspore/onnx/onnx_export.py +137 -0
  177. mindspore/opencv_core4110.dll +0 -0
  178. mindspore/opencv_imgcodecs4110.dll +0 -0
  179. mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
  180. mindspore/ops/__init__.py +2 -0
  181. mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
  182. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  183. mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
  184. mindspore/ops/_op_impl/cpu/__init__.py +1 -5
  185. mindspore/ops/_op_impl/cpu/{buffer_append.py → joinedstr_op.py} +8 -8
  186. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +28 -24
  187. mindspore/ops/auto_generate/gen_extend_func.py +6 -11
  188. mindspore/ops/auto_generate/gen_ops_def.py +385 -154
  189. mindspore/ops/auto_generate/gen_ops_prim.py +5676 -5167
  190. mindspore/ops/communication.py +97 -0
  191. mindspore/ops/composite/__init__.py +5 -2
  192. mindspore/ops/composite/base.py +16 -2
  193. mindspore/ops/composite/multitype_ops/__init__.py +3 -1
  194. mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
  195. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  196. mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
  197. mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
  198. mindspore/ops/function/__init__.py +2 -0
  199. mindspore/ops/function/array_func.py +24 -18
  200. mindspore/ops/function/comm_func.py +3883 -0
  201. mindspore/ops/function/debug_func.py +7 -6
  202. mindspore/ops/function/grad/grad_func.py +4 -12
  203. mindspore/ops/function/math_func.py +89 -86
  204. mindspore/ops/function/nn_func.py +92 -313
  205. mindspore/ops/function/random_func.py +9 -18
  206. mindspore/ops/functional.py +4 -1
  207. mindspore/ops/functional_overload.py +377 -30
  208. mindspore/ops/operations/__init__.py +2 -5
  209. mindspore/ops/operations/_custom_ops_utils.py +7 -9
  210. mindspore/ops/operations/_inner_ops.py +12 -50
  211. mindspore/ops/operations/_rl_inner_ops.py +0 -933
  212. mindspore/ops/operations/array_ops.py +5 -50
  213. mindspore/ops/operations/comm_ops.py +95 -17
  214. mindspore/ops/operations/custom_ops.py +237 -22
  215. mindspore/ops/operations/debug_ops.py +33 -35
  216. mindspore/ops/operations/manually_defined/ops_def.py +39 -318
  217. mindspore/ops/operations/math_ops.py +5 -5
  218. mindspore/ops/operations/nn_ops.py +3 -3
  219. mindspore/ops/operations/sparse_ops.py +0 -83
  220. mindspore/ops/primitive.py +4 -27
  221. mindspore/ops/tensor_method.py +88 -10
  222. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
  223. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
  224. mindspore/ops_generate/api/functions_cc_generator.py +53 -4
  225. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
  226. mindspore/ops_generate/common/gen_constants.py +11 -10
  227. mindspore/ops_generate/common/op_proto.py +18 -1
  228. mindspore/ops_generate/common/template.py +102 -245
  229. mindspore/ops_generate/common/template_utils.py +212 -0
  230. mindspore/ops_generate/gen_custom_ops.py +69 -0
  231. mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
  232. mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
  233. mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
  234. mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
  235. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
  236. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
  237. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
  238. mindspore/ops_generate/pyboost/gen_pyboost_func.py +0 -16
  239. mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
  240. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
  241. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
  242. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
  243. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
  244. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
  245. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
  246. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
  247. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
  248. mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
  249. mindspore/ops_generate/resources/yaml_loader.py +13 -0
  250. mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
  251. mindspore/parallel/_auto_parallel_context.py +5 -15
  252. mindspore/parallel/_cell_wrapper.py +1 -1
  253. mindspore/parallel/_parallel_serialization.py +4 -6
  254. mindspore/parallel/_ps_context.py +2 -2
  255. mindspore/parallel/_utils.py +34 -17
  256. mindspore/parallel/auto_parallel.py +23 -9
  257. mindspore/parallel/checkpoint_transform.py +20 -2
  258. mindspore/parallel/cluster/process_entity/_api.py +28 -33
  259. mindspore/parallel/cluster/process_entity/_utils.py +9 -5
  260. mindspore/parallel/cluster/run.py +5 -3
  261. mindspore/{experimental/llm_boost/ascend_native → parallel/distributed}/__init__.py +21 -22
  262. mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
  263. mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
  264. mindspore/parallel/function/reshard_func.py +6 -5
  265. mindspore/parallel/nn/parallel_cell_wrapper.py +40 -3
  266. mindspore/parallel/nn/parallel_grad_reducer.py +0 -8
  267. mindspore/parallel/shard.py +7 -21
  268. mindspore/parallel/strategy.py +336 -0
  269. mindspore/parallel/transform_safetensors.py +127 -20
  270. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +13 -9
  271. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
  272. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
  273. mindspore/profiler/common/constant.py +5 -0
  274. mindspore/profiler/common/file_manager.py +9 -0
  275. mindspore/profiler/common/msprof_cmd_tool.py +40 -4
  276. mindspore/profiler/common/path_manager.py +65 -24
  277. mindspore/profiler/common/profiler_context.py +27 -14
  278. mindspore/profiler/common/profiler_info.py +3 -3
  279. mindspore/profiler/common/profiler_meta_data.py +1 -0
  280. mindspore/profiler/common/profiler_op_analyse.py +10 -6
  281. mindspore/profiler/common/profiler_path_manager.py +13 -0
  282. mindspore/profiler/common/util.py +30 -3
  283. mindspore/profiler/dynamic_profiler.py +91 -46
  284. mindspore/profiler/envprofiler.py +30 -5
  285. mindspore/profiler/experimental_config.py +18 -2
  286. mindspore/profiler/platform/cpu_profiler.py +10 -4
  287. mindspore/profiler/platform/npu_profiler.py +34 -7
  288. mindspore/profiler/profiler.py +193 -145
  289. mindspore/profiler/profiler_action_controller.py +1 -1
  290. mindspore/profiler/profiler_interface.py +2 -2
  291. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  292. mindspore/run_check/_check_version.py +108 -24
  293. mindspore/runtime/__init__.py +9 -6
  294. mindspore/runtime/executor.py +35 -0
  295. mindspore/runtime/memory.py +113 -0
  296. mindspore/runtime/thread_bind_core.py +1 -1
  297. mindspore/swresample-4.dll +0 -0
  298. mindspore/swscale-6.dll +0 -0
  299. mindspore/tinyxml2.dll +0 -0
  300. mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
  301. mindspore/tools/data_dump.py +130 -0
  302. mindspore/tools/sdc_detect.py +91 -0
  303. mindspore/tools/stress_detect.py +63 -0
  304. mindspore/train/__init__.py +6 -6
  305. mindspore/train/_utils.py +8 -21
  306. mindspore/train/amp.py +6 -7
  307. mindspore/train/callback/_callback.py +2 -1
  308. mindspore/train/callback/_checkpoint.py +1 -17
  309. mindspore/train/callback/_flops_collector.py +10 -6
  310. mindspore/train/callback/_train_fault_tolerance.py +72 -25
  311. mindspore/train/data_sink.py +5 -9
  312. mindspore/train/dataset_helper.py +5 -5
  313. mindspore/train/model.py +41 -230
  314. mindspore/train/serialization.py +160 -401
  315. mindspore/train/train_thor/model_thor.py +2 -2
  316. mindspore/turbojpeg.dll +0 -0
  317. mindspore/utils/__init__.py +6 -3
  318. mindspore/utils/dlpack.py +92 -0
  319. mindspore/utils/dryrun.py +1 -1
  320. mindspore/utils/runtime_execution_order_check.py +10 -0
  321. mindspore/utils/sdc_detect.py +14 -12
  322. mindspore/utils/stress_detect.py +43 -0
  323. mindspore/utils/utils.py +152 -16
  324. mindspore/version.py +1 -1
  325. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
  326. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/RECORD +330 -344
  327. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  328. mindspore/communication/_hccl_management.py +0 -297
  329. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -207
  330. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
  331. mindspore/experimental/llm_boost/atb/__init__.py +0 -23
  332. mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
  333. mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
  334. mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
  335. mindspore/experimental/llm_boost/register.py +0 -130
  336. mindspore/experimental/llm_boost/utils.py +0 -31
  337. mindspore/include/OWNERS +0 -7
  338. mindspore/mindspore_cpu_res_manager.dll +0 -0
  339. mindspore/mindspore_ops_kernel_common.dll +0 -0
  340. mindspore/mindspore_res_manager.dll +0 -0
  341. mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
  342. mindspore/nn/reinforcement/_batch_read_write.py +0 -142
  343. mindspore/nn/reinforcement/_tensors_queue.py +0 -152
  344. mindspore/nn/reinforcement/tensor_array.py +0 -145
  345. mindspore/opencv_core452.dll +0 -0
  346. mindspore/opencv_imgcodecs452.dll +0 -0
  347. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
  348. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
  349. mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
  350. mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
  351. mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
  352. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
  353. mindspore/ops/operations/_tensor_array.py +0 -359
  354. mindspore/ops/operations/rl_ops.py +0 -288
  355. mindspore/parallel/_offload_context.py +0 -275
  356. mindspore/parallel/_recovery_context.py +0 -115
  357. mindspore/parallel/_transformer/__init__.py +0 -35
  358. mindspore/parallel/_transformer/layers.py +0 -765
  359. mindspore/parallel/_transformer/loss.py +0 -251
  360. mindspore/parallel/_transformer/moe.py +0 -693
  361. mindspore/parallel/_transformer/op_parallel_config.py +0 -222
  362. mindspore/parallel/_transformer/transformer.py +0 -3124
  363. mindspore/parallel/mpi/_mpi_config.py +0 -116
  364. mindspore/profiler/common/validator/validate_path.py +0 -84
  365. mindspore/train/memory_profiling_pb2.py +0 -298
  366. mindspore/utils/hooks.py +0 -81
  367. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  368. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
  369. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
  370. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
@@ -28,8 +28,7 @@ def print_(*input_x):
     Outputs the inputs to stdout.
     The outputs are printed to screen by default.
     It can also be saved in a file by setting the parameter `print_file_path` in `context`.
-    :func:`mindspore.parse_print` can be employed to reload the data.
-    For more information, please refer to :func:`mindspore.set_context` and :func:`mindspore.parse_print`.
+    For more information, please refer to :func:`mindspore.set_context`.
     In Ascend platform with graph mode, the environment variables `MS_DUMP_SLICE_SIZE` and `MS_DUMP_WAIT_TIME`
     can be set to solve operator execution failure when outputting big tensor or outputting tensor intensively.

@@ -124,6 +123,7 @@ def tensordump(file_name, tensor, mode='out'):
     --master_port=11450 --log_dir=msrun_log --join=True --cluster_time_out=300 tensordump_example.py

     >>> import os
+    >>> import time
     >>> import numpy as np
     >>> import mindspore
     >>> from mindspore import nn, context
@@ -150,18 +150,19 @@ def tensordump(file_name, tensor, mode='out'):
         >>> strategy1 = ((1, 2), (2, 1))
         >>> strategy2 = ((1, 2), (2, 1))
         >>> with no_init_parameters():
-        >>>     net = Net(strategy1, strategy2)
+        ...     net = Net(strategy1, strategy2)
         >>> x = mindspore.tensor(0.1 * mindspore.ops.randn(64, 64), mindspore.float32)
         >>> y = mindspore.tensor(0.1 * mindspore.ops.randn(64, 64), mindspore.float32)
         >>> b = mindspore.tensor(0.1 * mindspore.ops.randn(64, 64), mindspore.float32)
-        >>> parallel_net = Autoparallel(net, parallel_mode="semi_auto")
+        >>> parallel_net = AutoParallel(net, parallel_mode="semi_auto")
         >>> parallel_net.dataset_strategy(config="full_batch")
         >>> out = parallel_net(x, y, b)
         >>> print(f"out shape is: {out.shape}")
-        >>> # out shape is (64, 64)
+        out shape is (64, 64)
+        >>> time.sleep(0.5) # npy file is generated asynchronously, spend an interval time then load it.
         >>> matmul1_output_slice = np.load(f'rank_{rank_id}_mul1_mul2_float32_0.npy') # load matmul1's output slice
         >>> print(f"matmul1_output_slice is loaded, shape is: {matmul1_output_slice.shape}")
-        >>> # matmul1_output_slice is loaded, shape is: (64, 64)
+        matmul1_output_slice is loaded, shape is: (64, 64)
     """

     if not isinstance(file_name, str):
@@ -654,9 +654,7 @@ def _check_jvp_input_v_len(inputs_len, v_len):

 def jvp(fn, inputs, v, has_aux=False):
     """
-    Compute the jacobian-vector-product of the given network. The calculation procedure of JVP can be found in
-    `forward-mode differentiation
-    <https://www.mindspore.cn/docs/en/master/design/programming_paradigm.html#forward-mode-ad>`_.
+    Compute the jacobian-vector-product of the given network.

     Args:
         fn (Union[Function, Cell]): The function or net that takes Tensor inputs and returns single Tensor or tuple of
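Note: the `jvp` change above is doc-only (the forward-mode link was dropped). A minimal usage sketch of `mindspore.ops.jvp`; the cubic function and input values below are illustrative, not from the diff:

    import mindspore
    from mindspore import ops

    def f(x):
        return x ** 3

    x = mindspore.tensor([1.0, 2.0], mindspore.float32)
    v = mindspore.tensor([1.0, 1.0], mindspore.float32)
    # jvp returns the primal output and the directional derivative J(x) @ v.
    out, grad = ops.jvp(f, (x,), (v,))
    print(grad)  # 3 * x**2 * v -> [ 3. 12.]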
@@ -869,9 +867,7 @@ _vjp_grad_op_with_weight = _Grad(get_all=True, get_by_list=True, sens_param=True

 def vjp(fn, *inputs, weights=None, has_aux=False):
     """
-    Compute the vector-jacobian-product of the given network. `vjp` matches
-    `reverse-mode differentiation
-    <https://www.mindspore.cn/docs/en/master/design/programming_paradigm.html#reverse-mode-ad>`_.
+    Compute the vector-jacobian-product of the given network.

     Args:
         fn (Union[Function, Cell]): The function or net that takes Tensor inputs and returns single Tensor or tuple of
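Likewise for `vjp`, which returns the primal output plus a closure that left-multiplies the Jacobian by a cotangent; a minimal sketch with illustrative values:

    import mindspore
    from mindspore import ops

    def f(x):
        return x ** 3

    x = mindspore.tensor([1.0, 2.0], mindspore.float32)
    out, vjp_fn = ops.vjp(f, x)
    v = mindspore.tensor([1.0, 1.0], mindspore.float32)
    # vjp_fn computes v @ J(x); for an elementwise cube this is 3 * x**2 * v.
    print(vjp_fn(v))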
@@ -1070,9 +1066,7 @@ _vmap = _Vmap()

 def jacfwd(fn, grad_position=0, has_aux=False):
     """
-    Compute Jacobian via forward mode, corresponding to
-    `forward-mode differentiation
-    <https://www.mindspore.cn/docs/en/master/design/programming_paradigm.html#forward-mode-ad>`_.
+    Compute Jacobian via forward mode.
     When number of outputs is much greater than that of inputs, it's better to calculate Jacobian via forward mode than
     reverse mode to get better performance.

@@ -1241,9 +1235,7 @@ _grad = _Grad(get_by_position=True, has_aux=False, sens_param=True)

 def jacrev(fn, grad_position=0, has_aux=False):
     """
-    Compute Jacobian via reverse mode, corresponding to
-    `reverse-mode differentiation
-    <https://www.mindspore.cn/docs/en/master/design/programming_paradigm.html#reverse-mode-ad>`_.
+    Compute Jacobian via reverse mode.
     When number of inputs is much greater than that of outputs, it's better to calculate Jacobian via reverse mode than
     forward mode to get better performance.

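The `jacfwd`/`jacrev` edits are also doc-only; the mode-selection guidance in the retained text can be checked with a sketch like this (square function and input are illustrative):

    import mindspore
    from mindspore import ops

    def f(x):
        return x ** 2

    x = mindspore.tensor([1.0, 2.0, 3.0], mindspore.float32)
    # Both return the same 3x3 diagonal Jacobian; jacfwd is preferable when
    # outputs outnumber inputs, jacrev when inputs outnumber outputs.
    print(ops.jacfwd(f)(x))
    print(ops.jacrev(f)(x))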
@@ -44,7 +44,7 @@ from mindspore.ops.auto_generate.pyboost_inner_prim import reduce_max_impl, redu
 from mindspore.ops.operations.math_ops import Ormqr
 from mindspore.ops.operations.math_ops import DivMod
 from mindspore.ops.auto_generate import multi_scale_deformable_attn_op
-from mindspore.ops.operations.array_ops import MatrixSetDiagV3, Transpose
+from mindspore.ops.operations.array_ops import MatrixSetDiagV3
 # 1
 from mindspore.ops.auto_generate import (minimum, maximum, mul, muls, sin, sinc, sinh, cummax, real, conj, add, sub,
                                          cos,
@@ -58,7 +58,7 @@ from mindspore.ops.auto_generate import (minimum, maximum, mul, muls, sin, sinc,
                                          xlogy_op, xlogy_scalar_other_op, xlogy_scalar_self_op, trunc, histc_ext, roll,
                                          bincount_ext, rotated_iou_op, cat, narrow, var_op, pow, inplace_erfinv_op,
                                          frac_ext, pow_tensor_scalar_op, not_equal_op, isinf, addmv_op, cdist,
-                                         addbmm_op, addmm_op, pow_scalar_tensor_op)
+                                         addbmm_op, addmm_op, pow_scalar_tensor_op, transpose_op)
 # 2
 from mindspore.ops.functional_overload import gmm
 # 3
@@ -183,7 +183,6 @@ tensor_muls = muls
 tensor_pow = P.Pow()
 pows = tensor_pow
 tensor_sub = P.Sub()
-transpose_ = P.Transpose()
 xdivy_ = P.Xdivy()
 tensor_div_ = P.Div()
 tensor_divmod_ = DivMod()
@@ -707,7 +706,7 @@ def permute(input, axis):
         [ 8. 11.]
         [ 9. 12.]]]
     """
-    return transpose_(input, axis)
+    return transpose_op(input, axis)


 def subtract(input, other, *, alpha=1):
@@ -1218,7 +1217,7 @@ def logical_not(input):

     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
         >>> output = mindspore.ops.logical_not(x)
         >>> print(output)
         [False True False]
@@ -1250,23 +1249,23 @@ def logical_or(input, other):

     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         [ True True True]
-        >>> x = mindspore.tensor(1, mindspore.bool_)
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> x = mindspore.tensor(1, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool_)
+        >>> y = mindspore.tensor([True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         [True True]
@@ -1298,23 +1297,23 @@ def logical_and(input, other):

     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         [ True False False]
-        >>> x = mindspore.tensor(1, mindspore.bool_)
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> x = mindspore.tensor(1, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         False
         >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         False
         >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool_)
+        >>> y = mindspore.tensor([True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         [True False]
@@ -1595,7 +1594,7 @@ def t(input):
         [3, 4]])
     """
     if input.ndim == 2:
-        return transpose_(input, (1, 0))
+        return transpose_op(input, (1, 0))
     return input


@@ -1782,10 +1781,10 @@ def pow_ext(input, exponent):
     Args:
         input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         exponent (Union[Tensor, Number]): The second input is a Number or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

     Returns:
         Tensor, the shape is the same as the one after broadcasting,
@@ -4215,33 +4214,33 @@ def var_mean(input, axis=None, ddof=0, keepdims=False):
         Tensor(shape=[], dtype=Float32, value= 3.16667))
         >>>
         >>> # case 2: Compute the variance and mean along axis 0.
-        >>> output = mindspore.ops.var_mean(input, axis=0)
+        >>> mindspore.ops.var_mean(input, axis=0)
         (Tensor(shape=[4], dtype=Float32, value= [ 2.88888884e+00, 6.66666687e-01, 1.55555570e+00, 2.22222194e-01]),
         Tensor(shape=[4], dtype=Float32, value= [ 3.33333325e+00, 3.00000000e+00, 3.66666675e+00, 2.66666675e+00]))
         >>>
         >>> # case 3: If keepdims=True, the output shape will be same of that of the input.
-        >>> output = mindspore.ops.var_mean(input, axis=0, keepdims=True)
+        >>> mindspore.ops.var_mean(input, axis=0, keepdims=True)
         (Tensor(shape=[1, 4], dtype=Float32, value=
         [[ 2.88888884e+00, 6.66666687e-01, 1.55555570e+00, 2.22222194e-01]]),
         Tensor(shape=[1, 4], dtype=Float32, value=
         [[ 3.33333325e+00, 3.00000000e+00, 3.66666675e+00, 2.66666675e+00]]))
         >>>
         >>> # case 4: If ddof=1:
-        >>> output = mindspore.ops.var_mean(input, axis=0, keepdims=True, ddof=1)
+        >>> mindspore.ops.var_mean(input, axis=0, keepdims=True, ddof=1)
         (Tensor(shape=[1, 4], dtype=Float32, value=
         [[ 4.33333349e+00, 1.00000000e+00, 2.33333349e+00, 3.33333313e-01]]),
         Tensor(shape=[1, 4], dtype=Float32, value=
         [[ 3.33333325e+00, 3.00000000e+00, 3.66666675e+00, 2.66666675e+00]]))
         >>>
         >>> # case 5: If ddof=True, same as ddof=1:
-        >>> output = mindspore.ops.var_mean(input, axis=0, keepdims=True, ddof=True)
+        >>> mindspore.ops.var_mean(input, axis=0, keepdims=True, ddof=True)
         (Tensor(shape=[1, 4], dtype=Float32, value=
         [[ 4.33333349e+00, 1.00000000e+00, 2.33333349e+00, 3.33333313e-01]]),
         Tensor(shape=[1, 4], dtype=Float32, value=
         [[ 3.33333325e+00, 3.00000000e+00, 3.66666675e+00, 2.66666675e+00]]))
         >>>
         >>> # case 6: If ddof=False, same as ddof=0:
-        >>> output = mindspore.ops.var_mean(input, axis=0, keepdims=True, ddof=False)
+        >>> mindspore.ops.var_mean(input, axis=0, keepdims=True, ddof=False)
         (Tensor(shape=[1, 4], dtype=Float32, value=
         [[ 2.88888884e+00, 6.66666687e-01, 1.55555570e+00, 2.22222194e-01]]),
         Tensor(shape=[1, 4], dtype=Float32, value=
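The `var_mean` hunk only reshapes the doctests, but the `ddof` semantics they exercise are easy to check directly; a small sketch with an illustrative input:

    import mindspore

    x = mindspore.tensor([1.0, 2.0, 3.0, 4.0], mindspore.float32)
    var0, mean = mindspore.ops.var_mean(x)       # ddof=0: divide by N
    var1, _ = mindspore.ops.var_mean(x, ddof=1)  # ddof=1: divide by N - 1
    print(var0, var1, mean)                      # 1.25 1.6666666 2.5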
@@ -5102,9 +5101,6 @@ def bernoulli_ext(input, *, generator=None):
     .. math::
         output_{i} \sim Bernoulli(p=input_{i})

-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         input (Tensor): The input tensor of Bernoulli distribution, where the i^{th} element 'input_{i}' represents the
             probability that the corresponding output element 'output_{i}' is set to '1', therefore each element in
@@ -5149,6 +5145,18 @@ def bernoulli_ext(input, *, generator=None):
     return bernoulli_ext_(input, seed, offset)


+def bernoulli_(input, p=0.5, *, generator=None):
+    r"""
+    bernoulli_(input, p=0.5, *, generator=None) -> Tensor
+
+    In-place version of :func:`mindspore.ops.bernoulli_ext`.
+    """
+    if generator is None:
+        generator = default_generator
+    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
+    return ops.functional_overload.bernoulli_(input, p, seed, offset)
+
+
 def bessel_i1(x):
     r"""
     Computes the first order modified Bessel function of the first kind for each element input.
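The new `bernoulli_` wraps the same seed/offset plumbing as `bernoulli_ext` but dispatches to an in-place overload. A sketch of the intended semantics using the long-standing out-of-place `mindspore.ops.bernoulli` (the in-place name and its export path are known only from the hunk above):

    import mindspore
    from mindspore import ops

    x = ops.ones((2, 3), mindspore.float32)
    y = ops.bernoulli(x, p=0.3)  # out-of-place: fresh tensor of 0/1 draws
    # bernoulli_(input, p=0.5, *, generator=None) from the hunk writes the
    # draws back into `input` instead of allocating a new tensor.
    print(y)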
@@ -5424,10 +5432,10 @@ def cummin(input, axis):
     else:
         x_shape = shape_(input)
         prem = _create_cummin_perm(axis, x_shape)
-        input = transpose_(input, prem)
+        input = transpose_op(input, prem)
         out1, out2 = cummin_op(input)
-        out1 = transpose_(out1, prem)
-        out2 = transpose_(out2, prem)
+        out1 = transpose_op(out1, prem)
+        out2 = transpose_op(out2, prem)
     return (out1, out2)


@@ -6625,7 +6633,7 @@ def amin(input, axis=None, keepdims=False, *, initial=None, where=None):
         >>> # case 4: Use "where" to include only specific elements in computing the minimum.
         >>> where = mindspore.tensor([[1, 0, 1, 0],
         ...                           [0, 0, 1, 1],
-        ...                           [1, 1, 1, 0]], dtype=mindspore.bool_)
+        ...                           [1, 1, 1, 0]], dtype=mindspore.bool)
         >>> mindspore.ops.amin(input, axis=1, keepdims=True, initial=0, where=where)
         Tensor(shape=[3, 1], dtype=Int64, value=
         [[ 0],
@@ -6707,7 +6715,7 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
         >>> # case 4: Use "where" to include only specific elements in computing the maximum.
         >>> where = mindspore.tensor([[0, 0, 1, 0],
         ...                           [0, 0, 1, 1],
-        ...                           [1, 1, 1, 0]], dtype=mindspore.bool_)
+        ...                           [1, 1, 1, 0]], dtype=mindspore.bool)
         >>> mindspore.ops.amax(input, axis=1, keepdims=True, initial=0, where=where)
         Tensor(shape=[3, 1], dtype=Int64, value=
         [[4],
@@ -7693,9 +7701,6 @@ def norm_ext(input, p='fro', dim=None, keepdim=False, *, dtype=None):
     other `int` or `float`  -- not supported --  :math:`sum(abs(x)^{p})^{(1 / p)}`
     ====================== ================================ ==========================================

-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         input (Tensor): The shape is :math:`(*)` or :math:`(*, m, n)`
             where :math:`*` means, any number of additional dimensions.
@@ -8206,6 +8211,10 @@ def kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None):
         [5.27734413e-05 2.15672745e-01 1.00000000e+00 2.15672745e-01
         5.27734413e-05]
     """
+    if not isinstance(periodic, bool):
+        raise TypeError(
+            f"For 'kaiser_window', 'periodic' must be a variable of Boolean type, but got {type(periodic)}"
+        )
     if not isinstance(window_length, int):
         raise TypeError(
             f"For 'kaiser_window', 'window_length' must be a non-negative integer, but got {type(window_length)}"
@@ -8216,10 +8225,6 @@ def kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None):
         )
     if window_length <= 1:
         return Tensor(np.ones(window_length))
-    if not isinstance(periodic, bool):
-        raise TypeError(
-            f"For 'kaiser_window', 'periodic' must be a variable of Boolean type, but got {type(periodic)}"
-        )
    if dtype is not None and dtype not in mstype.float_type:
        raise TypeError(f"For 'kaiser_window', 'dtype' must be floating point dtypes, but got {dtype}.")
    if periodic:
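Moving the `periodic` type check ahead of the `window_length <= 1` early return means a bad `periodic` now fails even for degenerate windows, where it was silently accepted before. A sketch of the tightened behaviour (argument values are illustrative):

    import mindspore

    try:
        # window_length=1 used to return early before `periodic` was validated.
        mindspore.ops.kaiser_window(1, periodic="yes")
    except TypeError as err:
        print(err)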
@@ -8311,12 +8316,9 @@ def stft(x, n_fft, hop_length=None, win_length=None, window=None, center=True,
         >>> print(output.shape)
         (2, 33, 450, 2)
     """
-    if hop_length is None:
-        hop_length = int(n_fft // 4)
-    if win_length is None:
-        win_length = int(n_fft // 1)
-    if window is None:
-        window = ops.ones(win_length, mstype.float32)
+    hop_length = int(n_fft // 4) if hop_length is None else hop_length
+    win_length = int(n_fft // 1) if win_length is None else win_length
+    window = ops.ones(win_length, mstype.float32) if window is None else window

     def _is_complex(x):
         return dtype_(x) in [mstype.complex64, mstype.complex128]
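The `stft` rewrite is behaviour-preserving: the conditional expressions compute the same defaults as the removed `if` blocks. Shown standalone with an illustrative `n_fft`:

    n_fft = 64
    hop_length = int(n_fft // 4)  # default hop: a quarter of the FFT size -> 16
    win_length = int(n_fft // 1)  # default window spans the full FFT -> 64
    print(hop_length, win_length)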
@@ -8421,8 +8423,10 @@ def matmul(input, other):
     Return the matrix product of two tensors.

     Note:
-        - The dtype of `input` and `other` must be same.
+        - `input` and `other` must have same data type, and both of them must be not scalar and support broadcast.
         - On Ascend, the rank of `input` or `other` must be between 1 and 6.
+        - `input` and `other` must not be empty tensor when executing the backward process for dynamic shape case in
+          JIT mode.

     Args:
         input (Tensor): The first input tensor.
@@ -8441,18 +8445,19 @@ def matmul(input, other):
         >>> other = mindspore.ops.arange(20, dtype=mindspore.float32).reshape(4, 5)
         >>> output = mindspore.ops.matmul(input, other)
         >>> print(output)
-        [[[ 70, 76, 82, 88, 94],
-        [ 190, 212, 234, 256, 278],
-        [ 310, 348, 386, 424, 462]],
-        [[ 430, 484, 538, 592, 646],
-        [ 550, 620, 690, 760, 830],
-        [ 670, 756, 842, 928, 1014]]]
+        [[[ 70. 76. 82. 88. 94.]
+        [ 190. 212. 234. 256. 278.]
+        [ 310. 348. 386. 424. 462.]]
+        [[ 430. 484. 538. 592. 646.]
+        [ 550. 620. 690. 760. 830.]
+        [ 670. 756. 842. 928. 1014.]]]
         >>>
         >>> # case 2 : The rank of `input` is 1.
         >>> input = mindspore.ops.ones(([1, 2]))
         >>> other = mindspore.ops.ones(([2]))
-        >>> mindspore.ops.matmul(input, other)
-        Tensor(shape=[1], dtype=Float32, value= [ 2.00000000e+00])
+        >>> output = mindspore.ops.matmul(input, other)
+        >>> print(output)
+        [2.]
     """
     return auto_generate.matmul_ext(input, other)

@@ -8544,14 +8549,14 @@ def bmm(input_x, mat2):
         >>> mat2 = mindspore.ops.arange(72, dtype=mindspore.float32).reshape(2, 4, 3, 3)
         >>> out = mindspore.ops.bmm(input_x, mat2)
         >>> print(out)
-        [[[[ 15, 18, 21]],
-        [[ 150, 162, 174]],
-        [[ 447, 468, 489]],
-        [[ 906, 936, 966]]],
-        [[[1527, 1566, 1605]],
-        [[2310, 2358, 2406]],
-        [[3255, 3312, 3369]],
-        [[4362, 4428, 4494]]]]
+        [[[[ 15. 18. 21.]]
+        [[ 150. 162. 174.]]
+        [[ 447. 468. 489.]]
+        [[ 906. 936. 966.]]]
+        [[[1527. 1566. 1605.]]
+        [[2310. 2358. 2406.]]
+        [[3255. 3312. 3369.]]
+        [[4362. 4428. 4494.]]]]
     """
     return batch_matmul_(input_x, mat2)

@@ -9160,10 +9165,10 @@ def remainder_ext(input, other):
         input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
-            a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
-            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool\_.
+            a bool or a tensor whose data type is number or bool when the dividend is a tensor.
+            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool.

     Returns:
         Tensor, with dtype promoted and shape broadcasted.
@@ -10177,23 +10182,23 @@ def logical_xor(input, other):

     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_xor(x, y)
         >>> print(output)
         [False True True]
-        >>> x = mindspore.tensor(1, mindspore.bool_)
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> x = mindspore.tensor(1, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_xor(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_xor(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool_)
+        >>> y = mindspore.tensor([True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_xor(x, y)
         >>> print(output)
         [False True]
@@ -10334,8 +10339,6 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
         [[0, 0, 0], [0, 0, 0], [0, 0, 0], [3, 6, 9]],
         [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]])
     """
-
-    transpose_op = Transpose()
     matrix_set_diag_op = MatrixSetDiagV3(align="LEFT_RIGHT")
     zeros = ops.Zeros()
     if not isinstance(input, (Tensor, Tensor_)):
@@ -10652,7 +10655,7 @@ def _canonicalize_fft_shape_and_dim(input, shape, dim):


 def as_strided(x, shape=None, strides=None):
-    n = np.dtype(mstype.dtype_to_nptype(x.dtype)).itemsize
+    n = np.dtype(mstype._dtype_to_nptype(x.dtype)).itemsize  # pylint:disable=protected-access
     strides = tuple(np.array(strides) * n)
     if x.dtype == mstype.bfloat16:
         return Tensor(np.lib.stride_tricks.as_strided(x.float().asnumpy(), shape, strides, False, True), dtype=x.dtype)
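The `as_strided` change only swaps in the now-private `_dtype_to_nptype` helper; the itemsize scaling it feeds is plain NumPy. A standalone sketch of that stride arithmetic (float32 assumed):

    import numpy as np

    n = np.dtype(np.float32).itemsize     # 4 bytes per element
    strides_elems = (3, 1)                # strides counted in elements
    strides_bytes = tuple(np.array(strides_elems) * n)
    x = np.arange(9, dtype=np.float32)
    view = np.lib.stride_tricks.as_strided(x, (3, 3), strides_bytes)
    print(view)                           # rows [0 1 2], [3 4 5], [6 7 8]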
@@ -10703,7 +10706,7 @@ def _permute_input(input, input_dim, ret_dim):
         (dim_permute_a if not is_transformed_dim[i] else dim_permute_b).append(value)

     # strides
-    type_size = np.dtype(mstype.dtype_to_nptype(input.dtype)).itemsize
+    type_size = np.dtype(mstype._dtype_to_nptype(input.dtype)).itemsize  # pylint:disable=protected-access
     input_strides = [int(x / type_size) for x in input.strides]

     def cmp(x, y):
@@ -10728,7 +10731,7 @@ def _permute_input(input, input_dim, ret_dim):
     dim_permute = dim_permute_a + dim_permute_b

     # permute
-    input = transpose_(input, tuple(dim_permute))
+    input = transpose_op(input, tuple(dim_permute))

     return input, dim_permute

@@ -10816,7 +10819,7 @@ def _handle_fftwithsize_output(out, input_dim, batch_dims, dim_permute, out_size
     for i in range(batch_dims, input_dim):
         out_strides[dim_permute[i]] = out.strides[1 + (i - batch_dims)]

-    type_size = np.dtype(mstype.dtype_to_nptype(out.dtype)).itemsize
+    type_size = np.dtype(mstype._dtype_to_nptype(out.dtype)).itemsize  # pylint:disable=protected-access
     if out.shape != out_sizes or out.strides != out_strides:
         out = as_strided(out, out_sizes, [int(i / type_size) for i in out_strides])
     return out
@@ -11194,7 +11197,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
             Default ``()`` , which counts all non-zero elements.
         keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
             Default ``False`` , don't keep these dimensions.
-        dtype (Union[Number, mindspore.bool\_], optional): The data type returned.
+        dtype (Union[Number, mindspore.bool], optional): The data type returned.
            Default ``mstype.int32`` .


@@ -11438,8 +11441,8 @@ def tensor_dot(x1, x2, axes):
     x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape(x2_shape, axes, 1)
     output_shape = x1_ret + x2_ret  # combine free axes from both inputs
     # run tensor_dot op
-    x1_transposed = transpose_(x1, x1_transpose_fwd)
-    x2_transposed = transpose_(x2, x2_transpose_fwd)
+    x1_transposed = transpose_op(x1, x1_transpose_fwd)
+    x2_transposed = transpose_op(x2, x2_transpose_fwd)
     x1_reshaped = reshape_(x1_transposed, x1_reshape_fwd)
     x2_reshaped = reshape_(x2_transposed, x2_reshape_fwd)
     mul_result = matmul_op(x1_reshaped, x2_reshaped)
11607
11610
 
11608
11611
  if len(input_shape) > 2 or len(other_shape) > 2:
11609
11612
  other_shape_transpose = _get_transpose_shape(other_shape)
11610
- other_transpose = transpose_(other, other_shape_transpose)
11613
+ other_transpose = transpose_op(other, other_shape_transpose)
11611
11614
  input_reshape = reshape_(input, (-1, input_shape[-1]))
11612
11615
  other_reshape = reshape_(other_transpose, (other_shape[-2], -1))
11613
11616
  mul_result = matmul_op(input_reshape, other_reshape)
@@ -11840,8 +11843,8 @@ def batch_dot(x1, x2, axes=None):
     x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape_batchdot(x2_shape, axes, 1)
     output_shape = _get_output_shape(x1_batch_size, x1_ret, x2_ret)

-    x1_transposed = transpose_(x1, x1_transpose_fwd)
-    x2_transposed = transpose_(x2, x2_transpose_fwd)
+    x1_transposed = transpose_op(x1, x1_transpose_fwd)
+    x2_transposed = transpose_op(x2, x2_transpose_fwd)
     x1_reshaped = reshape_(x1_transposed, x1_reshape_fwd)
     x2_reshaped = reshape_(x2_transposed, x2_reshape_fwd)

@@ -12083,11 +12086,11 @@ def mul_ext(input, other):
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

     Returns:
         Tensor, the shape is the same as the one after broadcasting,