mindspore-2.6.0rc1-cp39-cp39-win_amd64.whl → mindspore-2.7.0rc1-cp39-cp39-win_amd64.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of mindspore has been flagged in the registry; see the registry page for details.

Files changed (384)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +40 -9
  7. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  8. mindspore/_extends/optimize/cell_utils.py +96 -0
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +44 -22
  11. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  12. mindspore/_extends/parse/parser.py +37 -62
  13. mindspore/_extends/parse/resources.py +39 -0
  14. mindspore/_extends/parse/standard_method.py +43 -13
  15. mindspore/_extends/parse/trope.py +8 -1
  16. mindspore/_extends/pijit/__init__.py +1 -2
  17. mindspore/amp.py +4 -4
  18. mindspore/avcodec-59.dll +0 -0
  19. mindspore/avdevice-59.dll +0 -0
  20. mindspore/avfilter-8.dll +0 -0
  21. mindspore/avformat-59.dll +0 -0
  22. mindspore/avutil-57.dll +0 -0
  23. mindspore/boost/adasum.py +1 -1
  24. mindspore/boost/boost_cell_wrapper.py +4 -4
  25. mindspore/common/__init__.py +27 -2
  26. mindspore/common/_grad_function.py +2 -1
  27. mindspore/common/_pijit_context.py +28 -7
  28. mindspore/common/_stub_tensor.py +1 -209
  29. mindspore/common/_tensor_cpp_method.py +1 -1
  30. mindspore/common/_tensor_docs.py +77 -16
  31. mindspore/common/api.py +238 -113
  32. mindspore/common/dtype.py +21 -11
  33. mindspore/common/dump.py +10 -15
  34. mindspore/common/generator.py +5 -3
  35. mindspore/common/hook_handle.py +11 -2
  36. mindspore/common/jit_config.py +1 -1
  37. mindspore/common/jit_trace.py +84 -105
  38. mindspore/common/parameter.py +26 -12
  39. mindspore/common/recompute.py +3 -3
  40. mindspore/common/sparse_tensor.py +0 -3
  41. mindspore/common/symbol.py +0 -1
  42. mindspore/common/tensor.py +81 -81
  43. mindspore/communication/_comm_helper.py +46 -4
  44. mindspore/communication/management.py +79 -7
  45. mindspore/context.py +58 -40
  46. mindspore/dataset/core/config.py +3 -3
  47. mindspore/dataset/engine/datasets.py +20 -7
  48. mindspore/dataset/engine/datasets_user_defined.py +33 -3
  49. mindspore/dataset/engine/iterators.py +2 -2
  50. mindspore/dataset/engine/obs/config_loader.py +2 -2
  51. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  52. mindspore/dataset/transforms/py_transforms.py +7 -3
  53. mindspore/dataset/transforms/transforms.py +7 -3
  54. mindspore/dataset/vision/validators.py +1 -0
  55. mindspore/device_context/ascend/device.py +1 -1
  56. mindspore/device_context/gpu/__init__.py +2 -2
  57. mindspore/device_context/gpu/device.py +1 -1
  58. mindspore/device_context/gpu/op_precision.py +4 -2
  59. mindspore/device_context/gpu/op_tuning.py +6 -3
  60. mindspore/device_manager.py +16 -9
  61. mindspore/dnnl.dll +0 -0
  62. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
  63. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  64. mindspore/experimental/optim/adadelta.py +13 -20
  65. mindspore/experimental/optim/adagrad.py +15 -22
  66. mindspore/experimental/optim/adam.py +17 -24
  67. mindspore/experimental/optim/adamax.py +14 -22
  68. mindspore/experimental/optim/adamw.py +28 -34
  69. mindspore/experimental/optim/asgd.py +15 -25
  70. mindspore/experimental/optim/lr_scheduler.py +27 -45
  71. mindspore/experimental/optim/nadam.py +14 -24
  72. mindspore/experimental/optim/optimizer.py +13 -23
  73. mindspore/experimental/optim/radam.py +18 -24
  74. mindspore/experimental/optim/rmsprop.py +14 -25
  75. mindspore/experimental/optim/rprop.py +15 -26
  76. mindspore/experimental/optim/sgd.py +9 -19
  77. mindspore/hal/__init__.py +4 -4
  78. mindspore/hal/contiguous_tensors_handle.py +2 -2
  79. mindspore/hal/memory.py +27 -7
  80. mindspore/include/api/cell.h +37 -1
  81. mindspore/include/api/delegate.h +10 -0
  82. mindspore/include/api/model.h +3 -0
  83. mindspore/include/api/types.h +2 -2
  84. mindspore/include/c_api/model_c.h +0 -58
  85. mindspore/include/c_api/tensor_c.h +0 -26
  86. mindspore/include/dataset/vision_ascend.h +1 -1
  87. mindspore/jpeg62.dll +0 -0
  88. mindspore/mindrecord/tools/cifar10.py +60 -11
  89. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  90. mindspore/mindspore_backend_common.dll +0 -0
  91. mindspore/mindspore_backend_manager.dll +0 -0
  92. mindspore/mindspore_common.dll +0 -0
  93. mindspore/mindspore_core.dll +0 -0
  94. mindspore/mindspore_cpu_res_manager.dll +0 -0
  95. mindspore/mindspore_dump.dll +0 -0
  96. mindspore/mindspore_frontend.dll +0 -0
  97. mindspore/mindspore_glog.dll +0 -0
  98. mindspore/mindspore_memory_pool.dll +0 -0
  99. mindspore/mindspore_ms_backend.dll +0 -0
  100. mindspore/mindspore_ops.dll +0 -0
  101. mindspore/mindspore_ops_host.dll +0 -0
  102. mindspore/mindspore_ops_kernel_common.dll +0 -0
  103. mindspore/mindspore_profiler.dll +0 -0
  104. mindspore/mindspore_pyboost.dll +0 -0
  105. mindspore/mindspore_pynative.dll +0 -0
  106. mindspore/mindspore_res_manager.dll +0 -0
  107. mindspore/mindspore_runtime_pipeline.dll +0 -0
  108. mindspore/mint/__init__.py +6 -46
  109. mindspore/mint/distributed/__init__.py +1 -0
  110. mindspore/mint/distributed/distributed.py +212 -9
  111. mindspore/mint/nn/__init__.py +1 -1
  112. mindspore/mint/nn/functional.py +53 -6
  113. mindspore/mint/nn/layer/_functions.py +164 -294
  114. mindspore/mint/nn/layer/activation.py +8 -6
  115. mindspore/mint/nn/layer/conv.py +137 -101
  116. mindspore/mint/nn/layer/normalization.py +8 -22
  117. mindspore/mint/optim/adam.py +19 -18
  118. mindspore/mint/optim/adamw.py +14 -8
  119. mindspore/mint/optim/sgd.py +5 -5
  120. mindspore/nn/cell.py +328 -502
  121. mindspore/nn/grad/cell_grad.py +11 -12
  122. mindspore/nn/layer/activation.py +32 -34
  123. mindspore/nn/layer/basic.py +67 -64
  124. mindspore/nn/layer/channel_shuffle.py +4 -4
  125. mindspore/nn/layer/combined.py +4 -2
  126. mindspore/nn/layer/conv.py +117 -110
  127. mindspore/nn/layer/dense.py +9 -7
  128. mindspore/nn/layer/embedding.py +50 -52
  129. mindspore/nn/layer/image.py +37 -39
  130. mindspore/nn/layer/math.py +111 -112
  131. mindspore/nn/layer/normalization.py +56 -44
  132. mindspore/nn/layer/pooling.py +58 -63
  133. mindspore/nn/layer/rnn_cells.py +33 -33
  134. mindspore/nn/layer/rnns.py +56 -56
  135. mindspore/nn/layer/thor_layer.py +74 -73
  136. mindspore/nn/layer/transformer.py +11 -1
  137. mindspore/nn/learning_rate_schedule.py +20 -20
  138. mindspore/nn/loss/loss.py +79 -81
  139. mindspore/nn/optim/adam.py +3 -3
  140. mindspore/nn/optim/adasum.py +2 -2
  141. mindspore/nn/optim/asgd.py +2 -0
  142. mindspore/nn/optim/optimizer.py +1 -1
  143. mindspore/nn/optim/thor.py +2 -2
  144. mindspore/nn/probability/distribution/exponential.py +2 -1
  145. mindspore/nn/probability/distribution/poisson.py +2 -1
  146. mindspore/nn/sparse/sparse.py +3 -3
  147. mindspore/nn/wrap/cell_wrapper.py +34 -37
  148. mindspore/nn/wrap/grad_reducer.py +37 -37
  149. mindspore/nn/wrap/loss_scale.py +72 -74
  150. mindspore/numpy/array_creations.py +5 -5
  151. mindspore/numpy/fft.py +1 -1
  152. mindspore/numpy/math_ops.py +5 -5
  153. mindspore/opencv_core452.dll +0 -0
  154. mindspore/opencv_imgcodecs452.dll +0 -0
  155. mindspore/opencv_imgproc452.dll +0 -0
  156. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  157. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  158. mindspore/ops/_vmap/vmap_array_ops.py +31 -13
  159. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  160. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
  161. mindspore/ops/auto_generate/gen_extend_func.py +23 -141
  162. mindspore/ops/auto_generate/gen_ops_def.py +727 -321
  163. mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
  164. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  165. mindspore/ops/composite/__init__.py +10 -0
  166. mindspore/ops/composite/base.py +8 -4
  167. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  168. mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
  169. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  170. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  171. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  172. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  173. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  174. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  175. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  176. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  177. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  178. mindspore/ops/function/__init__.py +3 -1
  179. mindspore/ops/function/_add_attr_func.py +11 -6
  180. mindspore/ops/function/array_func.py +9 -96
  181. mindspore/ops/function/debug_func.py +4 -3
  182. mindspore/ops/function/grad/grad_func.py +1 -1
  183. mindspore/ops/function/math_func.py +33 -540
  184. mindspore/ops/function/nn_func.py +28 -74
  185. mindspore/ops/function/other_func.py +4 -1
  186. mindspore/ops/function/random_func.py +44 -5
  187. mindspore/ops/function/vmap_func.py +2 -1
  188. mindspore/ops/functional.py +2 -3
  189. mindspore/ops/functional_overload.py +571 -6
  190. mindspore/ops/op_info_register.py +21 -0
  191. mindspore/ops/operations/__init__.py +16 -11
  192. mindspore/ops/operations/_custom_ops_utils.py +689 -34
  193. mindspore/ops/operations/_inner_ops.py +3 -6
  194. mindspore/ops/operations/_sequence_ops.py +1 -1
  195. mindspore/ops/operations/array_ops.py +2 -2
  196. mindspore/ops/operations/comm_ops.py +185 -26
  197. mindspore/ops/operations/custom_ops.py +294 -174
  198. mindspore/ops/operations/debug_ops.py +59 -4
  199. mindspore/ops/operations/image_ops.py +13 -13
  200. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  201. mindspore/ops/operations/math_ops.py +3 -4
  202. mindspore/ops/operations/nn_ops.py +7 -39
  203. mindspore/ops/primitive.py +6 -10
  204. mindspore/ops/tensor_method.py +47 -8
  205. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  206. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  207. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  208. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  209. mindspore/ops_generate/common/base_generator.py +14 -0
  210. mindspore/ops_generate/common/gen_constants.py +8 -3
  211. mindspore/ops_generate/common/gen_utils.py +0 -19
  212. mindspore/ops_generate/common/op_proto.py +11 -4
  213. mindspore/ops_generate/common/template.py +88 -11
  214. mindspore/ops_generate/gen_ops.py +1 -1
  215. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  216. mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
  217. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  218. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  219. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  220. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  221. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  222. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  223. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  224. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  225. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  226. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  227. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  228. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  229. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  230. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  231. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  232. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  233. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  234. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  235. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  236. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  237. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  238. mindspore/parallel/_auto_parallel_context.py +11 -8
  239. mindspore/parallel/_cell_wrapper.py +113 -45
  240. mindspore/parallel/_parallel_serialization.py +1 -1
  241. mindspore/parallel/_ps_context.py +4 -6
  242. mindspore/parallel/_tensor.py +167 -12
  243. mindspore/parallel/_transformer/moe.py +1 -1
  244. mindspore/parallel/_transformer/transformer.py +13 -8
  245. mindspore/parallel/auto_parallel.py +14 -7
  246. mindspore/parallel/checkpoint_convert.py +3 -3
  247. mindspore/parallel/checkpoint_transform.py +11 -7
  248. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  249. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  250. mindspore/parallel/cluster/run.py +43 -4
  251. mindspore/parallel/function/__init__.py +8 -1
  252. mindspore/parallel/function/reshard_func.py +6 -7
  253. mindspore/parallel/nn/__init__.py +15 -2
  254. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  255. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  256. mindspore/parallel/shard.py +3 -4
  257. mindspore/parallel/transform_safetensors.py +463 -174
  258. mindspore/profiler/__init__.py +2 -1
  259. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  260. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  261. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
  262. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  263. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  264. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  265. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  266. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  267. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  268. mindspore/profiler/analysis/task_manager.py +1 -1
  269. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  270. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  271. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  272. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  273. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  274. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  275. mindspore/profiler/common/constant.py +16 -0
  276. mindspore/profiler/common/profiler_context.py +25 -27
  277. mindspore/profiler/common/profiler_info.py +0 -16
  278. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  279. mindspore/profiler/common/profiler_output_path.py +23 -8
  280. mindspore/profiler/common/profiler_parameters.py +128 -35
  281. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  282. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  283. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  284. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  285. mindspore/profiler/dynamic_profiler.py +305 -314
  286. mindspore/profiler/envprofiler.py +12 -7
  287. mindspore/profiler/experimental_config.py +96 -6
  288. mindspore/profiler/mstx.py +33 -12
  289. mindspore/profiler/platform/__init__.py +2 -3
  290. mindspore/profiler/platform/npu_profiler.py +29 -19
  291. mindspore/profiler/profiler.py +35 -19
  292. mindspore/profiler/profiler_action_controller.py +64 -76
  293. mindspore/profiler/schedule.py +10 -4
  294. mindspore/rewrite/common/config.py +1 -0
  295. mindspore/rewrite/common/namer.py +1 -0
  296. mindspore/rewrite/common/namespace.py +1 -0
  297. mindspore/rewrite/node/node.py +31 -11
  298. mindspore/rewrite/parsers/assign_parser.py +1 -1
  299. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  300. mindspore/run_check/_check_version.py +7 -10
  301. mindspore/runtime/__init__.py +5 -5
  302. mindspore/runtime/event.py +10 -4
  303. mindspore/runtime/executor.py +60 -45
  304. mindspore/runtime/memory.py +30 -32
  305. mindspore/runtime/thread_bind_core.py +298 -164
  306. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  307. mindspore/swresample-4.dll +0 -0
  308. mindspore/swscale-6.dll +0 -0
  309. mindspore/tinyxml2.dll +0 -0
  310. mindspore/train/_utils.py +14 -4
  311. mindspore/train/amp.py +43 -20
  312. mindspore/train/callback/__init__.py +5 -5
  313. mindspore/train/callback/_checkpoint.py +3 -6
  314. mindspore/train/callback/_flops_collector.py +1 -1
  315. mindspore/train/callback/_landscape.py +0 -1
  316. mindspore/train/callback/_train_fault_tolerance.py +97 -16
  317. mindspore/train/data_sink.py +11 -2
  318. mindspore/train/dataset_helper.py +9 -0
  319. mindspore/train/model.py +135 -55
  320. mindspore/train/serialization.py +133 -111
  321. mindspore/train/summary/summary_record.py +13 -2
  322. mindspore/turbojpeg.dll +0 -0
  323. mindspore/utils/__init__.py +3 -2
  324. mindspore/utils/dryrun.py +0 -6
  325. mindspore/utils/runtime_execution_order_check.py +163 -77
  326. mindspore/utils/sdc_detect.py +68 -0
  327. mindspore/utils/utils.py +6 -9
  328. mindspore/version.py +1 -1
  329. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  330. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +333 -371
  331. mindspore/_deprecated/jit.py +0 -198
  332. mindspore/experimental/es/__init__.py +0 -22
  333. mindspore/experimental/es/embedding_service.py +0 -891
  334. mindspore/experimental/es/embedding_service_layer.py +0 -581
  335. mindspore/profiler/parser/__init__.py +0 -14
  336. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  337. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  338. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  339. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  340. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  341. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  342. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  343. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  344. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  345. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  346. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  347. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  348. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  349. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  350. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  351. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  352. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  353. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  354. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  355. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  356. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  357. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  358. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  359. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  360. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  361. mindspore/profiler/parser/container.py +0 -229
  362. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  363. mindspore/profiler/parser/flops_parser.py +0 -531
  364. mindspore/profiler/parser/framework_enum.py +0 -111
  365. mindspore/profiler/parser/framework_parser.py +0 -464
  366. mindspore/profiler/parser/framework_struct.py +0 -61
  367. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  368. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  369. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  370. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  371. mindspore/profiler/parser/hccl_parser.py +0 -573
  372. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  373. mindspore/profiler/parser/integrator.py +0 -526
  374. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  375. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  376. mindspore/profiler/parser/minddata_parser.py +0 -186
  377. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  378. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  379. mindspore/profiler/parser/optime_parser.py +0 -250
  380. mindspore/profiler/parser/profiler_info.py +0 -213
  381. mindspore/profiler/parser/step_trace_parser.py +0 -666
  382. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  383. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  384. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -30,7 +30,6 @@ from mindspore.ops.primitive import prim_attr_register, Primitive, PrimitiveWith
 from mindspore._checkparam import check_hook_fn
 from mindspore.ops import operations as P
 
-
 SUMMARY_TENSOR_CACHE = []
 
 
@@ -396,6 +395,10 @@ class InsertGradientOf(Primitive):
     """
    Attaches callback to the graph node that will be invoked on the node's gradient.
 
+    .. warning::
+        In the callback, exercise caution when using side-effect operators,
+        such as the TensorDump operator, as current support is incomplete.
+
     Args:
         f (Function): MindSpore's Function. Callback function.
 
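For context (not part of the diff): `InsertGradientOf` wraps a callback that runs on the gradient flowing back through the wrapped tensor. A minimal sketch in PyNative mode, using a clipping callback (`InsertGradientOf`, `clip_by_value`, and `grad` are standard MindSpore APIs; the network itself is made up for illustration):

    >>> import mindspore as ms
    >>> from mindspore import ops, Tensor
    >>> clip = ops.InsertGradientOf(lambda dout: ops.clip_by_value(dout, -1.0, 1.0))
    >>> def net(x, y):
    ...     x = clip(x)   # dout arriving at x passes through the callback first
    ...     return x * y
    >>> grad_fn = ms.grad(net, grad_position=(0, 1))
    >>> dx, dy = grad_fn(Tensor(2.0, ms.float32), Tensor(3.0, ms.float32))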
@@ -459,15 +462,67 @@ class InsertGradientOf(Primitive):
         self.f = f
 
 
+class DumpGradient(Primitive):
+    """
+    The `DumpGradient` Primitive is a hook, used to dump dout which pass to `x`.
+
+    Inputs:
+        - **path** (str) - The path of the file to be saved.
+        - **x** (Tensor) - Input Tensor of any dimension.
+        - **input_output** (str) - support value should be one of ['in', 'out'].
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import ops
+        >>> from mindspore import Tensor
+        >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+        >>> ms.set_device(device_target="Ascend")
+        >>> dg = ops.DumpGradient()
+        >>> def dout_dump_test(x, y):
+        ...     x = dg("x_dout.npy", x, 'out')
+        ...     print(f"x value is {x}")
+        ...     z = x * y
+        ...     return z
+        >>> ms_grad = ms.grad(dout_dump_test, grad_position=(0,1))
+        >>> x_grad, y_grad = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+        >>> print(f"x grad is {x_grad}, y_grad is {y_grad}")
+        >>> x_grad_npy = np.load("x_dout.npy")
+        >>> print(f"load x_grad from npy, x_grad is {x_grad_npy}")
+        x value is 1.0
+        x grad is 2.0, y grad is 1.0
+        load x_grad from npy, x_grad is array(2., dtype=float32)
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        pass
+
+    def _dump_hook(self, dout):
+        P.TensorDump()(self.bwd_dump_path, dout)
+        return dout
+
+    def __call__(self, path, x, input_output):
+        self.bwd_dump_path = path
+        x = P.InsertGradientOf(self._dump_hook)(x)
+        return x
+
+
 class Morph(PrimitiveWithInfer):
     """
     The `Morph` Primitive is used to encapsulate a user-defined function `fn`, allowing it to be used as a custom
     Primitive.
-    The primary application scenario of the `Morph` Primitive is in the auto-parallel case after `GRAPH_MODE` mode,
-    where collective communication operators are used within the user-defined `fn` to implement custom parallel
-    computation logic, especially in scenarios where `fn` involves dynamic shapes.
+
+    The `Morph` Primitive is primarily designed for custom graph optimization in GRAPH mode. For example, it supports
+    encapsulation of irregular collective communications (such as :func:`mindspore.ops.AlltoAllV`) in distributed
+    auto-parallel training scenarios.
+
     When the `Morph` Primitive is applied to inputs, it is actually the encapsulated user-defined function `fn` that is
     applied to the inputs.
+
     The main difference between the `Morph` Primitive and :func:`mindspore.ops.Custom` is that the former is expanded
     and replaced by the user-defined `fn` before automatic differentiation, so there is no need to implement a backward
     function.
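For context (not part of the diff): a hedged sketch of wrapping a function with `Morph`. The `Morph(fn, infer_shape, infer_dtype)` signature follows the primitive's documented pattern; the add function and its infer helpers here are hypothetical:

    >>> from mindspore import ops
    >>> def fn(x, y):
    ...     return x + y
    >>> def infer_shape(x_shape, y_shape):
    ...     return x_shape       # elementwise add keeps the first input's shape
    >>> def infer_dtype(x_dtype, y_dtype):
    ...     return x_dtype
    >>> morph_add = ops.Morph(fn, infer_shape, infer_dtype)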
@@ -102,19 +102,19 @@ class AdjustContrastv2(Primitive):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> images = Tensor([[[1.0, 2.0, 3.0],
-        ...                   [4.0, 5.0, 6.0]],
-        ...                  [[7.0, 8.0, 9.0],
-        ...                   [10.0, 11.0, 12.0]]], mstype.float32)
-        >>> contrast_factor = Tensor(2., mstype.float32)
-        >>> adjustcontrastv2 = AdjustContrastv2()
-        >>> output = adjustcontrastv2(images, contrast_factor)
-        >>> print(output)
-        [[[-3.5 -2.5 -1.5]
-          [ 2.5  3.5  4.5]]
-        <BLANKLINE>
-         [[ 8.5  9.5 10.5]
-          [14.5 15.5 16.5]]]
+        >>> images = Tensor([[[1.0, 2.0, 3.0],
+        ...                   [4.0, 5.0, 6.0]],
+        ...                  [[7.0, 8.0, 9.0],
+        ...                   [10.0, 11.0, 12.0]]], mstype.float32)
+        >>> contrast_factor = Tensor(2., mstype.float32)
+        >>> adjustcontrastv2 = AdjustContrastv2()
+        >>> output = adjustcontrastv2(images, contrast_factor)
+        >>> print(output)
+        [[[-3.5 -2.5 -1.5]
+          [ 2.5  3.5  4.5]]
+        <BLANKLINE>
+         [[ 8.5  9.5 10.5]
+          [14.5 15.5 16.5]]]
     """
 
     @prim_attr_register
@@ -26,7 +26,6 @@ from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore.ops._utils import arg_handler as handler
 from mindspore.ops._utils.arg_dtype_cast import DtypeToEnum
 from mindspore.common import Tensor, CSRTensor, COOTensor
-from mindspore.common._stub_tensor import _convert_stub
 from mindspore._c_expression import typing
 from mindspore._c_expression import TensorPy as Tensor_
 from mindspore._c_expression import pyboost_cast, pyboost_tile, pyboost_zeros, pyboost_ones, pyboost_type_as
@@ -1057,8 +1056,8 @@ class Tile(Primitive):
     def __call__(self, input, dims):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return None
-        res = _convert_stub(pyboost_tile(self, [input, dims]))
+            return jit_context().default_output()
+        res = pyboost_tile(self, [input, dims])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -1066,7 +1065,6 @@ class Tile(Primitive):
             return jit_context().run_op(self, res, input, dims)
         return res
 
-    # pylint: disable=missing-docstring
     def check_elim(self, *args):
         base_tensor, dims = args
         if not isinstance(base_tensor, Tensor):
@@ -1230,11 +1228,11 @@ class Cast(Primitive):
     def __call__(self, input_x, dtype):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return None
+            return jit_context().default_output()
         should_elim, output = self.check_elim(input_x, dtype)
         if should_elim:
             return output
-        res = _convert_stub(pyboost_cast(self, [input_x, dtype_to_type_id('Cast', 'dtype', dtype)]))
+        res = pyboost_cast(self, [input_x, dtype_to_type_id('Cast', 'dtype', dtype)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -1293,7 +1291,7 @@ class TypeAs(Primitive):
     def __call__(self, input, other):
         if input.dtype == other.dtype:
             return input
-        return _convert_stub(pyboost_type_as(self, [input, other]))
+        return pyboost_type_as(self, [input, other])
 
 
 def to_sequence(val):
@@ -2070,9 +2068,9 @@ class Ones(Primitive):
     def __call__(self, size, type=None):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return None
-        res = _convert_stub(pyboost_ones(self, [size, type if type is None \
-                                                 else handler.dtype_to_type_id('Ones', 'type', type)]))
+            return jit_context().default_output()
+        res = pyboost_ones(self, [size, type if type is None \
+                                  else handler.dtype_to_type_id('Ones', 'type', type)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -2130,9 +2128,9 @@ class Zeros(Primitive):
     def __call__(self, size, type=None):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return None
-        res = _convert_stub(pyboost_zeros(self, [size, type if type is None else \
-                                                  handler.dtype_to_type_id('Zeros', 'type', type)]))
+            return jit_context().default_output()
+        res = pyboost_zeros(self, [size, type if type is None else \
+                                   handler.dtype_to_type_id('Zeros', 'type', type)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
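Taken together, the Tile, Cast, TypeAs, Ones, Zeros (and the later AllFinite) hunks make one recurring change; a condensed sketch, with `pyboost_op` standing in for the per-op entry point:

    def __call__(self, *args):
        if jit_context() and jit_context().compiled:
            return jit_context().default_output()   # was: return None
        return pyboost_op(self, args)               # was: _convert_stub(pyboost_op(self, args))

pyboost results no longer need the stub-tensor wrapper, which is why the `_convert_stub` imports are deleted alongside.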
@@ -2217,7 +2215,8 @@ def flash_attention_score(query, key, value, head_num, real_shift=None, drop_mas
         keep_prob (double, optional): The keep probability of dropout. Value range is (0.0, 1.0]. When `keep_prob`
             is 1.0, `drop_mask` should be None.
             Default: ``1.0``.
-        scalar_value (double, optional): The scale factor of score. Generally, the value is 1.0 / (D ** 0.5).
+        scalar_value (double, optional): The scale value indicating the scale coefficient, which is used as the
+            scalar of Muls in the calculation. Generally, the value is 1.0 / (D ** 0.5).
             Default: ``1.0``.
         pre_tokens (int, optional): Parameter for sparse computation, represents how many tokens are counted forward.
             When `sparse_mode` is set to 1, 2, 3, or 5, this parameter does not take effect.
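For reference (not from the diff): `D` here is the per-head hidden dimension, so the usual setting is simply:

    head_dim = 128
    scalar_value = 1.0 / (head_dim ** 0.5)   # ≈ 0.0884, the standard 1/sqrt(D) attention scale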
@@ -2599,8 +2598,8 @@ def fused_infer_attention_score(query, key, value, *, pse_shift=None, atten_mask
           taking exp, and then the sum is computed to obtain softmax_sum. Finally, the log of softmax_sum is taken,
           and softmax_max is added to obtain softmax_lse. The softmax_lse is only calculated when softmax_lse_flag
           is True, and the shape would be :math:`(B, N, Q\_S, 1)`. If softmax_lse_flag is False, then a tensor with
-          shape :math:`(1)` filled with zeros would be returned. In graph mode with JitConfig set to O2, please ensure
-          that the softmax_lse_flag is enabled before using softmax_lse; otherwise, an exception will occur.
+          shape :math:`(1)` filled with zeros would be returned. In GE backend, please ensure that the softmax_lse_flag
+          is enabled before using softmax_lse; otherwise, an exception will occur.
 
     Constraints:
     - Full Inference Scenario (Q_S > 1):
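The softmax_lse the doc describes is the standard numerically stable log-sum-exp; an illustrative NumPy sketch (not MindSpore code):

    import numpy as np

    def softmax_lse(scores):
        # subtract the per-row max before exp, then add it back after the log
        m = scores.max(axis=-1, keepdims=True)
        return np.log(np.exp(scores - m).sum(axis=-1, keepdims=True)) + m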
@@ -1231,7 +1231,7 @@ class Heaviside(Primitive):
                 0, & \text { if x }<0 \\
                 \text { values, } & \text { if x }==0 \\
                 1, & \text { if x }>0
-            \end{array}\right
+            \end{array}\right.
 
     .. warning::
         This is an experimental API that is subject to change or deletion.
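With the trailing period restored (LaTeX's \right. closes the opening \left brace with an invisible delimiter), the docstring's piecewise definition is:

    \text{heaviside}(x, \text{values}) =
    \begin{cases}
    0, & x < 0 \\
    \text{values}, & x = 0 \\
    1, & x > 0
    \end{cases}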
@@ -3354,9 +3354,8 @@ class ComplexAbs(Primitive):
 
     Examples:
         >>> import mindspore
-        >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.asarray(np.complex(3+4j)), mindspore.complex64)
+        >>> x = Tensor(3+4j, mindspore.complex64)
         >>> complex_abs = ops.ComplexAbs()
         >>> output = complex_abs(x)
         >>> print(output)
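Context for this change (not in the diff): `np.complex` was a deprecated alias for the builtin `complex` and was removed in NumPy 1.24, so the example now builds the tensor from a plain Python complex literal. The expected magnitude is unchanged:

    >>> abs(3 + 4j)   # |3+4j| = sqrt(3**2 + 4**2)
    5.0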
@@ -3630,7 +3629,7 @@ class MatrixSolveLs(Primitive):
         TypeError: If `l2_regularizer` is not float64.
         TypeError: If `fast` is not bool.
         ValueError: If dimensions of `matrix` or `rhs` is less than 2.
-        ValueError: If shape of `matrix` dose not match the shape of `rhs`.
+        ValueError: If shape of `matrix` does not match the shape of `rhs`.
 
     Supported Platforms:
         ``CPU``
@@ -32,7 +32,6 @@ from mindspore.ops.primitive import PrimitiveWithCheck
 from mindspore.ops.primitive import prim_attr_register
 from mindspore.run_check._check_version import AscendEnvChecker
 from mindspore._c_expression import pyboost_all_finite
-from mindspore.common._stub_tensor import _convert_stub
 from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU, ReLU6, Dense, Tanh,
                              Elu, Sigmoid, Softmax, SoftplusExt, HSwish, HSigmoid, AvgPool, BiasAdd,
                              NLLLoss, OneHot, GeLU, FastGeLU, PReLU, RmsNorm, IncreFlashAttention, MSELossExt,
@@ -6434,39 +6433,8 @@ class Conv3D(Primitive):
 
     Outputs:
         Tensor, the value that applied 3D convolution. The shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
-
-        `pad_mode` is ``"same"``:
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lceil{\frac{D_{in}}{\text{stride[0]}}} \right \rceil \\
-                H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[1]}}} \right \rceil \\
-                W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[2]}}} \right \rceil \\
-            \end{array}
-
-        `pad_mode` is ``"valid"``:
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
-                {\text{stride[0]}} + 1} \right \rfloor \\
-                H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
-                {\text{stride[1]}} + 1} \right \rfloor \\
-                W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
-                {\text{stride[2]}} + 1} \right \rfloor \\
-            \end{array}
-
-        `pad_mode` is ``"pad"``:
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lfloor{\frac{D_{in} + pad[0] + pad[1] - (\text{dilation[0]} - 1) \times
-                \text{kernel_size[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
-                H_{out} = \left \lfloor{\frac{H_{in} + pad[2] + pad[3] - (\text{dilation[1]} - 1) \times
-                \text{kernel_size[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
-                W_{out} = \left \lfloor{\frac{W_{in} + pad[4] + pad[5] - (\text{dilation[2]} - 1) \times
-                \text{kernel_size[2]} - 1 }{\text{stride[2]}} + 1} \right \rfloor \\
-            \end{array}
+        To see how different pad modes affect the output shape, please refer to
+        :class:`mindspore.nn.Conv3d` for more details.
 
     Raises:
         TypeError: If `out_channel` or `group` is not an int.
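The removed "valid" formula is easy to restate in code; a small sketch mirroring it, applied per spatial dimension (illustrative, not MindSpore internals):

    import math

    def conv_out_valid(size, kernel, stride, dilation):
        # D_out = floor((D_in - dilation * (kernel_size - 1)) / stride + 1)
        return math.floor((size - dilation * (kernel - 1)) / stride + 1)

    print(conv_out_valid(16, 3, 2, 1))  # 8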
@@ -7151,8 +7119,8 @@ class Conv3DTranspose(Primitive):
         self.format = validator.check_string(data_format, ['NCDHW'], 'format', self.name)
         self.add_prim_attr('data_format', self.format)
 
-        self.output_padding = _check_3d_int_or_tuple('output_padding', output_padding, self.name,
-                                                     allow_five=False, ret_five=True, greater_zero=False)
+        self.output_padding = _check_3d_int_or_tuple('output_padding', output_padding, self.name, allow_five=False,
+                                                     ret_five=True, greater_zero=False, pad_value=0)
         output_padding_ = (self.output_padding[2], self.output_padding[3], self.output_padding[4])
         if self.pad_mode != 'pad' and output_padding_ != (0, 0, 0):
             raise ValueError(f"For '{self.name}', the 'output_padding' must be zero or (0, 0, 0) "
@@ -7402,8 +7370,8 @@ class ApplyAdagradDA(Primitive):
         >>> global_step = Tensor(2, mstype.int32)
         >>> output = net(grad, lr, l1, l2, global_step)
         >>> print(output)
-        [[-0.00073906, -0.00136889],
-         [-0.00059699, -0.00142478]]
+        [[-0.00073906 -0.00136889]
+         [-0.00059699 -0.00142478]]
     """
 
     __mindspore_signature__ = (
@@ -9289,4 +9257,4 @@ class AllFinite(Primitive):
                              "in the current environment does not support AllFinite.")
 
     def __call__(self, *args):
-        return _convert_stub(pyboost_all_finite(self, args))
+        return pyboost_all_finite(self, args)
@@ -170,10 +170,13 @@ class Primitive(Primitive_):
                 raise TypeError(f'The element of strategy must be tuple/Layout type, but got:{type(in_ele)}')
             if isinstance(in_ele, tuple):
                 for in_value in in_ele:
-                    if not isinstance(in_value, int) and self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY:
+                    if not isinstance(in_value, int) and self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY \
+                            and not self.attrs.get("self_define_shard", False):
                         raise TypeError(f'The {log_info}: {strategy} of {self.name} is not valid,'
                                         f' the value of strategy must be int type, but got:{type(in_value)}')
-                    if isinstance(in_value, Layout) and (self.name in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY):
+                    if isinstance(in_value, Layout) and (
+                            self.name in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY or self.attrs.get("self_define_shard",
+                                                                                             False)):
                         is_layout.append(True)
                         continue
                     is_layout.append(False)
@@ -315,7 +318,7 @@ class Primitive(Primitive_):
         out_is_layout = self._check_shard_strategy(out_strategy, "out_strategy")
         is_layout = in_is_layout if in_is_layout is not None else out_is_layout
         if out_is_layout is not None and is_layout != out_is_layout and \
-                self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY:
+                self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY and not self.attrs.get("self_define_shard", False):
             raise ValueError(f'The in_strategy type must equal to the out_strategy type, '
                              f'one using tuple(tuple) and the other using tuple(Layout) is not allowed.')
 
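For context (not from the diff): the new `self.attrs.get("self_define_shard", False)` checks let a primitive carrying the `self_define_shard` attribute mix `Layout` objects inside tuple strategies. A hedged sketch of what that enables (the Layout call style follows MindSpore's parallel docs; the concrete device arrangement is hypothetical):

    from mindspore import ops, Layout

    layout = Layout((2, 4), ("dp", "mp"))            # 2-way data x 4-way model parallel
    matmul = ops.MatMul()
    matmul.add_prim_attr("self_define_shard", True)  # the attribute the relaxed checks consult
    matmul.shard(in_strategy=(layout("dp", "mp"), layout("mp", "None")))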
@@ -409,12 +412,6 @@ class Primitive(Primitive_):
             return output
         return _run_op(self, self.name, args)
 
-    def __getstate__(self):
-        return self.__dict__
-
-    def __setstate__(self, d):
-        self.__dict__.update(d)
-
     def __deepcopy__(self, memo):
         return type(self)(**self.init_attrs)
 
@@ -1032,7 +1029,6 @@ def _run_op(obj, op_name, args):
     res = _pynative_executor.run_op_async(obj, op_name, args)
     # Add for jit context.
     if jit_context():
-        # todo support TensorPy
         return jit_context().run_op(obj, res, *args)
     return res
 
@@ -21,7 +21,7 @@ from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.ops.composite.multitype_ops import _compile_utils as utils
 from mindspore.ops.composite.multitype_ops._compile_utils import (
-    sequence_to_tensor, _tensor_sub, _tensor_pow, _tensor_div, _tensor_floordiv
+    sequence_to_tensor, _tensor_sub, _tensor_pow, _tensor_div, _tensor_floordiv, _tensor_mod
 )
 from mindspore.ops.auto_generate.gen_ops_prim import (
     inplace_scatter_src_op, inplace_scatter_src_reduce_op, inplace_scatter_value_op, inplace_scatter_value_reduce_op,
@@ -237,7 +237,7 @@ from mindspore.ops.function.array_func import scatter
 # 92 scatter_add
 from mindspore.ops.function.array_func import tensor_scatter_add
 # 93 select
-from mindspore.ops.auto_generate import select, select_ext
+from mindspore.ops.auto_generate import select, select_ext_view
 # 94 sigmoid
 from mindspore.ops.auto_generate import sigmoid
 # 95 sin
@@ -273,7 +273,7 @@ from mindspore.ops.operations.manually_defined import tile
 # 110 topk
 from mindspore.ops.function.array_func import topk
 # 111 transpose
-from mindspore.ops.auto_generate import transpose, transpose_ext
+from mindspore.ops.auto_generate import transpose, transpose_ext_view
 # 112 tril
 from mindspore.ops.function.array_func import tril
 # 113 trunc
@@ -503,16 +503,16 @@ def tensor_allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
 
 
 # 8 any
-def tensor_any(x, axis=None, keep_dims=False):
+def reduce_tensor_any(x, axis=None, keep_dims=False):
     if axis is None:
         axis = ()
     return any(x, axis, keep_dims)
 
 
-def deprecated_tensor_any(x, dim=None, keepdim=False):
+def tensor_any(input, dim=None, keepdim=False):
     if dim is None:
         dim = ()
-    return any(x, dim, keepdim)
+    return any(input, dim, keepdim)
 
 
 # 9 arctan2
@@ -948,6 +948,10 @@ def deprecated_tensor_remainder(input, divisor):
     return remainder(input, divisor)
 
 
+def deprecated_tensor_mod(input, other):
+    return _tensor_mod(input, other)
+
+
 # 86 repeat
 def tensor_repeat(input, *repeats):
     raise RuntimeError("'repeat' is not supported on this device.")
@@ -998,7 +1002,7 @@ def deprecated_tensor_scatter_add(input, indices, updates):
 
 # 93 select
 def tensor_select_ext(input, dim, index):
-    return select_ext(input, dim, index)
+    return select_ext_view(input, dim, index)
 
 
 def deprecated_tensor_select(input, condition, y):
@@ -1170,7 +1174,7 @@ def deprecated_tensor_topk(input, k, dim=None, largest=True, sorted=True):
 
 # 111 transpose
 def tensor_transpose_ext(input, dim0, dim1):
-    return transpose_ext(input, dim0, dim1)
+    return transpose_ext_view(input, dim0, dim1)
 
 
 def deprecated_tensor_transpose(input, *axes):
@@ -1514,6 +1518,16 @@ def tensor_empty(*size, dtype=None, device=None):
         "This is a function for empty not should be called. Please check the implementation.")
 
 
+def tensor_empty_like(input, *, dtype=None, device=None):
+    raise NotImplementedError(
+        "This is a function for empty_like should not be called. Please check the implementation.")
+
+
+def tensor_new_empty(input, size, *, dtype=None, device=None):
+    raise NotImplementedError(
+        "This is a function for new_empty should not be called. Please check the implementation.")
+
+
 def deprecated_tensor_logaddexp(input, other):
     return F.logaddexp(input, other)
 
@@ -1744,6 +1758,10 @@ def deprecated_tensor_diag(input):
     return F.diag(input)
 
 
+def deprecated_einsum(equation, operands):
+    raise NotImplementedError('einsum only supports Ascend.')
+
+
 # 916 index_add
 @constexpr
 def _check_index_add_alpha(alpha):
@@ -1808,6 +1826,8 @@ def tensor_inplace_sub(input, other, *, alpha=1):
         return sub(input, other)
     return sub_ext(input, other, alpha=alpha)
 
+def tensor_new_full(input, size, fill_value, *, dtype=None):
+    raise NotImplementedError("new_full method support Ascend only")
 
 def tensor_div_empty_(input, other, rounding_mode=None):
     raise ValueError("should not come here for div_ method.")
@@ -1840,6 +1860,10 @@ def all_gather_matmul(
     raise NotImplementedError('all_gather_matmul only supports Ascend.')
 
 
+def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv3d only supports Ascend.')
+
+
 def tensor_floor_divide_(input, other):
     return _tensor_floordiv(input, other)
 
@@ -1886,3 +1910,18 @@ def tensor_gelu(input, *, approximate):
 
 def deprecated_pixel_shuffle(input, upscale_factor):
     return F.pixel_shuffle(input, upscale_factor)
+
+
+def tensor_quant_matmul(x1, x2, scale, *, offset=None, pertoken_scale=None, bias=None, output_dtype=None,
+                        x1_dtype=None, x2_dtype=None, pertoken_scale_dtype=None, scale_dtype=None, group_sizes=None):
+    r"""
+    For details, please refer to :func:`mindspore.ops.auto_generate.quant_matmul`.
+    """
+    raise NotImplementedError('quant_matmul only supports Ascend.')
+
+
+def tensor_gmm(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
+    raise NotImplementedError("gmm has not been implemented by python.")
+
+def raise_func(*args, **kwargs):
+    raise NotImplementedError("this func has not been implemented.")
@@ -53,7 +53,7 @@ class CppCreatePrimInstanceHelperGenerator(BaseGenerator):
         """
         py_arg_default = self.generate_op_arg_default_value(op_protos)
         py_labels = self.generate_op_labels(op_protos)
-        res_str = (template.PY_LICENSE_STR + py_arg_default + py_labels)
+        res_str = template.PY_LICENSE_STR + py_arg_default + py_labels
 
         save_path = os.path.join(work_path, K.PY_AUTO_GEN_PATH)
         file_name = "cpp_create_prim_instance_helper.py"
@@ -159,14 +159,6 @@ class FunctionalMapCppGenerator(BaseGenerator):
             sig_str += '}\n},'
         return sig_str
 
-    def _is_input_arg(self, arg_name, op_name):
-        res = False
-        if op_name in K.INPUT_NAME_MAP and arg_name == K.INPUT_NAME_MAP[op_name]:
-            res = True
-        elif op_name not in K.INPUT_NAME_MAP and arg_name in K.INPUT_ARGS_NAME:
-            res = True
-        return res
-
     def _generate_single_signature_str(self, func_api_name, tensor_proto, is_tensor_method) -> str:
         """
         Generates a single function signature string for the given operation prototype.
@@ -186,7 +178,7 @@ class FunctionalMapCppGenerator(BaseGenerator):
         arg_valid_types = []
         for _, arg in enumerate(op_proto.op_args):
            arg_name = arg.arg_name
-            if is_tensor_method and self._is_input_arg(arg_name, op_name):
+            if is_tensor_method and _is_input_arg(arg_name, op_name):
                 continue
 
             arg_valid_types = self._handle_arg_valid_types(arg, arg_name, arg_valid_types, func_api_name)
@@ -502,3 +494,12 @@ class FunctionalMapCppGenerator(BaseGenerator):
             self._get_and_append_single_op_varargs_list(func_protos,
                                                         mint_varargs_list)
         return mint_varargs_list
+
+
+def _is_input_arg(arg_name, op_name):
+    res = False
+    if op_name in K.INPUT_NAME_MAP and arg_name == K.INPUT_NAME_MAP[op_name]:
+        res = True
+    elif op_name not in K.INPUT_NAME_MAP and arg_name in K.INPUT_ARGS_NAME:
+        res = True
+    return res