mindspore-2.6.0-cp311-cp311-win_amd64.whl → mindspore-2.7.0rc1-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.
Files changed (403)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +1 -1
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +40 -9
  9. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  10. mindspore/_extends/optimize/cell_utils.py +96 -0
  11. mindspore/_extends/parse/__init__.py +2 -2
  12. mindspore/_extends/parse/compile_config.py +44 -22
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  14. mindspore/_extends/parse/parser.py +36 -61
  15. mindspore/_extends/parse/resources.py +39 -0
  16. mindspore/_extends/parse/standard_method.py +32 -13
  17. mindspore/_extends/parse/trope.py +8 -1
  18. mindspore/_extends/pijit/__init__.py +1 -2
  19. mindspore/amp.py +4 -4
  20. mindspore/atlprov.dll +0 -0
  21. mindspore/avcodec-59.dll +0 -0
  22. mindspore/avdevice-59.dll +0 -0
  23. mindspore/avfilter-8.dll +0 -0
  24. mindspore/avformat-59.dll +0 -0
  25. mindspore/avutil-57.dll +0 -0
  26. mindspore/boost/adasum.py +1 -1
  27. mindspore/boost/boost_cell_wrapper.py +4 -4
  28. mindspore/c1.dll +0 -0
  29. mindspore/c1xx.dll +0 -0
  30. mindspore/c2.dll +0 -0
  31. mindspore/common/__init__.py +27 -2
  32. mindspore/common/_grad_function.py +2 -1
  33. mindspore/common/_pijit_context.py +28 -7
  34. mindspore/common/_stub_tensor.py +1 -209
  35. mindspore/common/_tensor_cpp_method.py +1 -1
  36. mindspore/common/_tensor_docs.py +76 -15
  37. mindspore/common/api.py +193 -112
  38. mindspore/common/dtype.py +21 -11
  39. mindspore/common/dump.py +10 -15
  40. mindspore/common/generator.py +2 -3
  41. mindspore/common/hook_handle.py +11 -2
  42. mindspore/common/jit_config.py +1 -1
  43. mindspore/common/jit_trace.py +84 -105
  44. mindspore/common/parameter.py +26 -12
  45. mindspore/common/recompute.py +3 -3
  46. mindspore/common/sparse_tensor.py +0 -3
  47. mindspore/common/symbol.py +0 -1
  48. mindspore/common/tensor.py +48 -83
  49. mindspore/communication/_comm_helper.py +46 -4
  50. mindspore/communication/management.py +79 -7
  51. mindspore/context.py +38 -23
  52. mindspore/dataset/core/config.py +3 -3
  53. mindspore/dataset/engine/datasets.py +20 -7
  54. mindspore/dataset/engine/datasets_user_defined.py +32 -2
  55. mindspore/dataset/engine/iterators.py +2 -2
  56. mindspore/dataset/engine/obs/config_loader.py +2 -2
  57. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  58. mindspore/dataset/transforms/py_transforms.py +7 -3
  59. mindspore/dataset/transforms/transforms.py +7 -3
  60. mindspore/dataset/vision/validators.py +1 -0
  61. mindspore/device_context/ascend/device.py +1 -1
  62. mindspore/device_context/gpu/__init__.py +2 -2
  63. mindspore/device_context/gpu/device.py +1 -1
  64. mindspore/device_context/gpu/op_precision.py +4 -2
  65. mindspore/device_context/gpu/op_tuning.py +6 -3
  66. mindspore/device_manager.py +16 -9
  67. mindspore/dnnl.dll +0 -0
  68. mindspore/dpcmi.dll +0 -0
  69. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -5
  70. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  71. mindspore/experimental/optim/adadelta.py +13 -20
  72. mindspore/experimental/optim/adagrad.py +15 -22
  73. mindspore/experimental/optim/adam.py +17 -24
  74. mindspore/experimental/optim/adamax.py +14 -22
  75. mindspore/experimental/optim/adamw.py +28 -34
  76. mindspore/experimental/optim/asgd.py +15 -25
  77. mindspore/experimental/optim/lr_scheduler.py +27 -45
  78. mindspore/experimental/optim/nadam.py +14 -24
  79. mindspore/experimental/optim/optimizer.py +13 -23
  80. mindspore/experimental/optim/radam.py +18 -24
  81. mindspore/experimental/optim/rmsprop.py +14 -25
  82. mindspore/experimental/optim/rprop.py +15 -26
  83. mindspore/experimental/optim/sgd.py +9 -19
  84. mindspore/hal/__init__.py +4 -4
  85. mindspore/hal/contiguous_tensors_handle.py +2 -2
  86. mindspore/hal/memory.py +1 -0
  87. mindspore/include/api/cell.h +37 -1
  88. mindspore/include/api/delegate.h +10 -0
  89. mindspore/include/api/model.h +3 -0
  90. mindspore/include/api/types.h +2 -2
  91. mindspore/include/c_api/model_c.h +0 -58
  92. mindspore/include/c_api/tensor_c.h +0 -26
  93. mindspore/include/dataset/vision_ascend.h +1 -1
  94. mindspore/jpeg62.dll +0 -0
  95. mindspore/mindrecord/tools/cifar10.py +60 -11
  96. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  97. mindspore/mindspore_backend_common.dll +0 -0
  98. mindspore/mindspore_backend_manager.dll +0 -0
  99. mindspore/mindspore_common.dll +0 -0
  100. mindspore/mindspore_core.dll +0 -0
  101. mindspore/mindspore_cpu_res_manager.dll +0 -0
  102. mindspore/mindspore_dump.dll +0 -0
  103. mindspore/mindspore_frontend.dll +0 -0
  104. mindspore/mindspore_glog.dll +0 -0
  105. mindspore/mindspore_memory_pool.dll +0 -0
  106. mindspore/mindspore_ms_backend.dll +0 -0
  107. mindspore/mindspore_ops.dll +0 -0
  108. mindspore/mindspore_ops_host.dll +0 -0
  109. mindspore/mindspore_ops_kernel_common.dll +0 -0
  110. mindspore/mindspore_profiler.dll +0 -0
  111. mindspore/mindspore_pyboost.dll +0 -0
  112. mindspore/mindspore_pynative.dll +0 -0
  113. mindspore/mindspore_res_manager.dll +0 -0
  114. mindspore/mindspore_runtime_pipeline.dll +0 -0
  115. mindspore/mint/__init__.py +4 -44
  116. mindspore/mint/distributed/__init__.py +1 -0
  117. mindspore/mint/distributed/distributed.py +208 -5
  118. mindspore/mint/nn/__init__.py +1 -1
  119. mindspore/mint/nn/functional.py +53 -6
  120. mindspore/mint/nn/layer/_functions.py +164 -294
  121. mindspore/mint/nn/layer/activation.py +8 -6
  122. mindspore/mint/nn/layer/conv.py +122 -98
  123. mindspore/mint/nn/layer/normalization.py +8 -22
  124. mindspore/mint/optim/adam.py +19 -18
  125. mindspore/mint/optim/adamw.py +14 -8
  126. mindspore/mint/optim/sgd.py +5 -5
  127. mindspore/msobj140.dll +0 -0
  128. mindspore/mspdb140.dll +0 -0
  129. mindspore/mspdbcore.dll +0 -0
  130. mindspore/mspdbst.dll +0 -0
  131. mindspore/mspft140.dll +0 -0
  132. mindspore/msvcdis140.dll +0 -0
  133. mindspore/msvcp140_1.dll +0 -0
  134. mindspore/msvcp140_2.dll +0 -0
  135. mindspore/msvcp140_atomic_wait.dll +0 -0
  136. mindspore/msvcp140_codecvt_ids.dll +0 -0
  137. mindspore/nn/cell.py +325 -499
  138. mindspore/nn/grad/cell_grad.py +11 -12
  139. mindspore/nn/layer/activation.py +32 -34
  140. mindspore/nn/layer/basic.py +67 -64
  141. mindspore/nn/layer/channel_shuffle.py +4 -4
  142. mindspore/nn/layer/combined.py +4 -2
  143. mindspore/nn/layer/conv.py +86 -85
  144. mindspore/nn/layer/dense.py +9 -7
  145. mindspore/nn/layer/embedding.py +50 -52
  146. mindspore/nn/layer/image.py +37 -39
  147. mindspore/nn/layer/math.py +111 -112
  148. mindspore/nn/layer/normalization.py +56 -44
  149. mindspore/nn/layer/pooling.py +58 -63
  150. mindspore/nn/layer/rnn_cells.py +33 -33
  151. mindspore/nn/layer/rnns.py +56 -56
  152. mindspore/nn/layer/thor_layer.py +74 -73
  153. mindspore/nn/layer/transformer.py +11 -1
  154. mindspore/nn/learning_rate_schedule.py +20 -20
  155. mindspore/nn/loss/loss.py +79 -81
  156. mindspore/nn/optim/adam.py +1 -1
  157. mindspore/nn/optim/adasum.py +2 -2
  158. mindspore/nn/optim/optimizer.py +1 -1
  159. mindspore/nn/optim/thor.py +2 -2
  160. mindspore/nn/probability/distribution/exponential.py +2 -1
  161. mindspore/nn/probability/distribution/poisson.py +2 -1
  162. mindspore/nn/sparse/sparse.py +3 -3
  163. mindspore/nn/wrap/cell_wrapper.py +34 -37
  164. mindspore/nn/wrap/grad_reducer.py +37 -37
  165. mindspore/nn/wrap/loss_scale.py +72 -74
  166. mindspore/numpy/array_creations.py +5 -5
  167. mindspore/numpy/fft.py +1 -1
  168. mindspore/numpy/math_ops.py +1 -1
  169. mindspore/opencv_core452.dll +0 -0
  170. mindspore/opencv_imgcodecs452.dll +0 -0
  171. mindspore/opencv_imgproc452.dll +0 -0
  172. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  173. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  174. mindspore/ops/_vmap/vmap_array_ops.py +6 -13
  175. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  176. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +17 -8
  177. mindspore/ops/auto_generate/gen_extend_func.py +1 -51
  178. mindspore/ops/auto_generate/gen_ops_def.py +463 -257
  179. mindspore/ops/auto_generate/gen_ops_prim.py +1127 -885
  180. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  181. mindspore/ops/composite/__init__.py +10 -0
  182. mindspore/ops/composite/base.py +8 -4
  183. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  184. mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
  185. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  186. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  187. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  188. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  189. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  190. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  191. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  192. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  193. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  194. mindspore/ops/function/__init__.py +3 -1
  195. mindspore/ops/function/_add_attr_func.py +11 -6
  196. mindspore/ops/function/array_func.py +7 -94
  197. mindspore/ops/function/debug_func.py +4 -3
  198. mindspore/ops/function/grad/grad_func.py +1 -1
  199. mindspore/ops/function/math_func.py +21 -367
  200. mindspore/ops/function/nn_func.py +26 -41
  201. mindspore/ops/function/other_func.py +4 -1
  202. mindspore/ops/function/random_func.py +31 -4
  203. mindspore/ops/functional.py +0 -2
  204. mindspore/ops/functional_overload.py +463 -6
  205. mindspore/ops/op_info_register.py +21 -0
  206. mindspore/ops/operations/__init__.py +5 -2
  207. mindspore/ops/operations/_custom_ops_utils.py +675 -8
  208. mindspore/ops/operations/_inner_ops.py +3 -6
  209. mindspore/ops/operations/_sequence_ops.py +1 -1
  210. mindspore/ops/operations/comm_ops.py +185 -26
  211. mindspore/ops/operations/custom_ops.py +235 -172
  212. mindspore/ops/operations/debug_ops.py +55 -4
  213. mindspore/ops/operations/image_ops.py +13 -13
  214. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  215. mindspore/ops/operations/math_ops.py +3 -4
  216. mindspore/ops/operations/nn_ops.py +5 -6
  217. mindspore/ops/primitive.py +6 -10
  218. mindspore/ops/tensor_method.py +36 -4
  219. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  220. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  221. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  222. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  223. mindspore/ops_generate/common/base_generator.py +14 -0
  224. mindspore/ops_generate/common/gen_constants.py +7 -2
  225. mindspore/ops_generate/common/gen_utils.py +0 -19
  226. mindspore/ops_generate/common/op_proto.py +11 -4
  227. mindspore/ops_generate/common/template.py +88 -11
  228. mindspore/ops_generate/gen_ops.py +1 -1
  229. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  230. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  231. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  232. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  233. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  234. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  235. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  236. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  237. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  238. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  239. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  240. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  241. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  242. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  243. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  244. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  245. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  246. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  247. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  248. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  249. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  250. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  251. mindspore/parallel/_auto_parallel_context.py +4 -2
  252. mindspore/parallel/_cell_wrapper.py +106 -40
  253. mindspore/parallel/_parallel_serialization.py +1 -1
  254. mindspore/parallel/_ps_context.py +4 -6
  255. mindspore/parallel/_tensor.py +167 -12
  256. mindspore/parallel/_transformer/moe.py +1 -1
  257. mindspore/parallel/_transformer/transformer.py +13 -8
  258. mindspore/parallel/auto_parallel.py +12 -5
  259. mindspore/parallel/checkpoint_convert.py +3 -3
  260. mindspore/parallel/checkpoint_transform.py +3 -1
  261. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  262. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  263. mindspore/parallel/cluster/run.py +43 -4
  264. mindspore/parallel/function/__init__.py +8 -1
  265. mindspore/parallel/function/reshard_func.py +1 -1
  266. mindspore/parallel/nn/__init__.py +15 -2
  267. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  268. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  269. mindspore/parallel/shard.py +2 -2
  270. mindspore/parallel/transform_safetensors.py +462 -174
  271. mindspore/pgodb140.dll +0 -0
  272. mindspore/pgort140.dll +0 -0
  273. mindspore/profiler/__init__.py +2 -1
  274. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  275. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  276. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
  277. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  278. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  279. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  280. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  281. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  282. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  283. mindspore/profiler/analysis/task_manager.py +1 -1
  284. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  285. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  286. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  287. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  288. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  289. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  290. mindspore/profiler/common/constant.py +16 -0
  291. mindspore/profiler/common/profiler_context.py +25 -27
  292. mindspore/profiler/common/profiler_info.py +0 -16
  293. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  294. mindspore/profiler/common/profiler_output_path.py +23 -8
  295. mindspore/profiler/common/profiler_parameters.py +128 -35
  296. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  297. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  298. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  299. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  300. mindspore/profiler/dynamic_profiler.py +305 -314
  301. mindspore/profiler/envprofiler.py +12 -7
  302. mindspore/profiler/experimental_config.py +96 -6
  303. mindspore/profiler/mstx.py +33 -12
  304. mindspore/profiler/platform/__init__.py +2 -3
  305. mindspore/profiler/platform/npu_profiler.py +29 -19
  306. mindspore/profiler/profiler.py +35 -19
  307. mindspore/profiler/profiler_action_controller.py +64 -76
  308. mindspore/profiler/schedule.py +10 -4
  309. mindspore/rewrite/common/config.py +1 -0
  310. mindspore/rewrite/common/namer.py +1 -0
  311. mindspore/rewrite/common/namespace.py +1 -0
  312. mindspore/rewrite/node/node.py +31 -11
  313. mindspore/rewrite/parsers/assign_parser.py +1 -1
  314. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  315. mindspore/run_check/_check_version.py +7 -10
  316. mindspore/runtime/__init__.py +5 -5
  317. mindspore/runtime/event.py +10 -4
  318. mindspore/runtime/executor.py +60 -45
  319. mindspore/runtime/memory.py +21 -30
  320. mindspore/runtime/thread_bind_core.py +298 -164
  321. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  322. mindspore/swresample-4.dll +0 -0
  323. mindspore/swscale-6.dll +0 -0
  324. mindspore/tbbmalloc.dll +0 -0
  325. mindspore/tinyxml2.dll +0 -0
  326. mindspore/train/_utils.py +6 -2
  327. mindspore/train/amp.py +43 -20
  328. mindspore/train/callback/__init__.py +5 -5
  329. mindspore/train/callback/_checkpoint.py +3 -6
  330. mindspore/train/callback/_flops_collector.py +1 -1
  331. mindspore/train/callback/_landscape.py +0 -1
  332. mindspore/train/callback/_train_fault_tolerance.py +71 -13
  333. mindspore/train/data_sink.py +11 -2
  334. mindspore/train/dataset_helper.py +9 -0
  335. mindspore/train/model.py +51 -33
  336. mindspore/train/serialization.py +133 -111
  337. mindspore/train/summary/summary_record.py +13 -2
  338. mindspore/turbojpeg.dll +0 -0
  339. mindspore/utils/__init__.py +3 -2
  340. mindspore/utils/dryrun.py +0 -6
  341. mindspore/utils/runtime_execution_order_check.py +162 -78
  342. mindspore/utils/sdc_detect.py +68 -0
  343. mindspore/utils/utils.py +6 -9
  344. mindspore/vcmeta.dll +0 -0
  345. mindspore/vcruntime140.dll +0 -0
  346. mindspore/vcruntime140_1.dll +0 -0
  347. mindspore/version.py +1 -1
  348. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  349. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +352 -390
  350. mindspore/_deprecated/jit.py +0 -198
  351. mindspore/experimental/es/__init__.py +0 -22
  352. mindspore/experimental/es/embedding_service.py +0 -891
  353. mindspore/experimental/es/embedding_service_layer.py +0 -581
  354. mindspore/profiler/parser/__init__.py +0 -14
  355. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  356. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  357. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  358. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  359. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  360. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  361. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  362. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  363. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  364. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  365. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  366. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  367. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  368. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  369. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  370. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  371. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  372. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  373. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  374. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  375. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  376. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  377. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  378. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  379. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  380. mindspore/profiler/parser/container.py +0 -229
  381. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  382. mindspore/profiler/parser/flops_parser.py +0 -531
  383. mindspore/profiler/parser/framework_enum.py +0 -111
  384. mindspore/profiler/parser/framework_parser.py +0 -464
  385. mindspore/profiler/parser/framework_struct.py +0 -61
  386. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  387. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  388. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  389. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  390. mindspore/profiler/parser/hccl_parser.py +0 -573
  391. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  392. mindspore/profiler/parser/integrator.py +0 -526
  393. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  394. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  395. mindspore/profiler/parser/minddata_parser.py +0 -186
  396. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  397. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  398. mindspore/profiler/parser/optime_parser.py +0 -250
  399. mindspore/profiler/parser/profiler_info.py +0 -213
  400. mindspore/profiler/parser/step_trace_parser.py +0 -666
  401. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  402. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  403. {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -296,7 +296,7 @@ def uniform_(input, from_=0, to=1, *, generator=None):
     Returns:
         Tensor, with the same shape and dtype as `input` tensor.
 
-    Raises:
+    Raises:
         TypeError: If `input` is not a Tensor.
         TypeError: If dtype of `input` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, float64,
             bfloat16.
@@ -765,9 +765,9 @@ def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
     Generates random numbers according to the standard Normal (or Gaussian) random number distribution.
 
     Args:
-        mean (Union[float, Tensor]): Mean value of each element, the shape of the `mean` tensor
+        mean (Union[Tensor]): Mean value of each element, the shape of the `mean` tensor
             should be the same as that of the `std` tensor.
-        std (Union[float, Tensor]): Standard deviation for each element, the shape of the `std` tensor
+        std (Union[Tensor]): Standard deviation for each element, the shape of the `std` tensor
             should be the same as that of the `mean` tensor. The value of `std` should be greater than or equal to 0.
 
     Keyword Args:
@@ -793,6 +793,33 @@ def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
         >>> print(output.shape)
         (3,)
 
+    .. function:: normal(mean, std) -> Tensor
+        :noindex:
+
+    Similar to the function above, but the means are shared among all drawn elements.
+
+    Args:
+        mean (float): Mean value of each element.
+        std (Tensor): Standard deviation for each element. The value of `std` should be greater
+            than or equal to 0.
+
+    Returns:
+        Outputs a tensor with the same shape as `std`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import ops
+        >>> from mindspore import Tensor
+        >>> mean = 1.
+        >>> std = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
+        >>> output = ops.function.random_func.normal_ext(mean, std)
+        >>> print(output.shape)
+        (3,)
+
     .. function:: normal(mean, std=1.0) -> Tensor
         :noindex:
 
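The hunks above narrow `mean` and `std` to tensors in the primary signature and document a separate overload that takes a plain float mean shared by all elements. A minimal usage sketch of the two call shapes, mirroring the docstring's own examples (output shapes only, since the values are random):

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    std = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)

    # Primary overload: tensor mean and tensor std with matching shapes.
    mean = Tensor(np.array([0.0, 0.0, 0.0]), mindspore.float32)
    print(ops.function.random_func.normal_ext(mean, std).shape)  # (3,)

    # Newly documented overload: one float mean shared across all draws.
    print(ops.function.random_func.normal_ext(1.0, std).shape)   # (3,)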
@@ -1007,7 +1034,7 @@ def gamma(shape, alpha, beta, seed=None):
         (3, 2, 2)
         >>> # case 2: alpha_shape is (2, 3), so shape is (3, 1, 3)
         >>> shape = (3, 1, 3)
-        >>> alpha = mindspore.tensor([[1, 3, 4], [2, 5, 6]]), mindspore.float32)
+        >>> alpha = mindspore.tensor([[1, 3, 4], [2, 5, 6]], mindspore.float32)
         >>> beta = mindspore.tensor([1.0], mindspore.float32)
         >>> output = mindspore.ops.gamma(shape, alpha, beta, seed=5)
         >>> result = output.shape
@@ -33,7 +33,6 @@ from mindspore.ops.operations.nn_ops import AdaptiveMaxPool2D
 from mindspore.ops.operations.math_ops import Roll
 from mindspore.ops.composite.math_ops import mm
 from mindspore.ops.function.math_func import dot
-from mindspore.ops.function.array_func import new_empty
 from mindspore.ops import auto_generate
 from mindspore.ops.auto_generate import cast
 from mindspore.ops._utils.arg_dtype_cast import DtypeToEnum
@@ -383,7 +382,6 @@ setattr(tensor_operator_registry, 'nanmedian', nanmedian)
 setattr(tensor_operator_registry, 'csr_to_coo', csr_to_coo)
 setattr(tensor_operator_registry, 'zeros', zeros)
 setattr(tensor_operator_registry, 'ones', ones)
-setattr(tensor_operator_registry, 'new_empty', new_empty)
 setattr(tensor_operator_registry, 'unsorted_segment_min', unsorted_segment_min)
 setattr(tensor_operator_registry, 'unsorted_segment_max', unsorted_segment_max)
 setattr(tensor_operator_registry, 'unsorted_segment_prod', unsorted_segment_prod)
@@ -16,10 +16,14 @@
 from mindspore._c_expression import _add_instance
 from mindspore._c_expression import _addcdiv_instance
 from mindspore._c_expression import _all_gather_matmul_instance
+from mindspore._c_expression import _any_instance
 from mindspore._c_expression import _bitwise_not_instance
 from mindspore._c_expression import _clamp_instance
+from mindspore._c_expression import _conv3d_instance
 from mindspore._c_expression import _div_instance
+from mindspore._c_expression import _einsum_instance
 from mindspore._c_expression import _empty_instance
+from mindspore._c_expression import _empty_like_instance
 from mindspore._c_expression import _floor_divide_instance
 from mindspore._c_expression import _fmod_instance
 from mindspore._c_expression import _gelu_instance
@@ -27,6 +31,7 @@ from mindspore._c_expression import _gmm_instance
 from mindspore._c_expression import _gmm_backward_instance
 from mindspore._c_expression import _gmm_backward_fusion_instance
 from mindspore._c_expression import _greater_equal_instance
+from mindspore._c_expression import _index_add_instance
 from mindspore._c_expression import _kthvalue_instance
 from mindspore._c_expression import _lerp_instance
 from mindspore._c_expression import _matmul_reduce_scatter_instance
@@ -34,8 +39,10 @@ from mindspore._c_expression import _max_instance
 from mindspore._c_expression import _min_instance
 from mindspore._c_expression import _nansum_instance
 from mindspore._c_expression import _pixel_shuffle_instance
+from mindspore._c_expression import _quant_matmul_instance
 from mindspore._c_expression import _remainder_instance
 from mindspore._c_expression import _repeat_interleave_instance
+from mindspore._c_expression import _rmod_instance
 from mindspore._c_expression import _sub_instance
 from mindspore._c_expression import _where_instance
 from mindspore._c_expression import _xlogy_instance
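Every `_*_instance` imported here is a C++ functional object; the Python wrappers added below are thin forwarders, and overload resolution (for example `any(input)` versus `any(input, dim, keepdim=False)`) happens on the C++ side. A rough pure-Python analogue of the pattern, purely illustrative and not MindSpore's actual implementation:

    # Hypothetical sketch: try each registered signature until one accepts
    # the arguments. The real _*_instance objects do this in C++.
    def make_dispatcher(*overloads):
        def dispatch(*args, **kwargs):
            for overload in overloads:
                try:
                    return overload(*args, **kwargs)
                except TypeError:
                    continue
            raise TypeError("no matching overload for the given arguments")
        return dispatch

    _any_instance = make_dispatcher(
        lambda input: f"reduce all dims of {input!r}",
        lambda input, dim, keepdim=False: f"reduce dim {dim} of {input!r}",
    )

    print(_any_instance("x"))     # matches the one-argument overload
    print(_any_instance("x", 1))  # falls through to the (input, dim) overload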
@@ -264,6 +271,52 @@ def all_gather_matmul(*args, **kwargs):
     return _all_gather_matmul_instance(*args, **kwargs)
 
 
+def any(*args, **kwargs):
+    r"""
+    any(input) -> Tensor
+
+    Check if ``True`` is present in `input` .
+
+    Args:
+        input (Tensor): The input tensor.
+
+    Returns:
+        Tensor
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>> mindspore.ops.functional_overload.any(input)
+        Tensor(shape=[], dtype=Bool, value= True)
+
+    .. function:: any(input, dim, keepdim=False) -> Tensor
+        :noindex:
+
+    Check if ``True`` is present in the specified dimension of `input` .
+
+    Args:
+        input (Tensor): The input tensor.
+        dim (int): The dimensions to reduce.
+        keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
+
+    Returns:
+        Tensor
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>> mindspore.ops.functional_overload.any(input, dim=1)
+        Tensor(shape=[2], dtype=Bool, value= [ True, True])
+    """
+    return _any_instance(*args, **kwargs)
+
+
 def bitwise_not(*args, **kwargs):
     r"""
     bitwise_not(input) -> Tensor
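Both `any` overloads behave like NumPy's any reduction, which makes the documented outputs easy to sanity-check (standard NumPy only):

    import numpy as np

    a = np.array([[True, False], [True, True]])
    print(np.any(a))          # True          -- mirrors any(input)
    print(np.any(a, axis=1))  # [ True  True] -- mirrors any(input, dim=1)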
@@ -376,6 +429,143 @@ def clip(*args, **kwargs):
     return _clamp_instance(*args, **kwargs)
 
 
+def conv3d(*args, **kwargs):
+    r"""
+    conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
+
+    Applies a 3D convolution over an input tensor. The input tensor is typically of
+    shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`,
+    where :math:`N` is batch size, :math:`C` is channel number, :math:`D, H, W` are the depth,
+    height and width of the feature graph, respectively.
+
+    The output is calculated based on formula:
+
+    .. math::
+
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+    where :math:`bias` is the output channel bias, :math:`ccor` is
+    the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_
+    , :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+    Here are the indices' meanings:
+
+    - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+      where :math:`N` is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+      where :math:`C_{out}` is the number of
+      output channels, which is also equal to the number of kernels.
+
+    - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+      where :math:`C_{in}` is the number of
+      input channels, which is also equal to the number of channels in the convolutional kernels.
+
+    Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+    kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+    channel in the :math:`i`-th batch of the input feature map.
+
+    The shape of the convolutional kernel is given by :math:`(kd, kh, kw)` where :math:`kd` , :math:`kd` and\
+    :math:`kw` are the depth, height and width of the kernel, respectively.
+    If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+    will be :math:`(C_{out}, C_{in} / \text{group}, kd, kh, kw)`,
+    where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+    For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+    The following lists some of the limitations of the parameters.
+
+    - input -- The input to the conv3d. The input must have each dimension size within the range [1, int32_max].
+    - weight -- Filters of shape :math:`(C_{out}, C_{in} / groups, kd, kh, kw)`. The value of :math:`kh`
+      and :math:`kw` is in the range [1, 511]. The remaining values are in the range [1, int32_max].
+      And :math:`kh*kw*k0` is less 65536 (k0 is 16. If data type is float32, k0 is 8).
+    - bias -- Bias Tensor with shape :math:`(C_{out})`. The shape must equal to the first dimension of the weight.
+    - stride -- The distance of kernel moving. It can be an int number or
+      tuple (noted by :math:`(stride_d, stride_h, stride_w)`). stride_h and stride_w are in the range [1, 63].
+      stride_d is in the range [1, 255].
+    - padding -- If padding is an int number, it is in the range [0, 255].
+    - dilation -- The value is in the range [1, 255].
+    - groups -- The value is in the range [1, 65535].
+    - :math:`C_{in} \% \text{groups} == 0 \quad \text{and} \quad C_{out} \% \text{groups} == 0` .
+    - :math:`weight[1] == C_{in} / groups` .
+    - :math:`H_{in} + PadUp + PadDown >= (kh - 1) * DilationH + 1` .
+    - :math:`W_{in} + PadLeft + PadRight >= (kw - 1) * DilationW + 1` .
+    - :math:`D_{in} + PadFront + PadBack >= (kd - 1) * DilationD + 1` .
+    - :math:`H_{out} = (H_{in} + PadUp + PadDown - ((kh - 1) * DilationH + 1)) / StrideH + 1` .
+    - :math:`W_{out} = (W_{in} + PadLeft + PadRight - ((kw - 1) * DilationW + 1)) / StrideW + 1` .
+    - :math:`D_{out} = (D_{in} + PadFront + PadBack - ((kd - 1) * DilationD + 1)) / StrideD + 1` .
+    - :math:`(D_{in}+PadFront+PadBack - ((kd-1)*DilationD+1)) \% StrideD <= PadBack` .
+    - :math:`(H_{in}+PadUp+PadDown - ((kh-1)*Dilationh+1)) \% StrideH <= PadDown` .
+    - :math:`stride_d <= kernel_d` .
+    - :math:`PadUp < kh` and :math:`PadDown < kh` . When `padding` = ``'valid'``, both PadUp and PadDown are zeros.
+      When `padding` = ``'same'``, pad can be calculated by
+      :math:`floor(((H_{out}-1) * strideH + (kh - 1) * DilationH + 1 - H_{in}) / 2)` for high dimension.
+      It is similar way to calculate the padding for depth and width dimension. And the depth and width
+      dimensions also have the same constraints.
+    - :math:`((kh - 1) * DilationH - PadUp)` should be in [0, 255]. It is the same constraint for depth
+      and width dimension.
+    - If `padding` is ``'same'``, `stride` must be 1.
+
+    .. warning::
+        It is only supported on Atlas A2 Training Series Products.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
+        weight (Tensor): Set size of kernel is :math:`(kd, kh,
+            kw)`, then the shape is :math:`(C_{out}, C_{in} / groups, kd, kh, kw)`.
+        bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+            When bias is ``None`` , zeros will be used. Default: ``None`` .
+        stride (Union(int, tuple[int], list[int]), optional): The distance of kernel moving, an int
+            number that represents the depth, the height and width of movement are both strides, or a
+            tuple of triple int numbers that
+            represent the depth, height and width of movement respectively. Default: ``1`` .
+        padding (Union(int, tuple[int], list[int], str), optional): Implicit paddings on both sides of the input `x`.
+            Can be a string, one integer or a tuple/list with 3 integers.
+            If `padding` is a string, the optional values are ``"same"`` , ``"valid"``.
+
+            - same: Adopts the way of completion. The height and width of the output will be equal to
+              the input `x` divided by stride. The padding will be evenly calculated in top and bottom,
+              left and right possiblily. Otherwise, the last extra padding will be calculated from the bottom
+              and the right side. If this mode is set, `stride` must be 1.
+
+            - valid: Adopts the way of discarding. The possible largest height and width of output will be returned
+              without padding. Extra pixels will be discarded.
+
+            If `padding` is one integer, the paddings of top, bottom, left and right are the same, equal to padding.
+            If `padding` is a tuple/list with 3 integers, the padding of head, tail, top, bottom,
+            left and right equal to pad[0], pad[0], pad[1], pad[1], pad[2] and pad[2] correspondingly. Default: ``0`` .
+        dilation (Union[int, tuple[int], list[int]], optional): Controlling the space between the kernel points.
+            Default: ``1`` .
+        groups (int, optional): Splits `input` into groups. Default: ``1`` .
+
+    Returns:
+        Tensor, the same dtype as the `input`, with the shape :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
+        or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`.
+
+    Raises:
+        TypeError: If `stride`, `padding` or `dilation` is neither an int nor a tuple.
+        TypeError: `groups` is not an int.
+        TypeError: If `bias` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import mint
+        >>> x = mindspore.Tensor(np.random.randn(12, 1, 60, 50, 8), mindspore.float16)
+        >>> w = mindspore.Tensor(np.random.randn(26, 1, 2, 4, 4), mindspore.float16)
+        >>> out = mint.nn.functional.conv3d(x, w)
+        >>> print(out.shape)
+        (12, 26, 59, 47, 5)
+    """
+    return _conv3d_instance(*args, **kwargs)
+
+
 def div(*args, **kwargs):
     r"""
     div(input, other, *, rounding_mode=None) -> Tensor
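The output-shape formulas listed for conv3d can be checked without a device. The helper below is hypothetical and covers only the zero-padding, unit-stride, unit-dilation case of the docstring example; it reproduces the documented (12, 26, 59, 47, 5) result:

    def conv3d_out_shape(in_shape, w_shape, stride=1, padding=0, dilation=1):
        """X_out = (X_in + 2*pad - ((k - 1)*dilation + 1)) // stride + 1, per axis."""
        n, _, d, h, w = in_shape         # (N, C_in, D_in, H_in, W_in)
        c_out, _, kd, kh, kw = w_shape   # (C_out, C_in/groups, kd, kh, kw)
        def out(x, k):
            return (x + 2 * padding - ((k - 1) * dilation + 1)) // stride + 1
        return (n, c_out, out(d, kd), out(h, kh), out(w, kw))

    print(conv3d_out_shape((12, 1, 60, 50, 8), (26, 1, 2, 4, 4)))  # (12, 26, 59, 47, 5)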
@@ -441,9 +631,106 @@ def divide(*args, **kwargs):
     return _div_instance(*args, **kwargs)
 
 
+def einsum(*args, **kwargs):
+    r"""
+    According to the Einstein summation Convention (Einsum),
+    the product of the input tensor elements is summed along the specified dimension.
+    You can use this operator to perform diagonal, reducesum, transpose, matmul, mul, inner product operations, etc.
+
+    Note:
+        The sublist format is also supported. For example, einsum_ext(op1, sublist1, op2, sublist2, ..., sublist_out).
+        In this format, equation can be derived by the sublists which are made up of Python's Ellipsis and list of
+        integers in [0, 52). Each operand is followed by a sublist and an output sublist is at the end.
+        Dynamic shape, dynamic rank input is not supported in `graph mode (mode=mindspore.GRAPH_MODE)
+        <https://www.mindspore.cn/tutorials/en/master/compile/static_graph.html>`_.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        equation (str): Notation based on the Einstein summation convention, represent the operation you want to do.
+            the value can contain only letters, commas, ellipsis and arrow. The letters(must be in [a-zA-Z]) represent
+            input tensor dimension, commas(,) represent separate tensors, ellipsis indicates the tensor dimension that
+            you do not care about, the left of the arrow indicates the input tensors, and the right of it indicates the
+            desired output dimension. If there are no arrows in the equation, the letters that appear exactly once in
+            the equation will be part of the output, sorted in increasing alphabetical order. The output is computed by
+            multiplying the input operands element-wise, with their dimensions aligned based on the letters, and then
+            summing out the dimensions whose letters are not part of the output. If there is one arrow in the equation,
+            the output letters must appear at least once for some input operand and at most once for the output.
+        operands (Tensor): Input tensor used for calculation. The dtype of the tensor must be the same.
+
+    Returns:
+        Tensor, the shape of it can be obtained from the `equation` , and the dtype is the same as input tensors.
+
+    Raises:
+        TypeError: If `equation` is invalid, or the `equation` does not match the input tensor.
+        ValueError: If the number in sublist is not in [0, 52) in sublist format.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
+        >>> equation = "i->"
+        >>> output = ops.einsum_ext(equation, x)
+        >>> print(output)
+        7.0
+        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
+        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
+        >>> equation = "i,i->i"
+        >>> output = ops.einsum_ext(equation, x, y)
+        >>> print(output)
+        [ 2. 8. 12.]
+        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+        >>> y = Tensor(np.array([[2.0, 3.0], [1.0, 2.0], [4.0, 5.0]]), mindspore.float32)
+        >>> equation = "ij,jk->ik"
+        >>> output = ops.einsum_ext(equation, x, y)
+        >>> print(output)
+        [[16. 22.]
+         [37. 52.]]
+        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+        >>> equation = "ij->ji"
+        >>> output = ops.einsum_ext(equation, x)
+        >>> print(output)
+        [[1. 4.]
+         [2. 5.]
+         [3. 6.]]
+        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+        >>> equation = "ij->j"
+        >>> output = ops.einsum_ext(equation, x)
+        >>> print(output)
+        [5. 7. 9.]
+        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+        >>> equation = "...->"
+        >>> output = ops.einsum_ext(equation, x)
+        >>> print(output)
+        21.0
+        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
+        >>> y = Tensor(np.array([2.0, 4.0, 1.0]), mindspore.float32)
+        >>> equation = "j,i->ji"
+        >>> output = ops.einsum_ext(equation, x, y)
+        >>> print(output)
+        [[ 2. 4. 1.]
+         [ 4. 8. 2.]
+         [ 6. 12. 3.]]
+        >>> x = mindspore.Tensor([1, 2, 3, 4], mindspore.float32)
+        >>> y = mindspore.Tensor([1, 2], mindspore.float32)
+        >>> output = ops.einsum_ext(x, [..., 1], y, [..., 2], [..., 1, 2])
+        >>> print(output)
+        [[1. 2.]
+         [2. 4.]
+         [3. 6.]
+         [4. 8.]]
+    """
+    return _einsum_instance(*args, **kwargs)
+
+
 def empty(*args, **kwargs):
     r"""
-    empty(*size, dtype=None, device=None) -> Tensor
+    empty(*size, *, dtype=None, device=None) -> Tensor
 
     Creates a tensor with uninitialized data, whose shape, dtype and device are described by the argument `size`,
     `dtype` and `device` respectively.
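Since the equation syntax follows the standard Einstein-summation convention, the docstring's matrix-multiplication case can be cross-checked against NumPy's einsum:

    import numpy as np

    x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    y = np.array([[2.0, 3.0], [1.0, 2.0], [4.0, 5.0]])
    print(np.einsum("ij,jk->ik", x, y))  # [[16. 22.]
                                         #  [37. 52.]]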
@@ -453,22 +740,23 @@ def empty(*args, **kwargs):
     Args:
         size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Can be variable numbers of
-            positive integers or tupled or list containing positive integers.
+            positive integers or tuple or list containing positive integers.
 
     Keyword Args:
         dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
             `mindspore.float32` will be used. Default: ``None`` .
-        device (string, optional): The specified device of the output tensor. Support ``CPU`` and ``Ascend``. If
-            `device = None`, `mindspore.context.device_target` will be used. Default ``None``.
+        device (string, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
+            ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
+            `mindspore.context.device_target` will be used. Default ``None``.
 
     Returns:
-        Tensor, whose dtype and size are defined by input.
+        Tensor, whose shape, dtype and device are defined by input.
 
     Raises:
         TypeError: If `size` is neither an int nor a tuple or list of int.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``CPU``
 
     Examples:
         >>> import mindspore
@@ -481,8 +769,55 @@ def empty(*args, **kwargs):
     return _empty_instance(*args, **kwargs)
 
 
+def empty_like(*args, **kwargs):
+    r"""
+    empty_like(input, *, dtype=None, device=None) -> Tensor
+
+    Returns an uninitialized Tensor with the same shape as the `input`. Its dtype is specified by `dtype` and its
+    device is specified by `device`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Tensor of any dimension.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`, the
+            tensor will have the same dtype as input `input`. Default ``None``.
+        device (string, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
+            ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
+            the value set by :func:`mindspore.set_device` will be used. Default ``None``.
+
+    Returns:
+        Tensor, has the same shape, type and device as `input` but with uninitialized data (May be a random value).
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import ops, Tensor
+        >>> x = Tensor([[1, 2, 3], [4, 5, 6]])
+        >>> output1 = ops.empty_like(x)
+        >>> print(output1)
+        [[0 0 0]
+         [0 0 0]]
+        >>> output2 = ops.empty_like(x, dtype=mindspore.float64)
+        >>> print(output2)
+        [[0. 0. 0.]
+         [0. 0. 0.]]
+    """
+    return _empty_like_instance(*args, **kwargs)
+
+
 def floor_divide(*args, **kwargs):
     r"""
+    floor_divide(input, other) -> Tensor
+
     Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
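"Round down to the closest integer" means flooring toward negative infinity, not truncation toward zero; the distinction only shows up with negative operands. NumPy's floor_divide illustrates it:

    import numpy as np

    print(np.floor_divide(7, 2))   # 3
    print(np.floor_divide(-7, 2))  # -4 (floor), where truncation would give -3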
@@ -812,6 +1147,60 @@ def ge(*args, **kwargs):
     return _greater_equal_instance(*args, **kwargs)
 
 
+def index_add(*args, **kwargs):
+    r"""
+    index_add(input, dim, index, source, *, alpha=1) -> Tensor
+
+    Accumulate the elements of `alpha` times `source` into the `input` by adding to the index in the order given in `index`. For example, if ``dim == 0`` , ``index[i] == j`` , and ``alpha = -1`` , then the `i` th row of `source` is subtracted from the `j` th row of `input` . The `dim` th dimension of `source` must have the same size as the length of `index` , and all other dimensions must match `input`, or an error will be raised. For a 3-D tensor, the output is defined as follows:
+
+    .. math::
+        \begin{array}{ll}
+            input[index[i],\ :,\ :]\ +=\ alpha * source[i,\ :,\ :] \qquad \#if\ dim == 0 \\
+            input[:,\ \ index[i],\ :]\ +=\ alpha * source[:,\ \ i,\ :] \qquad \#if\ dim == 1 \\
+            input[:,\ :,\ \ index[i]]\ +=\ alpha * source[:,\ :,\ \ i] \qquad\#if\ dim == 2 \\
+        \end{array}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input Tensor.
+        dim (int): The dimension along which to index.
+        index (Tensor): Add the value of "input Tensor" and `source` along the dimension of the `dim` according to the specified index value, with data type int32. The `index` must be 1D with the same size as the size of `source` in the `dim` dimension. The values of `index` should be in [0, b), where the b is the size of "input Tensor" in the `dim` dimension.
+        source (Tensor): The input tensor with the value to add. Must have same data type as "input Tensor". The shape must be the same as "input Tensor" except the `dim` th dimension.
+
+    Keyword Args:
+        alpha (number, optional): The scalar multiplier for source. Default: ``1``.
+
+    Returns:
+        Tensor, has the same shape and dtype as `input`.
+
+    Raises:
+        TypeError: If neither `index` nor `source` is a Tensor.
+        ValueError: If the value of `dim` is out of the dimension range of `source` shape.
+        ValueError: If `index` rank is not the same as `source` rank.
+        ValueError: If shape of `index` is not 1D or size of `index` is not equal to dimension of source[dim].
+        ValueError: If the shape of `source` is not the same as that of `input` except the `dim` axis.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
+        >>> index = Tensor(np.array([0, 2]), mindspore.int32)
+        >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
+        >>> output = mint.index_add(x, 1, index, y, alpha=1)
+        >>> print(output)
+        [[ 1.5  2.   4. ]
+         [ 5.   5.   7.5]
+         [ 9.   8.  11.5]]
+    """
+    return _index_add_instance(*args, **kwargs)
+
+
 def kthvalue(*args, **kwargs):
     r"""
     Calculates the kth smallest value along given dim specified by `dim` of the input
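The index_add example can be reproduced by hand from the dim == 1 formula above; an illustrative NumPy rendering of that arithmetic (the real mint.index_add runs on Ascend):

    import numpy as np

    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
    index = np.array([0, 2])
    y = np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]], dtype=np.float32)
    alpha = 1
    for i, j in enumerate(index):   # input[:, index[i]] += alpha * source[:, i]
        x[:, j] += alpha * y[:, i]
    print(x)  # [[ 1.5  2.   4. ]
              #  [ 5.   5.   7.5]
              #  [ 9.   8.  11.5]]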
@@ -1256,6 +1645,60 @@ def pixel_shuffle(*args, **kwargs):
     return _pixel_shuffle_instance(*args, **kwargs)
 
 
+def quant_matmul(*args, **kwargs):
+    r"""
+    quant_matmul(x1, x2, scale, *, offset=None, pertoken_scale=None, bias=None, output_dtype=None, x1_dtype=None, x2_dtype=None, pertoken_scale_dtype=None, scale_dtype=None, group_sizes=None) -> Tensor
+
+    Used for quantized matrix multiplication.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+        Only support on David training series.
+
+    Args:
+        x1 (Tensor): Tensor of shape :math:`(*, M, K)` . The dimension of `input` should be in [2, 6].
+        x2 (Tensor): Tensor of shape :math:`(*, K, N)` . The dimension of `input` should be in [2, 6].
+        scale (Tensor): Tensor of shape :math:`(T,)` . T should be equal to 1 or N, N is the last dimension of `x2`.
+
+    Keyword Args:
+        offset (Tensor, optional): Tensor of shape :math:`(T,)` . T should be equal to 1 or N, N is the last dimension of `x2`. Default: ``None`` .
+        pertoken_scale (Tensor, optional): Tensor of shape :math:`(M,)` . M is second-to-last dimension of `x1`. Default: ``None`` .
+            A valid Tensor must deliver to `pertoken_scale` , ``None`` will cause unexpected error.
+        bias (Tensor, optional): Tensor of shape :math:`(N,)` or :math:`(B, 1, N)` , N is the last dimension of `x2`.
+            If dimension of `output` is 2, 4, 5 or 6, `bias` must has shape :math:`(N,)` . Default: ``None`` .
+        output_dtype (:class:`mindspore.dtype`, optional): the dtype of `output`. Default: ``None`` .
+        x1_dtype (:class:`mindspore.dtype`, optional): Cast `x1` to `x1_dtype` before calculation. Default: ``None`` .
+        x2_dtype (:class:`mindspore.dtype`, optional): Cast `x2` to `x2_dtype` before calculation. Default: ``None`` .
+        pertoken_scale_dtype (:class:`mindspore.dtype`, optional): Cast `pertoken_scale` to `pertoken_scale_dtype` before calculation. Default: ``None`` .
+        scale_dtype (:class:`mindspore.dtype`, optional): Cast `scale` to `scale_dtype` before calculation. Default: ``None`` .
+        group_sizes (Union[tuple(int), list(int)], optional): A sequence of int elements. Must have 3 elements. Default: ``None`` .
+
+    Returns:
+        Tensor of shape :math:`(*, M, N)` .
+
+    Raises:
+        ValueError: If dtype of `x1` is int8 or int32.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import ops, Tensor
+        >>> x1 = Tensor(np.random.randn(2, 3, 4), ms.float8_e4m3)
+        >>> x2 = Tensor(np.random.randn(2, 4, 5), ms.float8_e4m3)
+        >>> scale = Tensor(np.random.randn(1,), ms.float32)
+        >>> pertoken_scale = Tensor(np.random.randn(3,), ms.float32)
+        >>> output = ops.auto_generate.quant_matmul(x1, x2, scale, pertoken_scale=pertoken_scale, output_dtype=ms.bfloat16)
+        >>> print(output.shape)
+        (2, 3, 5)
+        >>> print(output.dtype)
+        BFloat16
+    """
+    return _quant_matmul_instance(*args, **kwargs)
+
+
 def remainder(*args, **kwargs):
     r"""
     remainder(input, other) -> Tensor
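Numerically, combining a per-tensor `scale` with a per-token `pertoken_scale` amounts to rescaling every row of the low-precision matmul result. A rough float32 NumPy sketch of that dequantization arithmetic (an assumption about the semantics for illustration only; it is not the Ascend kernel and skips the float8 types):

    import numpy as np

    x1 = np.random.randn(3, 4).astype(np.float32)    # stand-in for the quantized (M, K) input
    x2 = np.random.randn(4, 5).astype(np.float32)    # stand-in for the quantized (K, N) weight
    scale = np.float32(0.02)                         # per-tensor output scale, shape (1,)
    pertoken = np.random.rand(3).astype(np.float32)  # one scale per row (token), shape (M,)
    out = (x1 @ x2) * scale * pertoken[:, None]      # (M, N), here (3, 5)
    print(out.shape)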
@@ -1347,6 +1790,13 @@ def repeat_interleave(*args, **kwargs):
     return _repeat_interleave_instance(*args, **kwargs)
 
 
+def rmod(*args, **kwargs):
+    r"""
+    rmod(input, other) -> Tensor
+    """
+    return _rmod_instance(*args, **kwargs)
+
+
 def sub(*args, **kwargs):
     r"""
     sub(input, other, *, alpha=1) -> Tensor
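The new rmod wrapper ships with nothing but a signature line. Judging from the name, it is presumably the reflected counterpart of remainder, i.e. the operand order Python's __rmod__ protocol needs; the reading sketched below is an assumption, not documented behaviour:

    # Assumption: rmod(input, other) computes other % input, the reflected
    # form of remainder(input, other). Plain-Python illustration only.
    def rmod_sketch(input_, other):
        return other % input_

    print(rmod_sketch(3, 7))  # 1, i.e. 7 % 3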
@@ -1518,12 +1968,16 @@ __all__ = [
     "__add__",
     "addcdiv",
     "all_gather_matmul",
+    "any",
     "bitwise_not",
     "clamp",
     "clip",
+    "conv3d",
     "div",
     "divide",
+    "einsum",
     "empty",
+    "empty_like",
     "floor_divide",
     "fmod",
     "gelu",
@@ -1532,6 +1986,7 @@ __all__ = [
     "gmm_backward_fusion",
     "greater_equal",
     "ge",
+    "index_add",
     "kthvalue",
     "lerp",
     "matmul_reduce_scatter",
@@ -1539,8 +1994,10 @@ __all__ = [
     "min",
     "nansum",
     "pixel_shuffle",
+    "quant_matmul",
     "remainder",
     "repeat_interleave",
+    "rmod",
     "sub",
     "__sub__",
     "where",