mindspore-2.6.0rc1-cp39-cp39-win_amd64.whl → mindspore-2.7.0rc1-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (384)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +40 -9
  7. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  8. mindspore/_extends/optimize/cell_utils.py +96 -0
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +44 -22
  11. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  12. mindspore/_extends/parse/parser.py +37 -62
  13. mindspore/_extends/parse/resources.py +39 -0
  14. mindspore/_extends/parse/standard_method.py +43 -13
  15. mindspore/_extends/parse/trope.py +8 -1
  16. mindspore/_extends/pijit/__init__.py +1 -2
  17. mindspore/amp.py +4 -4
  18. mindspore/avcodec-59.dll +0 -0
  19. mindspore/avdevice-59.dll +0 -0
  20. mindspore/avfilter-8.dll +0 -0
  21. mindspore/avformat-59.dll +0 -0
  22. mindspore/avutil-57.dll +0 -0
  23. mindspore/boost/adasum.py +1 -1
  24. mindspore/boost/boost_cell_wrapper.py +4 -4
  25. mindspore/common/__init__.py +27 -2
  26. mindspore/common/_grad_function.py +2 -1
  27. mindspore/common/_pijit_context.py +28 -7
  28. mindspore/common/_stub_tensor.py +1 -209
  29. mindspore/common/_tensor_cpp_method.py +1 -1
  30. mindspore/common/_tensor_docs.py +77 -16
  31. mindspore/common/api.py +238 -113
  32. mindspore/common/dtype.py +21 -11
  33. mindspore/common/dump.py +10 -15
  34. mindspore/common/generator.py +5 -3
  35. mindspore/common/hook_handle.py +11 -2
  36. mindspore/common/jit_config.py +1 -1
  37. mindspore/common/jit_trace.py +84 -105
  38. mindspore/common/parameter.py +26 -12
  39. mindspore/common/recompute.py +3 -3
  40. mindspore/common/sparse_tensor.py +0 -3
  41. mindspore/common/symbol.py +0 -1
  42. mindspore/common/tensor.py +81 -81
  43. mindspore/communication/_comm_helper.py +46 -4
  44. mindspore/communication/management.py +79 -7
  45. mindspore/context.py +58 -40
  46. mindspore/dataset/core/config.py +3 -3
  47. mindspore/dataset/engine/datasets.py +20 -7
  48. mindspore/dataset/engine/datasets_user_defined.py +33 -3
  49. mindspore/dataset/engine/iterators.py +2 -2
  50. mindspore/dataset/engine/obs/config_loader.py +2 -2
  51. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  52. mindspore/dataset/transforms/py_transforms.py +7 -3
  53. mindspore/dataset/transforms/transforms.py +7 -3
  54. mindspore/dataset/vision/validators.py +1 -0
  55. mindspore/device_context/ascend/device.py +1 -1
  56. mindspore/device_context/gpu/__init__.py +2 -2
  57. mindspore/device_context/gpu/device.py +1 -1
  58. mindspore/device_context/gpu/op_precision.py +4 -2
  59. mindspore/device_context/gpu/op_tuning.py +6 -3
  60. mindspore/device_manager.py +16 -9
  61. mindspore/dnnl.dll +0 -0
  62. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
  63. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  64. mindspore/experimental/optim/adadelta.py +13 -20
  65. mindspore/experimental/optim/adagrad.py +15 -22
  66. mindspore/experimental/optim/adam.py +17 -24
  67. mindspore/experimental/optim/adamax.py +14 -22
  68. mindspore/experimental/optim/adamw.py +28 -34
  69. mindspore/experimental/optim/asgd.py +15 -25
  70. mindspore/experimental/optim/lr_scheduler.py +27 -45
  71. mindspore/experimental/optim/nadam.py +14 -24
  72. mindspore/experimental/optim/optimizer.py +13 -23
  73. mindspore/experimental/optim/radam.py +18 -24
  74. mindspore/experimental/optim/rmsprop.py +14 -25
  75. mindspore/experimental/optim/rprop.py +15 -26
  76. mindspore/experimental/optim/sgd.py +9 -19
  77. mindspore/hal/__init__.py +4 -4
  78. mindspore/hal/contiguous_tensors_handle.py +2 -2
  79. mindspore/hal/memory.py +27 -7
  80. mindspore/include/api/cell.h +37 -1
  81. mindspore/include/api/delegate.h +10 -0
  82. mindspore/include/api/model.h +3 -0
  83. mindspore/include/api/types.h +2 -2
  84. mindspore/include/c_api/model_c.h +0 -58
  85. mindspore/include/c_api/tensor_c.h +0 -26
  86. mindspore/include/dataset/vision_ascend.h +1 -1
  87. mindspore/jpeg62.dll +0 -0
  88. mindspore/mindrecord/tools/cifar10.py +60 -11
  89. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  90. mindspore/mindspore_backend_common.dll +0 -0
  91. mindspore/mindspore_backend_manager.dll +0 -0
  92. mindspore/mindspore_common.dll +0 -0
  93. mindspore/mindspore_core.dll +0 -0
  94. mindspore/mindspore_cpu_res_manager.dll +0 -0
  95. mindspore/mindspore_dump.dll +0 -0
  96. mindspore/mindspore_frontend.dll +0 -0
  97. mindspore/mindspore_glog.dll +0 -0
  98. mindspore/mindspore_memory_pool.dll +0 -0
  99. mindspore/mindspore_ms_backend.dll +0 -0
  100. mindspore/mindspore_ops.dll +0 -0
  101. mindspore/mindspore_ops_host.dll +0 -0
  102. mindspore/mindspore_ops_kernel_common.dll +0 -0
  103. mindspore/mindspore_profiler.dll +0 -0
  104. mindspore/mindspore_pyboost.dll +0 -0
  105. mindspore/mindspore_pynative.dll +0 -0
  106. mindspore/mindspore_res_manager.dll +0 -0
  107. mindspore/mindspore_runtime_pipeline.dll +0 -0
  108. mindspore/mint/__init__.py +6 -46
  109. mindspore/mint/distributed/__init__.py +1 -0
  110. mindspore/mint/distributed/distributed.py +212 -9
  111. mindspore/mint/nn/__init__.py +1 -1
  112. mindspore/mint/nn/functional.py +53 -6
  113. mindspore/mint/nn/layer/_functions.py +164 -294
  114. mindspore/mint/nn/layer/activation.py +8 -6
  115. mindspore/mint/nn/layer/conv.py +137 -101
  116. mindspore/mint/nn/layer/normalization.py +8 -22
  117. mindspore/mint/optim/adam.py +19 -18
  118. mindspore/mint/optim/adamw.py +14 -8
  119. mindspore/mint/optim/sgd.py +5 -5
  120. mindspore/nn/cell.py +328 -502
  121. mindspore/nn/grad/cell_grad.py +11 -12
  122. mindspore/nn/layer/activation.py +32 -34
  123. mindspore/nn/layer/basic.py +67 -64
  124. mindspore/nn/layer/channel_shuffle.py +4 -4
  125. mindspore/nn/layer/combined.py +4 -2
  126. mindspore/nn/layer/conv.py +117 -110
  127. mindspore/nn/layer/dense.py +9 -7
  128. mindspore/nn/layer/embedding.py +50 -52
  129. mindspore/nn/layer/image.py +37 -39
  130. mindspore/nn/layer/math.py +111 -112
  131. mindspore/nn/layer/normalization.py +56 -44
  132. mindspore/nn/layer/pooling.py +58 -63
  133. mindspore/nn/layer/rnn_cells.py +33 -33
  134. mindspore/nn/layer/rnns.py +56 -56
  135. mindspore/nn/layer/thor_layer.py +74 -73
  136. mindspore/nn/layer/transformer.py +11 -1
  137. mindspore/nn/learning_rate_schedule.py +20 -20
  138. mindspore/nn/loss/loss.py +79 -81
  139. mindspore/nn/optim/adam.py +3 -3
  140. mindspore/nn/optim/adasum.py +2 -2
  141. mindspore/nn/optim/asgd.py +2 -0
  142. mindspore/nn/optim/optimizer.py +1 -1
  143. mindspore/nn/optim/thor.py +2 -2
  144. mindspore/nn/probability/distribution/exponential.py +2 -1
  145. mindspore/nn/probability/distribution/poisson.py +2 -1
  146. mindspore/nn/sparse/sparse.py +3 -3
  147. mindspore/nn/wrap/cell_wrapper.py +34 -37
  148. mindspore/nn/wrap/grad_reducer.py +37 -37
  149. mindspore/nn/wrap/loss_scale.py +72 -74
  150. mindspore/numpy/array_creations.py +5 -5
  151. mindspore/numpy/fft.py +1 -1
  152. mindspore/numpy/math_ops.py +5 -5
  153. mindspore/opencv_core452.dll +0 -0
  154. mindspore/opencv_imgcodecs452.dll +0 -0
  155. mindspore/opencv_imgproc452.dll +0 -0
  156. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  157. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  158. mindspore/ops/_vmap/vmap_array_ops.py +31 -13
  159. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  160. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
  161. mindspore/ops/auto_generate/gen_extend_func.py +23 -141
  162. mindspore/ops/auto_generate/gen_ops_def.py +727 -321
  163. mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
  164. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  165. mindspore/ops/composite/__init__.py +10 -0
  166. mindspore/ops/composite/base.py +8 -4
  167. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  168. mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
  169. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  170. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  171. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  172. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  173. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  174. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  175. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  176. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  177. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  178. mindspore/ops/function/__init__.py +3 -1
  179. mindspore/ops/function/_add_attr_func.py +11 -6
  180. mindspore/ops/function/array_func.py +9 -96
  181. mindspore/ops/function/debug_func.py +4 -3
  182. mindspore/ops/function/grad/grad_func.py +1 -1
  183. mindspore/ops/function/math_func.py +33 -540
  184. mindspore/ops/function/nn_func.py +28 -74
  185. mindspore/ops/function/other_func.py +4 -1
  186. mindspore/ops/function/random_func.py +44 -5
  187. mindspore/ops/function/vmap_func.py +2 -1
  188. mindspore/ops/functional.py +2 -3
  189. mindspore/ops/functional_overload.py +571 -6
  190. mindspore/ops/op_info_register.py +21 -0
  191. mindspore/ops/operations/__init__.py +16 -11
  192. mindspore/ops/operations/_custom_ops_utils.py +689 -34
  193. mindspore/ops/operations/_inner_ops.py +3 -6
  194. mindspore/ops/operations/_sequence_ops.py +1 -1
  195. mindspore/ops/operations/array_ops.py +2 -2
  196. mindspore/ops/operations/comm_ops.py +185 -26
  197. mindspore/ops/operations/custom_ops.py +294 -174
  198. mindspore/ops/operations/debug_ops.py +59 -4
  199. mindspore/ops/operations/image_ops.py +13 -13
  200. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  201. mindspore/ops/operations/math_ops.py +3 -4
  202. mindspore/ops/operations/nn_ops.py +7 -39
  203. mindspore/ops/primitive.py +6 -10
  204. mindspore/ops/tensor_method.py +47 -8
  205. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  206. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  207. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  208. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  209. mindspore/ops_generate/common/base_generator.py +14 -0
  210. mindspore/ops_generate/common/gen_constants.py +8 -3
  211. mindspore/ops_generate/common/gen_utils.py +0 -19
  212. mindspore/ops_generate/common/op_proto.py +11 -4
  213. mindspore/ops_generate/common/template.py +88 -11
  214. mindspore/ops_generate/gen_ops.py +1 -1
  215. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  216. mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
  217. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  218. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  219. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  220. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  221. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  222. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  223. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  224. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  225. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  226. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  227. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  228. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  229. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  230. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  231. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  232. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  233. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  234. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  235. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  236. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  237. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  238. mindspore/parallel/_auto_parallel_context.py +11 -8
  239. mindspore/parallel/_cell_wrapper.py +113 -45
  240. mindspore/parallel/_parallel_serialization.py +1 -1
  241. mindspore/parallel/_ps_context.py +4 -6
  242. mindspore/parallel/_tensor.py +167 -12
  243. mindspore/parallel/_transformer/moe.py +1 -1
  244. mindspore/parallel/_transformer/transformer.py +13 -8
  245. mindspore/parallel/auto_parallel.py +14 -7
  246. mindspore/parallel/checkpoint_convert.py +3 -3
  247. mindspore/parallel/checkpoint_transform.py +11 -7
  248. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  249. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  250. mindspore/parallel/cluster/run.py +43 -4
  251. mindspore/parallel/function/__init__.py +8 -1
  252. mindspore/parallel/function/reshard_func.py +6 -7
  253. mindspore/parallel/nn/__init__.py +15 -2
  254. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  255. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  256. mindspore/parallel/shard.py +3 -4
  257. mindspore/parallel/transform_safetensors.py +463 -174
  258. mindspore/profiler/__init__.py +2 -1
  259. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  260. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  261. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
  262. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  263. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  264. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  265. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  266. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  267. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  268. mindspore/profiler/analysis/task_manager.py +1 -1
  269. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  270. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  271. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  272. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  273. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  274. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  275. mindspore/profiler/common/constant.py +16 -0
  276. mindspore/profiler/common/profiler_context.py +25 -27
  277. mindspore/profiler/common/profiler_info.py +0 -16
  278. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  279. mindspore/profiler/common/profiler_output_path.py +23 -8
  280. mindspore/profiler/common/profiler_parameters.py +128 -35
  281. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  282. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  283. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  284. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  285. mindspore/profiler/dynamic_profiler.py +305 -314
  286. mindspore/profiler/envprofiler.py +12 -7
  287. mindspore/profiler/experimental_config.py +96 -6
  288. mindspore/profiler/mstx.py +33 -12
  289. mindspore/profiler/platform/__init__.py +2 -3
  290. mindspore/profiler/platform/npu_profiler.py +29 -19
  291. mindspore/profiler/profiler.py +35 -19
  292. mindspore/profiler/profiler_action_controller.py +64 -76
  293. mindspore/profiler/schedule.py +10 -4
  294. mindspore/rewrite/common/config.py +1 -0
  295. mindspore/rewrite/common/namer.py +1 -0
  296. mindspore/rewrite/common/namespace.py +1 -0
  297. mindspore/rewrite/node/node.py +31 -11
  298. mindspore/rewrite/parsers/assign_parser.py +1 -1
  299. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  300. mindspore/run_check/_check_version.py +7 -10
  301. mindspore/runtime/__init__.py +5 -5
  302. mindspore/runtime/event.py +10 -4
  303. mindspore/runtime/executor.py +60 -45
  304. mindspore/runtime/memory.py +30 -32
  305. mindspore/runtime/thread_bind_core.py +298 -164
  306. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  307. mindspore/swresample-4.dll +0 -0
  308. mindspore/swscale-6.dll +0 -0
  309. mindspore/tinyxml2.dll +0 -0
  310. mindspore/train/_utils.py +14 -4
  311. mindspore/train/amp.py +43 -20
  312. mindspore/train/callback/__init__.py +5 -5
  313. mindspore/train/callback/_checkpoint.py +3 -6
  314. mindspore/train/callback/_flops_collector.py +1 -1
  315. mindspore/train/callback/_landscape.py +0 -1
  316. mindspore/train/callback/_train_fault_tolerance.py +97 -16
  317. mindspore/train/data_sink.py +11 -2
  318. mindspore/train/dataset_helper.py +9 -0
  319. mindspore/train/model.py +135 -55
  320. mindspore/train/serialization.py +133 -111
  321. mindspore/train/summary/summary_record.py +13 -2
  322. mindspore/turbojpeg.dll +0 -0
  323. mindspore/utils/__init__.py +3 -2
  324. mindspore/utils/dryrun.py +0 -6
  325. mindspore/utils/runtime_execution_order_check.py +163 -77
  326. mindspore/utils/sdc_detect.py +68 -0
  327. mindspore/utils/utils.py +6 -9
  328. mindspore/version.py +1 -1
  329. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  330. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +333 -371
  331. mindspore/_deprecated/jit.py +0 -198
  332. mindspore/experimental/es/__init__.py +0 -22
  333. mindspore/experimental/es/embedding_service.py +0 -891
  334. mindspore/experimental/es/embedding_service_layer.py +0 -581
  335. mindspore/profiler/parser/__init__.py +0 -14
  336. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  337. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  338. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  339. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  340. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  341. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  342. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  343. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  344. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  345. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  346. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  347. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  348. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  349. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  350. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  351. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  352. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  353. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  354. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  355. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  356. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  357. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  358. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  359. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  360. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  361. mindspore/profiler/parser/container.py +0 -229
  362. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  363. mindspore/profiler/parser/flops_parser.py +0 -531
  364. mindspore/profiler/parser/framework_enum.py +0 -111
  365. mindspore/profiler/parser/framework_parser.py +0 -464
  366. mindspore/profiler/parser/framework_struct.py +0 -61
  367. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  368. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  369. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  370. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  371. mindspore/profiler/parser/hccl_parser.py +0 -573
  372. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  373. mindspore/profiler/parser/integrator.py +0 -526
  374. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  375. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  376. mindspore/profiler/parser/minddata_parser.py +0 -186
  377. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  378. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  379. mindspore/profiler/parser/optime_parser.py +0 -250
  380. mindspore/profiler/parser/profiler_info.py +0 -213
  381. mindspore/profiler/parser/step_trace_parser.py +0 -666
  382. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  383. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  384. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -53,9 +53,10 @@ from mindspore.ops.auto_generate import (reflection_pad_1d_op, reflection_pad_2d
  upsample_nearest1d_op, upsample_nearest2d_op, upsample_nearest3d_op,
  upsample_linear1d_op, upsample_bilinear2d_op, upsample_bicubic2d_op,
  upsample_trilinear3d_impl, fill_scalar_op, floor_op, nllloss_2d_op,
- masked_fill_op, masked_select, ones, flatten_ext, conv_transpose2d)
+ masked_fill_op, masked_select, ones, flatten_ext, conv_transpose2d,
+ func_max_pool2d_op)
  # 2
-
+ from mindspore.ops.auto_generate.pyboost_inner_prim import grid_sampler_2d_impl, grid_sampler_3d_impl
  # 3

  # 4
@@ -91,10 +92,10 @@ from mindspore.ops.auto_generate import avg_pool3d_ext_op
  # 19

  # 20
-
+ from mindspore.ops.functional_overload import conv3d as conv3d_op
  from mindspore.ops.auto_generate.gen_ops_prim import embedding_op, MaxPoolWithIndices, \
  PromptFlashAttention, MaxPoolWithMask
- from mindspore.ops.auto_generate.gen_ops_prim import conv3d_ext_op, conv3d_padding_op, conv2d_ext_op, \
+ from mindspore.ops.auto_generate.gen_ops_prim import conv2d_ext_op, \
  conv2d_padding_op, conv1d_ext_op, conv1d_padding_op, speed_fusion_attention_op
  from mindspore.common.generator import default_generator
  from mindspore.ops.auto_generate import hardshrink, hardsigmoid, hardswish
@@ -4420,7 +4421,7 @@ def nll_loss_ext(input, target, weight=None, ignore_index=-100, reduction='mean'
  :math:`N` is the batch size, :math:`c` belonging to :math:`[0, C-1]` is class index,
  where :math:`C` is the number of classes.

- If `reduction` is not ``'None'`` (default ``'mean'``), then
+ If `reduction` is not ``'none'`` (default ``'mean'``), then

  .. math::

@@ -4444,7 +4445,7 @@ def nll_loss_ext(input, target, weight=None, ignore_index=-100, reduction='mean'
  weight (Tensor, optional): A rescaling weight applied to the loss of each batch element.
  If not None, the shape is :math:`(C,)`.
  The data type must be float16 or float32 or bfloat16(only supported by Atlas A2 training series products).
- It should have the same data type as `input` . Default: ``'None'`` .
+ It should have the same data type as `input` . Default: ``None`` .
  ignore_index (int, optional): Specifies a target value that is ignored
  and does not contribute to the input gradient. Default: ``-100`` .
  reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
@@ -4481,10 +4482,10 @@ def _nllloss_nd(input, target, weight=None, ignore_index=-100, reduction='mean')
  weight = ones(n_classes, input.dtype)
  if input_dim < 1:
  raise ValueError(f"input dim should be less than 1, but got {input_dim}")
- if input_dim != 1 and input.shape[0] != target.shape[0]:
+ if F.isconstant(input_dim) and F.isconstant(target.ndim) and input_dim != 1 and input.shape[0] != target.shape[0]:
  raise ValueError(f"input bacth_size should be equal to target batch_size, but got {input.shape[0]} and "
  f"{target.shape[0]}")
- if input_dim == 1 or input_dim == 2:
+ if input_dim in [1, 2]:
  return nllloss_impl(input, target, weight, reduction, ignore_index)[0]
  if input_dim == 4:
  return nllloss_2d_op(input, target, weight, reduction, ignore_index)[0]
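The `_nllloss_nd` helper above now guards the batch-size check with `F.isconstant` and dispatches 1-D/2-D inputs to `nllloss_impl` and 4-D inputs to `nllloss_2d_op`. A minimal usage sketch of a related public front end, assuming the long-standing `mindspore.ops.nll_loss` wrapper (log-probabilities plus integer class targets) is the entry point you call:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    # (N, C) log-probabilities with N integer class targets -> the 2-D path.
    logits = Tensor(np.random.randn(3, 5).astype(np.float32))
    log_probs = ops.log_softmax(logits, axis=-1)
    target = Tensor(np.array([1, 0, 4]), ms.int32)
    loss = ops.nll_loss(log_probs, target, reduction='mean')
    print(loss)  # scalar tensor when reduction='mean'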
@@ -5365,7 +5366,7 @@ def max_pool3d(x, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=Fal
  return out


- def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corners=False):
+ def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corners=None):
  """
  Given an `input` and a flow-field `grid`, computes the `output` using `input` values and pixel locations from
  `grid`. Only spatial (4-D) and volumetric (5-D) `input` is supported.
@@ -5411,10 +5412,10 @@ def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corner
  padding_mode (str, optional): An optional string specifying the pad method.
  The optional values are "zeros", "border" or
  "reflection". Default: ``'zeros'`` .
- align_corners (bool, optional): If set to `True`, the extrema (-1 and 1) are considered as referring to
+ align_corners (bool, optional): If set to ``True``, the extrema (-1 and 1) are considered as referring to
  the center points of the input's corner pixels. If set to `False`, they are instead considered as referring
  to the corner points of the input's corner pixels, making the sampling more resolution agnostic. Default:
- ``False`` .
+ ``None``, which is the same as ``False`` .

  Returns:
  Tensor, dtype is the same as `input` and whose shape is :math:`(N, C, H_{out}, W_{out})` (4-D) and
@@ -5451,11 +5452,10 @@ def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corner
  [[14.5 ]
  [14.8 ]]]]
  """
+ align_corners = False if align_corners is None else align_corners
  if input.ndim == 4:
- _grid_sampler_2d = _get_cache_prim(NN_OPS.GridSampler2D)(mode, padding_mode, align_corners)
- return _grid_sampler_2d(input, grid)
- _grid_sampler_3d = _get_cache_prim(NN_OPS.GridSampler3D)(mode, padding_mode, align_corners)
- return _grid_sampler_3d(input, grid)
+ return grid_sampler_2d_impl(input, grid, mode, padding_mode, align_corners)
+ return grid_sampler_3d_impl(input, grid, mode, padding_mode, align_corners)


  @constexpr
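`grid_sample` now defaults `align_corners` to ``None`` (treated as ``False``) and routes 4-D inputs to `grid_sampler_2d_impl` and 5-D inputs to `grid_sampler_3d_impl` instead of building cached `GridSampler2D`/`GridSampler3D` primitives. A hedged usage sketch, assuming this function is the one surfaced as `mindspore.mint.nn.functional.grid_sample` in your build:

    import numpy as np
    from mindspore import Tensor, mint

    # 4-D input -> 2-D sampler path; a 5-D input would take the 3-D path.
    x = Tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
    grid = Tensor(np.random.uniform(-1, 1, size=(1, 2, 2, 2)).astype(np.float32))
    # Leaving align_corners=None behaves exactly like the old False default.
    out = mint.nn.functional.grid_sample(x, grid, mode='bilinear',
                                         padding_mode='zeros', align_corners=None)
    print(out.shape)  # (1, 1, 2, 2)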
@@ -6307,7 +6307,7 @@ def conv1d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
  this 1D convolution layer also can be called 1D depthwise convolution layer. Default: ``1`` .

  - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
- :math:`(C_{out} >= \text{groups})` , :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`。
+ :math:`(C_{out} >= \text{groups})` , :math:`(\text{weight[1]} = C_{in} / \text{groups})`。

  Returns:
  Tensor, the value that applied 1D convolution. The shape is :math:`(N, C_{out}, L_{out})`.
@@ -6372,9 +6372,9 @@ def _get_pad_info(dilation, weight):
  for i in range(2):
  d = dilation[i]
  weight_size = weight.shape[i + 2]
- pad = d * (weight_size - 1)
- pad_l += (int(pad / 2),)
- pad_r += (int(pad - pad_l[i]),)
+ pad_item = d * (weight_size - 1)
+ pad_l += (int(pad_item / 2),)
+ pad_r += (int(pad_item - pad_l[i]),)
  if pad_l[i] != pad_r[i]:
  need_pad_nd = True
  return need_pad_nd, pad_l, pad_r
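The rename from `pad` to `pad_item` does not change the arithmetic: the total "same" padding per spatial dimension is dilation * (kernel - 1), split into left/right halves, and an uneven split flags the need for an explicit pad pass. A framework-free sketch of that calculation (kernel sizes passed in directly instead of being read from `weight.shape`):

    def same_pad_split(dilation, kernel_sizes):
        """Mirror of the pad split done in _get_pad_info for the two spatial dims."""
        pad_l, pad_r, need_pad_nd = (), (), False
        for i in range(2):
            pad_item = dilation[i] * (kernel_sizes[i] - 1)  # total padding needed
            pad_l += (pad_item // 2,)
            pad_r += (pad_item - pad_l[i],)
            if pad_l[i] != pad_r[i]:
                need_pad_nd = True  # uneven split -> pad explicitly before the conv
        return need_pad_nd, pad_l, pad_r

    # A 3x4 kernel with dilation (1, 1): height splits evenly, width does not.
    print(same_pad_split((1, 1), (3, 4)))  # (True, (1, 1), (1, 2))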
@@ -6477,7 +6477,7 @@ def conv2d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
  groups (int, optional): Splits `input` into groups. Default: ``1`` .

  - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
- :math:`(C_{out} >= \text{groups})` , :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`
+ :math:`(C_{out} >= \text{groups})` , :math:`(\text{weight[1]} = C_{in} / \text{groups})`

  Returns:
  Tensor, the value that applied 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`.
@@ -6981,10 +6981,6 @@ def batch_norm_ext(input, running_mean, running_var, weight=None, bias=None, tra
  [[ 2.1621194 1.2360122]
  [14.810596 10.180061 ]]
  """
- if weight is None:
- weight = ops.ones([input.shape[1]], dtype=input.dtype)
- if bias is None:
- bias = ops.zeros([input.shape[1]], dtype=input.dtype)
  output = batch_norm_ext_op(input, weight, bias, running_mean, running_var, training, momentum, eps)
  return output[0]

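With the Python-side defaults removed, a ``None`` `weight`/`bias` now flows straight into `batch_norm_ext_op` rather than being materialised as ones/zeros beforehand. A hedged usage sketch, assuming this function backs `mindspore.mint.nn.functional.batch_norm` (verify against your installed build):

    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.random.randn(2, 3, 4, 4).astype(np.float32))
    running_mean = Tensor(np.zeros(3, dtype=np.float32))
    running_var = Tensor(np.ones(3, dtype=np.float32))

    # weight/bias omitted: no ones/zeros tensors are created on the Python side anymore.
    out = mint.nn.functional.batch_norm(x, running_mean, running_var,
                                        weight=None, bias=None,
                                        training=False, momentum=0.1, eps=1e-5)
    print(out.shape)  # (2, 3, 4, 4)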
@@ -7260,39 +7256,8 @@ def conv3d(input, weight, bias=None, stride=1, pad_mode="valid", padding=0, dila

  Returns:
  Tensor, the value that applied 3D convolution. The shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
-
- `pad_mode` is ``"same"``:
-
- .. math::
- \begin{array}{ll} \\
- D_{out} = \left \lceil{\frac{D_{in}}{\text{stride[0]}}} \right \rceil \\
- H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[1]}}} \right \rceil \\
- W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[2]}}} \right \rceil \\
- \end{array}
-
- `pad_mode` is ``"valid"``:
-
- .. math::
- \begin{array}{ll} \\
- D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
- {\text{stride[0]}} + 1} \right \rfloor \\
- H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
- {\text{stride[1]}} + 1} \right \rfloor \\
- W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
- {\text{stride[2]}} + 1} \right \rfloor \\
- \end{array}
-
- `pad_mode` is ``"pad"``:
-
- .. math::
- \begin{array}{ll} \\
- D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - (\text{dilation[0]} - 1) \times
- \text{kernel_size[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
- H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - (\text{dilation[1]} - 1) \times
- \text{kernel_size[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
- W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - (\text{dilation[2]} - 1) \times
- \text{kernel_size[2]} - 1 }{\text{stride[2]}} + 1} \right \rfloor \\
- \end{array}
+ To see how different pad modes affect the output shape, please refer to
+ :class:`mindspore.nn.Conv3d` for more details.

  Raises:
  TypeError: If `out_channel` or `groups` is not an int.
@@ -7413,8 +7378,8 @@ def conv3d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
  - :math:`H_{out} = (H_{in} + PadUp + PadDown - ((kh - 1) * DilationH + 1)) / StrideH + 1` .
  - :math:`W_{out} = (W_{in} + PadLeft + PadRight - ((kw - 1) * DilationW + 1)) / StrideW + 1` .
  - :math:`D_{out} = (D_{in} + PadFront + PadBack - ((kd - 1) * DilationD + 1)) / StrideD + 1` .
- - :math:`(D_{in}+PadFront+PadBack - ((kd-1)*DilationD+1)) /% StrideD <= PadBack` .
- - :math:`(H_{in}+PadUp+PadDown - ((kh-1)*Dilationh+1)) /% StrideH <= PadDown` .
+ - :math:`(D_{in}+PadFront+PadBack - ((kd-1)*DilationD+1)) \% StrideD <= PadBack` .
+ - :math:`(H_{in}+PadUp+PadDown - ((kh-1)*Dilationh+1)) \% StrideH <= PadDown` .
  - :math:`stride_d <= kernel_d` .
  - :math:`PadUp < kh` and :math:`PadDown < kh` . When `padding` = ``'valid'``, both PadUp and PadDown are zeros.
  When `padding` = ``'same'``, pad can be calculated by
@@ -7481,12 +7446,7 @@ def conv3d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
  (12, 26, 59, 47, 5)
  """

- if isinstance(padding, (tuple, list, int)):
- return conv3d_ext_op(input, weight, bias, stride, padding, dilation, groups)
- if isinstance(padding, str):
- return conv3d_padding_op(input, weight, bias, stride, padding, dilation, groups)
- raise TypeError(f"For conv3d, the parameter 'padding' must be a tuple/list " \
- f"or a string, but got {type(padding)}")
+ return conv3d_op(input, weight, bias, stride, padding, dilation, groups)


  @_primexpr
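`conv3d_ext` no longer branches on the type of `padding`; the overload-resolved `conv3d` from `mindspore.ops.functional_overload` accepts both numeric and string padding. A hedged example, assuming the user-facing entry point is `mindspore.mint.nn.functional.conv3d` (an assumption, not confirmed by this diff):

    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.random.randn(1, 4, 8, 16, 16).astype(np.float32))
    w = Tensor(np.random.randn(8, 4, 3, 3, 3).astype(np.float32))

    out_num = mint.nn.functional.conv3d(x, w, stride=1, padding=1)        # numeric padding
    out_same = mint.nn.functional.conv3d(x, w, stride=1, padding='same')  # string padding
    print(out_num.shape, out_same.shape)  # both (1, 8, 8, 16, 16) for this setup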
@@ -7554,7 +7514,7 @@ def pixel_shuffle(input, upscale_factor):
  c, h, w = idx[-3:]
  _check_pxiel_shuffle_valid(c, upscale_factor)
  c = c // upscale_factor ** 2
- input_perm = (pre + (c, upscale_factor, upscale_factor, h, w))
+ input_perm = pre + (c, upscale_factor, upscale_factor, h, w)
  input = reshape_(input, input_perm)
  input_perm = [i for i in range(length - 2)]
  input_perm = input_perm + [length, length - 2, length + 1, length - 1]
@@ -7618,7 +7578,7 @@ def pixel_unshuffle(input, downscale_factor):
  _check_pxiel_unshuffle_valid(h, w, downscale_factor)
  h = h // downscale_factor
  w = w // downscale_factor
- input_perm = (pre + (c, h, downscale_factor, w, downscale_factor))
+ input_perm = pre + (c, h, downscale_factor, w, downscale_factor)
  input = reshape_(input, input_perm)
  input_perm = [i for i in range(length - 2)]
  input_perm = input_perm + [length - 1, length + 1, length - 2, length]
@@ -8943,13 +8903,7 @@ def max_pool2d_ext(input, kernel_size, stride=None, padding=0, dilation=1, ceil_
  >>> print(argmax.shape)
  (20, 16, 24, 31)
  """
- strides = stride if (stride is not None) else kernel_size
- if return_indices:
- max_pool_func_ = _get_cache_prim(MaxPoolWithIndices)(kernel_size, strides, padding, dilation, ceil_mode)
- out, indices = max_pool_func_(input)
- else:
- max_pool_func_ = _get_cache_prim(MaxPoolWithMask)(kernel_size, strides, padding, dilation, ceil_mode)
- out, indices = max_pool_func_(input)
+ out, indices = func_max_pool2d_op(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)
  if return_indices:
  return out, indices
  return out
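`max_pool2d_ext` now delegates both variants to a single `func_max_pool2d_op` call; `return_indices` only decides whether the argmax tensor is handed back to the caller. A hedged usage sketch, assuming the wrapper is exposed as `mindspore.mint.nn.functional.max_pool2d`:

    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.random.randn(2, 3, 16, 16).astype(np.float32))

    # With return_indices=True the call yields (values, argmax); otherwise values only.
    out, idx = mint.nn.functional.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
    pooled = mint.nn.functional.max_pool2d(x, kernel_size=2, stride=2)
    print(out.shape, idx.shape, pooled.shape)  # (2, 3, 8, 8) for each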
@@ -15,6 +15,7 @@
  """Defines other operators with functional form."""
  from mindspore.ops import operations as P
  from mindspore.ops.auto_generate import rotary_position_embedding
+ from mindspore.ops.auto_generate import moe_distribute_dispatch, moe_distribute_combine
  from mindspore.ops.auto_generate.gen_ops_prim import moe_init_routing_v2_op

  partial_ = P.Partial()
@@ -268,6 +269,8 @@ __all__ = [
  'partial',
  'rotary_position_embedding',
  'move_to',
- 'moe_init_routing_v2'
+ 'moe_init_routing_v2',
+ 'moe_distribute_dispatch',
+ 'moe_distribute_combine'
  ]
  __all__.sort()
@@ -32,7 +32,7 @@ from mindspore.common.generator import default_generator
  from mindspore.ops.auto_generate import UniformExt, NormalTensorTensor, \
  NormalTensorFloat, NormalFloatTensor, NormalFloatFloat, RandExt, RandLikeExt, MultinomialExt, \
  Randn, RandnLike, RandInt, RandIntLike, RandpermExt, InplaceRandom, InplaceNormal
- from mindspore.ops.auto_generate.gen_ops_prim import inplace_uniform_op
+ from mindspore.ops.auto_generate.gen_ops_prim import inplace_uniform_op, inplace_exponential_op

  inplace_normal_ = InplaceNormal()
  normal_tensor_tensor_op = NormalTensorTensor()
@@ -296,7 +296,7 @@ def uniform_(input, from_=0, to=1, *, generator=None):
  Returns:
  Tensor, with the same shape and dtype as `input` tensor.

- Raises:
+ Raises:
  TypeError: If `input` is not a Tensor.
  TypeError: If dtype of `input` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, float64,
  bfloat16.
@@ -384,6 +384,18 @@ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
  return value


+
+ @_function_forbid_reuse
+ def exponential_(input, lambd=1, *, generator=None):
+ r"""
+ exponential
+ """
+ if generator is None:
+ generator = default_generator
+ seed, offset = generator._step(generator_step_) # pylint: disable=protected-access
+ return inplace_exponential_op(input, lambd, seed, offset)
+
+
  @_function_forbid_reuse
  def standard_normal(shape, seed=None):
  r"""
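The new in-place `exponential_` mirrors the other in-place samplers: it pulls a (seed, offset) pair from the default generator and hands everything to `inplace_exponential_op`, with `lambd` as the rate parameter. Because a later hunk in this diff registers it in `tensor_operator_registry`, it should also surface as a Tensor method; a hedged sketch:

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.zeros((2, 3), dtype=np.float32))
    x.exponential_(lambd=2.0)   # in-place draw from Exp(rate=2.0)
    print(x.shape, x.dtype)     # (2, 3) Float32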
@@ -753,9 +765,9 @@ def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
  Generates random numbers according to the standard Normal (or Gaussian) random number distribution.

  Args:
- mean (Union[float, Tensor]): Mean value of each element, the shape of the `mean` tensor
+ mean (Union[Tensor]): Mean value of each element, the shape of the `mean` tensor
  should be the same as that of the `std` tensor.
- std (Union[float, Tensor]): Standard deviation for each element, the shape of the `std` tensor
+ std (Union[Tensor]): Standard deviation for each element, the shape of the `std` tensor
  should be the same as that of the `mean` tensor. The value of `std` should be greater than or equal to 0.

  Keyword Args:
@@ -781,6 +793,33 @@ def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
  >>> print(output.shape)
  (3,)

+ .. function:: normal(mean, std) -> Tensor
+ :noindex:
+
+ Similar to the function above, but the means are shared among all drawn elements.
+
+ Args:
+ mean (float): Mean value of each element.
+ std (Tensor): Standard deviation for each element. The value of `std` should be greater
+ than or equal to 0.
+
+ Returns:
+ Outputs a tensor with the same shape as `std`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import ops
+ >>> from mindspore import Tensor
+ >>> mean = 1.
+ >>> std = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
+ >>> output = ops.function.random_func.normal_ext(mean, std)
+ >>> print(output.shape)
+ (3,)
+
  .. function:: normal(mean, std=1.0) -> Tensor
  :noindex:

@@ -995,7 +1034,7 @@ def gamma(shape, alpha, beta, seed=None):
  (3, 2, 2)
  >>> # case 2: alpha_shape is (2, 3), so shape is (3, 1, 3)
  >>> shape = (3, 1, 3)
- >>> alpha = mindspore.tensor([[1, 3, 4], [2, 5, 6]]), mindspore.float32)
+ >>> alpha = mindspore.tensor([[1, 3, 4], [2, 5, 6]], mindspore.float32)
  >>> beta = mindspore.tensor([1.0], mindspore.float32)
  >>> output = mindspore.ops.gamma(shape, alpha, beta, seed=5)
  >>> result = output.shape
@@ -1,4 +1,4 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
+ # Copyright 2022-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -102,6 +102,7 @@ def vmap(fn, in_axes=0, out_axes=0):
  [[-2 1 4]
  [ 8 9 10]]
  """
+
  return vmap_instance(fn, in_axes, out_axes)


@@ -21,7 +21,7 @@ from mindspore.ops import _constants
  from mindspore.ops.function import *
  from mindspore.ops.function.array_func import chunk_ext, zero_
  from mindspore.ops.function.math_func import all, argmax_ext, float_power_ext, erfinv_, tanh_, bernoulli_ext
- from mindspore.ops.function.random_func import random_, uniform_ext, uniform_, normal_
+ from mindspore.ops.function.random_func import random_, uniform_ext, uniform_, normal_, exponential_
  from mindspore.ops import operations as P
  from mindspore.ops.operations import array_ops
  from mindspore.ops.operations._sequence_ops import TensorToTuple
@@ -33,7 +33,6 @@ from mindspore.ops.operations.nn_ops import AdaptiveMaxPool2D
  from mindspore.ops.operations.math_ops import Roll
  from mindspore.ops.composite.math_ops import mm
  from mindspore.ops.function.math_func import dot
- from mindspore.ops.function.array_func import new_empty
  from mindspore.ops import auto_generate
  from mindspore.ops.auto_generate import cast
  from mindspore.ops._utils.arg_dtype_cast import DtypeToEnum
@@ -343,6 +342,7 @@ setattr(tensor_operator_registry, 'tensor_slice', tensor_slice)
  setattr(tensor_operator_registry, 'select', select)
  setattr(tensor_operator_registry, 'uniform', uniform_ext)
  setattr(tensor_operator_registry, 'uniform_', uniform_)
+ setattr(tensor_operator_registry, 'exponential_', exponential_)
  setattr(tensor_operator_registry, 'gather', gather)
  setattr(tensor_operator_registry, 'gather_d', gather_d)
  setattr(tensor_operator_registry, 'gather_elements', gather_elements)
@@ -382,7 +382,6 @@ setattr(tensor_operator_registry, 'nanmedian', nanmedian)
  setattr(tensor_operator_registry, 'csr_to_coo', csr_to_coo)
  setattr(tensor_operator_registry, 'zeros', zeros)
  setattr(tensor_operator_registry, 'ones', ones)
- setattr(tensor_operator_registry, 'new_empty', new_empty)
  setattr(tensor_operator_registry, 'unsorted_segment_min', unsorted_segment_min)
  setattr(tensor_operator_registry, 'unsorted_segment_max', unsorted_segment_max)
  setattr(tensor_operator_registry, 'unsorted_segment_prod', unsorted_segment_prod)