mindspore-2.6.0rc1-cp39-cp39-win_amd64.whl → mindspore-2.7.0rc1-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (384)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +40 -9
  7. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  8. mindspore/_extends/optimize/cell_utils.py +96 -0
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +44 -22
  11. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  12. mindspore/_extends/parse/parser.py +37 -62
  13. mindspore/_extends/parse/resources.py +39 -0
  14. mindspore/_extends/parse/standard_method.py +43 -13
  15. mindspore/_extends/parse/trope.py +8 -1
  16. mindspore/_extends/pijit/__init__.py +1 -2
  17. mindspore/amp.py +4 -4
  18. mindspore/avcodec-59.dll +0 -0
  19. mindspore/avdevice-59.dll +0 -0
  20. mindspore/avfilter-8.dll +0 -0
  21. mindspore/avformat-59.dll +0 -0
  22. mindspore/avutil-57.dll +0 -0
  23. mindspore/boost/adasum.py +1 -1
  24. mindspore/boost/boost_cell_wrapper.py +4 -4
  25. mindspore/common/__init__.py +27 -2
  26. mindspore/common/_grad_function.py +2 -1
  27. mindspore/common/_pijit_context.py +28 -7
  28. mindspore/common/_stub_tensor.py +1 -209
  29. mindspore/common/_tensor_cpp_method.py +1 -1
  30. mindspore/common/_tensor_docs.py +77 -16
  31. mindspore/common/api.py +238 -113
  32. mindspore/common/dtype.py +21 -11
  33. mindspore/common/dump.py +10 -15
  34. mindspore/common/generator.py +5 -3
  35. mindspore/common/hook_handle.py +11 -2
  36. mindspore/common/jit_config.py +1 -1
  37. mindspore/common/jit_trace.py +84 -105
  38. mindspore/common/parameter.py +26 -12
  39. mindspore/common/recompute.py +3 -3
  40. mindspore/common/sparse_tensor.py +0 -3
  41. mindspore/common/symbol.py +0 -1
  42. mindspore/common/tensor.py +81 -81
  43. mindspore/communication/_comm_helper.py +46 -4
  44. mindspore/communication/management.py +79 -7
  45. mindspore/context.py +58 -40
  46. mindspore/dataset/core/config.py +3 -3
  47. mindspore/dataset/engine/datasets.py +20 -7
  48. mindspore/dataset/engine/datasets_user_defined.py +33 -3
  49. mindspore/dataset/engine/iterators.py +2 -2
  50. mindspore/dataset/engine/obs/config_loader.py +2 -2
  51. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  52. mindspore/dataset/transforms/py_transforms.py +7 -3
  53. mindspore/dataset/transforms/transforms.py +7 -3
  54. mindspore/dataset/vision/validators.py +1 -0
  55. mindspore/device_context/ascend/device.py +1 -1
  56. mindspore/device_context/gpu/__init__.py +2 -2
  57. mindspore/device_context/gpu/device.py +1 -1
  58. mindspore/device_context/gpu/op_precision.py +4 -2
  59. mindspore/device_context/gpu/op_tuning.py +6 -3
  60. mindspore/device_manager.py +16 -9
  61. mindspore/dnnl.dll +0 -0
  62. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
  63. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  64. mindspore/experimental/optim/adadelta.py +13 -20
  65. mindspore/experimental/optim/adagrad.py +15 -22
  66. mindspore/experimental/optim/adam.py +17 -24
  67. mindspore/experimental/optim/adamax.py +14 -22
  68. mindspore/experimental/optim/adamw.py +28 -34
  69. mindspore/experimental/optim/asgd.py +15 -25
  70. mindspore/experimental/optim/lr_scheduler.py +27 -45
  71. mindspore/experimental/optim/nadam.py +14 -24
  72. mindspore/experimental/optim/optimizer.py +13 -23
  73. mindspore/experimental/optim/radam.py +18 -24
  74. mindspore/experimental/optim/rmsprop.py +14 -25
  75. mindspore/experimental/optim/rprop.py +15 -26
  76. mindspore/experimental/optim/sgd.py +9 -19
  77. mindspore/hal/__init__.py +4 -4
  78. mindspore/hal/contiguous_tensors_handle.py +2 -2
  79. mindspore/hal/memory.py +27 -7
  80. mindspore/include/api/cell.h +37 -1
  81. mindspore/include/api/delegate.h +10 -0
  82. mindspore/include/api/model.h +3 -0
  83. mindspore/include/api/types.h +2 -2
  84. mindspore/include/c_api/model_c.h +0 -58
  85. mindspore/include/c_api/tensor_c.h +0 -26
  86. mindspore/include/dataset/vision_ascend.h +1 -1
  87. mindspore/jpeg62.dll +0 -0
  88. mindspore/mindrecord/tools/cifar10.py +60 -11
  89. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  90. mindspore/mindspore_backend_common.dll +0 -0
  91. mindspore/mindspore_backend_manager.dll +0 -0
  92. mindspore/mindspore_common.dll +0 -0
  93. mindspore/mindspore_core.dll +0 -0
  94. mindspore/mindspore_cpu_res_manager.dll +0 -0
  95. mindspore/mindspore_dump.dll +0 -0
  96. mindspore/mindspore_frontend.dll +0 -0
  97. mindspore/mindspore_glog.dll +0 -0
  98. mindspore/mindspore_memory_pool.dll +0 -0
  99. mindspore/mindspore_ms_backend.dll +0 -0
  100. mindspore/mindspore_ops.dll +0 -0
  101. mindspore/mindspore_ops_host.dll +0 -0
  102. mindspore/mindspore_ops_kernel_common.dll +0 -0
  103. mindspore/mindspore_profiler.dll +0 -0
  104. mindspore/mindspore_pyboost.dll +0 -0
  105. mindspore/mindspore_pynative.dll +0 -0
  106. mindspore/mindspore_res_manager.dll +0 -0
  107. mindspore/mindspore_runtime_pipeline.dll +0 -0
  108. mindspore/mint/__init__.py +6 -46
  109. mindspore/mint/distributed/__init__.py +1 -0
  110. mindspore/mint/distributed/distributed.py +212 -9
  111. mindspore/mint/nn/__init__.py +1 -1
  112. mindspore/mint/nn/functional.py +53 -6
  113. mindspore/mint/nn/layer/_functions.py +164 -294
  114. mindspore/mint/nn/layer/activation.py +8 -6
  115. mindspore/mint/nn/layer/conv.py +137 -101
  116. mindspore/mint/nn/layer/normalization.py +8 -22
  117. mindspore/mint/optim/adam.py +19 -18
  118. mindspore/mint/optim/adamw.py +14 -8
  119. mindspore/mint/optim/sgd.py +5 -5
  120. mindspore/nn/cell.py +328 -502
  121. mindspore/nn/grad/cell_grad.py +11 -12
  122. mindspore/nn/layer/activation.py +32 -34
  123. mindspore/nn/layer/basic.py +67 -64
  124. mindspore/nn/layer/channel_shuffle.py +4 -4
  125. mindspore/nn/layer/combined.py +4 -2
  126. mindspore/nn/layer/conv.py +117 -110
  127. mindspore/nn/layer/dense.py +9 -7
  128. mindspore/nn/layer/embedding.py +50 -52
  129. mindspore/nn/layer/image.py +37 -39
  130. mindspore/nn/layer/math.py +111 -112
  131. mindspore/nn/layer/normalization.py +56 -44
  132. mindspore/nn/layer/pooling.py +58 -63
  133. mindspore/nn/layer/rnn_cells.py +33 -33
  134. mindspore/nn/layer/rnns.py +56 -56
  135. mindspore/nn/layer/thor_layer.py +74 -73
  136. mindspore/nn/layer/transformer.py +11 -1
  137. mindspore/nn/learning_rate_schedule.py +20 -20
  138. mindspore/nn/loss/loss.py +79 -81
  139. mindspore/nn/optim/adam.py +3 -3
  140. mindspore/nn/optim/adasum.py +2 -2
  141. mindspore/nn/optim/asgd.py +2 -0
  142. mindspore/nn/optim/optimizer.py +1 -1
  143. mindspore/nn/optim/thor.py +2 -2
  144. mindspore/nn/probability/distribution/exponential.py +2 -1
  145. mindspore/nn/probability/distribution/poisson.py +2 -1
  146. mindspore/nn/sparse/sparse.py +3 -3
  147. mindspore/nn/wrap/cell_wrapper.py +34 -37
  148. mindspore/nn/wrap/grad_reducer.py +37 -37
  149. mindspore/nn/wrap/loss_scale.py +72 -74
  150. mindspore/numpy/array_creations.py +5 -5
  151. mindspore/numpy/fft.py +1 -1
  152. mindspore/numpy/math_ops.py +5 -5
  153. mindspore/opencv_core452.dll +0 -0
  154. mindspore/opencv_imgcodecs452.dll +0 -0
  155. mindspore/opencv_imgproc452.dll +0 -0
  156. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  157. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  158. mindspore/ops/_vmap/vmap_array_ops.py +31 -13
  159. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  160. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
  161. mindspore/ops/auto_generate/gen_extend_func.py +23 -141
  162. mindspore/ops/auto_generate/gen_ops_def.py +727 -321
  163. mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
  164. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  165. mindspore/ops/composite/__init__.py +10 -0
  166. mindspore/ops/composite/base.py +8 -4
  167. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  168. mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
  169. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  170. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  171. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  172. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  173. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  174. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  175. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  176. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  177. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  178. mindspore/ops/function/__init__.py +3 -1
  179. mindspore/ops/function/_add_attr_func.py +11 -6
  180. mindspore/ops/function/array_func.py +9 -96
  181. mindspore/ops/function/debug_func.py +4 -3
  182. mindspore/ops/function/grad/grad_func.py +1 -1
  183. mindspore/ops/function/math_func.py +33 -540
  184. mindspore/ops/function/nn_func.py +28 -74
  185. mindspore/ops/function/other_func.py +4 -1
  186. mindspore/ops/function/random_func.py +44 -5
  187. mindspore/ops/function/vmap_func.py +2 -1
  188. mindspore/ops/functional.py +2 -3
  189. mindspore/ops/functional_overload.py +571 -6
  190. mindspore/ops/op_info_register.py +21 -0
  191. mindspore/ops/operations/__init__.py +16 -11
  192. mindspore/ops/operations/_custom_ops_utils.py +689 -34
  193. mindspore/ops/operations/_inner_ops.py +3 -6
  194. mindspore/ops/operations/_sequence_ops.py +1 -1
  195. mindspore/ops/operations/array_ops.py +2 -2
  196. mindspore/ops/operations/comm_ops.py +185 -26
  197. mindspore/ops/operations/custom_ops.py +294 -174
  198. mindspore/ops/operations/debug_ops.py +59 -4
  199. mindspore/ops/operations/image_ops.py +13 -13
  200. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  201. mindspore/ops/operations/math_ops.py +3 -4
  202. mindspore/ops/operations/nn_ops.py +7 -39
  203. mindspore/ops/primitive.py +6 -10
  204. mindspore/ops/tensor_method.py +47 -8
  205. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  206. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  207. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  208. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  209. mindspore/ops_generate/common/base_generator.py +14 -0
  210. mindspore/ops_generate/common/gen_constants.py +8 -3
  211. mindspore/ops_generate/common/gen_utils.py +0 -19
  212. mindspore/ops_generate/common/op_proto.py +11 -4
  213. mindspore/ops_generate/common/template.py +88 -11
  214. mindspore/ops_generate/gen_ops.py +1 -1
  215. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  216. mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
  217. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  218. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  219. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  220. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  221. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  222. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  223. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  224. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  225. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  226. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  227. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  228. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  229. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  230. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  231. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  232. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  233. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  234. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  235. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  236. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  237. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  238. mindspore/parallel/_auto_parallel_context.py +11 -8
  239. mindspore/parallel/_cell_wrapper.py +113 -45
  240. mindspore/parallel/_parallel_serialization.py +1 -1
  241. mindspore/parallel/_ps_context.py +4 -6
  242. mindspore/parallel/_tensor.py +167 -12
  243. mindspore/parallel/_transformer/moe.py +1 -1
  244. mindspore/parallel/_transformer/transformer.py +13 -8
  245. mindspore/parallel/auto_parallel.py +14 -7
  246. mindspore/parallel/checkpoint_convert.py +3 -3
  247. mindspore/parallel/checkpoint_transform.py +11 -7
  248. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  249. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  250. mindspore/parallel/cluster/run.py +43 -4
  251. mindspore/parallel/function/__init__.py +8 -1
  252. mindspore/parallel/function/reshard_func.py +6 -7
  253. mindspore/parallel/nn/__init__.py +15 -2
  254. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  255. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  256. mindspore/parallel/shard.py +3 -4
  257. mindspore/parallel/transform_safetensors.py +463 -174
  258. mindspore/profiler/__init__.py +2 -1
  259. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  260. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  261. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
  262. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  263. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  264. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  265. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  266. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  267. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  268. mindspore/profiler/analysis/task_manager.py +1 -1
  269. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  270. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  271. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  272. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  273. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  274. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  275. mindspore/profiler/common/constant.py +16 -0
  276. mindspore/profiler/common/profiler_context.py +25 -27
  277. mindspore/profiler/common/profiler_info.py +0 -16
  278. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  279. mindspore/profiler/common/profiler_output_path.py +23 -8
  280. mindspore/profiler/common/profiler_parameters.py +128 -35
  281. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  282. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  283. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  284. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  285. mindspore/profiler/dynamic_profiler.py +305 -314
  286. mindspore/profiler/envprofiler.py +12 -7
  287. mindspore/profiler/experimental_config.py +96 -6
  288. mindspore/profiler/mstx.py +33 -12
  289. mindspore/profiler/platform/__init__.py +2 -3
  290. mindspore/profiler/platform/npu_profiler.py +29 -19
  291. mindspore/profiler/profiler.py +35 -19
  292. mindspore/profiler/profiler_action_controller.py +64 -76
  293. mindspore/profiler/schedule.py +10 -4
  294. mindspore/rewrite/common/config.py +1 -0
  295. mindspore/rewrite/common/namer.py +1 -0
  296. mindspore/rewrite/common/namespace.py +1 -0
  297. mindspore/rewrite/node/node.py +31 -11
  298. mindspore/rewrite/parsers/assign_parser.py +1 -1
  299. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  300. mindspore/run_check/_check_version.py +7 -10
  301. mindspore/runtime/__init__.py +5 -5
  302. mindspore/runtime/event.py +10 -4
  303. mindspore/runtime/executor.py +60 -45
  304. mindspore/runtime/memory.py +30 -32
  305. mindspore/runtime/thread_bind_core.py +298 -164
  306. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  307. mindspore/swresample-4.dll +0 -0
  308. mindspore/swscale-6.dll +0 -0
  309. mindspore/tinyxml2.dll +0 -0
  310. mindspore/train/_utils.py +14 -4
  311. mindspore/train/amp.py +43 -20
  312. mindspore/train/callback/__init__.py +5 -5
  313. mindspore/train/callback/_checkpoint.py +3 -6
  314. mindspore/train/callback/_flops_collector.py +1 -1
  315. mindspore/train/callback/_landscape.py +0 -1
  316. mindspore/train/callback/_train_fault_tolerance.py +97 -16
  317. mindspore/train/data_sink.py +11 -2
  318. mindspore/train/dataset_helper.py +9 -0
  319. mindspore/train/model.py +135 -55
  320. mindspore/train/serialization.py +133 -111
  321. mindspore/train/summary/summary_record.py +13 -2
  322. mindspore/turbojpeg.dll +0 -0
  323. mindspore/utils/__init__.py +3 -2
  324. mindspore/utils/dryrun.py +0 -6
  325. mindspore/utils/runtime_execution_order_check.py +163 -77
  326. mindspore/utils/sdc_detect.py +68 -0
  327. mindspore/utils/utils.py +6 -9
  328. mindspore/version.py +1 -1
  329. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  330. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +333 -371
  331. mindspore/_deprecated/jit.py +0 -198
  332. mindspore/experimental/es/__init__.py +0 -22
  333. mindspore/experimental/es/embedding_service.py +0 -891
  334. mindspore/experimental/es/embedding_service_layer.py +0 -581
  335. mindspore/profiler/parser/__init__.py +0 -14
  336. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  337. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  338. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  339. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  340. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  341. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  342. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  343. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  344. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  345. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  346. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  347. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  348. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  349. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  350. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  351. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  352. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  353. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  354. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  355. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  356. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  357. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  358. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  359. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  360. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  361. mindspore/profiler/parser/container.py +0 -229
  362. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  363. mindspore/profiler/parser/flops_parser.py +0 -531
  364. mindspore/profiler/parser/framework_enum.py +0 -111
  365. mindspore/profiler/parser/framework_parser.py +0 -464
  366. mindspore/profiler/parser/framework_struct.py +0 -61
  367. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  368. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  369. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  370. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  371. mindspore/profiler/parser/hccl_parser.py +0 -573
  372. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  373. mindspore/profiler/parser/integrator.py +0 -526
  374. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  375. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  376. mindspore/profiler/parser/minddata_parser.py +0 -186
  377. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  378. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  379. mindspore/profiler/parser/optime_parser.py +0 -250
  380. mindspore/profiler/parser/profiler_info.py +0 -213
  381. mindspore/profiler/parser/step_trace_parser.py +0 -666
  382. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  383. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  384. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -19,7 +19,7 @@ import math
  import numpy as np
 
  from mindspore import context
- from mindspore.ops import operations as P
+ from mindspore import ops
  import mindspore.common.dtype as mstype
  from mindspore.common.parameter import Parameter
  from mindspore.common.initializer import initializer, HeUniform, Uniform, _calculate_fan_in_and_fan_out
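
Most of the churn in mindspore/nn/layer/conv.py (and the sibling layer files below) is this one mechanical rename: primitives are now reached through the public mindspore.ops namespace instead of the P alias for mindspore.ops.operations. Both spellings resolve to the same primitives today; a minimal sketch of the two styles (toy shapes, not taken from the package):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops
    from mindspore.ops import operations as P  # the old alias being retired

    x = Tensor(np.ones([1, 3, 8, 8]), mindspore.float32)
    w = Tensor(np.ones([4, 3, 3, 3]), mindspore.float32)

    conv_old = P.Conv2D(out_channel=4, kernel_size=3)    # 2.6 style
    conv_new = ops.Conv2D(out_channel=4, kernel_size=3)  # 2.7 style
    assert conv_old(x, w).shape == conv_new(x, w).shape  # (1, 4, 6, 6)
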
@@ -272,20 +272,20 @@ class Conv2d(_Conv):
 
  .. math::
  \begin{array}{ll} \\
- H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
- {\text{stride[0]}}} \right \rceil \\
- W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
- {\text{stride[1]}}} \right \rceil \\
+ H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}
+ {\text{stride[0]}}} \right \rfloor + 1 \\
+ W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}
+ {\text{stride[1]}}} \right \rfloor + 1 \\
  \end{array}
 
  pad_mode is ``'pad'``:
 
  .. math::
  \begin{array}{ll} \\
- H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - (\text{kernel_size[0]} - 1) \times
- \text{dilation[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
- W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - (\text{kernel_size[1]} - 1) \times
- \text{dilation[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
+ H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
+ (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
+ W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
+ (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
  \end{array}
 
  Raises:
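
Note that the rewritten 'valid' and 'pad' expressions are algebraically identical to the old ones: for positive integers, ceil(a/s) = floor((a-1)/s) + 1, and moving the "+ 1" outside the floor changes nothing since floor(x+1) = floor(x) + 1. The docstrings are being normalized to the floor-plus-one convention, not changing the computed shape. A quick brute-force check of the 'valid' case, in plain Python:

    import math

    # old: ceil((H - d*(k-1)) / s)   vs   new: floor((H - d*(k-1) - 1) / s) + 1
    for h in range(1, 100):
        for k in range(1, 8):
            for s in range(1, 5):
                for d in range(1, 4):
                    a = h - d * (k - 1)
                    if a > 0:
                        assert math.ceil(a / s) == (a - 1) // s + 1
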
@@ -351,16 +351,16 @@ class Conv2d(_Conv):
  bias_init,
  data_format,
  dtype=dtype)
- self.conv2d = P.Conv2D(out_channel=self.out_channels,
- kernel_size=self.kernel_size,
- mode=1,
- pad_mode=self.pad_mode,
- pad=self.padding,
- stride=self.stride,
- dilation=self.dilation,
- group=self.group,
- data_format=self.data_format)
- self.bias_add = P.BiasAdd(data_format=self.data_format)
+ self.conv2d = ops.Conv2D(out_channel=self.out_channels,
+ kernel_size=self.kernel_size,
+ mode=1,
+ pad_mode=self.pad_mode,
+ pad=self.padding,
+ stride=self.stride,
+ dilation=self.dilation,
+ group=self.group,
+ data_format=self.data_format)
+ self.bias_add = ops.BiasAdd(data_format=self.data_format)
 
  def construct(self, x):
  output = self.conv2d(x, self.weight)
@@ -476,19 +476,25 @@ class Conv1d(_Conv):
  pad_mode is ``'same'``:
 
  .. math::
- L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil
+ \begin{array}{ll} \\
+ L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil \\
+ \end{array}
 
  pad_mode is ``'valid'``:
 
  .. math::
- L_{out} = \left \lceil{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
- {\text{stride}}} \right \rceil
+ \begin{array}{ll} \\
+ L_{out} = \left \lfloor{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) - 1}
+ {\text{stride}}} \right \rfloor + 1 \\
+ \end{array}
 
  pad_mode is ``'pad'``:
 
  .. math::
- L_{out} = \left \lfloor{\frac{L_{in} + 2 \times padding - (\text{kernel_size} - 1) \times
- \text{dilation} - 1 }{\text{stride}} + 1} \right \rfloor
+ \begin{array}{ll} \\
+ L_{out} = \left \lfloor{\frac{L_{in} + 2 \times {padding} - \text{dilation} \times
+ (\text{kernel_size} - 1) - 1}{\text{stride}}} \right \rfloor + 1 \\
+ \end{array}
 
  Raises:
  TypeError: If `in_channels`, `out_channels`, `kernel_size`, `stride`, `padding` or `dilation` is not an int.
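
To make the Conv1d 'pad' formula concrete, a small shape check with layer sizes invented for illustration: L_out = floor((50 + 2*1 - 2*(4-1) - 1)/2) + 1 = floor(45/2) + 1 = 23.

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    net = nn.Conv1d(3, 8, kernel_size=4, stride=2, pad_mode='pad', padding=1, dilation=2)
    x = Tensor(np.ones([1, 3, 50]), mindspore.float32)
    print(net(x).shape)  # expected (1, 8, 23) per the formula above
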
@@ -541,8 +547,8 @@ class Conv1d(_Conv):
  kernel_size = (1, kernel_size)
  stride = (1, stride)
  dilation = (1, dilation)
- get_shape = P.Shape()
- get_dtype = P.DType()
+ get_shape = ops.Shape()
+ get_dtype = ops.DType()
  if isinstance(weight_init, Tensor):
  weight_init_shape = get_shape(weight_init)
  Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)
@@ -566,18 +572,18 @@ class Conv1d(_Conv):
  dtype=dtype)
  self.padding = (0, 0, padding, padding)
  Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
- self.conv2d = P.Conv2D(out_channel=self.out_channels,
- kernel_size=self.kernel_size,
- mode=1,
- pad_mode=self.pad_mode,
- pad=self.padding,
- stride=self.stride,
- dilation=self.dilation,
- group=self.group)
- self.bias_add = P.BiasAdd()
- self.expand_dims = P.ExpandDims()
- self.squeeze = P.Squeeze(2)
- self.shape = P.Shape()
+ self.conv2d = ops.Conv2D(out_channel=self.out_channels,
+ kernel_size=self.kernel_size,
+ mode=1,
+ pad_mode=self.pad_mode,
+ pad=self.padding,
+ stride=self.stride,
+ dilation=self.dilation,
+ group=self.group)
+ self.bias_add = ops.BiasAdd()
+ self.expand_dims = ops.ExpandDims()
+ self.squeeze = ops.Squeeze(2)
+ self.shape = ops.Shape()
 
  def construct(self, x):
  x = self.expand_dims(x, 2)
@@ -727,24 +733,24 @@ class Conv3d(_Conv):
 
  .. math::
  \begin{array}{ll} \\
- D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
- {\text{stride[0]}} + 1} \right \rfloor \\
- H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
- {\text{stride[1]}} + 1} \right \rfloor \\
- W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
- {\text{stride[2]}} + 1} \right \rfloor \\
+ D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}
+ {\text{stride[0]}}} \right \rfloor + 1 \\
+ H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}
+ {\text{stride[1]}}} \right \rfloor + 1 \\
+ W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) - 1}
+ {\text{stride[2]}}} \right \rfloor + 1 \\
  \end{array}
 
  pad_mode is ``'pad'`` :
 
  .. math::
  \begin{array}{ll} \\
- D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - (\text{dilation[0]} - 1) \times
- \text{kernel_size[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
- H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - (\text{dilation[1]} - 1) \times
- \text{kernel_size[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
- W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - (\text{dilation[2]} - 1) \times
- \text{kernel_size[2]} - 1 }{\text{stride[2]}} + 1} \right \rfloor \\
+ D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
+ (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
+ H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
+ (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
+ W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - \text{dilation[2]} \times
+ (\text{kernel_size[2]} - 1) - 1}{\text{stride[2]}}} \right \rfloor + 1 \\
  \end{array}
 
  Raises:
@@ -812,20 +818,20 @@ class Conv3d(_Conv):
  data_format,
  dtype=dtype)
  out_channels = self.out_channels // group
- self.conv3d = P.Conv3D(out_channel=out_channels,
- kernel_size=self.kernel_size,
- mode=1,
- pad_mode=self.pad_mode,
- pad=self.padding,
- stride=self.stride,
- dilation=self.dilation,
- group=1,
- data_format=self.data_format)
- self.bias_add = P.BiasAdd(data_format=self.data_format)
- self.shape = P.Shape()
- self.concat = P.Concat(1)
- self.split_0 = P.Split(0, self.group)
- self.split_1 = P.Split(1, self.group)
+ self.conv3d = ops.Conv3D(out_channel=out_channels,
+ kernel_size=self.kernel_size,
+ mode=1,
+ pad_mode=self.pad_mode,
+ pad=self.padding,
+ stride=self.stride,
+ dilation=self.dilation,
+ group=1,
+ data_format=self.data_format)
+ self.bias_add = ops.BiasAdd(data_format=self.data_format)
+ self.shape = ops.Shape()
+ self.concat = ops.Concat(1)
+ self.split_0 = ops.Split(0, self.group)
+ self.split_1 = ops.Split(1, self.group)
 
  def construct(self, x):
  if self.group == 1:
@@ -935,11 +941,12 @@ class Conv3dTranspose(_Conv):
  Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
  data_format (str, optional): The optional value for data format. Currently only support ``'NCDHW'`` .
  Default: ``'NCDHW'`` .
- dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``mstype.float32`` .
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Should be the same as dtype of input.
+ Default: ``mstype.float32`` .
 
  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
- Currently input data dtype only supports float16 and float32.
+ Currently input data dtype for Ascend only supports float16; for CPU/GPU only supports float16 and float32.
 
  Outputs:
  Tensor, the shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
@@ -982,7 +989,7 @@ class Conv3dTranspose(_Conv):
  TypeError: If `in_channels`, `out_channels` or `group` is not an int.
  TypeError: If `kernel_size`, `stride`, `padding` , `dilation` or `output_padding`
  is neither an int nor a tuple of three.
- TypeError: If input data type is not float16 or float32.
+ TypeError: If input data type is not supported. For CPU/GPU: not float16 or float32; for Ascend: not float16.
  ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
  ValueError: If `padding` is less than 0.
  ValueError: If `pad_mode` is not one of ``'same'``, ``'valid'``, ``'pad'``.
@@ -997,9 +1004,9 @@ class Conv3dTranspose(_Conv):
  >>> import mindspore
  >>> from mindspore import Tensor, nn
  >>> import numpy as np
- >>> x = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float32)
+ >>> x = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float16)
  >>> conv3d_transpose = nn.Conv3dTranspose(in_channels=16, out_channels=3, kernel_size=(4, 6, 2),
- ... pad_mode='pad')
+ ... pad_mode='pad', dtype=mindspore.float16)
  >>> output = conv3d_transpose(x)
  >>> print(output.shape)
  (32, 3, 13, 37, 33)
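
The expected shape in the doctest is unchanged by the dtype edits: with stride 1, padding 0 and dilation 1, each transposed dimension is D_in + kernel_size - 1, i.e. 10+4-1 = 13, 32+6-1 = 37 and 32+2-1 = 33.
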
@@ -1032,7 +1039,7 @@ class Conv3dTranspose(_Conv):
  if isinstance(padding, tuple):
  Validator.check_equal_int(len(padding), 6, 'padding size', self.cls_name)
  self.output_padding = _check_3d_int_or_tuple("output_padding", output_padding, self.cls_name,
- greater_zero=False)
+ greater_zero=False, pad_value=0)
  super(Conv3dTranspose, self).__init__(
  in_channels,
  out_channels,
@@ -1048,19 +1055,19 @@ class Conv3dTranspose(_Conv):
  data_format,
  transposed=True,
  dtype=dtype)
- self.conv3d_transpose = P.Conv3DTranspose(in_channel=self.in_channels,
- out_channel=self.out_channels,
- kernel_size=self.kernel_size,
- mode=1,
- pad_mode=self.pad_mode,
- pad=self.padding,
- stride=self.stride,
- dilation=self.dilation,
- group=self.group,
- output_padding=self.output_padding,
- data_format=self.data_format)
- self.bias_add = P.BiasAdd(data_format=self.data_format)
- self.shape = P.Shape()
+ self.conv3d_transpose = ops.Conv3DTranspose(in_channel=self.in_channels,
+ out_channel=self.out_channels,
+ kernel_size=self.kernel_size,
+ mode=1,
+ pad_mode=self.pad_mode,
+ pad=self.padding,
+ stride=self.stride,
+ dilation=self.dilation,
+ group=self.group,
+ output_padding=self.output_padding,
+ data_format=self.data_format)
+ self.bias_add = ops.BiasAdd(data_format=self.data_format)
+ self.shape = ops.Shape()
 
  def construct(self, x):
  output = self.conv3d_transpose(x, self.weight)
@@ -1219,7 +1226,7 @@ class Conv2dTranspose(_Conv):
  >>> output = net(x).shape
  >>> print(output)
  (1, 64, 19, 53)
- """
+ """
 
  def __init__(self,
  in_channels,
@@ -1265,7 +1272,7 @@ class Conv2dTranspose(_Conv):
 
  self.in_channels = in_channels
  self.out_channels = out_channels
- self.shape = P.Shape()
+ self.shape = ops.Shape()
  Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
  self.is_valid = self.pad_mode == 'valid'
  self.is_same = self.pad_mode == 'same'
@@ -1273,15 +1280,15 @@ class Conv2dTranspose(_Conv):
  self.output_padding = output_padding
 
  # cause Conv2DTranspose's out_channel refers to Conv2D's out_channel.
- self.conv2d_transpose = P.Conv2DTranspose(out_channel=in_channels,
- kernel_size=kernel_size,
- mode=1,
- pad_mode=pad_mode,
- pad=padding,
- stride=stride,
- dilation=dilation,
- group=group)
- self.bias_add = P.BiasAdd()
+ self.conv2d_transpose = ops.Conv2DTranspose(out_channel=in_channels,
+ kernel_size=kernel_size,
+ mode=1,
+ pad_mode=pad_mode,
+ pad=padding,
+ stride=stride,
+ dilation=dilation,
+ group=group)
+ self.bias_add = ops.BiasAdd()
  if isinstance(self.padding, int):
  self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = (self.padding,) * 4
  else:
@@ -1308,7 +1315,7 @@ class Conv2dTranspose(_Conv):
  if not self.is_pad and (self.output_padding[0] > 0 or self.output_padding[1] > 0):
  raise ValueError("when output_padding is not zero, pad_mode must be 'pad'")
 
- pad = P.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding[0]), (0, self.output_padding[1])))
+ pad = ops.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding[0]), (0, self.output_padding[1])))
  return pad(conv2d_trans_ret)
 
  if self.output_padding == 0:
@@ -1320,7 +1327,7 @@ class Conv2dTranspose(_Conv):
  raise ValueError("output_padding must be in range of [0, max(stride_w, dilation_w)).")
  if not self.is_pad and self.output_padding > 0:
  raise ValueError("when output_padding is not zero, pad_mode must be 'pad'")
- pad = P.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding), (0, self.output_padding)))
+ pad = ops.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding), (0, self.output_padding)))
  return pad(conv2d_trans_ret)
 
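In both branches, output_padding is realized by zero-padding only the bottom/right edges of the transposed-convolution result. What that ops.Pad call does, on a toy tensor:

    import numpy as np
    from mindspore import Tensor, ops

    # mirror output_padding = 1: nothing on N and C, one extra row/column
    # at the bottom/right of H and W
    pad = ops.Pad(paddings=((0, 0), (0, 0), (0, 1), (0, 1)))
    x = Tensor(np.zeros((1, 1, 4, 4), np.float32))
    print(pad(x).shape)  # (1, 1, 5, 5)
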
@@ -1444,8 +1451,8 @@ class Conv1dTranspose(_Conv):
  kernel_size = (1, kernel_size)
  stride = (1, stride)
  dilation = (1, dilation)
- get_shape = P.Shape()
- get_dtype = P.DType()
+ get_shape = ops.Shape()
+ get_dtype = ops.DType()
  if isinstance(weight_init, Tensor):
  weight_init_shape = get_shape(weight_init)
  Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)
@@ -1473,24 +1480,24 @@ class Conv1dTranspose(_Conv):
  self.padding = (0, 0, padding, padding)
  self.in_channels = in_channels
  self.out_channels = out_channels
- self.shape = P.Shape()
+ self.shape = ops.Shape()
  Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
  self.is_valid = self.pad_mode == 'valid'
  self.is_same = self.pad_mode == 'same'
  self.is_pad = self.pad_mode == 'pad'
 
  # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
- self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
- kernel_size=kernel_size,
- mode=1,
- pad_mode=pad_mode,
- pad=self.padding,
- stride=stride,
- dilation=dilation,
- group=group)
- self.bias_add = P.BiasAdd()
- self.expand_dims = P.ExpandDims()
- self.squeeze = P.Squeeze(2)
+ self.conv2d_transpose = ops.Conv2DBackpropInput(out_channel=in_channels,
+ kernel_size=kernel_size,
+ mode=1,
+ pad_mode=pad_mode,
+ pad=self.padding,
+ stride=stride,
+ dilation=dilation,
+ group=group)
+ self.bias_add = ops.BiasAdd()
+ self.expand_dims = ops.ExpandDims()
+ self.squeeze = ops.Squeeze(2)
 
  def shard(self, strategy):
  self.conv2d_transpose.shard(strategy)
@@ -18,7 +18,7 @@ from __future__ import absolute_import
 
  import math
 
- import mindspore.ops as P
+ from mindspore import ops
  import mindspore.common.dtype as mstype
  from mindspore.common.tensor import Tensor
  from mindspore.common.initializer import initializer, Uniform
@@ -41,9 +41,9 @@ def check_dense_inputs_same_shape(input1, input2, prim_name=None):
  @constexpr(check=False)
  def _check_is_tensor(param_name, input_data, cls_name):
  """Internal function, used to check whether the input data is Tensor."""
- if input_data is not None and not isinstance(P.typeof(input_data), mstype.TensorType):
+ if input_data is not None and not isinstance(ops.typeof(input_data), mstype.TensorType):
  raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.TensorType}', "
- f"but got '{P.typeof(input_data)}'")
+ f"but got '{ops.typeof(input_data)}'")
 
 
  @_primexpr
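
In dense.py the old P was simply `import mindspore.ops as P`, so this hunk is a pure alias cleanup: ops.typeof is the same function under the clearer name. A minimal check of the pattern _check_is_tensor relies on, assuming pynative execution:

    import mindspore.common.dtype as mstype
    from mindspore import Tensor, ops

    t = Tensor([1.0, 2.0])
    print(isinstance(ops.typeof(t), mstype.TensorType))  # True
    print(isinstance(ops.typeof(3), mstype.TensorType))  # False: an int is not a Tensor
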
@@ -73,9 +73,11 @@ class BiDense(Cell):
  in2_channels (int): The number of channels in the input2 space.
  out_channels (int): The number of channels in the output space.
  weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter.
- The values of str refer to the function `initializer`. Default: ``None`` .
+ The values of str refer to the function :func:`mindspore.common.initializer.initializer`.
+ Default: ``None`` .
  bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter.
- The values of str refer to the function `initializer`. Default: ``None`` .
+ The values of str refer to the function :func:`mindspore.common.initializer.initializer`.
+ Default: ``None`` .
  has_bias (bool): Specifies whether the layer uses :math:`\text{bias}` vector. Default: ``True`` .
  dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
 
@@ -170,8 +172,8 @@ class BiDense(Cell):
  f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
  f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
  self.bias = Parameter(initializer(bias_init, [out_channels], dtype=dtype), name="bias")
- self.bias_add = P.BiasAdd()
- self.matmul = P.MatMul()
+ self.bias_add = ops.BiasAdd()
+ self.matmul = ops.MatMul()
 
  def construct(self, input1, input2):
  _check_is_tensor("input1", input1, self.cls_name)
@@ -19,8 +19,6 @@ import mindspore.common.dtype as mstype
  import mindspore.ops as ops
  from mindspore import log as logger
  from mindspore.common.tensor import Tensor
- from mindspore.ops import operations as P
- from mindspore.ops import functional as F
  from mindspore.common.parameter import Parameter
  from mindspore.common.parameter import _get_unique_parameter_key
  from mindspore.common.initializer import initializer, Normal
@@ -130,17 +128,17 @@ class Embedding(Cell):
  self.init_tensor = Tensor(self.init_tensor, init_tensor_type)
  self.embedding_table = Parameter(
  self.init_tensor, name='embedding_table')
- self.expand = P.ExpandDims()
- self.reshape_flat = P.Reshape()
+ self.expand = ops.ExpandDims()
+ self.reshape_flat = ops.Reshape()
  self.shp_flat = (-1,)
- self.gather = P.Gather()
- self.one_hot = P.OneHot()
+ self.gather = ops.Gather()
+ self.one_hot = ops.OneHot()
  self.on_value = Tensor(1.0, self.dtype)
  self.off_value = Tensor(0.0, self.dtype)
- self.array_mul = P.MatMul()
- self.reshape = P.Reshape()
- self.get_shp = P.Shape()
- self.concat = P.Concat()
+ self.array_mul = ops.MatMul()
+ self.reshape = ops.Reshape()
+ self.get_shp = ops.Shape()
+ self.concat = ops.Concat()
 
  def construct(self, ids):
  out_shape = self.get_shp(ids) + (self.embedding_size,)
@@ -311,9 +309,9 @@ class EmbeddingLookup(Cell):
 
  Note:
  When 'target' is set to 'CPU', this module will use
- P.EmbeddingLookup().set_device('CPU') which
+ ops.EmbeddingLookup().set_device('CPU') which
  specified 'offset = 0' to lookup table.
- When 'target' is set to 'DEVICE', this module will use P.Gather() which
+ When 'target' is set to 'DEVICE', this module will use ops.Gather() which
  specified 'axis = 0' to lookup table.
  In field slice mode, the manual_shapes must be given. It is a tuple ,where
  the element is vocab[i], vocab[i] is the row numbers for i-th part.
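
How the target choice surfaces at the layer level (vocab/embedding sizes arbitrary; per the check in the next hunk, target='CPU' requires sparse=True):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    # target='CPU' routes through ops.EmbeddingLookup().set_device('CPU');
    # target='DEVICE' routes through ops.Gather() with axis 0.
    lookup = nn.EmbeddingLookup(vocab_size=100, embedding_size=8,
                                target='CPU', sparse=True)
    ids = Tensor(np.array([[1, 5], [7, 9]]), mindspore.int32)
    print(lookup(ids).shape)  # (2, 2, 8)
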
@@ -407,10 +405,10 @@ class EmbeddingLookup(Cell):
  raise ValueError(f"For '{self.cls_name}', 'sparse' must be True when 'target' is \"CPU\", "
  f"but got 'sparse': {sparse} and 'target': {target}")
  if sparse:
- self.gatherv2 = P.SparseGatherV2()
+ self.gatherv2 = ops.SparseGatherV2()
  else:
- self.gatherv2 = P.Gather()
- self.embeddinglookup = P.EmbeddingLookup().set_device('CPU')
+ self.gatherv2 = ops.Gather()
+ self.embeddinglookup = ops.EmbeddingLookup().set_device('CPU')
  self.is_ps_server = False
  enable_ps = _get_ps_context("enable_ps")
  if enable_ps:
@@ -422,13 +420,13 @@ class EmbeddingLookup(Cell):
  parallel_mode = _get_parallel_mode()
  is_auto_parallel = parallel_mode in (
  ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)
- self.gather_revert = P.Gather()
- self.reshape_first = P.Reshape()
- self.reshape = P.Reshape()
- self.unique = P.Unique()
- self.shape = P.Shape()
+ self.gather_revert = ops.Gather()
+ self.reshape_first = ops.Reshape()
+ self.reshape = ops.Reshape()
+ self.unique = ops.Unique()
+ self.shape = ops.Shape()
  if is_auto_parallel:
- self.unique = P.Unique().shard(((1,),))
+ self.unique = ops.Unique().shard(((1,),))
  if self.cache_enable and enable_ps:
  self._set_voacb_cache_enable_for_ps(
  vocab_cache_size, embedding_size, vocab_size, param_init, dtype=dtype)
@@ -582,12 +580,12 @@ class EmbeddingLookup(Cell):
 
  # Add EmbeddingLookup ops on different servers.
  if self.target == 'CPU':
- embedding_lookup = P.EmbeddingLookup().set_device('CPU')
+ embedding_lookup = ops.EmbeddingLookup().set_device('CPU')
  else:
  if self.sparse:
- embedding_lookup = P.SparseGatherV2()
+ embedding_lookup = ops.SparseGatherV2()
  else:
- embedding_lookup = P.Gather()
+ embedding_lookup = ops.Gather()
  embedding_lookup.add_prim_attr(
  'offset', self.embedding_offset[i])
  embedding_lookup.add_prim_attr('rank_id', i)
@@ -596,7 +594,7 @@ class EmbeddingLookup(Cell):
 
  # For now unique operation is not applied,
  # so we need to reduce the lookup results from different servers with AddN.
- self.reduce_lookup_result = P.AddN()
+ self.reduce_lookup_result = ops.AddN()
 
  def _do_server_embedding_lookup(self, indices):
  '''
@@ -647,7 +645,7 @@ class EmbeddingLookup(Cell):
  else:
  out = self.gatherv2(self.embedding_table, indices, 0)
  if self.max_norm is not None:
- axis = _make_axis_range(F.rank(indices), F.rank(out))
+ axis = _make_axis_range(ops.rank(indices), ops.rank(out))
  clip_by_norm = ClipByNorm(axis)
  out = clip_by_norm(out, self.max_norm)
  return out
@@ -660,9 +658,9 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
 
  Note:
  When 'target' is set to 'CPU', this module will use
- P.EmbeddingLookup().set_device('CPU') which
+ ops.EmbeddingLookup().set_device('CPU') which
  specified 'offset = 0' to lookup table.
- When 'target' is set to 'DEVICE', this module will use P.Gather() which
+ When 'target' is set to 'DEVICE', this module will use ops.Gather() which
  specified 'axis = 0' to lookup table.
  The vectors with the same field_ids will be combined by the `operator`, such as 'SUM', 'MAX' and
  'MEAN'. Ensure the input_values of the padded id is zero, so that they can be ignored. The final
@@ -753,29 +751,29 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
  field_size, 'field_size', self.cls_name)
  self.operator = operator
 
- self.mul = P.Mul()
- self.inf_mask_mul = P.Mul()
- self.bias_add = P.Add()
- self.inf_add = P.Add()
+ self.mul = ops.Mul()
+ self.inf_mask_mul = ops.Mul()
+ self.bias_add = ops.Add()
+ self.inf_add = ops.Add()
  self.merge_op = None
- self.count_op = P.UnsortedSegmentSum()
- self.abs = P.Abs()
- self.equal = P.Equal()
- self.add = P.Add()
- self.cast = P.Cast()
- self.div_no_nan = P.DivNoNan()
- self.expand = P.ExpandDims()
- self.max_mask_mul = P.Mul()
- self.max_no_equal = P.NotEqual()
+ self.count_op = ops.UnsortedSegmentSum()
+ self.abs = ops.Abs()
+ self.equal = ops.Equal()
+ self.add = ops.Add()
+ self.cast = ops.Cast()
+ self.div_no_nan = ops.DivNoNan()
+ self.expand = ops.ExpandDims()
+ self.max_mask_mul = ops.Mul()
+ self.max_no_equal = ops.NotEqual()
 
  Validator.check_string(
  operator, ['SUM', 'MAX', 'MEAN'], 'operator', self.cls_name)
  if operator == MultiFieldEmbeddingLookup.OPERATOR_SUM:
- self.merge_op = P.UnsortedSegmentSum()
+ self.merge_op = ops.UnsortedSegmentSum()
  elif operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
- self.merge_op = P.UnsortedSegmentMax()
+ self.merge_op = ops.UnsortedSegmentMax()
  else:
- self.merge_op = P.UnsortedSegmentSum()
+ self.merge_op = ops.UnsortedSegmentSum()
 
 
  parallel_mode = _get_parallel_mode()
@@ -822,16 +820,16 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
  self.negative_inf_value = -3.402823466E+38
 
  def construct(self, input_indices, input_values, field_ids):
- _check_input_2d(F.shape(input_indices), "input_indices", self.cls_name)
- _check_input_2d(F.shape(input_values), "input_values", self.cls_name)
- _check_input_2d(F.shape(field_ids), "field_ids", self.cls_name)
- _check_input_dtype(F.dtype(input_indices), "input_indices", [mstype.int32, mstype.int64], self.cls_name)
- _check_input_dtype(F.dtype(input_values), "input_values", [mstype.float32], self.cls_name)
- _check_input_dtype(F.dtype(field_ids), "field_ids", [mstype.int32], self.cls_name)
+ _check_input_2d(ops.shape(input_indices), "input_indices", self.cls_name)
+ _check_input_2d(ops.shape(input_values), "input_values", self.cls_name)
+ _check_input_2d(ops.shape(field_ids), "field_ids", self.cls_name)
+ _check_input_dtype(ops.dtype(input_indices), "input_indices", [mstype.int32, mstype.int64], self.cls_name)
+ _check_input_dtype(ops.dtype(input_values), "input_values", [mstype.float32], self.cls_name)
+ _check_input_dtype(ops.dtype(field_ids), "field_ids", [mstype.int32], self.cls_name)
 
  batch_size = self.shape(input_indices)[0]
  num_segments = batch_size * self.field_size
- bias = F.tuple_to_array(F.make_range(0, num_segments, self.field_size))
+ bias = ops.tuple_to_array(ops.make_range(0, num_segments, self.field_size))
  bias = self.reshape(bias, (batch_size, -1))
  field_ids = self.bias_add(field_ids, bias)
 
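The bias computed above flattens (batch row, field) pairs into globally unique segment ids, so a single UnsortedSegmentSum/UnsortedSegmentMax call can merge per-field vectors across the whole batch. The offset trick in plain Python, with toy sizes:

    # batch_size = 2, field_size = 3:
    # make_range(0, 6, 3) -> (0, 3); row r's field ids are shifted by bias[r],
    # so row 0 owns segments 0..2 and row 1 owns segments 3..5.
    batch_size, field_size = 2, 3
    bias = list(range(0, batch_size * field_size, field_size))  # [0, 3]
    field_ids = [[0, 1, 1], [2, 0, 2]]
    segments = [[f + bias[r] for f in row] for r, row in enumerate(field_ids)]
    print(segments)  # [[0, 1, 1], [5, 3, 5]]
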
@@ -848,7 +846,7 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
  else:
  out = self.gatherv2(self.embedding_table, input_indices, 0)
  if self.max_norm is not None:
- axis = _make_axis_range(F.rank(input_indices), F.rank(out))
+ axis = _make_axis_range(ops.rank(input_indices), ops.rank(out))
  clip_by_norm = ClipByNorm(axis)
  out = clip_by_norm(out, self.max_norm)