mindspore-2.2.14-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp38-cp38-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
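
For readers who want to verify the listing below, a file-level summary of this kind can be reproduced locally from the two wheels using only the Python standard library. The sketch below is illustrative rather than the registry's own tooling; the wheel paths are assumptions and should point at copies you have downloaded yourself.

    # Minimal sketch: compare the members of two wheel archives and print a
    # file-level change summary (added, removed, changed). Wheel paths are
    # illustrative assumptions, not files shipped with this diff.
    import zipfile

    OLD = "mindspore-2.2.14-cp38-cp38-manylinux1_x86_64.whl"
    NEW = "mindspore-2.3.0rc1-cp38-cp38-manylinux1_x86_64.whl"

    def members(path):
        # Map each archive member to its CRC so unchanged files can be skipped.
        with zipfile.ZipFile(path) as wheel:
            return {info.filename: info.CRC for info in wheel.infolist()}

    old, new = members(OLD), members(NEW)
    added = sorted(set(new) - set(old))
    removed = sorted(set(old) - set(new))
    changed = sorted(name for name in set(old) & set(new) if old[name] != new[name])

    print(f"Files changed ({len(added) + len(removed) + len(changed)})")
    for name in changed:
        print(f"  ~ {name}")
    for name in added:
        print(f"  + {name}")
    for name in removed:
        print(f"  - {name}")

Note that a CRC comparison only flags which files differ; it does not show the per-file +/- line counts reported in the registry listing, which require extracting and diffing the text files themselves.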

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (1153)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -4
  3. mindspore/_akg/akg/composite/build_module.py +155 -11
  4. mindspore/_akg/akg/config/repository.json +38 -0
  5. mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
  7. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
  8. mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
  9. mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
  10. mindspore/_akg/akg/utils/gen_random.py +14 -8
  11. mindspore/_akg/akg/utils/op_dsl.py +11 -0
  12. mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
  13. mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
  14. mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
  15. mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
  16. mindspore/_checkparam.py +58 -0
  17. mindspore/_extends/builtin_operations.py +2 -1
  18. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  19. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  20. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  21. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  22. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  23. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  24. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  25. mindspore/_extends/parse/__init__.py +18 -14
  26. mindspore/_extends/parse/compile_config.py +229 -0
  27. mindspore/_extends/parse/parser.py +155 -59
  28. mindspore/_extends/parse/resources.py +40 -7
  29. mindspore/_extends/parse/standard_method.py +124 -204
  30. mindspore/_extends/remote/kernel_build_server.py +2 -0
  31. mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
  32. mindspore/_profiler.py +30 -0
  33. mindspore/amp.py +24 -18
  34. mindspore/bin/cache_admin +0 -0
  35. mindspore/bin/cache_server +0 -0
  36. mindspore/boost/boost_cell_wrapper.py +1 -1
  37. mindspore/boost/group_loss_scale_manager.py +1 -1
  38. mindspore/common/__init__.py +3 -1
  39. mindspore/common/_jit_fallback_utils.py +2 -3
  40. mindspore/common/_register_for_adapter.py +7 -0
  41. mindspore/common/_stub_tensor.py +6 -1
  42. mindspore/common/_utils.py +5 -17
  43. mindspore/common/api.py +91 -48
  44. mindspore/common/auto_dynamic_shape.py +27 -14
  45. mindspore/common/dtype.py +5 -4
  46. mindspore/common/dump.py +5 -4
  47. mindspore/common/initializer.py +1 -1
  48. mindspore/common/jit_config.py +20 -11
  49. mindspore/common/lazy_inline.py +58 -17
  50. mindspore/common/mindir_util.py +12 -2
  51. mindspore/common/mutable.py +79 -14
  52. mindspore/common/parameter.py +19 -4
  53. mindspore/common/seed.py +9 -9
  54. mindspore/common/sparse_tensor.py +251 -18
  55. mindspore/common/symbol.py +122 -0
  56. mindspore/common/tensor.py +321 -433
  57. mindspore/communication/__init__.py +3 -3
  58. mindspore/communication/_comm_helper.py +5 -0
  59. mindspore/communication/management.py +53 -38
  60. mindspore/config/op_info.config +22 -54
  61. mindspore/context.py +167 -59
  62. mindspore/dataset/__init__.py +5 -5
  63. mindspore/dataset/audio/__init__.py +6 -6
  64. mindspore/dataset/audio/transforms.py +711 -158
  65. mindspore/dataset/callback/ds_callback.py +2 -2
  66. mindspore/dataset/engine/cache_client.py +2 -2
  67. mindspore/dataset/engine/datasets.py +72 -38
  68. mindspore/dataset/engine/datasets_audio.py +14 -14
  69. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  70. mindspore/dataset/engine/datasets_text.py +38 -38
  71. mindspore/dataset/engine/datasets_user_defined.py +7 -7
  72. mindspore/dataset/engine/datasets_vision.py +75 -71
  73. mindspore/dataset/engine/offload.py +5 -7
  74. mindspore/dataset/text/__init__.py +3 -3
  75. mindspore/dataset/text/transforms.py +408 -121
  76. mindspore/dataset/text/utils.py +9 -9
  77. mindspore/dataset/transforms/__init__.py +1 -1
  78. mindspore/dataset/transforms/transforms.py +261 -76
  79. mindspore/dataset/utils/browse_dataset.py +9 -9
  80. mindspore/dataset/vision/__init__.py +3 -3
  81. mindspore/dataset/vision/c_transforms.py +5 -5
  82. mindspore/dataset/vision/transforms.py +2264 -514
  83. mindspore/dataset/vision/utils.py +40 -9
  84. mindspore/dataset/vision/validators.py +7 -1
  85. mindspore/experimental/optim/__init__.py +12 -2
  86. mindspore/experimental/optim/adadelta.py +161 -0
  87. mindspore/experimental/optim/adagrad.py +168 -0
  88. mindspore/experimental/optim/adam.py +35 -34
  89. mindspore/experimental/optim/adamax.py +170 -0
  90. mindspore/experimental/optim/adamw.py +40 -16
  91. mindspore/experimental/optim/asgd.py +153 -0
  92. mindspore/experimental/optim/lr_scheduler.py +60 -119
  93. mindspore/experimental/optim/nadam.py +157 -0
  94. mindspore/experimental/optim/optimizer.py +15 -8
  95. mindspore/experimental/optim/radam.py +194 -0
  96. mindspore/experimental/optim/rmsprop.py +154 -0
  97. mindspore/experimental/optim/rprop.py +164 -0
  98. mindspore/experimental/optim/sgd.py +28 -19
  99. mindspore/hal/__init__.py +34 -0
  100. mindspore/hal/_ascend.py +57 -0
  101. mindspore/hal/_base.py +57 -0
  102. mindspore/hal/_cpu.py +56 -0
  103. mindspore/hal/_gpu.py +57 -0
  104. mindspore/hal/device.py +356 -0
  105. mindspore/hal/event.py +179 -0
  106. mindspore/hal/stream.py +337 -0
  107. mindspore/include/api/data_type.h +2 -2
  108. mindspore/include/api/dual_abi_helper.h +16 -3
  109. mindspore/include/api/model.h +1 -3
  110. mindspore/include/api/status.h +14 -0
  111. mindspore/include/c_api/model_c.h +173 -0
  112. mindspore/include/c_api/ms/base/types.h +1 -0
  113. mindspore/include/c_api/types_c.h +19 -0
  114. mindspore/include/dataset/execute.h +1 -3
  115. mindspore/include/mindapi/base/format.h +125 -23
  116. mindspore/include/mindapi/base/types.h +7 -0
  117. mindspore/lib/libdnnl.so.2 +0 -0
  118. mindspore/lib/libmindspore.so +0 -0
  119. mindspore/lib/libmindspore_backend.so +0 -0
  120. mindspore/lib/libmindspore_common.so +0 -0
  121. mindspore/lib/libmindspore_core.so +0 -0
  122. mindspore/lib/libmindspore_glog.so.0 +0 -0
  123. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  124. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  125. mindspore/lib/libmindspore_shared_lib.so +0 -0
  126. mindspore/lib/libmpi_adapter.so +0 -0
  127. mindspore/lib/libmpi_collective.so +0 -0
  128. mindspore/lib/libnnacl.so +0 -0
  129. mindspore/lib/libopencv_core.so.4.5 +0 -0
  130. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  131. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  132. mindspore/lib/libps_cache.so +0 -0
  133. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
  134. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
  135. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
  136. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
  137. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
  138. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
  139. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  140. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
  141. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  142. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  143. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6325 -1767
  144. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  145. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
  146. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
  147. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
  148. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  162. mindspore/lib/plugin/ascend/libakg.so +0 -0
  163. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  164. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  165. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  166. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  167. mindspore/lib/plugin/cpu/libakg.so +0 -0
  168. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  169. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  170. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  171. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  172. mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
  173. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  174. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  175. mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
  176. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  177. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  178. mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
  179. mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
  180. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  181. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  182. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  183. mindspore/mindrecord/__init__.py +5 -1
  184. mindspore/mindrecord/config.py +809 -0
  185. mindspore/mindrecord/filereader.py +25 -0
  186. mindspore/mindrecord/filewriter.py +74 -56
  187. mindspore/mindrecord/mindpage.py +40 -6
  188. mindspore/mindrecord/shardutils.py +3 -2
  189. mindspore/mindrecord/shardwriter.py +7 -0
  190. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  191. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  192. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  193. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  194. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  195. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  196. mindspore/multiprocessing/__init__.py +68 -0
  197. mindspore/nn/cell.py +86 -133
  198. mindspore/nn/dynamic_lr.py +2 -2
  199. mindspore/nn/layer/activation.py +79 -90
  200. mindspore/nn/layer/basic.py +4 -80
  201. mindspore/nn/layer/channel_shuffle.py +3 -16
  202. mindspore/nn/layer/container.py +3 -3
  203. mindspore/nn/layer/conv.py +71 -71
  204. mindspore/nn/layer/embedding.py +105 -44
  205. mindspore/nn/layer/image.py +4 -7
  206. mindspore/nn/layer/normalization.py +46 -38
  207. mindspore/nn/layer/padding.py +26 -39
  208. mindspore/nn/layer/pooling.py +13 -9
  209. mindspore/nn/layer/rnn_cells.py +5 -15
  210. mindspore/nn/layer/rnns.py +6 -5
  211. mindspore/nn/layer/thor_layer.py +1 -2
  212. mindspore/nn/layer/timedistributed.py +1 -1
  213. mindspore/nn/layer/transformer.py +52 -50
  214. mindspore/nn/learning_rate_schedule.py +6 -5
  215. mindspore/nn/loss/loss.py +43 -64
  216. mindspore/nn/optim/ada_grad.py +4 -2
  217. mindspore/nn/optim/adadelta.py +3 -1
  218. mindspore/nn/optim/adafactor.py +1 -1
  219. mindspore/nn/optim/adam.py +102 -181
  220. mindspore/nn/optim/adamax.py +4 -2
  221. mindspore/nn/optim/adasum.py +2 -2
  222. mindspore/nn/optim/asgd.py +4 -2
  223. mindspore/nn/optim/ftrl.py +31 -61
  224. mindspore/nn/optim/lamb.py +5 -3
  225. mindspore/nn/optim/lars.py +2 -2
  226. mindspore/nn/optim/lazyadam.py +6 -4
  227. mindspore/nn/optim/momentum.py +13 -25
  228. mindspore/nn/optim/optimizer.py +6 -3
  229. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  230. mindspore/nn/optim/rmsprop.py +9 -3
  231. mindspore/nn/optim/rprop.py +4 -2
  232. mindspore/nn/optim/sgd.py +6 -5
  233. mindspore/nn/optim/thor.py +2 -2
  234. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  235. mindspore/nn/probability/distribution/beta.py +2 -2
  236. mindspore/nn/probability/distribution/categorical.py +4 -6
  237. mindspore/nn/probability/distribution/cauchy.py +2 -2
  238. mindspore/nn/probability/distribution/exponential.py +1 -1
  239. mindspore/nn/probability/distribution/gumbel.py +2 -2
  240. mindspore/nn/probability/distribution/poisson.py +2 -2
  241. mindspore/nn/probability/distribution/uniform.py +2 -2
  242. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  243. mindspore/nn/wrap/__init__.py +2 -1
  244. mindspore/nn/wrap/cell_wrapper.py +33 -12
  245. mindspore/nn/wrap/grad_reducer.py +148 -8
  246. mindspore/nn/wrap/loss_scale.py +7 -7
  247. mindspore/numpy/__init__.py +2 -0
  248. mindspore/numpy/array_creations.py +2 -0
  249. mindspore/numpy/array_ops.py +1 -5
  250. mindspore/numpy/fft.py +431 -0
  251. mindspore/numpy/math_ops.py +54 -60
  252. mindspore/numpy/utils.py +3 -0
  253. mindspore/ops/__init__.py +5 -4
  254. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  255. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -22
  256. mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
  257. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  258. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  259. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  260. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  261. mindspore/ops/_op_impl/__init__.py +0 -1
  262. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  263. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  264. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  265. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  266. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  267. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  268. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  269. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  270. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  271. mindspore/ops/_vmap/vmap_array_ops.py +137 -101
  272. mindspore/ops/_vmap/vmap_base.py +8 -1
  273. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  274. mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
  275. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  276. mindspore/ops/_vmap/vmap_math_ops.py +74 -49
  277. mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
  278. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  279. mindspore/ops/auto_generate/__init__.py +31 -0
  280. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
  281. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
  282. mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
  283. mindspore/ops/auto_generate/gen_extend_func.py +130 -0
  284. mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
  285. mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
  286. mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
  287. mindspore/ops/composite/__init__.py +5 -2
  288. mindspore/ops/composite/base.py +118 -17
  289. mindspore/ops/composite/math_ops.py +9 -48
  290. mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
  291. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
  292. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  293. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  294. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  296. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  297. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  298. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  299. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  300. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  301. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  302. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  303. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  304. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  305. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  306. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  307. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  308. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  309. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  310. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  311. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  312. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  313. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  314. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  315. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  316. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  317. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  318. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  319. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  320. mindspore/ops/deprecated.py +14 -3
  321. mindspore/ops/extend/__init__.py +46 -0
  322. mindspore/ops/extend/array_func.py +152 -0
  323. mindspore/ops/extend/math_func.py +76 -0
  324. mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
  325. mindspore/ops/function/__init__.py +19 -11
  326. mindspore/ops/function/array_func.py +251 -1440
  327. mindspore/ops/function/clip_func.py +12 -13
  328. mindspore/ops/function/debug_func.py +1 -4
  329. mindspore/ops/function/fft_func.py +31 -0
  330. mindspore/ops/function/grad/grad_func.py +24 -17
  331. mindspore/ops/function/image_func.py +27 -21
  332. mindspore/ops/function/linalg_func.py +35 -68
  333. mindspore/ops/function/math_func.py +451 -2360
  334. mindspore/ops/function/nn_func.py +459 -780
  335. mindspore/ops/function/other_func.py +4 -5
  336. mindspore/ops/function/parameter_func.py +5 -93
  337. mindspore/ops/function/random_func.py +24 -80
  338. mindspore/ops/function/sparse_unary_func.py +9 -16
  339. mindspore/ops/function/spectral_func.py +1 -1
  340. mindspore/ops/function/vmap_func.py +14 -14
  341. mindspore/ops/functional.py +56 -62
  342. mindspore/ops/op_info_register.py +22 -19
  343. mindspore/ops/operations/__init__.py +19 -19
  344. mindspore/ops/operations/_grad_ops.py +20 -723
  345. mindspore/ops/operations/_inner_ops.py +178 -286
  346. mindspore/ops/operations/_scalar_ops.py +5 -480
  347. mindspore/ops/operations/_sequence_ops.py +4 -34
  348. mindspore/ops/operations/array_ops.py +99 -2491
  349. mindspore/ops/operations/comm_ops.py +38 -46
  350. mindspore/ops/operations/custom_ops.py +8 -8
  351. mindspore/ops/operations/debug_ops.py +100 -31
  352. mindspore/ops/operations/image_ops.py +1 -217
  353. mindspore/ops/operations/inner_ops.py +3 -38
  354. mindspore/ops/operations/linalg_ops.py +1 -49
  355. mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
  356. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  357. mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
  358. mindspore/ops/operations/math_ops.py +703 -4601
  359. mindspore/ops/operations/nn_ops.py +374 -1748
  360. mindspore/ops/operations/other_ops.py +50 -42
  361. mindspore/ops/operations/random_ops.py +3 -52
  362. mindspore/ops/primitive.py +196 -96
  363. mindspore/ops_generate/__init__.py +27 -0
  364. mindspore/ops_generate/arg_dtype_cast.py +248 -0
  365. mindspore/ops_generate/arg_handler.py +147 -0
  366. mindspore/ops_generate/gen_aclnn_implement.py +266 -0
  367. mindspore/ops_generate/gen_ops.py +1062 -0
  368. mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
  369. mindspore/ops_generate/gen_pyboost_func.py +932 -0
  370. mindspore/ops_generate/gen_utils.py +188 -0
  371. mindspore/ops_generate/op_proto.py +138 -0
  372. mindspore/ops_generate/pyboost_utils.py +364 -0
  373. mindspore/ops_generate/template.py +238 -0
  374. mindspore/parallel/__init__.py +5 -4
  375. mindspore/parallel/_auto_parallel_context.py +21 -76
  376. mindspore/parallel/_cell_wrapper.py +16 -9
  377. mindspore/parallel/_cost_model_context.py +1 -1
  378. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  379. mindspore/parallel/_parallel_serialization.py +30 -46
  380. mindspore/parallel/_ps_context.py +1 -1
  381. mindspore/parallel/_recovery_context.py +1 -1
  382. mindspore/parallel/_tensor.py +19 -7
  383. mindspore/parallel/_transformer/__init__.py +1 -1
  384. mindspore/parallel/_transformer/layers.py +1 -1
  385. mindspore/parallel/_transformer/loss.py +1 -1
  386. mindspore/parallel/_transformer/moe.py +1 -1
  387. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  388. mindspore/parallel/_transformer/transformer.py +1 -1
  389. mindspore/parallel/_utils.py +131 -6
  390. mindspore/parallel/algo_parameter_config.py +6 -6
  391. mindspore/parallel/checkpoint_transform.py +180 -196
  392. mindspore/parallel/cluster/__init__.py +15 -0
  393. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  394. mindspore/parallel/cluster/process_entity/_api.py +345 -0
  395. mindspore/parallel/cluster/process_entity/_utils.py +116 -0
  396. mindspore/parallel/cluster/run.py +139 -0
  397. mindspore/parallel/mpi/__init__.py +1 -1
  398. mindspore/parallel/mpi/_mpi_config.py +1 -1
  399. mindspore/parallel/parameter_broadcast.py +152 -0
  400. mindspore/parallel/shard.py +99 -2
  401. mindspore/profiler/common/util.py +20 -0
  402. mindspore/profiler/envprofiling.py +1 -1
  403. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  404. mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
  405. mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
  406. mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
  407. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
  408. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
  409. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
  410. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
  411. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  412. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
  413. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  414. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  415. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  416. mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
  417. mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
  418. mindspore/profiler/parser/ascend_msprof_generator.py +68 -285
  419. mindspore/profiler/parser/ascend_op_generator.py +75 -42
  420. mindspore/profiler/parser/ascend_timeline_generator.py +293 -135
  421. mindspore/profiler/parser/base_timeline_generator.py +6 -0
  422. mindspore/profiler/parser/framework_parser.py +3 -2
  423. mindspore/profiler/parser/integrator.py +3 -1
  424. mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
  425. mindspore/profiler/parser/msadvisor_parser.py +1 -1
  426. mindspore/profiler/parser/profiler_info.py +5 -0
  427. mindspore/profiler/profiling.py +296 -166
  428. mindspore/rewrite/__init__.py +2 -13
  429. mindspore/rewrite/api/node.py +121 -35
  430. mindspore/rewrite/api/pattern_engine.py +2 -3
  431. mindspore/rewrite/api/scoped_value.py +16 -15
  432. mindspore/rewrite/api/symbol_tree.py +45 -29
  433. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  434. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  435. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  436. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  437. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  438. mindspore/rewrite/common/__init__.py +1 -2
  439. mindspore/rewrite/common/config.py +24 -0
  440. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  441. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  442. mindspore/rewrite/common/namespace.py +118 -0
  443. mindspore/rewrite/node/__init__.py +5 -5
  444. mindspore/rewrite/node/call_function.py +23 -7
  445. mindspore/rewrite/node/cell_container.py +7 -3
  446. mindspore/rewrite/node/control_flow.py +53 -28
  447. mindspore/rewrite/node/node.py +212 -196
  448. mindspore/rewrite/node/node_manager.py +51 -22
  449. mindspore/rewrite/node/node_topological_manager.py +3 -23
  450. mindspore/rewrite/parsers/__init__.py +12 -0
  451. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  452. mindspore/rewrite/parsers/assign_parser.py +635 -413
  453. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  454. mindspore/rewrite/parsers/class_def_parser.py +107 -144
  455. mindspore/rewrite/parsers/constant_parser.py +5 -5
  456. mindspore/rewrite/parsers/container_parser.py +4 -6
  457. mindspore/rewrite/parsers/expr_parser.py +55 -0
  458. mindspore/rewrite/parsers/for_parser.py +31 -98
  459. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  460. mindspore/rewrite/parsers/if_parser.py +28 -10
  461. mindspore/rewrite/parsers/module_parser.py +8 -182
  462. mindspore/rewrite/parsers/parser.py +1 -5
  463. mindspore/rewrite/parsers/parser_register.py +1 -1
  464. mindspore/rewrite/parsers/return_parser.py +5 -10
  465. mindspore/rewrite/parsers/while_parser.py +59 -0
  466. mindspore/rewrite/sparsify/utils.py +1 -1
  467. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  468. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  469. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  470. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  471. mindspore/run_check/_check_version.py +6 -14
  472. mindspore/run_check/run_check.py +1 -1
  473. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  474. mindspore/scipy/__init__.py +2 -1
  475. mindspore/scipy/fft.py +133 -0
  476. mindspore/scipy/linalg.py +140 -55
  477. mindspore/scipy/ops.py +15 -71
  478. mindspore/scipy/ops_grad.py +5 -34
  479. mindspore/scipy/optimize/line_search.py +2 -2
  480. mindspore/scipy/optimize/minimize.py +1 -1
  481. mindspore/train/__init__.py +3 -2
  482. mindspore/train/_utils.py +178 -4
  483. mindspore/train/amp.py +167 -245
  484. mindspore/train/callback/_backup_and_restore.py +4 -4
  485. mindspore/train/callback/_callback.py +4 -4
  486. mindspore/train/callback/_checkpoint.py +39 -13
  487. mindspore/train/callback/_early_stop.py +2 -2
  488. mindspore/train/callback/_landscape.py +14 -8
  489. mindspore/train/callback/_loss_monitor.py +2 -2
  490. mindspore/train/callback/_on_request_exit.py +2 -2
  491. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  492. mindspore/train/callback/_summary_collector.py +7 -7
  493. mindspore/train/callback/_time_monitor.py +2 -2
  494. mindspore/train/data_sink.py +1 -1
  495. mindspore/train/dataset_helper.py +13 -4
  496. mindspore/train/loss_scale_manager.py +2 -2
  497. mindspore/train/metrics/accuracy.py +7 -7
  498. mindspore/train/metrics/confusion_matrix.py +8 -6
  499. mindspore/train/metrics/cosine_similarity.py +6 -4
  500. mindspore/train/metrics/error.py +2 -2
  501. mindspore/train/metrics/metric.py +3 -3
  502. mindspore/train/metrics/perplexity.py +2 -1
  503. mindspore/train/metrics/topk.py +2 -2
  504. mindspore/train/mind_ir_pb2.py +75 -6
  505. mindspore/train/model.py +24 -22
  506. mindspore/train/serialization.py +256 -132
  507. mindspore/train/summary/summary_record.py +51 -28
  508. mindspore/train/train_thor/convert_utils.py +3 -3
  509. mindspore/version.py +1 -1
  510. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
  511. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +514 -1060
  512. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
  513. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  514. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  515. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  516. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  517. mindspore/config/super_bar_config.json +0 -544
  518. mindspore/gen_ops.py +0 -273
  519. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  520. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  521. mindspore/nn/layer/flash_attention.py +0 -189
  522. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  523. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  524. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  525. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  526. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  527. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  528. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  529. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  530. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  531. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  532. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  533. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  534. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  535. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  536. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  537. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  538. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  539. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  540. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  541. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  542. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  543. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  544. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  545. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  546. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  547. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  548. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  549. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  550. mindspore/ops/_op_impl/tbe/add.py +0 -42
  551. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  552. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  553. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  554. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  555. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  556. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  557. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  558. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  559. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  560. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  561. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  562. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  563. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  564. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  565. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  566. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  567. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  568. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  569. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  570. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  571. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  572. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  573. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  574. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  575. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  576. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  577. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  578. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  579. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  580. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  581. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  582. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  583. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  584. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  585. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  586. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  587. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  588. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  589. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  590. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  591. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  592. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  593. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  594. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  595. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  596. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  597. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  598. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  599. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  600. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  601. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  602. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  603. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  604. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  605. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  606. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  607. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  608. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  609. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  610. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  611. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  612. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  613. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  614. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  615. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  616. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  617. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  618. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  619. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  620. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  621. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  622. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  623. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  624. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  625. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  626. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  627. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  628. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  629. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  630. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  631. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  632. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  633. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  634. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  635. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  636. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  637. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  638. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  639. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  640. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  641. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  642. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  643. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  644. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  645. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  646. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  647. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  648. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  649. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  650. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  651. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  652. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  653. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  654. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  655. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  656. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  657. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  658. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  659. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  660. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  661. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  662. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  663. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  664. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  665. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  666. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  667. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  668. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  669. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  670. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  671. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  672. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  673. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  674. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  675. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  676. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  677. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  678. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  679. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  680. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  681. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  682. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  683. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  684. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  685. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  686. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  687. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  688. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  689. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  690. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  691. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  692. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  693. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  694. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  695. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  696. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  697. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  698. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  699. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  700. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  701. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  702. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  703. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  704. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  705. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  706. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  707. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  708. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  709. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  710. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  711. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  712. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  713. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  714. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  715. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  716. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  717. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  718. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  719. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  720. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  721. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  722. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  723. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  724. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  725. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  726. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  727. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  728. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  729. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  730. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  731. mindspore/ops/_op_impl/tbe/div.py +0 -41
  732. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  733. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  734. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  735. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  736. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  737. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  738. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  739. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  740. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  741. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  742. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  743. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  744. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  745. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  746. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  747. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  748. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  749. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  750. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  751. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  752. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  753. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  754. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  755. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  756. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  757. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  758. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  759. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  760. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  761. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  762. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  763. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  764. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  765. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  766. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  767. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  768. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  769. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  770. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  771. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  772. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  773. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  774. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  775. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  776. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  777. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  778. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  779. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  780. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  781. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  782. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  783. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  784. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  785. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  786. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  787. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  788. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  789. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  790. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  791. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  792. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  793. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  794. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  795. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  796. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  797. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  798. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  799. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  800. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  801. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  802. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  803. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  804. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  805. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  806. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  807. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  808. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  809. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  810. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  811. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  812. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  813. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  814. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  815. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  816. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  817. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  818. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  819. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  820. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  821. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  822. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  823. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  824. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  825. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  826. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  827. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  828. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  829. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  830. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  831. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  832. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  833. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  834. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  835. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  836. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  837. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  838. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  839. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  840. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  841. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  842. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  843. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  844. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  845. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  846. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  847. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  848. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  849. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  850. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  851. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  852. mindspore/ops/_op_impl/tbe/less.py +0 -41
  853. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  854. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  855. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  856. mindspore/ops/_op_impl/tbe/log.py +0 -40
  857. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  858. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  859. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  860. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  861. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  862. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  863. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  864. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  865. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  866. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  867. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  868. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  869. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  870. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  871. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  872. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  873. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  874. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  875. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  876. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  877. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  878. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  879. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  880. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  881. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  882. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  883. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  884. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  885. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  886. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  887. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  888. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  889. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  890. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  891. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  892. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  893. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  894. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  895. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  896. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  897. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  898. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  899. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  900. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  901. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  902. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  903. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  904. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  905. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  906. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  907. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  908. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  909. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  910. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  911. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  912. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  913. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  914. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  915. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  916. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  917. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  918. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  919. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  920. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  921. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  922. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  923. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  924. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  925. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  926. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  927. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  928. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  929. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  930. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  931. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  932. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  933. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  934. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  935. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  936. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  937. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  938. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  939. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  940. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  941. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  942. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  943. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  944. mindspore/ops/_op_impl/tbe/range.py +0 -39
  945. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  946. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  947. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  948. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  949. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  950. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  951. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  952. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  953. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  954. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  955. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  956. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  957. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  958. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  959. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  960. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  961. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  962. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  963. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  964. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  965. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  966. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  967. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  968. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  969. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  970. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  971. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  972. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  973. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  974. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  975. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  976. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  977. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  978. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  979. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  980. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  981. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  982. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  983. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  984. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  985. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  986. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  987. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  988. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  989. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  990. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  991. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  992. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  993. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  994. mindspore/ops/_op_impl/tbe/round.py +0 -38
  995. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  996. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  997. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  998. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  999. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1000. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1001. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1002. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1003. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1004. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1005. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1006. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1007. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1008. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1009. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1010. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1011. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1012. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1013. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1014. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1015. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1016. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1017. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1018. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1019. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1020. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1021. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1022. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1023. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1024. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1025. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1026. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1027. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1028. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1029. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1030. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1031. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1032. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1033. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1034. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1035. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1036. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1037. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1038. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1039. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1040. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1041. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1042. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1043. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1044. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1045. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1046. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1047. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1048. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1049. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1050. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1051. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1052. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1053. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1054. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1055. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1057. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1058. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1060. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1061. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1062. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1063. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1064. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1065. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1066. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1067. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1068. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1069. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1070. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1071. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1072. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1073. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1074. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1075. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1076. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1077. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1078. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1079. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1080. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1081. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1082. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1083. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1084. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1085. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1086. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1087. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1088. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1089. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1090. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1091. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1092. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1093. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1094. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1095. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1096. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1097. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1098. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1099. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1100. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1101. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1102. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1103. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1104. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1105. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1106. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1107. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1108. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1109. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1110. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1111. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1112. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1113. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1114. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1115. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1116. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1117. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1118. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1119. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1120. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1121. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1122. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1123. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1124. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1125. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1126. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1127. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1128. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1129. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1130. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1131. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1132. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1133. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1134. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1135. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1136. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1137. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1138. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1139. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1140. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1141. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1142. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1143. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1144. mindspore/ops/_tracefunc.py +0 -241
  1145. mindspore/ops/arg_dtype_cast.py +0 -54
  1146. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1147. mindspore/rewrite/ast_creator_register.py +0 -37
  1148. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1149. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1150. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1151. mindspore/rewrite/namespace.py +0 -53
  1152. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
  1153. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
mindspore/context.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2020-2023 Huawei Technologies Co., Ltd
+ # Copyright 2020-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -50,6 +50,10 @@ STRICT = 0
  COMPATIBLE = 1
  LAX = 2

+ # Enumerate for the property 'debug_level'.
+ RELEASE = 0
+ DEBUG = 1
+

  def _make_directory(path):
  """Make directory."""
@@ -161,6 +165,7 @@ class _Context:
  self._context_switches = _ContextSwitchInfo(False)
  self._context_handle = MSContext.get_instance()
  self._support_binary = False
+ self._mode = PYNATIVE_MODE

  def __getattribute__(self, attr):
  value = object.__getattribute__(self, attr)
@@ -176,7 +181,7 @@ class _Context:

  def get_mode(self):
  """Get current mode."""
- return self.get_param(ms_ctx_param.mode)
+ return self._mode

  def set_mode(self, mode):
  """
@@ -204,6 +209,7 @@
  raise ValueError(f"For 'context.set_context', the argument 'mode' should be context.GRAPH_MODE (0) "
  f"or context.PYNATIVE_MODE (1), but got {mode}.")
  self.set_param(ms_ctx_param.mode, mode)
+ self._mode = mode

  def set_jit_syntax_level(self, level):
  """"Set the JIT syntax level for graph compiling"""
@@ -212,6 +218,14 @@
  f"or context.LAX, but got {level}.")
  self.set_param(ms_ctx_param.jit_syntax_level, level)

+ def set_debug_level(self, level):
+ """"Set the debug level for graph compiling"""
+ if level != RELEASE and level != DEBUG:
+ raise ValueError(f"For 'context.set_debug_level', the argument 'level' should be context.RELEASE "
+ f"or context.DEBUG, but got {level}.")
+ self.set_param(ms_ctx_param.debug_level, level)
+
+
  def set_memory_optimize_level(self, memory_optimize_level):
  """
  The memory optimize level, support "O0", "O1".
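The `set_debug_level` setter added above is driven through `mindspore.set_context`, using the new `RELEASE`/`DEBUG` enums. A minimal usage sketch (values mirror the docstring and examples later in this diff):

    import mindspore as ms

    # Keep more compile-time records while chasing a graph compilation error.
    ms.set_context(mode=ms.GRAPH_MODE, debug_level=ms.DEBUG)

    # Default behaviour: discard extra debug information for faster compiling.
    ms.set_context(debug_level=ms.RELEASE)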
@@ -268,11 +282,13 @@
  "allow_mix_precision_fp16" and "allow_mix_precision_bf16".
  - jit_compile (bool): ``False`` and ``True``.
  - atomic_clean_policy (int): ``0`` and ``1``. Default: ``1`` .
+ - op_precision_mode (str): precision mode config file path.
+ - ge_options (dict): Global or session CANN options.
  - exception_dump (str): Enable exception dump for Ascend operators. ``"0"`` , ``"1"`` and ``"2"``.
  Default: ``"2"`` .
- - op_precision_mode (str): config file path.
  - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file.
  If its value is None or '', it does not take effect. Default None.
+ - host_scheduling_max_threshold(int): The host scheduling max threshold.
  """
  ascend_cfg_modes = {
  'precision_mode': ["force_fp16", "allow_fp32_to_fp16", "allow_mix_precision", "must_keep_origin_dtype",
@@ -284,7 +300,9 @@
  'conv_allow_hf32': [True, False],
  'exception_dump': ["0", "1", "2"],
  'op_precision_mode': (str,),
+ 'ge_options': (dict,),
  'parallel_speed_up_json_path': (str, None),
+ 'host_scheduling_max_threshold': (int,),
  'topo_order': (dict,)
  }
  ascend_cfg_setters = {
@@ -295,7 +313,9 @@
  'conv_allow_hf32': self._get_ascend_config_setter('conv_allow_hf32', lambda v: "1" if v else "0"),
  'exception_dump': self._get_ascend_config_setter('exception_dump'),
  'op_precision_mode': self._set_op_precision_mode,
+ 'ge_options': self._set_ge_options,
  'parallel_speed_up_json_path': self._set_speedup_config_path,
+ 'host_scheduling_max_threshold': self._get_ascend_config_setter('host_scheduling_max_threshold', str),
  'topo_order': self._set_topo_order
  }
  ascend_cfg_set = tuple(ascend_cfg_modes.keys())
@@ -475,9 +495,10 @@

  def set_mempool_block_size(self, mempool_block_size):
  """Set the block size of memory pool."""
- if _get_mode() == GRAPH_MODE:
+ is_force_kbk = os.getenv("GRAPH_OP_RUN")
+ if _get_mode() == GRAPH_MODE and is_force_kbk != "1":
  logger.warning("Graph mode doesn't support to set parameter 'mempool_block_size' of context currently, "
- "you can use context.set_context to set pynative mode.")
+ "you can use context.set_context to set pynative mode or set env GRAPH_OP_RUN=1.")
  return
  if not Validator.check_str_by_regular(mempool_block_size, _RE_PATTERN):
  raise ValueError("For 'context.set_context', the argument 'mempool_block_size' should be in "
@@ -563,6 +584,7 @@
  'deterministic': set_deterministic,
  'ascend_config': set_ascend_config,
  'jit_syntax_level': set_jit_syntax_level,
+ 'debug_level': set_debug_level,
  'gpu_config': set_gpu_config,
  'aoe_config': set_aoe_config,
  }
@@ -620,6 +642,28 @@
  f"got '{op_precision_path}'.")
  self.set_param(ms_ctx_param.op_precision_mode, ascend_value)

+ def _set_ge_options(self, ge_options):
+ """Set ge options."""
+ for level, options in ge_options.items():
+ if level not in ['global', 'session']:
+ raise ValueError(f"For 'ascend_config', the key of ge_options must be one of "
+ f"('global', 'session'), but got {level}.")
+
+ if not isinstance(options, dict):
+ raise TypeError(f"For 'ge_options', the type of {level} options must be dict, "
+ f"but got {type(options)}. The error options: {options}.")
+
+ for key, value in options.items():
+ if not isinstance(key, str):
+ raise TypeError(f"For 'ge_options', the type of key and value must be str, "
+ f"but got {type(key)}. The error key is {key}.")
+ if not isinstance(value, str):
+ raise TypeError(f"For 'ge_options', the type of key and value must be str, "
+ f"but got {type(value)}. The error value is {value}")
+
+ options_str = json.dumps(ge_options)
+ self.set_param(ms_ctx_param.ge_options, options_str)
+
  def _set_topo_order(self, topo_order):
  """
  Set topo order.
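The `_set_ge_options` validator above accepts a two-level dict keyed by 'global' and 'session', requires string keys and values inside each level, and forwards the whole structure to the backend as JSON. A minimal usage sketch, mirroring the docstring example that appears later in this diff:

    import mindspore as ms

    ms.set_context(ascend_config={
        "ge_options": {
            "global": {"ge.opSelectImplmode": "high_precision"},   # global CANN option
            "session": {"ge.exec.atomicCleanPolicy": "0"},          # session-level CANN option
        }
    })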
@@ -652,23 +696,31 @@
  f"{speedup_config_real_path} does not exist, please check whether the "
  f"'parallel_speed_up_json_path' is correct.")
  try:
- valid_option = {"recompute_comm_overlap": ms_ctx_param.recompute_comm_overlap,
- "matmul_grad_comm_overlap": ms_ctx_param.matmul_grad_comm_overlap,
- "enable_task_opt": ms_ctx_param.enable_task_opt,
- "enable_grad_comm_opt": ms_ctx_param.enable_grad_comm_opt,
- "interleaved_matmul_comm": ms_ctx_param.interleaved_matmul_comm,
- "enable_opt_shard_comm_opt": ms_ctx_param.enable_opt_shard_comm_opt,
- "interleaved_layernorm_comm": ms_ctx_param.interleaved_layernorm_comm}
+ valid_option = {"recompute_comm_overlap": (ms_ctx_param.recompute_comm_overlap, bool),
+ "matmul_grad_comm_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
+ "enable_task_opt": (ms_ctx_param.enable_task_opt, bool),
+ "enable_grad_comm_opt": (ms_ctx_param.enable_grad_comm_opt, bool),
+ "interleaved_matmul_comm": (ms_ctx_param.interleaved_matmul_comm, bool),
+ "enable_opt_shard_comm_opt": (ms_ctx_param.enable_opt_shard_comm_opt, bool),
+ "enable_begin_end_inline_opt": (ms_ctx_param.enable_begin_end_inline_opt, bool),
+ "enable_concat_eliminate_opt": (ms_ctx_param.enable_concat_eliminate_opt, bool),
+ "interleaved_layernorm_comm": (ms_ctx_param.interleaved_layernorm_comm, bool),
+ "compute_communicate_fusion_level":
+ (ms_ctx_param.compute_communicate_fusion_level, int),
+ "enable_flash_attention_load_balance":
+ (ms_ctx_param.enable_flash_attention_load_balance, bool)}
  with open(speedup_config_real_path, 'r') as f:
  speedup_config = json.load(f)
- for k, v in speedup_config.items():
- if not isinstance(k, str):
- raise TypeError("key {} is not a str".format(k))
- if k not in valid_option:
- raise ValueError("key {} should be one of {}.".format(k, valid_option.keys()))
- if not isinstance(v, bool):
- raise TypeError("value {} is not a bool".format(v))
- self.set_param(valid_option.get(k), v)
+ for key, value in speedup_config.items():
+ if not isinstance(key, str):
+ raise TypeError("key {} is not a str".format(key))
+ if key not in valid_option:
+ raise ValueError("key {} should be one of {}.".format(key, valid_option.keys()))
+ set_func, valid_type = valid_option.get(key)
+ if not isinstance(value, valid_type):
+ raise TypeError(f"The value type of {key} must be {valid_type}, "
+ f"but got value is {value} and type is {type(value)}.")
+ self.set_param(set_func, value)
  except (TypeError, ValueError) as exo:
  raise ValueError(str(exo) + "\nFor 'context.set_context', "
  "open or load the 'speedup_config_path' file {} "
@@ -705,7 +757,7 @@ def _context():
  auto_parallel_search_mode=str, search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
  strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool, enable_alltoall=bool,
  all_reduce_fusion_config=list, pipeline_stages=int, pipeline_segments=int,
- pipeline_config=dict, parallel_optimizer_config=dict,
+ pipeline_result_broadcast=bool, parallel_optimizer_config=dict,
  comm_fusion=dict, strategy_ckpt_config=dict)
  def set_auto_parallel_context(**kwargs):
  r"""
@@ -730,11 +782,14 @@ def set_auto_parallel_context(**kwargs):
  parallel_mode parameter_broadcast
  all_reduce_fusion_config strategy_ckpt_load_file
  enable_parallel_optimizer strategy_ckpt_save_file
- parallel_optimizer_config dataset_strategy
- enable_alltoall pipeline_stages
- pipeline_config auto_parallel_search_mode
+ parallel_optimizer_config full_batch
+ enable_alltoall dataset_strategy
+ \ pipeline_stages
+ \ pipeline_result_broadcast
+ \ auto_parallel_search_mode
  \ comm_fusion
  \ strategy_ckpt_config
+ \ group_ckpt_save_file
  =========================== ===========================

  Args:
@@ -744,6 +799,8 @@ def set_auto_parallel_context(**kwargs):
  "stand_alone" do not support gradients_mean. Default: ``False`` .
  gradient_fp32_sync (bool): Run allreduce of gradients in fp32. "stand_alone", "data_parallel"
  and "hybrid_parallel" do not support gradient_fp32_sync. Default: ``True`` .
+ loss_repeated_mean (bool) - Indicates whether the mean operator is executed backwards when the
+ calculation is repeated. Default: ``True`` .
  parallel_mode (str): There are five kinds of parallel modes, ``"stand_alone"`` , ``"data_parallel"`` ,
  ``"hybrid_parallel"`` , ``"semi_auto_parallel"`` and ``"auto_parallel"`` . Note the pynative mode
  only supports the ``"stand_alone"`` and ``"data_parallel"`` mode. Default: ``"stand_alone"`` .
@@ -758,15 +815,16 @@ def set_auto_parallel_context(**kwargs):

  - auto_parallel: Achieving parallelism automatically.
  search_mode (str): There are three kinds of shard strategy search modes: ``"recursive_programming"`` ,
- ``"dynamic_programming"`` and ``"sharding_propagation"`` . Default: ``"recursive_programming"`` .
+ ``"sharding_propagation"`` and ``"dynamic_programming"`` (Not recommended).
+ Default: ``"recursive_programming"`` .

  - recursive_programming: Recursive programming search mode. In order to obtain optimal performance,
  it is recommended that users set the batch size to be greater than or equal to the product of
  the number of devices and the number of multi-copy parallelism.

- - dynamic_programming: Dynamic programming search mode.
-
  - sharding_propagation: Propagate shardings from configured ops to non-configured ops.
+
+ - dynamic_programming: Dynamic programming search mode.
  auto_parallel_search_mode (str): This is the old version of 'search_mode'. Here, remaining this attribute is
  for forward compatibility, and this attribute will be deleted in a future MindSpore version.
  parameter_broadcast (bool): Whether to broadcast parameters before training. Before training, in order to have
@@ -801,12 +859,8 @@
  distributed alone in the pipeline. The total devices will be divided into 'pipeline_stags'
  stages.
  Default: ``1`` .
- pipeline_config (dict): A dict contains the keys and values for setting the pipeline parallelism configuration.
- It supports the following keys:
-
- - pipeline_interleave(bool): Indicates whether to enable the interleaved execution mode.
- - pipeline_scheduler(str): Indicates the scheduling mode for pipeline parallelism. Only support
- ``gpipe/1f1b``.
+ pipeline_result_broadcast (bool): A switch that broadcast the last stage result to all other stage in pipeline
+ parallel inference. Default: ``False`` .
  parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
  configure. The configure provides more detailed behavior control about parallel training
  when parallel optimizer is enabled. The configure will be effective when we use
@@ -866,14 +920,15 @@
  - load_file (str): The path to load parallel strategy checkpoint. If the file name extension is
  `.json`, the file is loaded in JSON format. Otherwise, the file is loaded in ProtoBuf
  format.
- Default: ''
+ Default: ``''``

  - save_file (str): The path to save parallel strategy checkpoint. If the file name extension is
  `.json`, the file is saved in JSON format. Otherwise, the file is saved in ProtoBuf format.
- Default: ''
+ Default: ``''``

  - only_trainable_params (bool): Only save/load the strategy information for trainable parameter.
  Default: ``True`` .
+ group_ckpt_save_file (str): The path to save parallel group checkpoint.

  Raises:
  ValueError: If input key is not attribute in auto parallel context.
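The newly documented `group_ckpt_save_file` option is passed like the other checkpoint paths; a hedged one-liner (the file name is illustrative only):

    import mindspore as ms
    ms.set_auto_parallel_context(group_ckpt_save_file="./group_info.pb")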
@@ -885,8 +940,8 @@
  >>> ms.set_auto_parallel_context(gradients_mean=True)
  >>> ms.set_auto_parallel_context(gradient_fp32_sync=False)
  >>> ms.set_auto_parallel_context(parallel_mode="auto_parallel")
- >>> ms.set_auto_parallel_context(search_mode="dynamic_programming")
- >>> ms.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
+ >>> ms.set_auto_parallel_context(search_mode="recursive_programming")
+ >>> ms.set_auto_parallel_context(auto_parallel_search_mode="recursive_programming")
  >>> ms.set_auto_parallel_context(parameter_broadcast=False)
  >>> ms.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
  >>> ms.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
@@ -895,6 +950,7 @@
  >>> ms.set_auto_parallel_context(enable_alltoall=False)
  >>> ms.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
  >>> ms.set_auto_parallel_context(pipeline_stages=2)
+ >>> ms.set_auto_parallel_context(pipeline_stages=2, pipeline_result_broadcast=True)
  >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24,
  ... "optimizer_weight_shard_size": 2}
  >>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
@@ -945,6 +1001,7 @@ def reset_auto_parallel_context():
  - enable_parallel_optimizer: False.
  - enable_alltoall: False.
  - pipeline_stages: 1.
+ - pipeline_result_broadcast: False.
  - fusion_threshold: 64.

  Examples:
@@ -1043,7 +1100,7 @@ def _check_target_specific_cfgs(device, arg_key):
  max_device_memory=str, print_file_path=str, max_call_depth=int, env_config_path=str,
  graph_kernel_flags=str, save_compile_cache=bool, runtime_num_threads=int, load_compile_cache=bool,
  grad_for_scalar=bool, pynative_synchronize=bool, mempool_block_size=str, disable_format_transform=bool,
- op_timeout=int, deterministic=str, ascend_config=dict, jit_syntax_level=int,
+ op_timeout=int, deterministic=str, ascend_config=dict, jit_syntax_level=int, debug_level=int,
  jit_enable_inplace_ops=bool, gpu_config=dict)
  def set_context(**kwargs):
  """
@@ -1093,6 +1150,8 @@ def set_context(**kwargs):
  | | reserve_class_name_in_scope | CPU/GPU/Ascend |
  | +------------------------------+----------------------------+
  | | pynative_synchronize | CPU/GPU/Ascend |
+ | +------------------------------+----------------------------+
+ | | debug_level | CPU/GPU/Ascend |
  +-------------------------+------------------------------+----------------------------+
  | Executive Control | mode | CPU/GPU/Ascend |
  | +------------------------------+----------------------------+
@@ -1145,12 +1204,16 @@ def set_context(**kwargs):
  and max_device_memory. 'max_device_memory' should be set before the program runs.
  variable_memory_max_size (str): This parameter is deprecated, and will be removed in a future version.
  Please use parameter 'max_device_memory' instead.
- mempool_block_size (str): Set the size of the memory pool block in PyNative mode for devices.
+ mempool_block_size (str): Set the size of the memory pool block in PyNative mode or GRAPH_OP_RUN=1 for devices.
  The format is "xxGB". Default: ``"1GB"`` . Minimum size is "1G". The actual used memory block size is the
  minimum of the available memory of the device and mempool_block_size.
  op_timeout (int): Set the maximum duration of executing an operator in seconds.
- If the execution time exceeds this value, system will terminate the task. 0 means endless wait.
- Default: ``1900`` .
+ If the execution time exceeds this value, system will terminate the task.
+ 0 means endless wait. The defaults for AI Core and AICPU operators vary on different hardware.
+ For more information,
+ please refer to `Ascend Community
+ <https://www.hiascend.com/>`_.
+ Default: ``900`` .
  save_graphs (bool or int): Whether to save intermediate compilation graphs. Default: ``0`` .
  Available values are:

@@ -1162,7 +1225,7 @@
  When the `save_graphs` attribute is set as ``True`` , ``1`` , ``2`` or ``3`` , attribute of
  `save_graphs_path` is used to set the intermediate compilation graph storage path. By default, the graphs
  are saved in the current directory.
- save_graphs_path (str): Path to save graphs. Default: ".".
+ save_graphs_path (str): Path to save graphs. Default: ``"."``.
  If the specified directory does not exist, the system will automatically create the directory.
  During distributed training, graphs will be saved to the directory of
  `save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
@@ -1226,7 +1289,7 @@
  If enable_graph_kernel is set to ``True`` , acceleration can be enabled.
  For details of graph kernel fusion, please check
  `Enabling Graph Kernel Fusion
- <https://www.mindspore.cn/tutorials/experts/en/r2.2/optimize/graph_fusion_engine.html>`_.
+ <https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/optimize/graph_fusion_engine.html>`_.
  graph_kernel_flags (str):
  Optimization options of graph kernel fusion, and the priority is higher when it conflicts
  with enable_graph_kernel. Only for experienced users.
@@ -1282,7 +1345,7 @@
  the compile cache is loaded. Note that only limited automatic detection for the changes of
  python scripts is supported by now, which means that there is a correctness risk. Default: ``False`` .
  This is an experimental prototype that is subject to change and/or deletion.
- compile_cache_path (str): Path to save the compile cache. Default: ".".
+ compile_cache_path (str): Path to save the compile cache. Default: ``"."``.
  If the specified directory does not exist, the system will automatically create the directory.
  The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
  the ID of the current device in the cluster.
@@ -1299,16 +1362,18 @@
  of the interfaces would be compiled by MindSpore to the interfaces definition .py file that should be
  guaranteed to be writable. Then compile the .py file to the .pyc or .so file, and could run in Graph mode.
  memory_optimize_level (str): The memory optimize level.
- Default: O0. The value must be in ['O0', 'O1'].
+ On Ascend hardware platform, default: ``O1``, on other hardware platforms, default: ``O0``.
+ The value must be in ['O0', 'O1'].

- - O0: priority performance option, disable SOMAS (Safe Optimized Memory Allocation Solver).
- - O1: priority memory option, enable SOMAS.
+ - O0: priority performance option, disable SOMAS (Safe Optimized Memory Allocation Solver)
+ and some other memory optimizations.
+ - O1: priority memory option, enable SOMAS and some other memory optimizations.
  memory_offload (str): Whether to enable the memory offload function. When it is enabled, the idle data will be
  temporarily copied to the host side in the case of insufficient device memory. The value must be in the
  range of ['ON', 'OFF'], and the default value is ``'OFF'`` .

  - ON: Enable the memory Offload function. On Ascend hardware platform, this parameter does not take effect
- when the environment variable "GRAPH_OP_RUN=1" is not set; This parameter does not take effect when
+ when jit_level of JitConfig is not set 'O0'; This parameter does not take effect when
  memory_optimize_level is set 'O1'.
  - OFF: Turn off the memory Offload function.
  ascend_config (dict): Set the parameters specific to Ascend hardware platform. It is not set by default.
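Because the default for `memory_optimize_level` now depends on the backend (``O1`` on Ascend, ``O0`` elsewhere), setting it explicitly keeps behaviour comparable across platforms; a short hedged sketch:

    import mindspore as ms
    # Favour execution speed over memory reuse on every backend (assumes device memory is plentiful).
    ms.set_context(memory_optimize_level="O0")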
@@ -1334,7 +1399,10 @@
  - allow_mix_precision_bf16: Automatic mixing precision, facing the whole network operator, according to
  the built-in optimization strategy, automatically reduces the precision of some operators to bfloat16.

- - jit_compile (bool): Whether to select online compilation. the default value is based on CANN.
+ - jit_compile (bool): Whether to select online compilation. When set to 'True', online compilation is
+ prioritized. When set to 'False', compiled operator binary files are prioritized to improve compilation
+ performance. The default settings are online compilation for static shape, and compiled operator binary
+ files for dynamic shape.
  - atomic_clean_policy (int): The policy for cleaning memory occupied by atomic operators in the network.
  Default: ``1`` .

@@ -1354,20 +1422,50 @@
  for ``"2"``, inputs will be dumped for AICore exception operators. Default: ``"2"`` .
  - op_precision_mode (str): Path to config file of op precision mode. For detailed information, please refer
  to `Ascend community <https://www.hiascend.com/>`_ .
+ - ge_options (dict): Set options for CANN. The options are divided into two categories: global and session.
+ This is an experimental prototype that is subject to change and/or deletion.
+ For detailed information, please refer to `Ascend community <https://www.hiascend.com/document/detail/zh/canncommercial/70RC1/inferapplicationdev/graphdevg/atlasgeapi_07_0119.html>`_ .
+ The configuration options in `ge_options` may be duplicated with the options in `ascend_config`. If the
+ same configuration options are set in both `ascend_config` and `ge_options`, the one set in `ge_options`
+ shall prevail.
+
+ - global (dict): Set global options.
+ - session (dict): Set session options.
+
  - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file, configuration
  can refer to `parallel_speed_up.json
- <https://gitee.com/mindspore/mindspore/blob/r2.2/config/parallel_speed_up.json>`_ .
+ <https://gitee.com/mindspore/mindspore/blob/r2.3.q1/config/parallel_speed_up.json>`_ .
  If its value is None or '', it does not take effect. Default None.

  - recompute_comm_overlap (bool): Enable overlap between recompute ops and communication ops if True.
  Default: False.
- - matmul_grad_comm_overlap (bool): Enable overlap between grad ops and communication ops if True.
- Default: False.
+ - matmul_grad_comm_overlap (bool): Enable overlap between dw matmul and
+ tensor parallel communication ops if True. Default: False.
  - enable_task_opt (bool): Enable the optimization of the number of tasks for each communication if True.
  Default: False.
- - interleaved_matmul_comm (bool): Enable interleaved optimization of Matmul-Comm if True. Default: False.
- - interleaved_layernorm_comm (bool): Enable interleaved optimization of LayerNorm-Comm if True.
+ - enable_grad_comm_opt (bool): Enable overlap between dx ops and data parallel communication ops if True.
+ Currently, do not support
+ `LazyInline <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore/mindspore.lazy_inline.html>`
+ Default: False.
+ - enable_opt_shard_comm_opt (bool): Enable overlap between forward ops
+ and optimizer parallel allgather communication if True. Currently, do not support
+ `LazyInline <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore/mindspore.lazy_inline.html>`
  Default: False.
+ - compute_communicate_fusion_level (int): Enable the fusion between compute and communicate.
+ Default: ``0``.
+
+ - 0: Disable fusion.
+
+ - 1: Apply fusion to forward nodes.
+
+ - 2: Apply fusion to backward nodes.
+
+ - 3: Apply fusion to all nodes.
+ - host_scheduling_max_threshold(int): The max threshold to control whether the dynamic shape process is
+ used when run the static graph, the default value is 0. When the number of operations in the static graph
+ is less than the max threshold, this graph will be executed in dynamic shape process. In large model
+ scenarios, this approach can save stream resources. If the number of operations in the static graph is
+ greater than the maximum threshold, this graph will be executed in original static process.

  jit_syntax_level (int): Set JIT syntax level for graph compiling, triggered by GRAPH_MODE and @jit decorator.
  The value must be ``STRICT`` or ``LAX`` . Default: ``LAX`` . All levels support all backends.
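Like the other new keys, `host_scheduling_max_threshold` is routed through `ascend_config`; a hedged sketch (the threshold value is arbitrary, for illustration only):

    import mindspore as ms
    # Static graphs with fewer operations than the threshold go through the dynamic-shape scheduler.
    ms.set_context(ascend_config={"host_scheduling_max_threshold": 100})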
@@ -1378,6 +1476,12 @@ def set_context(**kwargs):
  affected and not optimal. Cannot be used for MindIR load and export due to some syntax that may not be
  able to be exported.

+ debug_level (int): Set config for debugging. Default value: ``RELEASE``.
+
+ - ``RELEASE``: Used for normally running, and some debug information will be discard to get a better
+ compiling performance.
+ - ``DEBUG``: Used for debugging when errors occur, more information will be record in compiling process.
+
  gpu_config (dict): Set the parameters specific to gpu hardware platform. It is not set by default.
  Currently, only setting `conv_fprop_algo` and `conv_dgrad_algo` and `conv_wgrad_algo` and `conv_allow_tf32`
  and `matmul_allow_tf32` are supported on GPU hardware platform.
@@ -1482,8 +1586,11 @@
  >>> ms.set_context(memory_offload='ON')
  >>> ms.set_context(deterministic='ON')
  >>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True,
- ... "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file"})
+ ... "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file",
+ ... "ge_options": {"global": {"ge.opSelectImplmode": "high_precision"},
+ ... "session": {"ge.exec.atomicCleanPolicy": "0"}}})
  >>> ms.set_context(jit_syntax_level=ms.STRICT)
+ >>> ms.set_context(debug_level=ms.DEBUG)
  >>> ms.set_context(gpu_config={"conv_fprop_algo": "performance", "conv_allow_tf32": True,
  ... "matmul_allow_tf32": True})
  """
@@ -1502,7 +1609,7 @@
  "For details, please see the interface parameter API comments")
  continue
  if key in ('precision_mode', 'jit_compile', 'atomic_clean_policy', 'matmul_allow_hf32', 'conv_allow_hf32',
- 'op_precision_mode'):
+ 'op_precision_mode', 'host_scheduling_max_threshold', 'ge_options'):
  raise ValueError(f"Please set '{key}' through parameter ascend_config")
  if key == 'save_graphs':
  if value is True:
@@ -1514,6 +1621,9 @@
  if key == 'jit_syntax_level' and value not in (STRICT, COMPATIBLE, LAX):
  raise ValueError(f"For 'jit_syntax_level', the value should be context.STRICT"
  f" or context.LAX, but got {value}.")
+ if key == 'debug_level' and value not in (RELEASE, DEBUG):
+ raise ValueError(f"For 'debug_level', the value should be context.DEBUG"
+ f" or context.RELEASE, but got {value}.")
  if not _check_target_specific_cfgs(device, key):
  continue
  if hasattr(ctx, key):
@@ -1668,9 +1778,7 @@ def get_ps_context(attr_key):

  def reset_ps_context():
  """
- Reset parameter server training mode context attributes to the default values:
-
- - enable_ps: False.
+ Reset parameter server training mode context attributes to the default values.

  Meaning of each field and its default value refer to :func:`mindspore.set_ps_context`.

@@ -21,7 +21,7 @@ Besides, this module provides APIs to sample data while loading.

  We can enable cache in most of the dataset with its key arguments 'cache'. Please notice that cache is not supported
  on Windows platform yet. Do not use it while loading and processing data on Windows. More introductions and limitations
- can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.2/dataset/cache.html>`_ .
+ can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/dataset/cache.html>`_ .

  Common imported modules in corresponding API examples are as follows:

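A minimal sketch of the 'cache' argument described above, assuming a cache server has already been started with `cache_admin --start` and `some_session_id` holds the id printed by `cache_admin -g` (both values are placeholders):

    import mindspore.dataset as ds

    some_session_id = 1456416665                                  # placeholder: id from `cache_admin -g`
    cache = ds.DatasetCache(session_id=some_session_id, size=0)   # size=0: no explicit memory cap
    dataset = ds.Cifar10Dataset("./cifar-10-batches-bin", usage="train", cache=cache)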
@@ -55,11 +55,11 @@ The specific steps are as follows:
  - Dataset operation: The user uses the dataset object method `.shuffle` / `.filter` / `.skip` / `.split` /
  `.take` / ... to further shuffle, filter, skip, and obtain the maximum number of samples of datasets;
  - Dataset sample transform operation: The user can add data transform operations
- ( `vision transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
+ ( `vision transform <https://mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.vision>`_ ,
- `NLP transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
+ `NLP transform <https://mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.text>`_ ,
- `audio transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
+ `audio transform <https://mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.audio>`_ ) to the map
  operation to perform transformations. During data preprocessing, multiple map operations can be defined to
  perform different transform operations to different fields. The data transform operation can also be a
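A short sketch of the map-with-transforms step described above (the directory path and column name are illustrative):

    import mindspore.dataset as ds
    import mindspore.dataset.vision as vision

    dataset = ds.ImageFolderDataset("./images", decode=True)        # hypothetical image folder
    dataset = dataset.map(operations=[vision.Resize((224, 224))],   # per-sample transform via map
                          input_columns=["image"])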
@@ -73,7 +73,7 @@ Quick start of Dataset Pipeline
  -------------------------------

  For a quick start of using Dataset Pipeline, download `Load & Process Data With Dataset Pipeline
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/dataset_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/samples/dataset/dataset_gallery.html>`_
  to local and run in sequence.

  """
@@ -40,22 +40,22 @@ Descriptions of common data processing terms are as follows:
  The data transform operation can be executed in the data processing pipeline or in the eager mode:

  - Pipeline mode is generally used to process big datasets. Examples refer to
- `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.2/api_python/
+ `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/
  mindspore.dataset.html#introduction-to-data-processing-pipeline>`_ .
  - Eager mode is more like a function call to process data. Examples refer to
- `Lightweight Data Processing <https://www.mindspore.cn/tutorials/en/r2.2/advanced/dataset/eager.html>`_ .
+ `Lightweight Data Processing <https://www.mindspore.cn/tutorials/en/r2.3.q1/advanced/dataset/eager.html>`_ .
  """
  from __future__ import absolute_import

- from mindspore.dataset.audio import transforms
- from mindspore.dataset.audio import utils
- from mindspore.dataset.audio.transforms import AllpassBiquad, AmplitudeToDB, Angle, BandBiquad, \
+ from . import transforms
+ from . import utils
+ from .transforms import AllpassBiquad, AmplitudeToDB, Angle, BandBiquad, \
  BandpassBiquad, BandrejectBiquad, BassBiquad, Biquad, ComplexNorm, ComputeDeltas, Contrast, DBToAmplitude, \
  DCShift, DeemphBiquad, DetectPitchFrequency, Dither, EqualizerBiquad, Fade, Filtfilt, Flanger, FrequencyMasking, \
  Gain, GriffinLim, HighpassBiquad, InverseMelScale, InverseSpectrogram, LFCC, LFilter, LowpassBiquad, Magphase, \
  MaskAlongAxis, MaskAlongAxisIID, MelScale, MelSpectrogram, MFCC, MuLawDecoding, MuLawEncoding, Overdrive, \
  Phaser, PhaseVocoder, PitchShift, Resample, RiaaBiquad, SlidingWindowCmn, SpectralCentroid, Spectrogram, \
  TimeMasking, TimeStretch, TrebleBiquad, Vad, Vol
- from mindspore.dataset.audio.utils import BorderType, DensityFunction, FadeShape, GainType, Interpolation, \
+ from .utils import BorderType, DensityFunction, FadeShape, GainType, Interpolation, \
  MelType, Modulation, NormMode, NormType, ResampleMethod, ScaleType, WindowType, create_dct, linear_fbanks, \
  melscale_fbanks
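The refactor above only switches these module-internal imports to relative form; the public import path stays the same. A small hedged check that also illustrates the eager mode mentioned in the docstring (random samples stand in for real audio):

    import numpy as np
    import mindspore.dataset.audio as audio   # public path unchanged by the relative-import refactor

    waveform = np.random.randn(16000).astype(np.float32)   # fake one-second mono signal
    boosted = audio.Gain(gain_db=3.0)(waveform)            # eager call, no pipeline needed
    print(boosted.shape)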