mindspore-2.2.14-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc2-cp37-cp37m-manylinux1_x86_64.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (1172)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -4
  3. mindspore/_akg/akg/composite/build_module.py +155 -11
  4. mindspore/_akg/akg/config/repository.json +38 -0
  5. mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
  7. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
  8. mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
  9. mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
  10. mindspore/_akg/akg/utils/gen_random.py +14 -8
  11. mindspore/_akg/akg/utils/op_dsl.py +11 -0
  12. mindspore/_akg/akg/utils/tbe_codegen_utils.py +18 -8
  13. mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
  14. mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
  15. mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
  16. mindspore/_checkparam.py +78 -0
  17. mindspore/_extends/builtin_operations.py +2 -1
  18. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  19. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  20. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  21. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  22. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  23. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  24. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  25. mindspore/_extends/parse/__init__.py +18 -14
  26. mindspore/_extends/parse/compile_config.py +229 -0
  27. mindspore/_extends/parse/parser.py +155 -59
  28. mindspore/_extends/parse/resources.py +40 -7
  29. mindspore/_extends/parse/standard_method.py +127 -206
  30. mindspore/_extends/remote/kernel_build_server.py +2 -0
  31. mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
  32. mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _profiler.py} +13 -16
  33. mindspore/amp.py +24 -18
  34. mindspore/bin/cache_admin +0 -0
  35. mindspore/bin/cache_server +0 -0
  36. mindspore/boost/boost_cell_wrapper.py +1 -1
  37. mindspore/boost/group_loss_scale_manager.py +1 -1
  38. mindspore/common/__init__.py +7 -3
  39. mindspore/common/_jit_fallback_utils.py +2 -3
  40. mindspore/common/_register_for_adapter.py +7 -0
  41. mindspore/common/_register_for_recompute.py +48 -0
  42. mindspore/common/_stub_tensor.py +7 -1
  43. mindspore/common/_utils.py +5 -17
  44. mindspore/common/api.py +145 -50
  45. mindspore/common/auto_dynamic_shape.py +27 -14
  46. mindspore/common/dtype.py +9 -6
  47. mindspore/common/dump.py +5 -4
  48. mindspore/common/hook_handle.py +51 -4
  49. mindspore/common/initializer.py +1 -1
  50. mindspore/common/jit_config.py +33 -13
  51. mindspore/common/lazy_inline.py +58 -17
  52. mindspore/common/mindir_util.py +12 -2
  53. mindspore/common/mutable.py +79 -14
  54. mindspore/common/parameter.py +24 -4
  55. mindspore/common/recompute.py +247 -0
  56. mindspore/common/seed.py +9 -9
  57. mindspore/common/sparse_tensor.py +251 -18
  58. mindspore/common/symbol.py +122 -0
  59. mindspore/common/tensor.py +391 -465
  60. mindspore/communication/__init__.py +3 -3
  61. mindspore/communication/_comm_helper.py +5 -0
  62. mindspore/communication/management.py +53 -38
  63. mindspore/config/op_info.config +22 -54
  64. mindspore/context.py +176 -55
  65. mindspore/dataset/__init__.py +5 -5
  66. mindspore/dataset/audio/__init__.py +6 -6
  67. mindspore/dataset/audio/transforms.py +711 -158
  68. mindspore/dataset/callback/ds_callback.py +2 -2
  69. mindspore/dataset/engine/cache_client.py +2 -2
  70. mindspore/dataset/engine/datasets.py +72 -38
  71. mindspore/dataset/engine/datasets_audio.py +14 -14
  72. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  73. mindspore/dataset/engine/datasets_text.py +38 -38
  74. mindspore/dataset/engine/datasets_user_defined.py +7 -7
  75. mindspore/dataset/engine/datasets_vision.py +75 -71
  76. mindspore/dataset/engine/offload.py +5 -7
  77. mindspore/dataset/text/__init__.py +3 -3
  78. mindspore/dataset/text/transforms.py +408 -121
  79. mindspore/dataset/text/utils.py +9 -9
  80. mindspore/dataset/transforms/__init__.py +1 -1
  81. mindspore/dataset/transforms/transforms.py +261 -76
  82. mindspore/dataset/utils/browse_dataset.py +9 -9
  83. mindspore/dataset/vision/__init__.py +3 -3
  84. mindspore/dataset/vision/c_transforms.py +5 -5
  85. mindspore/dataset/vision/transforms.py +2264 -514
  86. mindspore/dataset/vision/utils.py +40 -9
  87. mindspore/dataset/vision/validators.py +7 -1
  88. mindspore/experimental/optim/__init__.py +12 -2
  89. mindspore/experimental/optim/adadelta.py +161 -0
  90. mindspore/experimental/optim/adagrad.py +168 -0
  91. mindspore/experimental/optim/adam.py +35 -34
  92. mindspore/experimental/optim/adamax.py +170 -0
  93. mindspore/experimental/optim/adamw.py +40 -16
  94. mindspore/experimental/optim/asgd.py +153 -0
  95. mindspore/experimental/optim/lr_scheduler.py +66 -121
  96. mindspore/experimental/optim/nadam.py +157 -0
  97. mindspore/experimental/optim/optimizer.py +15 -8
  98. mindspore/experimental/optim/radam.py +194 -0
  99. mindspore/experimental/optim/rmsprop.py +154 -0
  100. mindspore/experimental/optim/rprop.py +164 -0
  101. mindspore/experimental/optim/sgd.py +28 -19
  102. mindspore/hal/__init__.py +34 -0
  103. mindspore/hal/_ascend.py +57 -0
  104. mindspore/hal/_base.py +57 -0
  105. mindspore/hal/_cpu.py +56 -0
  106. mindspore/hal/_gpu.py +57 -0
  107. mindspore/hal/device.py +356 -0
  108. mindspore/hal/event.py +179 -0
  109. mindspore/hal/stream.py +339 -0
  110. mindspore/include/api/data_type.h +2 -2
  111. mindspore/include/api/dual_abi_helper.h +16 -3
  112. mindspore/include/api/model.h +1 -3
  113. mindspore/include/api/status.h +14 -0
  114. mindspore/include/c_api/model_c.h +173 -0
  115. mindspore/include/c_api/ms/base/types.h +1 -0
  116. mindspore/include/c_api/types_c.h +19 -0
  117. mindspore/include/dataset/execute.h +1 -3
  118. mindspore/include/mindapi/base/format.h +125 -23
  119. mindspore/include/mindapi/base/types.h +12 -0
  120. mindspore/lib/libdnnl.so.2 +0 -0
  121. mindspore/lib/libmindspore.so +0 -0
  122. mindspore/lib/libmindspore_backend.so +0 -0
  123. mindspore/lib/libmindspore_common.so +0 -0
  124. mindspore/lib/libmindspore_core.so +0 -0
  125. mindspore/lib/libmindspore_glog.so.0 +0 -0
  126. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  127. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  128. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  129. mindspore/lib/libmindspore_shared_lib.so +0 -0
  130. mindspore/lib/libmpi_adapter.so +0 -0
  131. mindspore/lib/libmpi_collective.so +0 -0
  132. mindspore/lib/libnnacl.so +0 -0
  133. mindspore/lib/libopencv_core.so.4.5 +0 -0
  134. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  135. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  136. mindspore/lib/libps_cache.so +0 -0
  137. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
  138. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
  139. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
  140. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
  141. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
  142. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
  143. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  144. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
  145. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  146. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  147. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6318 -1760
  148. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
  162. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  163. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  164. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
  165. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  166. mindspore/lib/plugin/ascend/libakg.so +0 -0
  167. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  168. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  169. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  170. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  171. mindspore/lib/plugin/cpu/libakg.so +0 -0
  172. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  173. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  174. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  175. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  176. mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
  177. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  178. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  179. mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
  180. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  181. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  182. mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
  183. mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
  184. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  185. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  186. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  187. mindspore/log.py +2 -2
  188. mindspore/mindrecord/__init__.py +5 -1
  189. mindspore/mindrecord/config.py +809 -0
  190. mindspore/mindrecord/filereader.py +25 -0
  191. mindspore/mindrecord/filewriter.py +74 -56
  192. mindspore/mindrecord/mindpage.py +40 -6
  193. mindspore/mindrecord/shardutils.py +3 -2
  194. mindspore/mindrecord/shardwriter.py +7 -0
  195. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  196. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  197. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  198. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  199. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  200. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  201. mindspore/mint/__init__.py +457 -0
  202. mindspore/mint/nn/__init__.py +430 -0
  203. mindspore/mint/nn/functional.py +424 -0
  204. mindspore/mint/optim/__init__.py +24 -0
  205. mindspore/mint/optim/adamw.py +186 -0
  206. mindspore/multiprocessing/__init__.py +72 -0
  207. mindspore/nn/__init__.py +3 -0
  208. mindspore/nn/cell.py +131 -174
  209. mindspore/nn/dynamic_lr.py +2 -2
  210. mindspore/nn/extend/__init__.py +29 -0
  211. mindspore/nn/extend/basic.py +140 -0
  212. mindspore/nn/extend/embedding.py +143 -0
  213. mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
  214. mindspore/nn/extend/layer/normalization.py +107 -0
  215. mindspore/nn/extend/pooling.py +117 -0
  216. mindspore/nn/generator.py +297 -0
  217. mindspore/nn/layer/activation.py +79 -90
  218. mindspore/nn/layer/basic.py +113 -81
  219. mindspore/nn/layer/channel_shuffle.py +3 -16
  220. mindspore/nn/layer/container.py +3 -3
  221. mindspore/nn/layer/conv.py +71 -71
  222. mindspore/nn/layer/embedding.py +105 -44
  223. mindspore/nn/layer/image.py +4 -7
  224. mindspore/nn/layer/normalization.py +52 -66
  225. mindspore/nn/layer/padding.py +30 -39
  226. mindspore/nn/layer/pooling.py +13 -9
  227. mindspore/nn/layer/rnn_cells.py +5 -15
  228. mindspore/nn/layer/rnns.py +6 -5
  229. mindspore/nn/layer/thor_layer.py +1 -2
  230. mindspore/nn/layer/timedistributed.py +1 -1
  231. mindspore/nn/layer/transformer.py +52 -50
  232. mindspore/nn/learning_rate_schedule.py +6 -5
  233. mindspore/nn/loss/loss.py +43 -64
  234. mindspore/nn/optim/ada_grad.py +4 -2
  235. mindspore/nn/optim/adadelta.py +3 -1
  236. mindspore/nn/optim/adafactor.py +1 -1
  237. mindspore/nn/optim/adam.py +102 -181
  238. mindspore/nn/optim/adamax.py +4 -2
  239. mindspore/nn/optim/adasum.py +2 -2
  240. mindspore/nn/optim/asgd.py +4 -2
  241. mindspore/nn/optim/ftrl.py +31 -61
  242. mindspore/nn/optim/lamb.py +5 -3
  243. mindspore/nn/optim/lars.py +2 -2
  244. mindspore/nn/optim/lazyadam.py +6 -4
  245. mindspore/nn/optim/momentum.py +13 -25
  246. mindspore/nn/optim/optimizer.py +6 -3
  247. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  248. mindspore/nn/optim/rmsprop.py +9 -3
  249. mindspore/nn/optim/rprop.py +4 -2
  250. mindspore/nn/optim/sgd.py +6 -5
  251. mindspore/nn/optim/thor.py +2 -2
  252. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  253. mindspore/nn/probability/distribution/beta.py +2 -2
  254. mindspore/nn/probability/distribution/categorical.py +4 -6
  255. mindspore/nn/probability/distribution/cauchy.py +2 -2
  256. mindspore/nn/probability/distribution/exponential.py +1 -1
  257. mindspore/nn/probability/distribution/gumbel.py +2 -2
  258. mindspore/nn/probability/distribution/poisson.py +2 -2
  259. mindspore/nn/probability/distribution/uniform.py +2 -2
  260. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  261. mindspore/nn/wrap/__init__.py +2 -1
  262. mindspore/nn/wrap/cell_wrapper.py +33 -12
  263. mindspore/nn/wrap/grad_reducer.py +148 -8
  264. mindspore/nn/wrap/loss_scale.py +7 -7
  265. mindspore/numpy/__init__.py +2 -0
  266. mindspore/numpy/array_creations.py +2 -0
  267. mindspore/numpy/array_ops.py +1 -5
  268. mindspore/numpy/fft.py +431 -0
  269. mindspore/numpy/math_ops.py +54 -60
  270. mindspore/numpy/utils.py +3 -0
  271. mindspore/ops/__init__.py +5 -4
  272. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  273. mindspore/ops/_grad_experimental/grad_comm_ops.py +14 -18
  274. mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
  275. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  276. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  277. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  278. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  279. mindspore/ops/_op_impl/__init__.py +0 -1
  280. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  281. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  282. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  283. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  284. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  285. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  286. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  287. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  288. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  289. mindspore/ops/_vmap/vmap_array_ops.py +137 -101
  290. mindspore/ops/_vmap/vmap_base.py +8 -1
  291. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  292. mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
  293. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  294. mindspore/ops/_vmap/vmap_math_ops.py +101 -57
  295. mindspore/ops/_vmap/vmap_nn_ops.py +230 -97
  296. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  297. mindspore/ops/auto_generate/__init__.py +31 -0
  298. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +205 -0
  299. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +257 -0
  300. mindspore/ops/auto_generate/gen_arg_handler.py +171 -0
  301. mindspore/ops/auto_generate/gen_extend_func.py +404 -0
  302. mindspore/ops/auto_generate/gen_ops_def.py +5653 -0
  303. mindspore/ops/auto_generate/gen_ops_prim.py +11623 -0
  304. mindspore/ops/auto_generate/pyboost_inner_prim.py +359 -0
  305. mindspore/ops/composite/__init__.py +5 -2
  306. mindspore/ops/composite/base.py +118 -17
  307. mindspore/ops/composite/math_ops.py +9 -48
  308. mindspore/ops/composite/multitype_ops/_compile_utils.py +168 -602
  309. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +24 -133
  310. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  311. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  312. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  313. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  314. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  315. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  316. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  317. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  318. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  319. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  320. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  321. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  322. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  323. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  324. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  325. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  326. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  327. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  328. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  329. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  330. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  331. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  332. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  333. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  334. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  335. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  336. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  337. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  338. mindspore/ops/deprecated.py +14 -3
  339. mindspore/ops/extend/__init__.py +54 -0
  340. mindspore/ops/extend/array_func.py +259 -0
  341. mindspore/ops/extend/math_func.py +76 -0
  342. mindspore/ops/extend/nn_func.py +384 -0
  343. mindspore/ops/function/__init__.py +37 -12
  344. mindspore/ops/function/array_func.py +702 -1867
  345. mindspore/ops/function/clip_func.py +19 -31
  346. mindspore/ops/function/debug_func.py +1 -4
  347. mindspore/ops/function/fft_func.py +31 -0
  348. mindspore/ops/function/grad/grad_func.py +24 -17
  349. mindspore/ops/function/image_func.py +27 -21
  350. mindspore/ops/function/linalg_func.py +35 -68
  351. mindspore/ops/function/math_func.py +639 -2531
  352. mindspore/ops/function/nn_func.py +1274 -832
  353. mindspore/ops/function/other_func.py +4 -5
  354. mindspore/ops/function/parameter_func.py +5 -93
  355. mindspore/ops/function/random_func.py +84 -71
  356. mindspore/ops/function/sparse_unary_func.py +9 -16
  357. mindspore/ops/function/spectral_func.py +1 -1
  358. mindspore/ops/function/vmap_func.py +14 -14
  359. mindspore/ops/functional.py +57 -63
  360. mindspore/ops/op_info_register.py +16 -43
  361. mindspore/ops/operations/__init__.py +19 -20
  362. mindspore/ops/operations/_grad_ops.py +20 -828
  363. mindspore/ops/operations/_inner_ops.py +180 -288
  364. mindspore/ops/operations/_scalar_ops.py +5 -480
  365. mindspore/ops/operations/_sequence_ops.py +6 -36
  366. mindspore/ops/operations/array_ops.py +83 -2697
  367. mindspore/ops/operations/comm_ops.py +38 -46
  368. mindspore/ops/operations/custom_ops.py +14 -96
  369. mindspore/ops/operations/debug_ops.py +100 -31
  370. mindspore/ops/operations/image_ops.py +1 -217
  371. mindspore/ops/operations/inner_ops.py +3 -38
  372. mindspore/ops/operations/linalg_ops.py +1 -49
  373. mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
  374. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  375. mindspore/ops/operations/manually_defined/ops_def.py +1716 -0
  376. mindspore/ops/operations/math_ops.py +581 -4629
  377. mindspore/ops/operations/nn_ops.py +260 -1941
  378. mindspore/ops/operations/other_ops.py +50 -42
  379. mindspore/ops/operations/random_ops.py +3 -52
  380. mindspore/ops/operations/sparse_ops.py +3 -3
  381. mindspore/ops/primitive.py +196 -96
  382. mindspore/ops_generate/__init__.py +27 -0
  383. mindspore/ops_generate/arg_dtype_cast.py +257 -0
  384. mindspore/ops_generate/arg_handler.py +171 -0
  385. mindspore/ops_generate/gen_aclnn_implement.py +266 -0
  386. mindspore/ops_generate/gen_ops.py +1062 -0
  387. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  388. mindspore/ops_generate/gen_pyboost_func.py +939 -0
  389. mindspore/ops_generate/gen_utils.py +188 -0
  390. mindspore/ops_generate/op_proto.py +138 -0
  391. mindspore/ops_generate/pyboost_utils.py +349 -0
  392. mindspore/ops_generate/template.py +238 -0
  393. mindspore/parallel/__init__.py +6 -4
  394. mindspore/parallel/_auto_parallel_context.py +52 -2
  395. mindspore/parallel/_cell_wrapper.py +16 -9
  396. mindspore/parallel/_cost_model_context.py +1 -1
  397. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  398. mindspore/parallel/_parallel_serialization.py +29 -13
  399. mindspore/parallel/_ps_context.py +1 -1
  400. mindspore/parallel/_recovery_context.py +1 -1
  401. mindspore/parallel/_tensor.py +19 -7
  402. mindspore/parallel/_transformer/__init__.py +1 -1
  403. mindspore/parallel/_transformer/layers.py +1 -1
  404. mindspore/parallel/_transformer/loss.py +1 -1
  405. mindspore/parallel/_transformer/moe.py +1 -1
  406. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  407. mindspore/parallel/_transformer/transformer.py +1 -1
  408. mindspore/parallel/_utils.py +147 -6
  409. mindspore/parallel/algo_parameter_config.py +6 -6
  410. mindspore/parallel/checkpoint_transform.py +180 -24
  411. mindspore/parallel/cluster/__init__.py +15 -0
  412. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  413. mindspore/parallel/cluster/process_entity/_api.py +345 -0
  414. mindspore/parallel/cluster/process_entity/_utils.py +116 -0
  415. mindspore/parallel/cluster/run.py +139 -0
  416. mindspore/parallel/mpi/__init__.py +1 -1
  417. mindspore/parallel/mpi/_mpi_config.py +1 -1
  418. mindspore/parallel/parameter_broadcast.py +152 -0
  419. mindspore/parallel/shard.py +99 -2
  420. mindspore/profiler/common/util.py +20 -0
  421. mindspore/profiler/envprofiling.py +1 -1
  422. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  423. mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
  424. mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
  425. mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
  426. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +109 -0
  427. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
  428. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
  429. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
  430. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  431. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
  432. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  433. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  434. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  435. mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
  436. mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
  437. mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
  438. mindspore/profiler/parser/ascend_op_generator.py +92 -42
  439. mindspore/profiler/parser/ascend_timeline_generator.py +294 -133
  440. mindspore/profiler/parser/base_timeline_generator.py +6 -0
  441. mindspore/profiler/parser/framework_parser.py +3 -2
  442. mindspore/profiler/parser/integrator.py +3 -1
  443. mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
  444. mindspore/profiler/parser/msadvisor_parser.py +1 -1
  445. mindspore/profiler/parser/profiler_info.py +16 -1
  446. mindspore/profiler/profiling.py +305 -167
  447. mindspore/rewrite/__init__.py +2 -13
  448. mindspore/rewrite/api/node.py +121 -35
  449. mindspore/rewrite/api/pattern_engine.py +2 -3
  450. mindspore/rewrite/api/scoped_value.py +16 -15
  451. mindspore/rewrite/api/symbol_tree.py +45 -29
  452. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  453. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  454. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  455. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  456. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  457. mindspore/rewrite/common/__init__.py +1 -2
  458. mindspore/rewrite/common/config.py +24 -0
  459. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  460. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  461. mindspore/rewrite/common/namespace.py +118 -0
  462. mindspore/rewrite/node/__init__.py +5 -5
  463. mindspore/rewrite/node/call_function.py +23 -7
  464. mindspore/rewrite/node/cell_container.py +7 -3
  465. mindspore/rewrite/node/control_flow.py +53 -28
  466. mindspore/rewrite/node/node.py +212 -196
  467. mindspore/rewrite/node/node_manager.py +51 -22
  468. mindspore/rewrite/node/node_topological_manager.py +3 -23
  469. mindspore/rewrite/parsers/__init__.py +12 -0
  470. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  471. mindspore/rewrite/parsers/assign_parser.py +635 -413
  472. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  473. mindspore/rewrite/parsers/class_def_parser.py +107 -144
  474. mindspore/rewrite/parsers/constant_parser.py +5 -5
  475. mindspore/rewrite/parsers/container_parser.py +4 -6
  476. mindspore/rewrite/parsers/expr_parser.py +55 -0
  477. mindspore/rewrite/parsers/for_parser.py +31 -98
  478. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  479. mindspore/rewrite/parsers/if_parser.py +28 -10
  480. mindspore/rewrite/parsers/module_parser.py +8 -182
  481. mindspore/rewrite/parsers/parser.py +1 -5
  482. mindspore/rewrite/parsers/parser_register.py +1 -1
  483. mindspore/rewrite/parsers/return_parser.py +5 -10
  484. mindspore/rewrite/parsers/while_parser.py +59 -0
  485. mindspore/rewrite/sparsify/utils.py +1 -1
  486. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  487. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  488. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  489. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  490. mindspore/run_check/_check_version.py +6 -14
  491. mindspore/run_check/run_check.py +1 -1
  492. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  493. mindspore/scipy/__init__.py +2 -1
  494. mindspore/scipy/fft.py +133 -0
  495. mindspore/scipy/linalg.py +140 -55
  496. mindspore/scipy/ops.py +15 -71
  497. mindspore/scipy/ops_grad.py +5 -34
  498. mindspore/scipy/optimize/line_search.py +2 -2
  499. mindspore/scipy/optimize/minimize.py +1 -1
  500. mindspore/train/__init__.py +3 -2
  501. mindspore/train/_utils.py +178 -4
  502. mindspore/train/amp.py +167 -245
  503. mindspore/train/anf_ir_pb2.py +8 -2
  504. mindspore/train/callback/_backup_and_restore.py +4 -4
  505. mindspore/train/callback/_callback.py +4 -4
  506. mindspore/train/callback/_checkpoint.py +39 -13
  507. mindspore/train/callback/_early_stop.py +2 -2
  508. mindspore/train/callback/_landscape.py +14 -8
  509. mindspore/train/callback/_loss_monitor.py +2 -2
  510. mindspore/train/callback/_on_request_exit.py +2 -2
  511. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  512. mindspore/train/callback/_summary_collector.py +7 -7
  513. mindspore/train/callback/_time_monitor.py +2 -2
  514. mindspore/train/data_sink.py +1 -1
  515. mindspore/train/dataset_helper.py +18 -4
  516. mindspore/train/loss_scale_manager.py +2 -2
  517. mindspore/train/metrics/accuracy.py +7 -7
  518. mindspore/train/metrics/confusion_matrix.py +8 -6
  519. mindspore/train/metrics/cosine_similarity.py +6 -4
  520. mindspore/train/metrics/error.py +2 -2
  521. mindspore/train/metrics/metric.py +3 -3
  522. mindspore/train/metrics/perplexity.py +2 -1
  523. mindspore/train/metrics/topk.py +2 -2
  524. mindspore/train/mind_ir_pb2.py +89 -15
  525. mindspore/train/model.py +24 -22
  526. mindspore/train/serialization.py +257 -133
  527. mindspore/train/summary/summary_record.py +51 -28
  528. mindspore/train/train_thor/convert_utils.py +3 -3
  529. mindspore/version.py +1 -1
  530. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
  531. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +534 -1066
  532. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +1 -0
  533. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  534. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  535. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  536. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  537. mindspore/config/super_bar_config.json +0 -544
  538. mindspore/gen_ops.py +0 -273
  539. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  540. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  541. mindspore/nn/layer/flash_attention.py +0 -189
  542. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  543. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  544. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  545. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  546. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  547. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  548. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  549. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  550. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  551. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  552. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  553. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  554. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  555. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  556. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  557. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  558. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  559. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  560. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  561. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  562. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  563. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  564. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  565. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  566. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  567. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  568. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  569. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  570. mindspore/ops/_op_impl/tbe/add.py +0 -42
  571. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  572. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  573. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  574. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  575. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  576. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  577. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  578. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  579. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  580. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  581. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  582. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  583. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  584. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  585. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  586. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  587. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  588. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  589. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  590. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  591. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  592. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  593. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  594. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  595. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  596. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  597. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  598. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  599. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  600. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  601. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  602. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  603. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  604. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  605. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  606. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  607. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  608. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  609. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  610. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  611. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  612. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  613. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  614. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  615. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  616. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  617. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  618. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  619. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  620. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  621. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  622. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  623. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  624. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  625. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  626. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  627. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  628. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  629. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  630. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  631. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  632. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  633. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  634. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  635. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  636. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  637. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  638. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  639. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  640. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  641. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  642. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  643. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  644. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  645. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  646. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  647. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  648. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  649. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  650. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  651. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  652. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  653. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  654. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  655. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  656. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  657. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  658. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  659. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  660. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  661. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  662. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  663. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  664. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  665. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  666. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  667. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  668. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  669. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  670. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  671. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  672. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  673. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  674. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  675. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  676. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  677. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  678. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  679. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  680. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  681. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  682. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  683. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  684. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  685. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  686. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  687. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  688. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  689. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  690. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  691. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  692. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  693. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  694. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  695. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  696. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  697. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  698. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  699. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  700. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  701. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  702. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  703. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  704. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  705. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  706. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  707. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  708. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  709. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  710. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  711. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  712. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  713. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  714. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  715. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  716. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  717. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  718. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  719. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  720. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  721. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  722. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  723. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  724. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  725. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  726. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  727. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  728. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  729. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  730. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  731. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  732. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  733. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  734. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  735. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  736. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  737. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  738. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  739. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  740. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  741. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  742. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  743. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  744. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  745. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  746. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  747. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  748. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  749. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  750. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  751. mindspore/ops/_op_impl/tbe/div.py +0 -41
  752. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  753. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  754. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  755. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  756. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  757. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  758. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  759. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  760. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  761. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  762. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  763. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  764. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  765. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  766. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  767. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  768. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  769. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  770. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  771. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  772. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  773. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  774. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  775. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  776. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  777. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  778. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  779. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  780. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  781. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  782. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  783. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  784. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  785. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  786. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  787. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  788. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  789. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  790. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  791. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  792. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  793. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  794. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  795. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  796. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  797. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  798. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  799. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  800. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  801. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  802. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  803. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  804. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  805. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  806. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  807. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  808. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  809. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  810. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  811. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  812. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  813. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  814. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  815. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  816. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  817. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  818. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  819. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  820. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  821. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  822. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  823. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  824. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  825. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  826. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  827. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  828. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  829. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  830. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  831. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  832. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  833. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  834. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  835. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  836. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  837. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  838. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  839. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  840. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  841. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  842. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  843. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  844. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  845. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  846. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  847. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  848. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  849. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  850. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  851. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  852. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  853. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  854. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  855. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  856. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  857. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  858. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  859. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  860. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  861. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  862. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  863. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  864. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  865. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  866. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  867. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  868. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  869. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  870. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  871. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  872. mindspore/ops/_op_impl/tbe/less.py +0 -41
  873. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  874. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  875. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  876. mindspore/ops/_op_impl/tbe/log.py +0 -40
  877. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  878. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  879. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  880. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  881. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  882. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  883. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  884. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  885. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  886. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  887. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  888. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  889. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  890. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  891. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  892. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  893. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  894. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  895. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  896. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  897. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  898. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  899. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  900. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  901. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  902. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  903. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  904. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  905. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  906. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  907. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  908. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  909. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  910. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  911. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  912. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  913. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  914. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  915. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  916. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  917. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  918. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  919. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  920. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  921. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  922. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  923. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  924. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  925. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  926. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  927. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  928. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  929. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  930. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  931. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  932. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  933. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  934. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  935. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  936. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  937. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  938. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  939. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  940. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  941. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  942. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  943. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  944. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  945. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  946. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  947. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  948. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  949. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  950. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  951. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  952. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  953. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  954. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  955. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  956. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  957. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  958. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  959. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  960. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  961. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  962. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  963. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  964. mindspore/ops/_op_impl/tbe/range.py +0 -39
  965. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  966. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  967. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  968. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  969. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  970. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  971. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  972. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  973. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  974. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  975. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  976. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  977. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  978. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  979. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  980. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  981. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  982. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  983. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  984. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  985. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  986. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  987. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  988. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  989. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  990. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  991. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  992. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  993. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  994. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  995. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  996. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  997. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  998. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  999. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  1000. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  1001. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  1002. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  1003. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  1004. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  1005. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  1006. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  1007. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  1008. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  1009. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  1010. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  1011. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  1012. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  1013. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  1014. mindspore/ops/_op_impl/tbe/round.py +0 -38
  1015. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  1016. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  1017. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  1018. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  1019. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1020. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1021. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1022. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1023. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1024. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1025. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1026. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1027. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1028. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1029. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1030. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1031. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1032. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1033. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1034. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1035. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1036. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1037. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1038. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1039. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1040. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1041. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1042. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1043. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1044. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1045. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1046. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1047. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1048. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1049. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1050. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1051. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1052. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1053. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1054. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1055. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1056. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1057. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1058. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1059. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1060. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1061. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1062. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1063. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1064. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1065. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1066. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1067. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1068. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1069. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1070. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1071. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1072. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1073. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1074. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1075. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1076. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1077. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1078. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1079. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1080. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1081. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1082. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1083. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1084. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1085. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1086. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1087. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1088. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1089. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1090. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1091. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1092. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1093. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1094. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1095. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1096. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1097. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1098. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1099. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1100. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1101. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1102. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1103. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1104. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1105. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1106. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1107. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1108. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1109. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1110. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1111. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1112. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1113. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1114. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1115. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1116. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1117. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1118. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1119. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1120. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1121. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1122. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1123. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1124. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1125. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1126. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1127. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1128. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1129. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1130. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1131. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1132. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1133. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1134. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1135. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1136. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1137. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1138. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1139. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1140. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1141. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1142. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1143. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1144. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1145. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1146. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1147. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1148. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1149. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1150. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1151. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1152. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1153. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1154. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1155. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1156. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1157. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1158. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1159. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1160. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1161. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1162. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1163. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1164. mindspore/ops/_tracefunc.py +0 -241
  1165. mindspore/ops/arg_dtype_cast.py +0 -54
  1166. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1167. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1168. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1169. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1170. mindspore/rewrite/namespace.py +0 -53
  1171. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  1172. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/context.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2020-2023 Huawei Technologies Co., Ltd
+ # Copyright 2020-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -50,6 +50,10 @@ STRICT = 0
  COMPATIBLE = 1
  LAX = 2

+ # Enumerate for the property 'debug_level'.
+ RELEASE = 0
+ DEBUG = 1
+

  def _make_directory(path):
  """Make directory."""
@@ -161,6 +165,7 @@ class _Context:
  self._context_switches = _ContextSwitchInfo(False)
  self._context_handle = MSContext.get_instance()
  self._support_binary = False
+ self._mode = PYNATIVE_MODE

  def __getattribute__(self, attr):
  value = object.__getattribute__(self, attr)
@@ -176,7 +181,7 @@ class _Context:

  def get_mode(self):
  """Get current mode."""
- return self.get_param(ms_ctx_param.mode)
+ return self._mode

  def set_mode(self, mode):
  """
@@ -204,6 +209,7 @@ class _Context:
  raise ValueError(f"For 'context.set_context', the argument 'mode' should be context.GRAPH_MODE (0) "
  f"or context.PYNATIVE_MODE (1), but got {mode}.")
  self.set_param(ms_ctx_param.mode, mode)
+ self._mode = mode

  def set_jit_syntax_level(self, level):
  """"Set the JIT syntax level for graph compiling"""
@@ -212,6 +218,14 @@
  f"or context.LAX, but got {level}.")
  self.set_param(ms_ctx_param.jit_syntax_level, level)

+ def set_debug_level(self, level):
+ """"Set the debug level for graph compiling"""
+ if level != RELEASE and level != DEBUG:
+ raise ValueError(f"For 'context.set_debug_level', the argument 'level' should be context.RELEASE "
+ f"or context.DEBUG, but got {level}.")
+ self.set_param(ms_ctx_param.debug_level, level)
+
+
  def set_memory_optimize_level(self, memory_optimize_level):
  """
  The memory optimize level, support "O0", "O1".
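The new `set_debug_level` setter is reached through `set_context(debug_level=...)` with the `RELEASE`/`DEBUG` constants introduced above. A minimal sketch, mirroring the doctest added later in this file:

    import mindspore as ms

    # DEBUG records extra information during compilation for troubleshooting;
    # RELEASE (the default) drops it for better compile performance.
    ms.set_context(debug_level=ms.DEBUG)
    # ... reproduce the failing compilation here ...
    ms.set_context(debug_level=ms.RELEASE)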
@@ -268,11 +282,13 @@
  "allow_mix_precision_fp16" and "allow_mix_precision_bf16".
  - jit_compile (bool): ``False`` and ``True``.
  - atomic_clean_policy (int): ``0`` and ``1``. Default: ``1`` .
+ - op_precision_mode (str): precision mode config file path.
+ - ge_options (dict): Global or session CANN options.
  - exception_dump (str): Enable exception dump for Ascend operators. ``"0"`` , ``"1"`` and ``"2"``.
  Default: ``"2"`` .
- - op_precision_mode (str): config file path.
  - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file.
  If its value is None or '', it does not take effect. Default None.
+ - host_scheduling_max_threshold(int): The host scheduling max threshold.
  """
  ascend_cfg_modes = {
  'precision_mode': ["force_fp16", "allow_fp32_to_fp16", "allow_mix_precision", "must_keep_origin_dtype",
@@ -284,7 +300,9 @@
  'conv_allow_hf32': [True, False],
  'exception_dump': ["0", "1", "2"],
  'op_precision_mode': (str,),
+ 'ge_options': (dict,),
  'parallel_speed_up_json_path': (str, None),
+ 'host_scheduling_max_threshold': (int,),
  'topo_order': (dict,)
  }
  ascend_cfg_setters = {
@@ -295,7 +313,9 @@ class _Context:
  'conv_allow_hf32': self._get_ascend_config_setter('conv_allow_hf32', lambda v: "1" if v else "0"),
  'exception_dump': self._get_ascend_config_setter('exception_dump'),
  'op_precision_mode': self._set_op_precision_mode,
+ 'ge_options': self._set_ge_options,
  'parallel_speed_up_json_path': self._set_speedup_config_path,
+ 'host_scheduling_max_threshold': self._get_ascend_config_setter('host_scheduling_max_threshold', str),
  'topo_order': self._set_topo_order
  }
  ascend_cfg_set = tuple(ascend_cfg_modes.keys())
@@ -475,9 +495,10 @@

  def set_mempool_block_size(self, mempool_block_size):
  """Set the block size of memory pool."""
- if _get_mode() == GRAPH_MODE:
+ is_force_kbk = os.getenv("GRAPH_OP_RUN")
+ if _get_mode() == GRAPH_MODE and is_force_kbk != "1":
  logger.warning("Graph mode doesn't support to set parameter 'mempool_block_size' of context currently, "
- "you can use context.set_context to set pynative mode.")
+ "you can use context.set_context to set pynative mode or set env GRAPH_OP_RUN=1.")
  return
  if not Validator.check_str_by_regular(mempool_block_size, _RE_PATTERN):
  raise ValueError("For 'context.set_context', the argument 'mempool_block_size' should be in "
@@ -563,6 +584,7 @@
  'deterministic': set_deterministic,
  'ascend_config': set_ascend_config,
  'jit_syntax_level': set_jit_syntax_level,
+ 'debug_level': set_debug_level,
  'gpu_config': set_gpu_config,
  'aoe_config': set_aoe_config,
  }
@@ -620,6 +642,28 @@
  f"got '{op_precision_path}'.")
  self.set_param(ms_ctx_param.op_precision_mode, ascend_value)

+ def _set_ge_options(self, ge_options):
+ """Set ge options."""
+ for level, options in ge_options.items():
+ if level not in ['global', 'session']:
+ raise ValueError(f"For 'ascend_config', the key of ge_options must be one of "
+ f"('global', 'session'), but got {level}.")
+
+ if not isinstance(options, dict):
+ raise TypeError(f"For 'ge_options', the type of {level} options must be dict, "
+ f"but got {type(options)}. The error options: {options}.")
+
+ for key, value in options.items():
+ if not isinstance(key, str):
+ raise TypeError(f"For 'ge_options', the type of key and value must be str, "
+ f"but got {type(key)}. The error key is {key}.")
+ if not isinstance(value, str):
+ raise TypeError(f"For 'ge_options', the type of key and value must be str, "
+ f"but got {type(value)}. The error value is {value}")
+
+ options_str = json.dumps(ge_options)
+ self.set_param(ms_ctx_param.ge_options, options_str)
+
  def _set_topo_order(self, topo_order):
  """
  Set topo order.
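`_set_ge_options` validates the nested dict, serializes it with `json.dumps` and hands it to the backend; it is reached through the public `ascend_config` entry of `set_context`. A minimal sketch; the CANN option names are taken from the doctest added later in this file and are otherwise an assumption:

    import mindspore as ms

    # Only the 'global' and 'session' levels are accepted, and every option
    # key and value must be a string.
    ms.set_context(ascend_config={
        "ge_options": {
            "global": {"ge.opSelectImplmode": "high_precision"},
            "session": {"ge.exec.atomicCleanPolicy": "0"},
        }
    })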
@@ -652,23 +696,32 @@
  f"{speedup_config_real_path} does not exist, please check whether the "
  f"'parallel_speed_up_json_path' is correct.")
  try:
- valid_option = {"recompute_comm_overlap": ms_ctx_param.recompute_comm_overlap,
- "matmul_grad_comm_overlap": ms_ctx_param.matmul_grad_comm_overlap,
- "enable_task_opt": ms_ctx_param.enable_task_opt,
- "enable_grad_comm_opt": ms_ctx_param.enable_grad_comm_opt,
- "interleaved_matmul_comm": ms_ctx_param.interleaved_matmul_comm,
- "enable_opt_shard_comm_opt": ms_ctx_param.enable_opt_shard_comm_opt,
- "interleaved_layernorm_comm": ms_ctx_param.interleaved_layernorm_comm}
+ valid_option = {"recompute_comm_overlap": (ms_ctx_param.recompute_comm_overlap, bool),
+ "matmul_grad_comm_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
+ "enable_task_opt": (ms_ctx_param.enable_task_opt, bool),
+ "enable_grad_comm_opt": (ms_ctx_param.enable_grad_comm_opt, bool),
+ "interleaved_matmul_comm": (ms_ctx_param.interleaved_matmul_comm, bool),
+ "bias_add_comm_swap": (ms_ctx_param.bias_add_comm_swap, bool),
+ "enable_opt_shard_comm_opt": (ms_ctx_param.enable_opt_shard_comm_opt, bool),
+ "enable_begin_end_inline_opt": (ms_ctx_param.enable_begin_end_inline_opt, bool),
+ "enable_concat_eliminate_opt": (ms_ctx_param.enable_concat_eliminate_opt, bool),
+ "interleaved_layernorm_comm": (ms_ctx_param.interleaved_layernorm_comm, bool),
+ "compute_communicate_fusion_level":
+ (ms_ctx_param.compute_communicate_fusion_level, int),
+ "enable_flash_attention_load_balance":
+ (ms_ctx_param.enable_flash_attention_load_balance, bool)}
  with open(speedup_config_real_path, 'r') as f:
  speedup_config = json.load(f)
- for k, v in speedup_config.items():
- if not isinstance(k, str):
- raise TypeError("key {} is not a str".format(k))
- if k not in valid_option:
- raise ValueError("key {} should be one of {}.".format(k, valid_option.keys()))
- if not isinstance(v, bool):
- raise TypeError("value {} is not a bool".format(v))
- self.set_param(valid_option.get(k), v)
+ for key, value in speedup_config.items():
+ if not isinstance(key, str):
+ raise TypeError("key {} is not a str".format(key))
+ if key not in valid_option:
+ raise ValueError("key {} should be one of {}.".format(key, valid_option.keys()))
+ set_func, valid_type = valid_option.get(key)
+ if not isinstance(value, valid_type):
+ raise TypeError(f"The value type of {key} must be {valid_type}, "
+ f"but got value is {value} and type is {type(value)}.")
+ self.set_param(set_func, value)
  except (TypeError, ValueError) as exo:
  raise ValueError(str(exo) + "\nFor 'context.set_context', "
  "open or load the 'speedup_config_path' file {} "
@@ -705,8 +758,9 @@ def _context():
  auto_parallel_search_mode=str, search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
  strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool, enable_alltoall=bool,
  all_reduce_fusion_config=list, pipeline_stages=int, pipeline_segments=int,
- pipeline_config=dict, parallel_optimizer_config=dict,
- comm_fusion=dict, strategy_ckpt_config=dict)
+ pipeline_result_broadcast=bool, parallel_optimizer_config=dict,
+ pipeline_config=dict,
+ comm_fusion=dict, strategy_ckpt_config=dict, force_fp32_communication=bool)
  def set_auto_parallel_context(**kwargs):
  r"""
  Set auto parallel context, only data parallel supported on CPU.
@@ -733,8 +787,10 @@ def set_auto_parallel_context(**kwargs):
  parallel_optimizer_config dataset_strategy
  enable_alltoall pipeline_stages
  pipeline_config auto_parallel_search_mode
+ force_fp32_communication pipeline_result_broadcast
  \ comm_fusion
  \ strategy_ckpt_config
+ \ group_ckpt_save_file
  =========================== ===========================

  Args:
@@ -744,6 +800,8 @@ def set_auto_parallel_context(**kwargs):
  "stand_alone" do not support gradients_mean. Default: ``False`` .
  gradient_fp32_sync (bool): Run allreduce of gradients in fp32. "stand_alone", "data_parallel"
  and "hybrid_parallel" do not support gradient_fp32_sync. Default: ``True`` .
+ loss_repeated_mean (bool) - Indicates whether the mean operator is executed backwards when the
+ calculation is repeated. Default: ``True`` .
  parallel_mode (str): There are five kinds of parallel modes, ``"stand_alone"`` , ``"data_parallel"`` ,
  ``"hybrid_parallel"`` , ``"semi_auto_parallel"`` and ``"auto_parallel"`` . Note the pynative mode
  only supports the ``"stand_alone"`` and ``"data_parallel"`` mode. Default: ``"stand_alone"`` .
@@ -758,15 +816,16 @@ def set_auto_parallel_context(**kwargs):

  - auto_parallel: Achieving parallelism automatically.
  search_mode (str): There are three kinds of shard strategy search modes: ``"recursive_programming"`` ,
- ``"dynamic_programming"`` and ``"sharding_propagation"`` . Default: ``"recursive_programming"`` .
+ ``"sharding_propagation"`` and ``"dynamic_programming"`` (Not recommended).
+ Default: ``"recursive_programming"`` .

  - recursive_programming: Recursive programming search mode. In order to obtain optimal performance,
  it is recommended that users set the batch size to be greater than or equal to the product of
  the number of devices and the number of multi-copy parallelism.

- - dynamic_programming: Dynamic programming search mode.
-
  - sharding_propagation: Propagate shardings from configured ops to non-configured ops.
+
+ - dynamic_programming: Dynamic programming search mode.
  auto_parallel_search_mode (str): This is the old version of 'search_mode'. Here, remaining this attribute is
  for forward compatibility, and this attribute will be deleted in a future MindSpore version.
  parameter_broadcast (bool): Whether to broadcast parameters before training. Before training, in order to have
@@ -792,6 +851,9 @@ def set_auto_parallel_context(**kwargs):
  data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
  parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
  `Lamb` and `AdamWeightDecay` in Ascend . Default: ``False`` .
+ force_fp32_communication (bool): A switch that determines whether reduce operators (AllReduce, ReduceScatter)
+ are forced to use the fp32 data type for communication during communication. True is the enable
+ switch. Default: ``False`` .
  enable_alltoall (bool): A switch that allows AllToAll operators to be generated during communication. If its
  value is ``False`` , there will be a combination of operators such as AllGather, Split and
  Concat instead of AllToAll. Default: ``False`` .
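A minimal sketch of turning on the new `force_fp32_communication` switch alongside the settings documented above; values are illustrative and a distributed launch is assumed for the call to be meaningful:

    import mindspore as ms

    # Force AllReduce/ReduceScatter to communicate in fp32 regardless of the
    # parameter dtype used by the network.
    ms.set_auto_parallel_context(parallel_mode="semi_auto_parallel",
                                 gradients_mean=True,
                                 force_fp32_communication=True)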
@@ -801,6 +863,8 @@
  distributed alone in the pipeline. The total devices will be divided into 'pipeline_stags'
  stages.
  Default: ``1`` .
+ pipeline_result_broadcast (bool): A switch that broadcast the last stage result to all other stage in pipeline
+ parallel inference. Default: ``False`` .
  pipeline_config (dict): A dict contains the keys and values for setting the pipeline parallelism configuration.
  It supports the following keys:

@@ -866,14 +930,15 @@ def set_auto_parallel_context(**kwargs):
  - load_file (str): The path to load parallel strategy checkpoint. If the file name extension is
  `.json`, the file is loaded in JSON format. Otherwise, the file is loaded in ProtoBuf
  format.
- Default: ''
+ Default: ``''``

  - save_file (str): The path to save parallel strategy checkpoint. If the file name extension is
  `.json`, the file is saved in JSON format. Otherwise, the file is saved in ProtoBuf format.
- Default: ''
+ Default: ``''``

  - only_trainable_params (bool): Only save/load the strategy information for trainable parameter.
  Default: ``True`` .
+ group_ckpt_save_file (str): The path to save parallel group checkpoint.

  Raises:
  ValueError: If input key is not attribute in auto parallel context.
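A minimal sketch combining the `strategy_ckpt_config` options above with the new `group_ckpt_save_file` argument; the paths are illustrative:

    import mindspore as ms

    # Save the sharding strategy in JSON form and the communication-group
    # information to a separate checkpoint file.
    ms.set_auto_parallel_context(
        strategy_ckpt_config={"save_file": "./strategy_stage1.json",
                              "only_trainable_params": True},
        group_ckpt_save_file="./group_info.ckpt")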
@@ -885,8 +950,8 @@ def set_auto_parallel_context(**kwargs):
  >>> ms.set_auto_parallel_context(gradients_mean=True)
  >>> ms.set_auto_parallel_context(gradient_fp32_sync=False)
  >>> ms.set_auto_parallel_context(parallel_mode="auto_parallel")
- >>> ms.set_auto_parallel_context(search_mode="dynamic_programming")
- >>> ms.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
+ >>> ms.set_auto_parallel_context(search_mode="recursive_programming")
+ >>> ms.set_auto_parallel_context(auto_parallel_search_mode="recursive_programming")
  >>> ms.set_auto_parallel_context(parameter_broadcast=False)
  >>> ms.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
  >>> ms.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
@@ -895,6 +960,7 @@ def set_auto_parallel_context(**kwargs):
  >>> ms.set_auto_parallel_context(enable_alltoall=False)
  >>> ms.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
  >>> ms.set_auto_parallel_context(pipeline_stages=2)
+ >>> ms.set_auto_parallel_context(pipeline_stages=2, pipeline_result_broadcast=True)
  >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24,
  ... "optimizer_weight_shard_size": 2}
  >>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
@@ -943,8 +1009,10 @@ def reset_auto_parallel_context():
  - strategy_ckpt_save_file: ''.
  - full_batch: False.
  - enable_parallel_optimizer: False.
+ - force_fp32_communication: False
  - enable_alltoall: False.
  - pipeline_stages: 1.
+ - pipeline_result_broadcast: False.
  - fusion_threshold: 64.

  Examples:
@@ -1043,7 +1111,7 @@ def _check_target_specific_cfgs(device, arg_key):
  max_device_memory=str, print_file_path=str, max_call_depth=int, env_config_path=str,
  graph_kernel_flags=str, save_compile_cache=bool, runtime_num_threads=int, load_compile_cache=bool,
  grad_for_scalar=bool, pynative_synchronize=bool, mempool_block_size=str, disable_format_transform=bool,
- op_timeout=int, deterministic=str, ascend_config=dict, jit_syntax_level=int,
+ op_timeout=int, deterministic=str, ascend_config=dict, jit_syntax_level=int, debug_level=int,
  jit_enable_inplace_ops=bool, gpu_config=dict)
  def set_context(**kwargs):
  """
@@ -1093,6 +1161,8 @@ def set_context(**kwargs):
  | | reserve_class_name_in_scope | CPU/GPU/Ascend |
  | +------------------------------+----------------------------+
  | | pynative_synchronize | CPU/GPU/Ascend |
+ | +------------------------------+----------------------------+
+ | | debug_level | CPU/GPU/Ascend |
  +-------------------------+------------------------------+----------------------------+
  | Executive Control | mode | CPU/GPU/Ascend |
  | +------------------------------+----------------------------+
@@ -1145,12 +1215,16 @@ def set_context(**kwargs):
  and max_device_memory. 'max_device_memory' should be set before the program runs.
  variable_memory_max_size (str): This parameter is deprecated, and will be removed in a future version.
  Please use parameter 'max_device_memory' instead.
- mempool_block_size (str): Set the size of the memory pool block in PyNative mode for devices.
+ mempool_block_size (str): Set the size of the memory pool block in PyNative mode or GRAPH_OP_RUN=1 for devices.
  The format is "xxGB". Default: ``"1GB"`` . Minimum size is "1G". The actual used memory block size is the
  minimum of the available memory of the device and mempool_block_size.
  op_timeout (int): Set the maximum duration of executing an operator in seconds.
- If the execution time exceeds this value, system will terminate the task. 0 means endless wait.
- Default: ``1900`` .
+ If the execution time exceeds this value, system will terminate the task.
+ 0 means endless wait. The defaults for AI Core and AICPU operators vary on different hardware.
+ For more information,
+ please refer to `Ascend Community
+ <https://www.hiascend.com/>`_.
+ Default: ``900`` .
  save_graphs (bool or int): Whether to save intermediate compilation graphs. Default: ``0`` .
  Available values are:

@@ -1162,7 +1236,7 @@ def set_context(**kwargs):
  When the `save_graphs` attribute is set as ``True`` , ``1`` , ``2`` or ``3`` , attribute of
  `save_graphs_path` is used to set the intermediate compilation graph storage path. By default, the graphs
  are saved in the current directory.
- save_graphs_path (str): Path to save graphs. Default: ".".
+ save_graphs_path (str): Path to save graphs. Default: ``"."``.
  If the specified directory does not exist, the system will automatically create the directory.
  During distributed training, graphs will be saved to the directory of
  `save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
@@ -1226,7 +1300,7 @@ def set_context(**kwargs):
  If enable_graph_kernel is set to ``True`` , acceleration can be enabled.
  For details of graph kernel fusion, please check
  `Enabling Graph Kernel Fusion
- <https://www.mindspore.cn/tutorials/experts/en/r2.2/optimize/graph_fusion_engine.html>`_.
+ <https://www.mindspore.cn/tutorials/experts/en/master/optimize/graph_fusion_engine.html>`_.
  graph_kernel_flags (str):
  Optimization options of graph kernel fusion, and the priority is higher when it conflicts
  with enable_graph_kernel. Only for experienced users.
@@ -1282,7 +1356,7 @@ def set_context(**kwargs):
  the compile cache is loaded. Note that only limited automatic detection for the changes of
  python scripts is supported by now, which means that there is a correctness risk. Default: ``False`` .
  This is an experimental prototype that is subject to change and/or deletion.
- compile_cache_path (str): Path to save the compile cache. Default: ".".
+ compile_cache_path (str): Path to save the compile cache. Default: ``"."``.
  If the specified directory does not exist, the system will automatically create the directory.
  The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
  the ID of the current device in the cluster.
@@ -1299,16 +1373,18 @@ def set_context(**kwargs):
  of the interfaces would be compiled by MindSpore to the interfaces definition .py file that should be
  guaranteed to be writable. Then compile the .py file to the .pyc or .so file, and could run in Graph mode.
  memory_optimize_level (str): The memory optimize level.
- Default: O0. The value must be in ['O0', 'O1'].
+ On Ascend hardware platform, default: ``O1``, on other hardware platforms, default: ``O0``.
+ The value must be in ['O0', 'O1'].

- - O0: priority performance option, disable SOMAS (Safe Optimized Memory Allocation Solver).
- - O1: priority memory option, enable SOMAS.
+ - O0: priority performance option, disable SOMAS (Safe Optimized Memory Allocation Solver)
+ and some other memory optimizations.
+ - O1: priority memory option, enable SOMAS and some other memory optimizations.
  memory_offload (str): Whether to enable the memory offload function. When it is enabled, the idle data will be
  temporarily copied to the host side in the case of insufficient device memory. The value must be in the
  range of ['ON', 'OFF'], and the default value is ``'OFF'`` .

  - ON: Enable the memory Offload function. On Ascend hardware platform, this parameter does not take effect
- when the environment variable "GRAPH_OP_RUN=1" is not set; This parameter does not take effect when
+ when the graph compilation level is not 'O0'; This parameter does not take effect when
  memory_optimize_level is set 'O1'.
  - OFF: Turn off the memory Offload function.
  ascend_config (dict): Set the parameters specific to Ascend hardware platform. It is not set by default.
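A minimal sketch of pairing the two memory settings described above; per those notes, offload only takes effect when SOMAS is disabled:

    import mindspore as ms

    # 'O0' keeps SOMAS off so memory offload can take effect; on Ascend the
    # offload path additionally requires graph compilation level 'O0'.
    ms.set_context(memory_optimize_level="O0")
    ms.set_context(memory_offload="ON")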
@@ -1319,22 +1395,27 @@ def set_context(**kwargs):
  is ``force_fp16`` . The value range is as follows:

  - force_fp16: When the operator supports both float16 and float32, select float16 directly.
- - allow_fp32_to_fp16: When the operator does not support the float32 data type, directly reduce
- the precision of float16.
+ - allow_fp32_to_fp16: For cube operators, use the float16. For vector operators,
+ prefer to keep the origin dtype, if the operator in model can support float32,
+ it will keep original dtype, otherwise it will reduce to float16.
  - allow_mix_precision: Automatic mixing precision, facing the whole network operator, according
  to the built-in optimization strategy, automatically reduces the precision of some operators
  to float16 or bfloat16.
  - must_keep_origin_dtype: Keep the accuracy of the original drawing.
  - force_fp32: When the input of the matrix calculation operator is float16 and the output supports
  float16 and float32, output is forced to float32.
- - allow_fp32_to_bf16: When the operator does not support the float32 data type, directly reduce
- the precision of bfloat16.
+ - allow_fp32_to_bf16: For cube operators, use the bfloat16. For vector operators,
+ prefer to keep the origin dtype, if the operator in model can support float32,
+ it will keep original dtype, otherwise it will reduce to bfloat16.
  - allow_mix_precision_fp16: Automatic mixing precision, facing the whole network operator, automatically
  reduces the precision of some operators to float16 according to the built-in optimization strategy.
  - allow_mix_precision_bf16: Automatic mixing precision, facing the whole network operator, according to
  the built-in optimization strategy, automatically reduces the precision of some operators to bfloat16.

- - jit_compile (bool): Whether to select online compilation. the default value is based on CANN.
+ - jit_compile (bool): Whether to select online compilation. When set to 'True', online compilation is
+ prioritized. When set to 'False', compiled operator binary files are prioritized to improve compilation
+ performance. The default settings are online compilation for static shape, and compiled operator binary
+ files for dynamic shape.
  - atomic_clean_policy (int): The policy for cleaning memory occupied by atomic operators in the network.
  Default: ``1`` .

@@ -1354,20 +1435,50 @@ def set_context(**kwargs):
  for ``"2"``, inputs will be dumped for AICore exception operators. Default: ``"2"`` .
  - op_precision_mode (str): Path to config file of op precision mode. For detailed information, please refer
  to `Ascend community <https://www.hiascend.com/>`_ .
+ - ge_options (dict): Set options for CANN. The options are divided into two categories: global and session.
+ This is an experimental prototype that is subject to change and/or deletion.
+ For detailed information, please refer to `Ascend community <https://www.hiascend.com/document/detail/zh/canncommercial/70RC1/inferapplicationdev/graphdevg/atlasgeapi_07_0119.html>`_ .
+ The configuration options in `ge_options` may be duplicated with the options in `ascend_config`. If the
+ same configuration options are set in both `ascend_config` and `ge_options`, the one set in `ge_options`
+ shall prevail.
+
+ - global (dict): Set global options.
+ - session (dict): Set session options.
+
  - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file, configuration
  can refer to `parallel_speed_up.json
- <https://gitee.com/mindspore/mindspore/blob/r2.2/config/parallel_speed_up.json>`_ .
+ <https://gitee.com/mindspore/mindspore/blob/master/config/parallel_speed_up.json>`_ .
  If its value is None or '', it does not take effect. Default None.

  - recompute_comm_overlap (bool): Enable overlap between recompute ops and communication ops if True.
  Default: False.
- - matmul_grad_comm_overlap (bool): Enable overlap between grad ops and communication ops if True.
- Default: False.
+ - matmul_grad_comm_overlap (bool): Enable overlap between dw matmul and
+ tensor parallel communication ops if True. Default: False.
  - enable_task_opt (bool): Enable the optimization of the number of tasks for each communication if True.
  Default: False.
- - interleaved_matmul_comm (bool): Enable interleaved optimization of Matmul-Comm if True. Default: False.
- - interleaved_layernorm_comm (bool): Enable interleaved optimization of LayerNorm-Comm if True.
+ - enable_grad_comm_opt (bool): Enable overlap between dx ops and data parallel communication ops if True.
+ Currently, do not support
+ `LazyInline <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.lazy_inline.html>`
+ Default: False.
+ - enable_opt_shard_comm_opt (bool): Enable overlap between forward ops
+ and optimizer parallel allgather communication if True. Currently, do not support
+ `LazyInline <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.lazy_inline.html>`
  Default: False.
+ - compute_communicate_fusion_level (int): Enable the fusion between compute and communicate.
+ Default: ``0``.
+
+ - 0: Disable fusion.
+
+ - 1: Apply fusion to forward nodes.
+
+ - 2: Apply fusion to backward nodes.
+
+ - 3: Apply fusion to all nodes.
+ - host_scheduling_max_threshold(int): The max threshold to control whether the dynamic shape process is
+ used when run the static graph, the default value is 0. When the number of operations in the static graph
+ is less than the max threshold, this graph will be executed in dynamic shape process. In large model
+ scenarios, this approach can save stream resources. If the number of operations in the static graph is
+ greater than the maximum threshold, this graph will be executed in original static process.

  jit_syntax_level (int): Set JIT syntax level for graph compiling, triggered by GRAPH_MODE and @jit decorator.
  The value must be ``STRICT`` or ``LAX`` . Default: ``LAX`` . All levels support all backends.
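A minimal sketch combining several of the `ascend_config` keys documented above; the values are illustrative and Ascend hardware is assumed:

    import mindspore as ms

    ms.set_context(ascend_config={
        # Prefer pre-compiled operator binaries over online compilation.
        "jit_compile": False,
        # Automatic mixed precision across the whole network.
        "precision_mode": "allow_mix_precision",
        # Let small static graphs run through the dynamic-shape process to
        # save stream resources (0, the default, disables this behaviour).
        "host_scheduling_max_threshold": 100,
    })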
@@ -1378,6 +1489,12 @@ def set_context(**kwargs):
  affected and not optimal. Cannot be used for MindIR load and export due to some syntax that may not be
  able to be exported.

+ debug_level (int): Set config for debugging. Default value: ``RELEASE``.
+
+ - ``RELEASE``: Used for normally running, and some debug information will be discard to get a better
+ compiling performance.
+ - ``DEBUG``: Used for debugging when errors occur, more information will be record in compiling process.
+
  gpu_config (dict): Set the parameters specific to gpu hardware platform. It is not set by default.
  Currently, only setting `conv_fprop_algo` and `conv_dgrad_algo` and `conv_wgrad_algo` and `conv_allow_tf32`
  and `matmul_allow_tf32` are supported on GPU hardware platform.
@@ -1482,8 +1599,11 @@ def set_context(**kwargs):
  >>> ms.set_context(memory_offload='ON')
  >>> ms.set_context(deterministic='ON')
  >>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True,
- ... "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file"})
+ ... "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file",
+ ... "ge_options": {"global": {"ge.opSelectImplmode": "high_precision"},
+ ... "session": {"ge.exec.atomicCleanPolicy": "0"}}})
  >>> ms.set_context(jit_syntax_level=ms.STRICT)
+ >>> ms.set_context(debug_level=ms.DEBUG)
  >>> ms.set_context(gpu_config={"conv_fprop_algo": "performance", "conv_allow_tf32": True,
  ... "matmul_allow_tf32": True})
  """
@@ -1502,7 +1622,7 @@ def set_context(**kwargs):
  "For details, please see the interface parameter API comments")
  continue
  if key in ('precision_mode', 'jit_compile', 'atomic_clean_policy', 'matmul_allow_hf32', 'conv_allow_hf32',
- 'op_precision_mode'):
+ 'op_precision_mode', 'host_scheduling_max_threshold', 'ge_options'):
  raise ValueError(f"Please set '{key}' through parameter ascend_config")
  if key == 'save_graphs':
  if value is True:
@@ -1514,6 +1634,9 @@ def set_context(**kwargs):
  if key == 'jit_syntax_level' and value not in (STRICT, COMPATIBLE, LAX):
  raise ValueError(f"For 'jit_syntax_level', the value should be context.STRICT"
  f" or context.LAX, but got {value}.")
+ if key == 'debug_level' and value not in (RELEASE, DEBUG):
+ raise ValueError(f"For 'debug_level', the value should be context.DEBUG"
+ f" or context.RELEASE, but got {value}.")
  if not _check_target_specific_cfgs(device, key):
  continue
  if hasattr(ctx, key):
@@ -1668,9 +1791,7 @@ def get_ps_context(attr_key):

  def reset_ps_context():
  """
- Reset parameter server training mode context attributes to the default values:
-
- - enable_ps: False.
+ Reset parameter server training mode context attributes to the default values.

  Meaning of each field and its default value refer to :func:`mindspore.set_ps_context`.

@@ -21,7 +21,7 @@ Besides, this module provides APIs to sample data while loading.

  We can enable cache in most of the dataset with its key arguments 'cache'. Please notice that cache is not supported
  on Windows platform yet. Do not use it while loading and processing data on Windows. More introductions and limitations
- can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.2/dataset/cache.html>`_ .
+ can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/master/dataset/cache.html>`_ .

  Common imported modules in corresponding API examples are as follows:

@@ -55,11 +55,11 @@ The specific steps are as follows:
  - Dataset operation: The user uses the dataset object method `.shuffle` / `.filter` / `.skip` / `.split` /
  `.take` / ... to further shuffle, filter, skip, and obtain the maximum number of samples of datasets;
  - Dataset sample transform operation: The user can add data transform operations
- ( `vision transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
+ ( `vision transform <https://mindspore.cn/docs/en/master/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.vision>`_ ,
- `NLP transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
+ `NLP transform <https://mindspore.cn/docs/en/master/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.text>`_ ,
- `audio transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
+ `audio transform <https://mindspore.cn/docs/en/master/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.audio>`_ ) to the map
  operation to perform transformations. During data preprocessing, multiple map operations can be defined to
  perform different transform operations to different fields. The data transform operation can also be a
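A minimal sketch of the transform-plus-map step described above; the dataset path and the chosen vision transforms are illustrative:

    import mindspore.dataset as ds
    import mindspore.dataset.vision as vision

    # Build a pipeline, then attach vision transforms to the "image" column
    # through the map operation.
    dataset = ds.ImageFolderDataset("./my_images", decode=True)
    dataset = dataset.map(operations=[vision.Resize((224, 224)),
                                      vision.HWC2CHW()],
                          input_columns=["image"])
    dataset = dataset.shuffle(buffer_size=64).batch(32)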
@@ -73,7 +73,7 @@ Quick start of Dataset Pipeline
  -------------------------------

  For a quick start of using Dataset Pipeline, download `Load & Process Data With Dataset Pipeline
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/dataset_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/dataset_gallery.html>`_
  to local and run in sequence.

  """
@@ -40,22 +40,22 @@ Descriptions of common data processing terms are as follows:
  The data transform operation can be executed in the data processing pipeline or in the eager mode:

  - Pipeline mode is generally used to process big datasets. Examples refer to
- `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.2/api_python/
+ `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/master/api_python/
  mindspore.dataset.html#introduction-to-data-processing-pipeline>`_ .
  - Eager mode is more like a function call to process data. Examples refer to
- `Lightweight Data Processing <https://www.mindspore.cn/tutorials/en/r2.2/advanced/dataset/eager.html>`_ .
+ `Lightweight Data Processing <https://www.mindspore.cn/tutorials/en/master/advanced/dataset/eager.html>`_ .
  """
  from __future__ import absolute_import

- from mindspore.dataset.audio import transforms
- from mindspore.dataset.audio import utils
- from mindspore.dataset.audio.transforms import AllpassBiquad, AmplitudeToDB, Angle, BandBiquad, \
+ from . import transforms
+ from . import utils
+ from .transforms import AllpassBiquad, AmplitudeToDB, Angle, BandBiquad, \
  BandpassBiquad, BandrejectBiquad, BassBiquad, Biquad, ComplexNorm, ComputeDeltas, Contrast, DBToAmplitude, \
  DCShift, DeemphBiquad, DetectPitchFrequency, Dither, EqualizerBiquad, Fade, Filtfilt, Flanger, FrequencyMasking, \
  Gain, GriffinLim, HighpassBiquad, InverseMelScale, InverseSpectrogram, LFCC, LFilter, LowpassBiquad, Magphase, \
  MaskAlongAxis, MaskAlongAxisIID, MelScale, MelSpectrogram, MFCC, MuLawDecoding, MuLawEncoding, Overdrive, \
  Phaser, PhaseVocoder, PitchShift, Resample, RiaaBiquad, SlidingWindowCmn, SpectralCentroid, Spectrogram, \
  TimeMasking, TimeStretch, TrebleBiquad, Vad, Vol
- from mindspore.dataset.audio.utils import BorderType, DensityFunction, FadeShape, GainType, Interpolation, \
+ from .utils import BorderType, DensityFunction, FadeShape, GainType, Interpolation, \
  MelType, Modulation, NormMode, NormType, ResampleMethod, ScaleType, WindowType, create_dct, linear_fbanks, \
  melscale_fbanks
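The switch to relative imports above keeps the public `mindspore.dataset.audio` surface unchanged. A minimal eager-mode sketch with one of the re-exported transforms; the waveform is random data for illustration:

    import numpy as np
    from mindspore.dataset.audio import BandBiquad

    # Apply a band filter directly to a NumPy waveform without building a
    # dataset pipeline (eager mode).
    waveform = np.random.random([1, 16000]).astype(np.float32)
    filtered = BandBiquad(sample_rate=16000, central_freq=200.0)(waveform)
    print(filtered.shape)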