mindspore-2.2.14-cp39-cp39-win_amd64.whl → mindspore-2.3.0-cp39-cp39-win_amd64.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore has been flagged as possibly problematic.

Files changed (1166)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -5
  5. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +76 -18
  9. mindspore/_extends/builtin_operations.py +2 -1
  10. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  11. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  12. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  13. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  14. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  15. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  16. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  17. mindspore/_extends/parse/__init__.py +18 -14
  18. mindspore/_extends/parse/compile_config.py +258 -0
  19. mindspore/_extends/parse/namespace.py +2 -2
  20. mindspore/_extends/parse/parser.py +174 -62
  21. mindspore/_extends/parse/resources.py +45 -14
  22. mindspore/_extends/parse/standard_method.py +142 -240
  23. mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
  24. mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
  25. mindspore/_extends/remote/kernel_build_server.py +2 -0
  26. mindspore/_profiler.py +30 -0
  27. mindspore/amp.py +51 -24
  28. mindspore/atlprov.dll +0 -0
  29. mindspore/avcodec-59.dll +0 -0
  30. mindspore/avdevice-59.dll +0 -0
  31. mindspore/avfilter-8.dll +0 -0
  32. mindspore/avformat-59.dll +0 -0
  33. mindspore/avutil-57.dll +0 -0
  34. mindspore/boost/adasum.py +1 -1
  35. mindspore/boost/base.py +1 -1
  36. mindspore/boost/boost_cell_wrapper.py +2 -2
  37. mindspore/boost/grad_freeze.py +2 -2
  38. mindspore/boost/group_loss_scale_manager.py +1 -1
  39. mindspore/boost/less_batch_normalization.py +9 -6
  40. mindspore/c1.dll +0 -0
  41. mindspore/c1xx.dll +0 -0
  42. mindspore/c2.dll +0 -0
  43. mindspore/common/__init__.py +15 -4
  44. mindspore/common/_jit_fallback_utils.py +2 -3
  45. mindspore/common/_register_for_adapter.py +7 -0
  46. mindspore/common/_register_for_recompute.py +48 -0
  47. mindspore/common/_register_for_tensor.py +8 -9
  48. mindspore/common/_stub_tensor.py +7 -1
  49. mindspore/common/_utils.py +5 -17
  50. mindspore/common/api.py +411 -106
  51. mindspore/common/auto_dynamic_shape.py +27 -14
  52. mindspore/common/dtype.py +17 -10
  53. mindspore/common/dump.py +6 -8
  54. mindspore/common/file_system.py +48 -0
  55. mindspore/common/generator.py +260 -0
  56. mindspore/common/hook_handle.py +51 -4
  57. mindspore/common/initializer.py +1 -1
  58. mindspore/common/jit_config.py +34 -14
  59. mindspore/common/lazy_inline.py +72 -19
  60. mindspore/common/mindir_util.py +12 -2
  61. mindspore/common/mutable.py +79 -14
  62. mindspore/common/no_inline.py +54 -0
  63. mindspore/common/np_dtype.py +25 -0
  64. mindspore/common/parameter.py +30 -11
  65. mindspore/common/recompute.py +262 -0
  66. mindspore/common/seed.py +9 -9
  67. mindspore/common/sparse_tensor.py +272 -24
  68. mindspore/common/symbol.py +122 -0
  69. mindspore/common/tensor.py +468 -494
  70. mindspore/communication/__init__.py +6 -11
  71. mindspore/communication/_comm_helper.py +5 -0
  72. mindspore/communication/comm_func.py +1140 -0
  73. mindspore/communication/management.py +115 -102
  74. mindspore/config/op_info.config +22 -54
  75. mindspore/context.py +346 -63
  76. mindspore/dataset/__init__.py +5 -5
  77. mindspore/dataset/audio/__init__.py +6 -6
  78. mindspore/dataset/audio/transforms.py +711 -158
  79. mindspore/dataset/callback/ds_callback.py +2 -2
  80. mindspore/dataset/engine/cache_client.py +2 -2
  81. mindspore/dataset/engine/datasets.py +140 -83
  82. mindspore/dataset/engine/datasets_audio.py +14 -14
  83. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  84. mindspore/dataset/engine/datasets_text.py +38 -38
  85. mindspore/dataset/engine/datasets_user_defined.py +78 -59
  86. mindspore/dataset/engine/datasets_vision.py +77 -73
  87. mindspore/dataset/engine/offload.py +5 -7
  88. mindspore/dataset/engine/queue.py +56 -38
  89. mindspore/dataset/engine/validators.py +11 -5
  90. mindspore/dataset/text/__init__.py +3 -3
  91. mindspore/dataset/text/transforms.py +408 -121
  92. mindspore/dataset/text/utils.py +9 -9
  93. mindspore/dataset/transforms/__init__.py +1 -1
  94. mindspore/dataset/transforms/transforms.py +261 -76
  95. mindspore/dataset/utils/browse_dataset.py +9 -9
  96. mindspore/dataset/vision/__init__.py +8 -8
  97. mindspore/dataset/vision/c_transforms.py +10 -10
  98. mindspore/dataset/vision/py_transforms_util.py +1 -1
  99. mindspore/dataset/vision/transforms.py +2844 -549
  100. mindspore/dataset/vision/utils.py +161 -10
  101. mindspore/dataset/vision/validators.py +14 -2
  102. mindspore/dnnl.dll +0 -0
  103. mindspore/dpcmi.dll +0 -0
  104. mindspore/experimental/optim/__init__.py +12 -2
  105. mindspore/experimental/optim/adadelta.py +161 -0
  106. mindspore/experimental/optim/adagrad.py +168 -0
  107. mindspore/experimental/optim/adam.py +35 -34
  108. mindspore/experimental/optim/adamax.py +170 -0
  109. mindspore/experimental/optim/adamw.py +40 -16
  110. mindspore/experimental/optim/asgd.py +153 -0
  111. mindspore/experimental/optim/lr_scheduler.py +66 -121
  112. mindspore/experimental/optim/nadam.py +157 -0
  113. mindspore/experimental/optim/optimizer.py +15 -8
  114. mindspore/experimental/optim/radam.py +194 -0
  115. mindspore/experimental/optim/rmsprop.py +154 -0
  116. mindspore/experimental/optim/rprop.py +164 -0
  117. mindspore/experimental/optim/sgd.py +28 -19
  118. mindspore/hal/__init__.py +40 -0
  119. mindspore/hal/_ascend.py +57 -0
  120. mindspore/hal/_base.py +57 -0
  121. mindspore/hal/_cpu.py +56 -0
  122. mindspore/hal/_gpu.py +57 -0
  123. mindspore/hal/device.py +356 -0
  124. mindspore/hal/event.py +179 -0
  125. mindspore/hal/memory.py +326 -0
  126. mindspore/hal/stream.py +339 -0
  127. mindspore/include/api/data_type.h +2 -2
  128. mindspore/include/api/dual_abi_helper.h +16 -3
  129. mindspore/include/api/model.h +4 -3
  130. mindspore/include/api/status.h +14 -0
  131. mindspore/include/c_api/model_c.h +173 -0
  132. mindspore/include/c_api/ms/base/types.h +1 -0
  133. mindspore/include/c_api/types_c.h +19 -0
  134. mindspore/include/dataset/execute.h +1 -3
  135. mindspore/include/dataset/vision.h +54 -2
  136. mindspore/jpeg62.dll +0 -0
  137. mindspore/log.py +2 -2
  138. mindspore/mindrecord/__init__.py +5 -1
  139. mindspore/mindrecord/config.py +809 -0
  140. mindspore/mindrecord/filereader.py +25 -0
  141. mindspore/mindrecord/filewriter.py +76 -58
  142. mindspore/mindrecord/mindpage.py +40 -6
  143. mindspore/mindrecord/shardutils.py +3 -2
  144. mindspore/mindrecord/shardwriter.py +7 -0
  145. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  146. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  147. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  148. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  149. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  150. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  151. mindspore/mindspore_backend.dll +0 -0
  152. mindspore/mindspore_common.dll +0 -0
  153. mindspore/mindspore_core.dll +0 -0
  154. mindspore/mindspore_glog.dll +0 -0
  155. mindspore/mindspore_np_dtype.dll +0 -0
  156. mindspore/mindspore_shared_lib.dll +0 -0
  157. mindspore/mint/__init__.py +1137 -0
  158. mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
  159. mindspore/mint/nn/__init__.py +512 -0
  160. mindspore/mint/nn/functional.py +573 -0
  161. mindspore/mint/optim/__init__.py +24 -0
  162. mindspore/mint/optim/adamw.py +185 -0
  163. mindspore/msobj140.dll +0 -0
  164. mindspore/mspdb140.dll +0 -0
  165. mindspore/mspdbcore.dll +0 -0
  166. mindspore/mspdbst.dll +0 -0
  167. mindspore/mspft140.dll +0 -0
  168. mindspore/msvcdis140.dll +0 -0
  169. mindspore/msvcp140_1.dll +0 -0
  170. mindspore/msvcp140_2.dll +0 -0
  171. mindspore/msvcp140_atomic_wait.dll +0 -0
  172. mindspore/msvcp140_codecvt_ids.dll +0 -0
  173. mindspore/multiprocessing/__init__.py +72 -0
  174. mindspore/nn/__init__.py +1 -0
  175. mindspore/nn/cell.py +213 -257
  176. mindspore/nn/dynamic_lr.py +2 -2
  177. mindspore/nn/extend/__init__.py +29 -0
  178. mindspore/nn/extend/basic.py +140 -0
  179. mindspore/nn/extend/embedding.py +143 -0
  180. mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
  181. mindspore/nn/extend/layer/normalization.py +109 -0
  182. mindspore/nn/extend/pooling.py +117 -0
  183. mindspore/nn/layer/activation.py +83 -93
  184. mindspore/nn/layer/basic.py +177 -82
  185. mindspore/nn/layer/channel_shuffle.py +3 -16
  186. mindspore/nn/layer/container.py +3 -3
  187. mindspore/nn/layer/conv.py +75 -66
  188. mindspore/nn/layer/embedding.py +101 -43
  189. mindspore/nn/layer/embedding_service.py +531 -0
  190. mindspore/nn/layer/embedding_service_layer.py +393 -0
  191. mindspore/nn/layer/image.py +4 -7
  192. mindspore/nn/layer/math.py +1 -1
  193. mindspore/nn/layer/normalization.py +52 -66
  194. mindspore/nn/layer/padding.py +30 -39
  195. mindspore/nn/layer/pooling.py +18 -9
  196. mindspore/nn/layer/rnn_cells.py +6 -16
  197. mindspore/nn/layer/rnns.py +6 -5
  198. mindspore/nn/layer/thor_layer.py +1 -2
  199. mindspore/nn/layer/timedistributed.py +1 -1
  200. mindspore/nn/layer/transformer.py +52 -50
  201. mindspore/nn/learning_rate_schedule.py +6 -5
  202. mindspore/nn/loss/loss.py +62 -83
  203. mindspore/nn/optim/ada_grad.py +4 -2
  204. mindspore/nn/optim/adadelta.py +3 -1
  205. mindspore/nn/optim/adafactor.py +1 -1
  206. mindspore/nn/optim/adam.py +102 -181
  207. mindspore/nn/optim/adamax.py +4 -2
  208. mindspore/nn/optim/adasum.py +3 -3
  209. mindspore/nn/optim/asgd.py +4 -2
  210. mindspore/nn/optim/ftrl.py +31 -61
  211. mindspore/nn/optim/lamb.py +5 -3
  212. mindspore/nn/optim/lars.py +2 -2
  213. mindspore/nn/optim/lazyadam.py +6 -4
  214. mindspore/nn/optim/momentum.py +13 -25
  215. mindspore/nn/optim/optimizer.py +6 -3
  216. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  217. mindspore/nn/optim/rmsprop.py +9 -3
  218. mindspore/nn/optim/rprop.py +4 -2
  219. mindspore/nn/optim/sgd.py +5 -3
  220. mindspore/nn/optim/thor.py +2 -2
  221. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  222. mindspore/nn/probability/distribution/beta.py +2 -2
  223. mindspore/nn/probability/distribution/categorical.py +4 -6
  224. mindspore/nn/probability/distribution/cauchy.py +2 -2
  225. mindspore/nn/probability/distribution/exponential.py +2 -2
  226. mindspore/nn/probability/distribution/geometric.py +1 -1
  227. mindspore/nn/probability/distribution/gumbel.py +2 -2
  228. mindspore/nn/probability/distribution/logistic.py +1 -1
  229. mindspore/nn/probability/distribution/poisson.py +2 -2
  230. mindspore/nn/probability/distribution/uniform.py +2 -2
  231. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  232. mindspore/nn/wrap/__init__.py +2 -1
  233. mindspore/nn/wrap/cell_wrapper.py +58 -13
  234. mindspore/nn/wrap/grad_reducer.py +148 -8
  235. mindspore/nn/wrap/loss_scale.py +32 -9
  236. mindspore/numpy/__init__.py +2 -0
  237. mindspore/numpy/array_creations.py +2 -0
  238. mindspore/numpy/array_ops.py +6 -6
  239. mindspore/numpy/dtypes.py +3 -3
  240. mindspore/numpy/fft.py +431 -0
  241. mindspore/numpy/math_ops.py +62 -68
  242. mindspore/numpy/utils.py +3 -0
  243. mindspore/opencv_core452.dll +0 -0
  244. mindspore/opencv_imgcodecs452.dll +0 -0
  245. mindspore/opencv_imgproc452.dll +0 -0
  246. mindspore/ops/__init__.py +6 -5
  247. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  248. mindspore/ops/_grad_experimental/grad_comm_ops.py +89 -34
  249. mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
  250. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  251. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  252. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  253. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  254. mindspore/ops/_op_impl/__init__.py +0 -1
  255. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  256. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  257. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  258. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  259. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  260. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  261. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  262. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  263. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  264. mindspore/ops/_vmap/vmap_array_ops.py +164 -101
  265. mindspore/ops/_vmap/vmap_base.py +8 -1
  266. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  267. mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
  268. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  269. mindspore/ops/_vmap/vmap_math_ops.py +130 -58
  270. mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
  271. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  272. mindspore/ops/auto_generate/__init__.py +31 -0
  273. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
  274. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
  275. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  276. mindspore/ops/auto_generate/gen_extend_func.py +980 -0
  277. mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
  278. mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
  279. mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
  280. mindspore/ops/composite/__init__.py +5 -2
  281. mindspore/ops/composite/base.py +121 -23
  282. mindspore/ops/composite/math_ops.py +10 -49
  283. mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
  284. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
  285. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  286. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  287. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  288. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  289. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  290. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  291. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  292. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  293. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  294. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  296. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  297. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  298. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  299. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  300. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  301. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  302. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  303. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  304. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  305. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  306. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  307. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  308. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  309. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  310. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  311. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  312. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  313. mindspore/ops/deprecated.py +14 -3
  314. mindspore/ops/extend/__init__.py +53 -0
  315. mindspore/ops/extend/array_func.py +218 -0
  316. mindspore/ops/extend/math_func.py +76 -0
  317. mindspore/ops/extend/nn_func.py +308 -0
  318. mindspore/ops/function/__init__.py +31 -11
  319. mindspore/ops/function/array_func.py +846 -1735
  320. mindspore/ops/function/clip_func.py +19 -31
  321. mindspore/ops/function/debug_func.py +1 -4
  322. mindspore/ops/function/fft_func.py +31 -0
  323. mindspore/ops/function/grad/grad_func.py +27 -20
  324. mindspore/ops/function/image_func.py +27 -21
  325. mindspore/ops/function/linalg_func.py +35 -68
  326. mindspore/ops/function/math_func.py +913 -2791
  327. mindspore/ops/function/nn_func.py +1439 -885
  328. mindspore/ops/function/other_func.py +6 -7
  329. mindspore/ops/function/parameter_func.py +5 -93
  330. mindspore/ops/function/random_func.py +254 -108
  331. mindspore/ops/function/reshard_func.py +102 -0
  332. mindspore/ops/function/sparse_func.py +4 -4
  333. mindspore/ops/function/sparse_unary_func.py +9 -16
  334. mindspore/ops/function/spectral_func.py +1 -1
  335. mindspore/ops/function/vmap_func.py +14 -14
  336. mindspore/ops/functional.py +342 -343
  337. mindspore/ops/op_info_register.py +16 -43
  338. mindspore/ops/operations/__init__.py +32 -23
  339. mindspore/ops/operations/_grad_ops.py +21 -853
  340. mindspore/ops/operations/_infer_ops.py +19 -0
  341. mindspore/ops/operations/_inner_ops.py +107 -518
  342. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  343. mindspore/ops/operations/_scalar_ops.py +5 -480
  344. mindspore/ops/operations/_sequence_ops.py +6 -36
  345. mindspore/ops/operations/_tensor_array.py +8 -8
  346. mindspore/ops/operations/array_ops.py +108 -2705
  347. mindspore/ops/operations/comm_ops.py +801 -118
  348. mindspore/ops/operations/custom_ops.py +61 -120
  349. mindspore/ops/operations/debug_ops.py +104 -35
  350. mindspore/ops/operations/image_ops.py +1 -217
  351. mindspore/ops/operations/inner_ops.py +5 -40
  352. mindspore/ops/operations/linalg_ops.py +1 -49
  353. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  354. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  355. mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
  356. mindspore/ops/operations/math_ops.py +572 -4667
  357. mindspore/ops/operations/nn_ops.py +248 -2162
  358. mindspore/ops/operations/other_ops.py +53 -45
  359. mindspore/ops/operations/random_ops.py +4 -53
  360. mindspore/ops/operations/reshard_ops.py +53 -0
  361. mindspore/ops/operations/sparse_ops.py +4 -4
  362. mindspore/ops/primitive.py +204 -103
  363. mindspore/ops/silent_check.py +5 -5
  364. mindspore/ops_generate/__init__.py +27 -0
  365. mindspore/ops_generate/arg_dtype_cast.py +250 -0
  366. mindspore/ops_generate/arg_handler.py +197 -0
  367. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  368. mindspore/ops_generate/gen_ops.py +1084 -0
  369. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  370. mindspore/ops_generate/gen_pyboost_func.py +968 -0
  371. mindspore/ops_generate/gen_utils.py +209 -0
  372. mindspore/ops_generate/op_proto.py +138 -0
  373. mindspore/ops_generate/pyboost_utils.py +354 -0
  374. mindspore/ops_generate/template.py +239 -0
  375. mindspore/parallel/__init__.py +6 -4
  376. mindspore/parallel/_auto_parallel_context.py +73 -3
  377. mindspore/parallel/_cell_wrapper.py +16 -9
  378. mindspore/parallel/_cost_model_context.py +1 -1
  379. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  380. mindspore/parallel/_parallel_serialization.py +29 -13
  381. mindspore/parallel/_ps_context.py +1 -1
  382. mindspore/parallel/_recovery_context.py +1 -1
  383. mindspore/parallel/_tensor.py +18 -11
  384. mindspore/parallel/_transformer/__init__.py +1 -1
  385. mindspore/parallel/_transformer/layers.py +1 -1
  386. mindspore/parallel/_transformer/loss.py +1 -1
  387. mindspore/parallel/_transformer/moe.py +1 -1
  388. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  389. mindspore/parallel/_transformer/transformer.py +2 -2
  390. mindspore/parallel/_utils.py +161 -6
  391. mindspore/parallel/algo_parameter_config.py +6 -8
  392. mindspore/parallel/checkpoint_transform.py +191 -32
  393. mindspore/parallel/cluster/__init__.py +15 -0
  394. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  395. mindspore/parallel/cluster/process_entity/_api.py +344 -0
  396. mindspore/parallel/cluster/process_entity/_utils.py +126 -0
  397. mindspore/parallel/cluster/run.py +136 -0
  398. mindspore/parallel/mpi/__init__.py +1 -1
  399. mindspore/parallel/mpi/_mpi_config.py +1 -1
  400. mindspore/parallel/parameter_broadcast.py +152 -0
  401. mindspore/parallel/shard.py +128 -17
  402. mindspore/pgodb140.dll +0 -0
  403. mindspore/pgort140.dll +0 -0
  404. mindspore/profiler/__init__.py +3 -2
  405. mindspore/profiler/common/process_pool.py +41 -0
  406. mindspore/profiler/common/singleton.py +28 -0
  407. mindspore/profiler/common/util.py +125 -0
  408. mindspore/profiler/envprofiling.py +2 -2
  409. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  410. mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
  411. mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
  412. mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
  413. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
  414. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
  415. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
  416. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
  417. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  418. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
  419. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  420. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  421. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  422. mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
  423. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  424. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  425. mindspore/profiler/parser/ascend_msprof_exporter.py +147 -146
  426. mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
  427. mindspore/profiler/parser/ascend_op_generator.py +92 -42
  428. mindspore/profiler/parser/ascend_timeline_generator.py +296 -133
  429. mindspore/profiler/parser/base_timeline_generator.py +6 -0
  430. mindspore/profiler/parser/framework_parser.py +3 -2
  431. mindspore/profiler/parser/integrator.py +3 -1
  432. mindspore/profiler/parser/minddata_parser.py +72 -3
  433. mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
  434. mindspore/profiler/parser/msadvisor_parser.py +1 -1
  435. mindspore/profiler/parser/profiler_info.py +16 -1
  436. mindspore/profiler/profiling.py +445 -190
  437. mindspore/rewrite/__init__.py +2 -13
  438. mindspore/rewrite/api/node.py +122 -36
  439. mindspore/rewrite/api/pattern_engine.py +2 -3
  440. mindspore/rewrite/api/scoped_value.py +16 -15
  441. mindspore/rewrite/api/symbol_tree.py +45 -29
  442. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  443. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  444. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  445. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  446. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  447. mindspore/rewrite/common/__init__.py +1 -2
  448. mindspore/rewrite/common/config.py +24 -0
  449. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  450. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  451. mindspore/rewrite/common/namespace.py +118 -0
  452. mindspore/rewrite/node/__init__.py +5 -5
  453. mindspore/rewrite/node/call_function.py +23 -7
  454. mindspore/rewrite/node/cell_container.py +7 -3
  455. mindspore/rewrite/node/control_flow.py +53 -28
  456. mindspore/rewrite/node/node.py +212 -196
  457. mindspore/rewrite/node/node_manager.py +51 -22
  458. mindspore/rewrite/node/node_topological_manager.py +3 -23
  459. mindspore/rewrite/parsers/__init__.py +12 -0
  460. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  461. mindspore/rewrite/parsers/assign_parser.py +637 -413
  462. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  463. mindspore/rewrite/parsers/class_def_parser.py +115 -148
  464. mindspore/rewrite/parsers/constant_parser.py +5 -5
  465. mindspore/rewrite/parsers/container_parser.py +4 -6
  466. mindspore/rewrite/parsers/expr_parser.py +55 -0
  467. mindspore/rewrite/parsers/for_parser.py +31 -98
  468. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  469. mindspore/rewrite/parsers/if_parser.py +28 -10
  470. mindspore/rewrite/parsers/module_parser.py +8 -182
  471. mindspore/rewrite/parsers/parser.py +1 -5
  472. mindspore/rewrite/parsers/parser_register.py +1 -1
  473. mindspore/rewrite/parsers/return_parser.py +5 -10
  474. mindspore/rewrite/parsers/while_parser.py +59 -0
  475. mindspore/rewrite/sparsify/utils.py +1 -1
  476. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  477. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  478. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  479. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  480. mindspore/run_check/_check_version.py +6 -14
  481. mindspore/run_check/run_check.py +1 -1
  482. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  483. mindspore/swresample-4.dll +0 -0
  484. mindspore/swscale-6.dll +0 -0
  485. mindspore/tbbmalloc.dll +0 -0
  486. mindspore/tinyxml2.dll +0 -0
  487. mindspore/train/__init__.py +6 -5
  488. mindspore/train/_utils.py +178 -4
  489. mindspore/train/amp.py +167 -245
  490. mindspore/train/anf_ir_pb2.py +14 -2
  491. mindspore/train/callback/__init__.py +5 -2
  492. mindspore/train/callback/_backup_and_restore.py +5 -5
  493. mindspore/train/callback/_callback.py +4 -4
  494. mindspore/train/callback/_checkpoint.py +143 -29
  495. mindspore/train/callback/_cluster_monitor.py +201 -0
  496. mindspore/train/callback/_early_stop.py +2 -2
  497. mindspore/train/callback/_flops_collector.py +238 -0
  498. mindspore/train/callback/_landscape.py +15 -9
  499. mindspore/train/callback/_loss_monitor.py +2 -2
  500. mindspore/train/callback/_mindio_ttp.py +443 -0
  501. mindspore/train/callback/_on_request_exit.py +2 -2
  502. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  503. mindspore/train/callback/_summary_collector.py +7 -7
  504. mindspore/train/callback/_time_monitor.py +3 -3
  505. mindspore/train/data_sink.py +6 -5
  506. mindspore/train/dataset_helper.py +60 -21
  507. mindspore/train/loss_scale_manager.py +2 -2
  508. mindspore/train/metrics/accuracy.py +7 -7
  509. mindspore/train/metrics/confusion_matrix.py +8 -6
  510. mindspore/train/metrics/cosine_similarity.py +6 -4
  511. mindspore/train/metrics/error.py +2 -2
  512. mindspore/train/metrics/metric.py +3 -3
  513. mindspore/train/metrics/perplexity.py +2 -1
  514. mindspore/train/metrics/topk.py +2 -2
  515. mindspore/train/mind_ir_pb2.py +89 -15
  516. mindspore/train/model.py +290 -60
  517. mindspore/train/serialization.py +495 -220
  518. mindspore/train/summary/_summary_adapter.py +1 -1
  519. mindspore/train/summary/summary_record.py +51 -28
  520. mindspore/train/train_thor/convert_utils.py +3 -3
  521. mindspore/turbojpeg.dll +0 -0
  522. mindspore/vcmeta.dll +0 -0
  523. mindspore/vcruntime140.dll +0 -0
  524. mindspore/vcruntime140_1.dll +0 -0
  525. mindspore/version.py +1 -1
  526. {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
  527. mindspore-2.3.0.dist-info/RECORD +1400 -0
  528. {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
  529. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  530. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  531. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  532. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  533. mindspore/gen_ops.py +0 -273
  534. mindspore/nn/layer/flash_attention.py +0 -189
  535. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  536. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  537. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  538. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  539. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  540. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  541. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  542. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  543. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  544. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  545. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  546. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  547. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  548. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  549. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  550. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  551. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  552. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  553. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  554. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  555. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  556. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  557. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  558. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  559. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  560. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  561. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  562. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  563. mindspore/ops/_op_impl/tbe/add.py +0 -42
  564. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  565. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  566. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  567. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  568. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  569. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  570. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  571. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  572. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  573. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  574. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  575. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  576. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  577. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  578. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  579. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  580. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  581. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  582. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  583. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  584. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  585. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  586. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  587. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  588. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  589. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  590. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  591. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  592. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  593. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  594. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  595. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  596. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  597. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  598. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  599. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  600. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  601. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  602. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  603. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  604. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  605. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  606. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  607. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  608. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  609. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  610. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  611. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  612. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  613. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  614. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  615. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  616. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  617. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  618. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  619. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  620. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  621. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  622. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  623. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  624. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  625. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  626. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  627. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  628. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  629. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  630. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  631. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  632. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  633. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  634. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  635. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  636. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  637. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  638. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  639. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  640. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  641. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  642. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  643. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  644. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  645. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  646. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  647. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  648. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  649. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  650. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  651. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  652. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  653. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  654. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  655. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  656. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  657. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  658. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  659. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  660. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  661. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  662. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  663. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  664. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  665. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  666. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  667. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  668. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  669. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  670. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  671. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  672. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  673. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  674. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  675. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  676. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  677. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  678. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  679. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  680. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  681. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  682. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  683. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  684. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  685. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  686. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  687. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  688. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  689. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  690. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  691. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  692. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  693. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  694. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  695. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  696. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  697. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  698. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  699. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  700. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  701. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  702. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  703. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  704. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  705. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  706. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  707. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  708. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  709. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  710. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  711. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  712. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  713. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  714. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  715. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  716. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  717. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  718. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  719. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  720. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  721. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  722. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  723. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  724. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  725. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  726. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  727. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  728. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  729. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  730. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  731. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  732. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  733. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  734. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  735. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  736. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  737. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  738. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  739. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  740. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  741. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  742. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  743. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  744. mindspore/ops/_op_impl/tbe/div.py +0 -41
  745. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  746. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  747. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  748. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  749. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  750. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  751. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  752. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  753. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  754. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  755. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  756. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  757. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  758. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  759. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  760. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  761. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  762. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  763. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  764. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  765. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  766. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  767. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  768. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  769. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  770. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  771. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  772. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  773. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  774. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  775. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  776. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  777. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  778. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  779. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  780. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  781. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  782. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  783. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  784. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  785. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  786. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  787. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  788. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  789. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  790. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  791. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  792. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  793. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  794. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  795. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  796. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  797. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  798. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  799. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  800. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  801. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  802. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  803. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  804. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  805. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  806. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  807. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  808. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  809. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  810. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  811. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  812. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  813. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  814. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  815. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  816. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  817. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  818. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  819. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  820. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  821. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  822. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  823. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  824. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  825. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  826. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  827. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  828. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  829. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  830. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  831. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  832. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  833. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  834. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  835. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  836. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  837. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  838. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  839. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  840. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  841. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  842. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  843. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  844. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  845. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  846. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  847. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  848. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  849. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  850. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  851. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  852. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  853. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  854. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  855. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  856. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  857. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  858. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  859. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  860. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  861. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  862. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  863. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  864. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  865. mindspore/ops/_op_impl/tbe/less.py +0 -41
  866. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  867. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  868. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  869. mindspore/ops/_op_impl/tbe/log.py +0 -40
  870. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  871. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  872. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  873. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  874. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  875. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  876. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  877. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  878. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  879. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  880. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  881. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  882. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  883. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  884. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  885. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  886. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  887. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  888. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  889. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  890. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  891. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  892. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  893. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  894. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  895. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  896. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  897. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  898. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  899. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  900. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  901. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  902. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  903. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  904. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  905. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  906. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  907. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  908. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  909. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  910. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  911. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  912. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  913. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  914. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  915. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  916. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  917. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  918. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  919. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  920. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  921. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  922. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  923. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  924. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  925. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  926. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  927. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  928. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  929. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  930. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  931. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  932. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  933. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  934. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  935. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  936. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  937. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  938. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  939. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  940. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  941. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  942. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  943. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  944. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  945. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  946. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  947. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  948. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  949. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  950. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  951. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  952. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  953. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  954. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  955. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  956. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  957. mindspore/ops/_op_impl/tbe/range.py +0 -39
  958. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  959. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  960. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  961. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  962. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  963. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  964. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  965. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  966. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  967. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  968. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  969. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  970. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  971. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  972. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  973. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  974. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  975. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  976. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  977. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  978. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  979. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  980. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  981. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  982. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  983. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  984. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  985. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  986. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  987. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  988. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  989. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  990. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  991. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  992. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  993. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  994. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  995. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  996. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  997. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  998. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  999. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  1000. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  1001. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  1002. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  1003. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  1004. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  1005. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  1006. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  1007. mindspore/ops/_op_impl/tbe/round.py +0 -38
  1008. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  1009. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  1010. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  1011. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  1012. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1013. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1014. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1015. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1016. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1017. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1018. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1019. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1020. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1021. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1022. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1023. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1024. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1025. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1026. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1027. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1028. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1029. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1030. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1031. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1032. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1033. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1034. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1035. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1036. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1037. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1038. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1039. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1040. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1041. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1042. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1043. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1044. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1045. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1046. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1047. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1048. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1049. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1050. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1051. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1052. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1053. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1054. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1055. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1057. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1058. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1060. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1061. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1062. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1063. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1064. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1065. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1066. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1067. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1068. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1069. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1070. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1071. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1072. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1073. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1074. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1075. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1076. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1077. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1078. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1079. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1080. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1081. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1082. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1083. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1084. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1085. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1086. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1087. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1088. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1089. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1090. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1091. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1092. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1093. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1094. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1095. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1096. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1097. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1098. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1099. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1100. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1101. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1102. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1103. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1104. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1105. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1106. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1107. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1108. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1109. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1110. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1111. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1112. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1113. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1114. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1115. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1116. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1117. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1118. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1119. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1120. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1121. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1122. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1123. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1124. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1125. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1126. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1127. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1128. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1129. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1130. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1131. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1132. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1133. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1134. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1135. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1136. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1137. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1138. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1139. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1140. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1141. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1142. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1143. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1144. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1145. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1146. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1147. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1148. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1149. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1150. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1151. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1152. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1153. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1154. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1155. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1156. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1157. mindspore/ops/_tracefunc.py +0 -241
  1158. mindspore/ops/arg_dtype_cast.py +0 -54
  1159. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1160. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1161. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1162. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1163. mindspore/rewrite/namespace.py +0 -53
  1164. mindspore-2.2.14.dist-info/RECORD +0 -1924
  1165. {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
  1166. {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
mindspore/context.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-2023 Huawei Technologies Co., Ltd
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -34,6 +34,7 @@ from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context
 from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context, \
     _need_reset_device_target_for_ps
 from mindspore.parallel._offload_context import _set_offload_context, _get_offload_context
+from mindspore.hal.device import is_initialized
 
 __all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'STRICT', 'COMPATIBLE', 'LAX', 'set_context', 'get_context',
            'set_auto_parallel_context', 'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode',
@@ -50,6 +51,10 @@ STRICT = 0
 COMPATIBLE = 1
 LAX = 2
 
+# Enumerate for the property 'debug_level'.
+RELEASE = 0
+DEBUG = 1
+
 
 def _make_directory(path):
     """Make directory."""
@@ -161,6 +166,9 @@ class _Context:
         self._context_switches = _ContextSwitchInfo(False)
         self._context_handle = MSContext.get_instance()
         self._support_binary = False
+        self.enable_compile_cache = None
+        self._mode = PYNATIVE_MODE
+        self._jit_config = {}
 
     def __getattribute__(self, attr):
         value = object.__getattribute__(self, attr)
@@ -176,7 +184,11 @@ class _Context:
 
     def get_mode(self):
         """Get current mode."""
-        return self.get_param(ms_ctx_param.mode)
+        return self._mode
+
+    def get_jit_config(self):
+        """Get current jit_config."""
+        return self._jit_config
 
     def set_mode(self, mode):
         """
@@ -204,6 +216,7 @@ class _Context:
             raise ValueError(f"For 'context.set_context', the argument 'mode' should be context.GRAPH_MODE (0) "
                              f"or context.PYNATIVE_MODE (1), but got {mode}.")
         self.set_param(ms_ctx_param.mode, mode)
+        self._mode = mode
 
     def set_jit_syntax_level(self, level):
         """"Set the JIT syntax level for graph compiling"""
@@ -212,6 +225,13 @@ class _Context:
                              f"or context.LAX, but got {level}.")
         self.set_param(ms_ctx_param.jit_syntax_level, level)
 
+    def set_debug_level(self, level):
+        """"Set the debug level for graph compiling"""
+        if level != RELEASE and level != DEBUG:
+            raise ValueError(f"For 'context.set_debug_level', the argument 'level' should be context.RELEASE "
+                             f"or context.DEBUG, but got {level}.")
+        self.set_param(ms_ctx_param.debug_level, level)
+
     def set_memory_optimize_level(self, memory_optimize_level):
         """
         The memory optimize level, support "O0", "O1".
@@ -268,11 +288,16 @@ class _Context:
           "allow_mix_precision_fp16" and "allow_mix_precision_bf16".
         - jit_compile (bool): ``False`` and ``True``.
         - atomic_clean_policy (int): ``0`` and ``1``. Default: ``1`` .
+        - op_precision_mode (str): precision mode config file path.
+        - op_debug_option (str): Enable debugging options for Ascend operators,
+          default not enabled, only supports ``"oom"`` currently.
+          ``"oom"``: Detect memory out of bounds.
+        - ge_options (dict): Global or session CANN options.
         - exception_dump (str): Enable exception dump for Ascend operators. ``"0"`` , ``"1"`` and ``"2"``.
           Default: ``"2"`` .
-        - op_precision_mode (str): config file path.
         - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file.
           If its value is None or '', it does not take effect. Default None.
+        - host_scheduling_max_threshold(int): The host scheduling max threshold.
         """
         ascend_cfg_modes = {
             'precision_mode': ["force_fp16", "allow_fp32_to_fp16", "allow_mix_precision", "must_keep_origin_dtype",
@@ -284,8 +309,15 @@ class _Context:
             'conv_allow_hf32': [True, False],
             'exception_dump': ["0", "1", "2"],
             'op_precision_mode': (str,),
+            'ge_options': (dict,),
             'parallel_speed_up_json_path': (str, None),
-            'topo_order': (dict,)
+            'host_scheduling_max_threshold': (int,),
+            'cur_step_num': (int,),
+            'save_checkpoint_steps': (int,),
+            'need_ckpt': (bool,),
+            'last_triggered_step': (int,),
+            'topo_order': (dict,),
+            'op_debug_option': (str, None),
         }
         ascend_cfg_setters = {
             'precision_mode': self._get_ascend_config_setter('precision_mode'),
@@ -294,8 +326,15 @@ class _Context:
             'matmul_allow_hf32': self._get_ascend_config_setter('matmul_allow_hf32', lambda v: "1" if v else "0"),
             'conv_allow_hf32': self._get_ascend_config_setter('conv_allow_hf32', lambda v: "1" if v else "0"),
             'exception_dump': self._get_ascend_config_setter('exception_dump'),
+            'op_debug_option': self._set_op_debug_option,
             'op_precision_mode': self._set_op_precision_mode,
+            'ge_options': self._set_ge_options,
             'parallel_speed_up_json_path': self._set_speedup_config_path,
+            'host_scheduling_max_threshold': self._get_ascend_config_setter('host_scheduling_max_threshold', str),
+            'cur_step_num': self._set_cur_step_num,
+            'save_checkpoint_steps': self._set_save_checkpoint_steps,
+            'need_ckpt': self._set_need_ckpt,
+            'last_triggered_step': self._set_last_triggered_step,
             'topo_order': self._set_topo_order
         }
         ascend_cfg_set = tuple(ascend_cfg_modes.keys())
@@ -354,6 +393,31 @@ class _Context:
         if gpu_key == 'matmul_allow_tf32':
             self.set_param(ms_ctx_param.matmul_allow_tf32, gpu_config[gpu_key])
 
+    def set_jit_config(self, jit_config):
+        """
+        Enable jit config.
+
+        Args:
+            jit_config (dict):
+
+                - jit_level (str): "O0", "O1" or "O2" to control the compilation optimization level.
+        """
+        jit_cfgs = {'jit_level': ["O0", "O1", "O2"], 'infer_boost': ["on", "off"]}
+        key_args_map = {'jit_level': ms_ctx_param.jit_level, 'infer_boost': ms_ctx_param.infer_boost}
+        for jit_key in jit_config:
+            if jit_key not in jit_cfgs:
+                raise ValueError(f"For 'context.set_context', the key of argument 'jit_config' must be one of "
+                                 f"{jit_cfgs}, but got {jit_key}.")
+            supported_value = jit_cfgs.get(jit_key)
+            if jit_config[jit_key] not in supported_value:
+                raise ValueError(f"For 'jit_cfgs', the value of argument {jit_key} must be one of "
+                                 f"{supported_value}, but got {jit_config[jit_key]}.")
+            self._jit_config = jit_config
+            self.set_param(key_args_map[jit_key], jit_config[jit_key])
+
+        if 'infer_boost' in jit_config and jit_config['infer_boost'] == "on" and jit_config['jit_level'] != "O0":
+            raise ValueError(f"Only jit_level set O0 can set infer_boost to on.")
+
     def set_backend_policy(self, policy):
         success = self._context_handle.set_backend_policy(policy)
         if not success:
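
The new `set_jit_config` validates both keys and values, and additionally rejects `infer_boost="on"` unless `jit_level` is `"O0"`. A sketch of the resulting behavior through the public `set_context` entry point (assumes MindSpore 2.3.0; illustrative only):

    import mindspore as ms

    ms.set_context(jit_config={"jit_level": "O1"})                       # accepted
    ms.set_context(jit_config={"jit_level": "O0", "infer_boost": "on"})  # accepted
    try:
        ms.set_context(jit_config={"jit_level": "O2", "infer_boost": "on"})
    except ValueError as err:
        print(err)  # Only jit_level set O0 can set infer_boost to on.
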
@@ -475,9 +539,13 @@ class _Context:
 
     def set_mempool_block_size(self, mempool_block_size):
         """Set the block size of memory pool."""
-        if _get_mode() == GRAPH_MODE:
+        global_jit_config = get_jit_config()
+        is_force_kbk = False
+        if global_jit_config:
+            is_force_kbk = global_jit_config.get('jit_level') == "O0" or global_jit_config.get('jit_level') == "O1"
+        if _get_mode() == GRAPH_MODE and not is_force_kbk:
             logger.warning("Graph mode doesn't support to set parameter 'mempool_block_size' of context currently, "
-                           "you can use context.set_context to set pynative mode.")
+                           "you can use context.set_context to set pynative mode or set jit_level=O0/O1.")
             return
         if not Validator.check_str_by_regular(mempool_block_size, _RE_PATTERN):
             raise ValueError("For 'context.set_context', the argument 'mempool_block_size' should be in "
@@ -563,8 +631,10 @@ class _Context:
             'deterministic': set_deterministic,
             'ascend_config': set_ascend_config,
             'jit_syntax_level': set_jit_syntax_level,
+            'debug_level': set_debug_level,
             'gpu_config': set_gpu_config,
             'aoe_config': set_aoe_config,
+            'jit_config': set_jit_config,
         }
 
     @property
@@ -612,6 +682,16 @@ class _Context:
             trans_fn = lambda x: x
         return _config_setter
 
+    def _set_op_debug_option(self, option_value):
+        valid_order = {'oom'}
+        if not isinstance(option_value, str):
+            raise TypeError(f"For 'ascend_config', the type of 'op_debug_option' must be str, "
+                            f"but got {type(option_value)}.")
+        if option_value not in valid_order:
+            raise ValueError(f"For 'ascend_config', the 'op_debug_option' supports being set to 'oom' currently, "
+                             f"but got {option_value}.")
+        self.set_param(ms_ctx_param.op_debug_option, option_value)
+
     def _set_op_precision_mode(self, ascend_value):
         op_precision_path = ascend_value
         real_path = os.path.realpath(op_precision_path)
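
`_set_op_debug_option` accepts exactly one value today. A sketch of the only accepted form (assumes MindSpore 2.3.0 running on an Ascend backend; illustrative only):

    import mindspore as ms

    # "oom" asks the runtime to detect out-of-bounds memory access during
    # operator execution; any other string raises ValueError.
    ms.set_context(ascend_config={"op_debug_option": "oom"})
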
@@ -620,6 +700,28 @@ class _Context:
                              f"got '{op_precision_path}'.")
         self.set_param(ms_ctx_param.op_precision_mode, ascend_value)
 
+    def _set_ge_options(self, ge_options):
+        """Set ge options."""
+        for level, options in ge_options.items():
+            if level not in ['global', 'session']:
+                raise ValueError(f"For 'ascend_config', the key of ge_options must be one of "
+                                 f"('global', 'session'), but got {level}.")
+
+            if not isinstance(options, dict):
+                raise TypeError(f"For 'ge_options', the type of {level} options must be dict, "
+                                f"but got {type(options)}. The error options: {options}.")
+
+            for key, value in options.items():
+                if not isinstance(key, str):
+                    raise TypeError(f"For 'ge_options', the type of key and value must be str, "
+                                    f"but got {type(key)}. The error key is {key}.")
+                if not isinstance(value, str):
+                    raise TypeError(f"For 'ge_options', the type of key and value must be str, "
+                                    f"but got {type(value)}. The error value is {value}")
+
+        options_str = json.dumps(ge_options)
+        self.set_param(ms_ctx_param.ge_options, options_str)
+
     def _set_topo_order(self, topo_order):
         """
         Set topo order.
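
`ge_options` is a two-level dict, `'global'`/`'session'` each mapping string keys to string values, serialized to JSON before being handed to CANN. A sketch using the option names from the docstring example later in this diff (assumes MindSpore 2.3.0 on Ascend):

    import mindspore as ms

    ms.set_context(ascend_config={
        "ge_options": {
            "global": {"ge.opSelectImplmode": "high_precision"},
            "session": {"ge.exec.atomicCleanPolicy": "0"},
        }
    })
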
@@ -642,6 +744,30 @@ class _Context:
         options_str = json.dumps(topo_order)
         self.set_param(ms_ctx_param.topo_order, options_str)
 
+    def _set_need_ckpt(self, need_ckpt):
+        """Set need ckpt flag"""
+        if not isinstance(need_ckpt, bool):
+            raise TypeError(f"For step num, the value type should be int, but got {type(need_ckpt)}, {need_ckpt}")
+        self.set_param(ms_ctx_param.need_ckpt, need_ckpt)
+
+    def _set_cur_step_num(self, step_num):
+        """set current step num at every step begin"""
+        if not isinstance(step_num, int):
+            raise TypeError(f"For step num, the value type should be int, but got {type(step_num)}, {step_num}")
+        self.set_param(ms_ctx_param.cur_step_num, step_num)
+
+    def _set_save_checkpoint_steps(self, steps):
+        """set save checkpoint steps before run"""
+        if not isinstance(steps, int):
+            raise TypeError(f"For step num, the value type should be int, but got {type(steps)}, {steps}")
+        self.set_param(ms_ctx_param.save_checkpoint_steps, steps)
+
+    def _set_last_triggered_step(self, step):
+        """set last triggered save ckpt steps before run"""
+        if not isinstance(step, int):
+            raise TypeError(f"For step num, the value type should be int, but got {type(step)}, {step}")
+        self.set_param(ms_ctx_param.last_triggered_step, step)
+
     def _set_speedup_config_path(self, speedup_config_path):
         """"Check and set speedup config for auto parallel."""
         if speedup_config_path is None or speedup_config_path == "":
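
These four setters are registered as `ascend_config` keys but look like framework plumbing for checkpoint scheduling rather than a stable public knob; they are not documented in the `set_context` docstring further down. A hedged sketch of the value types they accept, inferred only from the type checks above (hypothetical usage, not confirmed by this diff):

    import mindspore as ms

    ms.set_context(ascend_config={"need_ckpt": True,             # bool flag
                                  "save_checkpoint_steps": 100,  # int
                                  "last_triggered_step": 0,      # int
                                  "cur_step_num": 1})            # int
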
@@ -652,23 +778,34 @@ class _Context:
                              f"{speedup_config_real_path} does not exist, please check whether the "
                              f"'parallel_speed_up_json_path' is correct.")
         try:
-            valid_option = {"recompute_comm_overlap": ms_ctx_param.recompute_comm_overlap,
-                            "matmul_grad_comm_overlap": ms_ctx_param.matmul_grad_comm_overlap,
-                            "enable_task_opt": ms_ctx_param.enable_task_opt,
-                            "enable_grad_comm_opt": ms_ctx_param.enable_grad_comm_opt,
-                            "interleaved_matmul_comm": ms_ctx_param.interleaved_matmul_comm,
-                            "enable_opt_shard_comm_opt": ms_ctx_param.enable_opt_shard_comm_opt,
-                            "interleaved_layernorm_comm": ms_ctx_param.interleaved_layernorm_comm}
+            valid_option = {"recompute_comm_overlap": (ms_ctx_param.recompute_comm_overlap, bool),
+                            "matmul_grad_comm_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
+                            "enable_task_opt": (ms_ctx_param.enable_task_opt, bool),
+                            "enable_grad_comm_opt": (ms_ctx_param.enable_grad_comm_opt, bool),
+                            "recompute_allgather_overlap_fagrad":
+                                (ms_ctx_param.recompute_allgather_overlap_fagrad, bool),
+                            "interleaved_matmul_comm": (ms_ctx_param.interleaved_matmul_comm, bool),
+                            "bias_add_comm_swap": (ms_ctx_param.bias_add_comm_swap, bool),
+                            "enable_opt_shard_comm_opt": (ms_ctx_param.enable_opt_shard_comm_opt, bool),
+                            "enable_begin_end_inline_opt": (ms_ctx_param.enable_begin_end_inline_opt, bool),
+                            "enable_concat_eliminate_opt": (ms_ctx_param.enable_concat_eliminate_opt, bool),
+                            "interleaved_layernorm_comm": (ms_ctx_param.interleaved_layernorm_comm, bool),
+                            "compute_communicate_fusion_level":
+                                (ms_ctx_param.compute_communicate_fusion_level, int),
+                            "enable_flash_attention_load_balance":
+                                (ms_ctx_param.enable_flash_attention_load_balance, bool)}
             with open(speedup_config_real_path, 'r') as f:
                 speedup_config = json.load(f)
-                for k, v in speedup_config.items():
-                    if not isinstance(k, str):
-                        raise TypeError("key {} is not a str".format(k))
-                    if k not in valid_option:
-                        raise ValueError("key {} should be one of {}.".format(k, valid_option.keys()))
-                    if not isinstance(v, bool):
-                        raise TypeError("value {} is not a bool".format(v))
-                    self.set_param(valid_option.get(k), v)
+                for key, value in speedup_config.items():
+                    if not isinstance(key, str):
+                        raise TypeError("key {} is not a str".format(key))
+                    if key not in valid_option:
+                        raise ValueError("key {} should be one of {}.".format(key, valid_option.keys()))
+                    set_func, valid_type = valid_option.get(key)
+                    if not isinstance(value, valid_type):
+                        raise TypeError(f"The value type of {key} must be {valid_type}, "
+                                        f"but got value is {value} and type is {type(value)}.")
+                    self.set_param(set_func, value)
         except (TypeError, ValueError) as exo:
             raise ValueError(str(exo) + "\nFor 'context.set_context', "
                              "open or load the 'speedup_config_path' file {} "
@@ -705,8 +842,9 @@ def _context():
                  auto_parallel_search_mode=str, search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
                  strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool, enable_alltoall=bool,
                  all_reduce_fusion_config=list, pipeline_stages=int, pipeline_segments=int,
-                 pipeline_config=dict, parallel_optimizer_config=dict,
-                 comm_fusion=dict, strategy_ckpt_config=dict)
+                 pipeline_result_broadcast=bool, parallel_optimizer_config=dict,
+                 pipeline_config=dict,
+                 comm_fusion=dict, strategy_ckpt_config=dict, force_fp32_communication=bool)
 def set_auto_parallel_context(**kwargs):
     r"""
     Set auto parallel context, only data parallel supported on CPU.
@@ -733,8 +871,11 @@ def set_auto_parallel_context(**kwargs):
              parallel_optimizer_config    dataset_strategy
              enable_alltoall              pipeline_stages
              pipeline_config              auto_parallel_search_mode
+             force_fp32_communication    pipeline_result_broadcast
              \                            comm_fusion
              \                            strategy_ckpt_config
+             \                            group_ckpt_save_file
+             \                            auto_pipeline
     =========================== ===========================
 
     Args:
@@ -744,6 +885,8 @@ def set_auto_parallel_context(**kwargs):
            "stand_alone" do not support gradients_mean. Default: ``False`` .
        gradient_fp32_sync (bool): Run allreduce of gradients in fp32. "stand_alone", "data_parallel"
            and "hybrid_parallel" do not support gradient_fp32_sync. Default: ``True`` .
+       loss_repeated_mean (bool) - Indicates whether the mean operator is executed backwards when the
+           calculation is repeated. Default: ``True`` .
        parallel_mode (str): There are five kinds of parallel modes, ``"stand_alone"`` , ``"data_parallel"`` ,
            ``"hybrid_parallel"`` , ``"semi_auto_parallel"`` and ``"auto_parallel"`` . Note the pynative mode
            only supports the ``"stand_alone"`` and ``"data_parallel"`` mode. Default: ``"stand_alone"`` .
@@ -758,15 +901,16 @@ def set_auto_parallel_context(**kwargs):
 
            - auto_parallel: Achieving parallelism automatically.
        search_mode (str): There are three kinds of shard strategy search modes: ``"recursive_programming"`` ,
-           ``"dynamic_programming"`` and ``"sharding_propagation"`` . Default: ``"recursive_programming"`` .
+           ``"sharding_propagation"`` and ``"dynamic_programming"`` (Not recommended).
+           Default: ``"recursive_programming"`` .
 
            - recursive_programming: Recursive programming search mode. In order to obtain optimal performance,
              it is recommended that users set the batch size to be greater than or equal to the product of
              the number of devices and the number of multi-copy parallelism.
 
-           - dynamic_programming: Dynamic programming search mode.
-
            - sharding_propagation: Propagate shardings from configured ops to non-configured ops.
+
+           - dynamic_programming: Dynamic programming search mode.
        auto_parallel_search_mode (str): This is the old version of 'search_mode'. Here, remaining this attribute is
            for forward compatibility, and this attribute will be deleted in a future MindSpore version.
        parameter_broadcast (bool): Whether to broadcast parameters before training. Before training, in order to have
@@ -792,6 +936,9 @@ def set_auto_parallel_context(**kwargs):
            data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
            parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
            `Lamb` and `AdamWeightDecay` in Ascend . Default: ``False`` .
+       force_fp32_communication (bool): A switch that determines whether reduce operators (AllReduce, ReduceScatter)
+           are forced to use the fp32 data type for communication during communication. True is the enable
+           switch. Default: ``False`` .
        enable_alltoall (bool): A switch that allows AllToAll operators to be generated during communication. If its
            value is ``False`` , there will be a combination of operators such as AllGather, Split and
            Concat instead of AllToAll. Default: ``False`` .
@@ -801,6 +948,8 @@ def set_auto_parallel_context(**kwargs):
            distributed alone in the pipeline. The total devices will be divided into 'pipeline_stags'
            stages.
            Default: ``1`` .
+       pipeline_result_broadcast (bool): A switch that broadcast the last stage result to all other stage in pipeline
+           parallel inference. Default: ``False`` .
        pipeline_config (dict): A dict contains the keys and values for setting the pipeline parallelism configuration.
            It supports the following keys:
@@ -866,14 +1015,18 @@ def set_auto_parallel_context(**kwargs):
            - load_file (str): The path to load parallel strategy checkpoint. If the file name extension is
              `.json`, the file is loaded in JSON format. Otherwise, the file is loaded in ProtoBuf
              format.
-             Default: ''
+             Default: ``''``
 
            - save_file (str): The path to save parallel strategy checkpoint. If the file name extension is
              `.json`, the file is saved in JSON format. Otherwise, the file is saved in ProtoBuf format.
-             Default: ''
+             Default: ``''``
 
            - only_trainable_params (bool): Only save/load the strategy information for trainable parameter.
             Default: ``True`` .
+       group_ckpt_save_file (str): The path to save parallel group checkpoint.
+       auto_pipeline (bool): Set the pipeline stage number to automatic. Its value will be selected between 1 and the
+           parameter `pipeline_stages`. This option requires the `parallel_mode` to be ``auto_parallel``
+           and the `search_mode` to be ``recursive_programming``. Default: ``False`` .
 
     Raises:
         ValueError: If input key is not attribute in auto parallel context.
@@ -885,8 +1038,8 @@ def set_auto_parallel_context(**kwargs):
         >>> ms.set_auto_parallel_context(gradients_mean=True)
         >>> ms.set_auto_parallel_context(gradient_fp32_sync=False)
         >>> ms.set_auto_parallel_context(parallel_mode="auto_parallel")
-        >>> ms.set_auto_parallel_context(search_mode="dynamic_programming")
-        >>> ms.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
+        >>> ms.set_auto_parallel_context(search_mode="recursive_programming")
+        >>> ms.set_auto_parallel_context(auto_parallel_search_mode="recursive_programming")
         >>> ms.set_auto_parallel_context(parameter_broadcast=False)
         >>> ms.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
         >>> ms.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
@@ -895,6 +1048,7 @@ def set_auto_parallel_context(**kwargs):
         >>> ms.set_auto_parallel_context(enable_alltoall=False)
        >>> ms.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
         >>> ms.set_auto_parallel_context(pipeline_stages=2)
+        >>> ms.set_auto_parallel_context(pipeline_stages=2, pipeline_result_broadcast=True)
         >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24,
         ...                    "optimizer_weight_shard_size": 2}
         >>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
@@ -943,9 +1097,12 @@ def reset_auto_parallel_context():
     - strategy_ckpt_save_file: ''.
     - full_batch: False.
     - enable_parallel_optimizer: False.
+    - force_fp32_communication: False
     - enable_alltoall: False.
     - pipeline_stages: 1.
+    - pipeline_result_broadcast: False.
     - fusion_threshold: 64.
+    - auto_pipeline: False.
 
     Examples:
         >>> import mindspore as ms
@@ -1035,6 +1192,23 @@ def _check_target_specific_cfgs(device, arg_key):
     return False
 
 
+def _check_ascend_device_context_initialized(device_target, settings):
+    if device_target == 'Ascend' and is_initialized(device_target):
+        for key, _ in settings.items():
+            if key in ('ascend_config', 'deterministic', 'jit_compile', 'exception_dump', 'device_id'):
+                logger.warning(f"For 'context.set_context' in Ascend backend, the backend is already initialized, "
+                               "please set it before the definition of any Tensor and Parameter, and the "
+                               "instantiation and execution of any operation and net, otherwise the settings may not "
+                               "take effect. ")
+                break
+
+
+def _check_key(key):
+    if key in ('precision_mode', 'jit_compile', 'atomic_clean_policy', 'matmul_allow_hf32', 'conv_allow_hf32',
+               'op_precision_mode', 'host_scheduling_max_threshold', 'ge_options', 'op_debug_option'):
+        raise ValueError(f"Please set '{key}' through parameter ascend_config")
+
+
 @args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=(bool, int),
                  save_graphs_path=str, enable_dump=bool, aoe_tune_mode=str, aoe_config=dict,
                  save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
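
The new `_check_ascend_device_context_initialized` warns when backend-sensitive keys are set after the Ascend backend has already been initialized. A sketch of the ordering it encourages (assumes MindSpore 2.3.0 on Ascend; illustrative only):

    import mindspore as ms

    # Set backend-sensitive context first...
    ms.set_context(device_target="Ascend", device_id=0,
                   ascend_config={"precision_mode": "force_fp16"})
    # ...because creating the first Tensor initializes the device; setting
    # 'ascend_config', 'deterministic', 'jit_compile', 'exception_dump' or
    # 'device_id' after this point only produces a warning.
    x = ms.Tensor([1.0])
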
@@ -1043,8 +1217,8 @@ def _check_target_specific_cfgs(device, arg_key):
                  max_device_memory=str, print_file_path=str, max_call_depth=int, env_config_path=str,
                  graph_kernel_flags=str, save_compile_cache=bool, runtime_num_threads=int, load_compile_cache=bool,
                  grad_for_scalar=bool, pynative_synchronize=bool, mempool_block_size=str, disable_format_transform=bool,
-                 op_timeout=int, deterministic=str, ascend_config=dict, jit_syntax_level=int,
-                 jit_enable_inplace_ops=bool, gpu_config=dict)
+                 op_timeout=int, deterministic=str, ascend_config=dict, jit_syntax_level=int, debug_level=int,
+                 jit_enable_inplace_ops=bool, gpu_config=dict, jit_config=dict, enable_compile_cache=bool)
 def set_context(**kwargs):
     """
     Set context for running environment.
@@ -1093,6 +1267,8 @@ def set_context(**kwargs):
     |                         | reserve_class_name_in_scope  | CPU/GPU/Ascend             |
     |                         +------------------------------+----------------------------+
     |                         | pynative_synchronize         | CPU/GPU/Ascend             |
+    |                         +------------------------------+----------------------------+
+    |                         | debug_level                  | CPU/GPU/Ascend             |
     +-------------------------+------------------------------+----------------------------+
     | Executive Control       | mode                         | CPU/GPU/Ascend             |
     |                         +------------------------------+----------------------------+
@@ -1133,6 +1309,8 @@ def set_context(**kwargs):
     |                         | jit_syntax_level             | CPU/GPU/Ascend             |
     |                         +------------------------------+----------------------------+
     |                         | gpu_config                   | GPU                        |
+    |                         +------------------------------+----------------------------+
+    |                         | jit_config                   | CPU/GPU/Ascend             |
     +-------------------------+------------------------------+----------------------------+
 
     Args:
@@ -1145,12 +1323,16 @@ def set_context(**kwargs):
            and max_device_memory. 'max_device_memory' should be set before the program runs.
        variable_memory_max_size (str): This parameter is deprecated, and will be removed in a future version.
            Please use parameter 'max_device_memory' instead.
-       mempool_block_size (str): Set the size of the memory pool block in PyNative mode for devices.
-           The format is "xxGB". Default: ``"1GB"`` . Minimum size is "1G". The actual used memory block size is the
-           minimum of the available memory of the device and mempool_block_size.
+       mempool_block_size (str): Set the size of the memory pool block in PyNative mode or jit level is 'O0'/'O1'
+           for devices. The format is "xxGB". Default: ``"1GB"`` . Minimum size is "1G". The actual used memory block
+           size is the minimum of the available memory of the device and mempool_block_size.
        op_timeout (int): Set the maximum duration of executing an operator in seconds.
-           If the execution time exceeds this value, system will terminate the task. 0 means endless wait.
-           Default: ``1900`` .
+           If the execution time exceeds this value, system will terminate the task.
+           0 means endless wait. The defaults for AI Core and AICPU operators vary on different hardware.
+           For more information,
+           please refer to `Ascend Community document about aclrtSetOpExecuteTimeOut
+           <https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/infacldevg/aclcppdevg/aclcppdevg_03_0069.html>`_.
+           Default: ``900`` .
@@ -1159,10 +1341,13 @@ def set_context(**kwargs):
            - True or 2: Generate more ir files related to backend process.
            - 3: Generate visualization computing graphs and detailed frontend ir graphs.
 
+           When the network structure is complex, setting `save_graphs` attribute to ``2`` or ``3`` may take too long.
+           If you need quick problem locating, you can switch to ``1`` first.
+
            When the `save_graphs` attribute is set as ``True`` , ``1`` , ``2`` or ``3`` , attribute of
            `save_graphs_path` is used to set the intermediate compilation graph storage path. By default, the graphs
            are saved in the current directory.
-       save_graphs_path (str): Path to save graphs. Default: ".".
+       save_graphs_path (str): Path to save graphs. Default: ``"."``.
            If the specified directory does not exist, the system will automatically create the directory.
            During distributed training, graphs will be saved to the directory of
            `save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
@@ -1182,6 +1367,8 @@ def set_context(**kwargs):
            If the saved file already exists, the timestamp suffix will be added to the file. Saving data to a file
            solves the problem of data loss in screen printing when a large amount of data is generated.
            If it is not set, an error will be reported: prompt to set the upper absolute path.
+           When print data to file, the total output bytes of single print must be less then 2GB(limited by
+           protobuf).
        env_config_path (str): Config path for DFX.
            Through mindspore.set_context(env_config_path="./mindspore_config.json")
@@ -1226,7 +1413,7 @@ def set_context(**kwargs):
            If enable_graph_kernel is set to ``True`` , acceleration can be enabled.
            For details of graph kernel fusion, please check
            `Enabling Graph Kernel Fusion
-           <https://www.mindspore.cn/tutorials/experts/en/r2.2/optimize/graph_fusion_engine.html>`_.
+           <https://www.mindspore.cn/tutorials/experts/en/master/optimize/graph_fusion_engine.html>`_.
        graph_kernel_flags (str):
            Optimization options of graph kernel fusion, and the priority is higher when it conflicts
            with enable_graph_kernel. Only for experienced users.
@@ -1282,7 +1469,7 @@ def set_context(**kwargs):
            the compile cache is loaded. Note that only limited automatic detection for the changes of
            python scripts is supported by now, which means that there is a correctness risk. Default: ``False`` .
            This is an experimental prototype that is subject to change and/or deletion.
-       compile_cache_path (str): Path to save the compile cache. Default: ".".
+       compile_cache_path (str): Path to save the compile cache. Default: ``"."``.
            If the specified directory does not exist, the system will automatically create the directory.
            The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
            the ID of the current device in the cluster.
@@ -1299,16 +1486,18 @@ def set_context(**kwargs):
            of the interfaces would be compiled by MindSpore to the interfaces definition .py file that should be
            guaranteed to be writable. Then compile the .py file to the .pyc or .so file, and could run in Graph mode.
        memory_optimize_level (str): The memory optimize level.
-           Default: O0. The value must be in ['O0', 'O1'].
+           On Ascend hardware platform, default: ``O1``, on other hardware platforms, default: ``O0``.
+           The value must be in ['O0', 'O1'].
 
-           - O0: priority performance option, disable SOMAS (Safe Optimized Memory Allocation Solver).
-           - O1: priority memory option, enable SOMAS.
+           - O0: priority performance option, disable SOMAS (Safe Optimized Memory Allocation Solver)
+             and some other memory optimizations.
+           - O1: priority memory option, enable SOMAS and some other memory optimizations.
        memory_offload (str): Whether to enable the memory offload function. When it is enabled, the idle data will be
            temporarily copied to the host side in the case of insufficient device memory. The value must be in the
            range of ['ON', 'OFF'], and the default value is ``'OFF'`` .
 
            - ON: Enable the memory Offload function. On Ascend hardware platform, this parameter does not take effect
-             when the environment variable "GRAPH_OP_RUN=1" is not set; This parameter does not take effect when
+             when the graph compilation level is not 'O0'; This parameter does not take effect when
              memory_optimize_level is set 'O1'.
            - OFF: Turn off the memory Offload function.
        ascend_config (dict): Set the parameters specific to Ascend hardware platform. It is not set by default.
@@ -1319,22 +1508,27 @@ def set_context(**kwargs):
            is ``force_fp16`` . The value range is as follows:
 
            - force_fp16: When the operator supports both float16 and float32, select float16 directly.
-           - allow_fp32_to_fp16: When the operator does not support the float32 data type, directly reduce
-             the precision of float16.
+           - allow_fp32_to_fp16: For cube operators, use the float16. For vector operators,
+             prefer to keep the origin dtype, if the operator in model can support float32,
+             it will keep original dtype, otherwise it will reduce to float16.
           - allow_mix_precision: Automatic mixing precision, facing the whole network operator, according
             to the built-in optimization strategy, automatically reduces the precision of some operators
             to float16 or bfloat16.
           - must_keep_origin_dtype: Keep the accuracy of the original drawing.
           - force_fp32: When the input of the matrix calculation operator is float16 and the output supports
             float16 and float32, output is forced to float32.
-           - allow_fp32_to_bf16: When the operator does not support the float32 data type, directly reduce
-             the precision of bfloat16.
+           - allow_fp32_to_bf16: For cube operators, use the bfloat16. For vector operators,
+             prefer to keep the origin dtype, if the operator in model can support float32,
+             it will keep original dtype, otherwise it will reduce to bfloat16.
           - allow_mix_precision_fp16: Automatic mixing precision, facing the whole network operator, automatically
             reduces the precision of some operators to float16 according to the built-in optimization strategy.
           - allow_mix_precision_bf16: Automatic mixing precision, facing the whole network operator, according to
             the built-in optimization strategy, automatically reduces the precision of some operators to bfloat16.
 
-           - jit_compile (bool): Whether to select online compilation. the default value is based on CANN.
+           - jit_compile (bool): Whether to select online compilation. When set to 'True', online compilation is
+             prioritized. When set to 'False', compiled operator binary files are prioritized to improve compilation
+             performance. The default settings are online compilation for static shape, and compiled operator binary
+             files for dynamic shape.
           - atomic_clean_policy (int): The policy for cleaning memory occupied by atomic operators in the network.
             Default: ``1`` .
@@ -1350,24 +1544,66 @@ def set_context(**kwargs):
            For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
           - exception_dump (str): Enable exception dump for Ascend operators, providing the input and output data for
             failing Ascend operators. The value can be ``"0"`` , ``"1"`` and ``"2"``. For ``"0"`` , exception dump is
-             turned off; for ``"1"``, all inputs and outputs will be dumped for AICore and AICPU exception operators;
-             for ``"2"``, inputs will be dumped for AICore exception operators. Default: ``"2"`` .
+             turned off; for ``"1"``, all inputs and outputs will be dumped for AICore exception operators;
+             for ``"2"``, inputs will be dumped for AICore exception operators, reducing the saved information
+             but improving performance. Default: ``"2"`` .
           - op_precision_mode (str): Path to config file of op precision mode. For detailed information, please refer
             to `Ascend community <https://www.hiascend.com/>`_ .
+           - op_debug_option (str): Enable debugging options for Ascend operators, default not enabled.
+             The value currently only supports being set to ``"oom"``.
+
+             - ``"oom"``: When there is a memory out of bounds during the execution of an operator,
+               AscendCL will return an error code of ``EZ9999``.
+
+           - ge_options (dict): Set options for CANN. The options are divided into two categories: global and session.
+             This is an experimental prototype that is subject to change and/or deletion.
+             For detailed information, please refer to `Ascend community <https://www.hiascend.com/document/detail/zh/canncommercial/70RC1/inferapplicationdev/graphdevg/atlasgeapi_07_0119.html>`_ .
+             The configuration options in `ge_options` may be duplicated with the options in `ascend_config`. If the
+             same configuration options are set in both `ascend_config` and `ge_options`, the one set in `ge_options`
+             shall prevail.
+
+             - global (dict): Set global options.
+             - session (dict): Set session options.
+
           - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file, configuration
             can refer to `parallel_speed_up.json
-             <https://gitee.com/mindspore/mindspore/blob/r2.2/config/parallel_speed_up.json>`_ .
+             <https://gitee.com/mindspore/mindspore/blob/master/config/parallel_speed_up.json>`_ .
             If its value is None or '', it does not take effect. Default None.
 
             - recompute_comm_overlap (bool): Enable overlap between recompute ops and communication ops if True.
               Default: False.
-             - matmul_grad_comm_overlap (bool): Enable overlap between grad ops and communication ops if True.
+             - matmul_grad_comm_overlap (bool): Enable overlap between dw matmul and
+               tensor parallel communication ops if True. Default: False.
+             - recompute_allgather_overlap_fagrad (bool): Enable overlap between duplicated allgather by recomputing
+               in sequence parallel and flashattentionscoregrad ops if True. Default: False.
+             - enable_task_opt (bool): Enable communication fusion to optimize the number of communication operator
+               tasks if True.
               Default: False.
-             - enable_task_opt (bool): Enable the optimization of the number of tasks for each communication if True.
+             - enable_grad_comm_opt (bool): Enable overlap between dx ops and data parallel communication ops if True.
+               Currently, do not support
+               `LazyInline <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.lazy_inline.html>`
               Default: False.
-             - interleaved_matmul_comm (bool): Enable interleaved optimization of Matmul-Comm if True. Default: False.
-             - interleaved_layernorm_comm (bool): Enable interleaved optimization of LayerNorm-Comm if True.
+             - enable_opt_shard_comm_opt (bool): Enable overlap between forward ops
+               and optimizer parallel allgather communication if True. Currently, do not support
+               `LazyInline <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.lazy_inline.html>`
               Default: False.
+             - compute_communicate_fusion_level (int): Enable the fusion between compute and communicate.
+               Default: ``0``.
+
+               - 0: Disable fusion.
+
+               - 1: Apply fusion to forward nodes.
+
+               - 2: Apply fusion to backward nodes.
+
+               - 3: Apply fusion to all nodes.
+             - bias_add_comm_swap (bool): Enable node execution order swap communication operators and add operators
+               if ``True``. Only 1-dimension bias node is supported. Default: ``False``.
+           - host_scheduling_max_threshold(int): The max threshold to control whether the dynamic shape process is
+             used when run the static graph, the default value is 0. When the number of operations in the static graph
+             is less than the max threshold, this graph will be executed in dynamic shape process. In large model
+             scenarios, this approach can save stream resources. If the number of operations in the static graph is
+             greater than the maximum threshold, this graph will be executed in original static process.
 
        jit_syntax_level (int): Set JIT syntax level for graph compiling, triggered by GRAPH_MODE and @jit decorator.
            The value must be ``STRICT`` or ``LAX`` . Default: ``LAX`` . All levels support all backends.
@@ -1378,6 +1614,12 @@ def set_context(**kwargs):
            affected and not optimal. Cannot be used for MindIR load and export due to some syntax that may not be
            able to be exported.
 
+       debug_level (int): Set config for debugging. Default value: ``RELEASE``.
+
+           - ``RELEASE``: Used for normally running, and some debug information will be discard to get a better
+             compiling performance.
+           - ``DEBUG``: Used for debugging when errors occur, more information will be record in compiling process.
+
        gpu_config (dict): Set the parameters specific to gpu hardware platform. It is not set by default.
            Currently, only setting `conv_fprop_algo` and `conv_dgrad_algo` and `conv_wgrad_algo` and `conv_allow_tf32`
            and `matmul_allow_tf32` are supported on GPU hardware platform.
@@ -1449,6 +1691,26 @@ def set_context(**kwargs):
           - matmul_allow_tf32 (bool): The flag below controls to allow Tensor core TF32 computation on CUBLAS and the
             default value is ``False``.
 
+       jit_config (dict): Set the global jit config for compile, take effect in network defined in Cell or jit
+           decorators. It is not set by default.
+           The setting in context is the global jit config, while JitConfig is the local network's jit config.
+           When both exist simultaneously, the global jit config will not overwrite the local network's jit config.
+
+           - jit_level (str): Used to control the compilation optimization level. Default: ``""`` , The framework
+             automatically selects the execution method based on product, Altas training product is O2, and all other
+             products are O0. The value range is as follows:
+
+             - ``"O0"``: Except for optimizations that may affect functionality, all other optimizations are turned
+               off, adopt KernelByKernel execution mode.
+             - ``"O1"``: Using commonly used optimizations and automatic operator fusion optimizations,
+               adopt KernelByKernel execution mode.
+             - ``"O2"``: Ultimate performance optimization, adopt Sink execution mode.
+
+           - infer_boost (str): Used to control the infer mode. Default: ``"off"`` . The value range is as follows:
+
+             - ``"on"``: Enable infer mode, get better infer performance.
+             - ``"off"``: Disable infer mode, use forward to infer, performance is not good.
+
     Raises:
         ValueError: If input key is not an attribute in context.
 
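Per the docstring above, a global `jit_config` does not overwrite a per-network `JitConfig`. A hedged sketch of the two levels side by side (assumes MindSpore 2.3.0 and the `Cell.set_jit_config`/`mindspore.JitConfig` helpers, which are outside this diff):

    import mindspore as ms
    from mindspore import nn

    ms.set_context(mode=ms.GRAPH_MODE, jit_config={"jit_level": "O1"})  # global

    class Net(nn.Cell):
        def construct(self, x):
            return x * 2

    net = Net()
    # The local config attached to this network takes precedence over the
    # global setting above.
    net.set_jit_config(ms.JitConfig(jit_level="O0"))
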
@@ -1482,16 +1744,23 @@ def set_context(**kwargs):
         >>> ms.set_context(memory_offload='ON')
         >>> ms.set_context(deterministic='ON')
         >>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True,
-        ...                "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file"})
+        ...                "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file",
+        ...                "op_debug_option": "oom",
+        ...                "ge_options": {"global": {"ge.opSelectImplmode": "high_precision"},
+        ...                               "session": {"ge.exec.atomicCleanPolicy": "0"}}})
         >>> ms.set_context(jit_syntax_level=ms.STRICT)
+        >>> ms.set_context(debug_level=ms.context.DEBUG)
         >>> ms.set_context(gpu_config={"conv_fprop_algo": "performance", "conv_allow_tf32": True,
         ...                "matmul_allow_tf32": True})
+        >>> ms.set_context(jit_config={"jit_level": "O0"})
     """
     ctx = _context()
     # set device target first
     if 'device_target' in kwargs:
         ctx.set_device_target(kwargs['device_target'])
     device = ctx.get_param(ms_ctx_param.device_target)
+    _check_ascend_device_context_initialized(device, kwargs)
+
     for key, value in kwargs.items():
         if key in ('enable_sparse', 'auto_tune_mode'):
             logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated, "
@@ -1501,9 +1770,7 @@ def set_context(**kwargs):
             logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated. "
                            "For details, please see the interface parameter API comments")
             continue
-        if key in ('precision_mode', 'jit_compile', 'atomic_clean_policy', 'matmul_allow_hf32', 'conv_allow_hf32',
-                   'op_precision_mode'):
-            raise ValueError(f"Please set '{key}' through parameter ascend_config")
+        _check_key(key)
         if key == 'save_graphs':
             if value is True:
                 value = 2
@@ -1514,6 +1781,13 @@ def set_context(**kwargs):
         if key == 'jit_syntax_level' and value not in (STRICT, COMPATIBLE, LAX):
             raise ValueError(f"For 'jit_syntax_level', the value should be context.STRICT"
                              f" or context.LAX, but got {value}.")
+        if key == 'debug_level' and value not in (RELEASE, DEBUG):
+            raise ValueError(f"For 'debug_level', the value should be context.DEBUG"
+                             f" or context.RELEASE, but got {value}.")
+        if key == 'enable_compile_cache':
+            setattr(ctx, key, value)
+            ctx.set_param(ms_ctx_param.__members__[key], int(value))
+            continue
         if not _check_target_specific_cfgs(device, key):
             continue
         if hasattr(ctx, key):
@@ -1571,6 +1845,17 @@ def _get_mode():
     return ctx.get_mode()
 
 
+def get_jit_config():
+    """
+    Get global jit config.
+
+    Returns:
+        Object: The Value of jit config.
+    """
+    ctx = _context()
+    return ctx.get_jit_config()
+
+
 class ParallelMode:
     """
     Parallel mode options.
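
`get_jit_config` exposes the global jit config cached by `_Context.set_jit_config`. A usage sketch (assumes MindSpore 2.3.0; the helper is module-level in `mindspore.context` but is not added to `__all__` in this diff):

    import mindspore as ms
    from mindspore import context

    ms.set_context(jit_config={"jit_level": "O0", "infer_boost": "on"})
    print(context.get_jit_config())  # {'jit_level': 'O0', 'infer_boost': 'on'}
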
@@ -1668,9 +1953,7 @@ def get_ps_context(attr_key):
 
 def reset_ps_context():
     """
-    Reset parameter server training mode context attributes to the default values:
-
-    - enable_ps: False.
+    Reset parameter server training mode context attributes to the default values.
 
     Meaning of each field and its default value refer to :func:`mindspore.set_ps_context`.