mindspore-2.3.0-cp39-none-any.whl → mindspore-2.3.0rc2-cp39-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic; see the registry's advisory for details.

Files changed (423)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
  3. mindspore/__init__.py +1 -2
  4. mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
  6. mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
  7. mindspore/_checkparam.py +25 -5
  8. mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +0 -29
  11. mindspore/_extends/parse/namespace.py +2 -2
  12. mindspore/_extends/parse/parser.py +5 -21
  13. mindspore/_extends/parse/resources.py +7 -5
  14. mindspore/_extends/parse/standard_method.py +59 -40
  15. mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
  16. mindspore/amp.py +5 -26
  17. mindspore/bin/cache_admin +0 -0
  18. mindspore/bin/cache_server +0 -0
  19. mindspore/boost/adasum.py +1 -1
  20. mindspore/boost/base.py +1 -1
  21. mindspore/boost/boost_cell_wrapper.py +1 -1
  22. mindspore/boost/grad_freeze.py +2 -2
  23. mindspore/boost/less_batch_normalization.py +6 -9
  24. mindspore/common/__init__.py +1 -8
  25. mindspore/common/_register_for_tensor.py +9 -8
  26. mindspore/common/api.py +65 -275
  27. mindspore/common/dtype.py +4 -8
  28. mindspore/common/dump.py +5 -2
  29. mindspore/common/jit_config.py +1 -1
  30. mindspore/common/lazy_inline.py +2 -14
  31. mindspore/common/parameter.py +15 -14
  32. mindspore/common/recompute.py +5 -20
  33. mindspore/common/sparse_tensor.py +6 -21
  34. mindspore/common/tensor.py +52 -100
  35. mindspore/communication/__init__.py +11 -6
  36. mindspore/communication/management.py +94 -92
  37. mindspore/context.py +18 -180
  38. mindspore/dataset/engine/datasets.py +46 -69
  39. mindspore/dataset/engine/datasets_user_defined.py +53 -72
  40. mindspore/dataset/engine/datasets_vision.py +2 -2
  41. mindspore/dataset/engine/queue.py +38 -56
  42. mindspore/dataset/engine/validators.py +5 -11
  43. mindspore/dataset/vision/__init__.py +5 -5
  44. mindspore/dataset/vision/c_transforms.py +5 -5
  45. mindspore/dataset/vision/py_transforms_util.py +1 -1
  46. mindspore/dataset/vision/transforms.py +46 -591
  47. mindspore/dataset/vision/utils.py +1 -121
  48. mindspore/dataset/vision/validators.py +3 -9
  49. mindspore/hal/__init__.py +1 -7
  50. mindspore/hal/device.py +1 -1
  51. mindspore/include/api/model.h +0 -3
  52. mindspore/include/dataset/vision.h +2 -54
  53. mindspore/include/mindapi/base/types.h +0 -1
  54. mindspore/lib/libdnnl.so.2 +0 -0
  55. mindspore/lib/libmindspore.so +0 -0
  56. mindspore/lib/libmindspore_backend.so +0 -0
  57. mindspore/lib/libmindspore_common.so +0 -0
  58. mindspore/lib/libmindspore_core.so +0 -0
  59. mindspore/lib/libmindspore_glog.so.0 +0 -0
  60. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  61. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  62. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  63. mindspore/lib/libmindspore_shared_lib.so +0 -0
  64. mindspore/lib/libmpi_adapter.so +0 -0
  65. mindspore/lib/libmpi_collective.so +0 -0
  66. mindspore/lib/libnnacl.so +0 -0
  67. mindspore/lib/libopencv_core.so.4.5 +0 -0
  68. mindspore/lib/libps_cache.so +0 -0
  69. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
  70. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  71. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  72. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  73. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
  76. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
  77. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
  78. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  79. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
  80. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
  81. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
  82. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  83. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  84. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
  85. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
  86. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  87. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  88. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
  89. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  90. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  91. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  92. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  93. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  94. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  164. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  165. mindspore/mindrecord/filewriter.py +2 -2
  166. mindspore/mint/__init__.py +40 -720
  167. mindspore/mint/nn/__init__.py +7 -89
  168. mindspore/mint/nn/functional.py +16 -165
  169. mindspore/mint/optim/adamw.py +16 -15
  170. mindspore/nn/__init__.py +2 -0
  171. mindspore/nn/cell.py +98 -97
  172. mindspore/nn/extend/basic.py +2 -2
  173. mindspore/nn/extend/embedding.py +1 -1
  174. mindspore/nn/extend/layer/normalization.py +5 -7
  175. mindspore/nn/generator.py +297 -0
  176. mindspore/nn/layer/activation.py +3 -4
  177. mindspore/nn/layer/basic.py +16 -79
  178. mindspore/nn/layer/conv.py +8 -17
  179. mindspore/nn/layer/embedding.py +4 -1
  180. mindspore/nn/layer/math.py +1 -1
  181. mindspore/nn/layer/normalization.py +1 -1
  182. mindspore/nn/layer/pooling.py +0 -5
  183. mindspore/nn/layer/rnn_cells.py +2 -2
  184. mindspore/nn/loss/loss.py +19 -19
  185. mindspore/nn/optim/adasum.py +1 -1
  186. mindspore/nn/optim/sgd.py +2 -3
  187. mindspore/nn/probability/distribution/exponential.py +1 -1
  188. mindspore/nn/probability/distribution/geometric.py +1 -1
  189. mindspore/nn/probability/distribution/logistic.py +1 -1
  190. mindspore/nn/wrap/cell_wrapper.py +1 -25
  191. mindspore/nn/wrap/loss_scale.py +1 -24
  192. mindspore/numpy/array_ops.py +1 -5
  193. mindspore/numpy/dtypes.py +3 -3
  194. mindspore/numpy/math_ops.py +8 -8
  195. mindspore/ops/__init__.py +1 -1
  196. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
  197. mindspore/ops/_vmap/vmap_array_ops.py +0 -27
  198. mindspore/ops/_vmap/vmap_math_ops.py +1 -29
  199. mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
  200. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
  201. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
  202. mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
  203. mindspore/ops/auto_generate/gen_extend_func.py +27 -603
  204. mindspore/ops/auto_generate/gen_ops_def.py +203 -993
  205. mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
  206. mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
  207. mindspore/ops/composite/base.py +6 -3
  208. mindspore/ops/composite/math_ops.py +1 -1
  209. mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
  210. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  211. mindspore/ops/extend/__init__.py +3 -2
  212. mindspore/ops/extend/array_func.py +51 -10
  213. mindspore/ops/extend/nn_func.py +78 -2
  214. mindspore/ops/function/__init__.py +13 -8
  215. mindspore/ops/function/array_func.py +179 -455
  216. mindspore/ops/function/clip_func.py +1 -1
  217. mindspore/ops/function/grad/grad_func.py +3 -3
  218. mindspore/ops/function/math_func.py +103 -117
  219. mindspore/ops/function/nn_func.py +163 -275
  220. mindspore/ops/function/other_func.py +2 -2
  221. mindspore/ops/function/random_func.py +69 -202
  222. mindspore/ops/function/sparse_func.py +4 -4
  223. mindspore/ops/functional.py +327 -332
  224. mindspore/ops/operations/__init__.py +3 -13
  225. mindspore/ops/operations/_grad_ops.py +27 -3
  226. mindspore/ops/operations/_inner_ops.py +356 -53
  227. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  228. mindspore/ops/operations/_tensor_array.py +8 -8
  229. mindspore/ops/operations/array_ops.py +65 -82
  230. mindspore/ops/operations/comm_ops.py +93 -784
  231. mindspore/ops/operations/custom_ops.py +28 -51
  232. mindspore/ops/operations/debug_ops.py +4 -4
  233. mindspore/ops/operations/inner_ops.py +2 -2
  234. mindspore/ops/operations/manually_defined/ops_def.py +4 -304
  235. mindspore/ops/operations/math_ops.py +50 -3
  236. mindspore/ops/operations/nn_ops.py +247 -14
  237. mindspore/ops/operations/other_ops.py +3 -3
  238. mindspore/ops/operations/random_ops.py +1 -1
  239. mindspore/ops/operations/sparse_ops.py +1 -1
  240. mindspore/ops/primitive.py +8 -9
  241. mindspore/ops/silent_check.py +5 -5
  242. mindspore/ops_generate/arg_dtype_cast.py +9 -2
  243. mindspore/ops_generate/arg_handler.py +0 -26
  244. mindspore/ops_generate/gen_aclnn_implement.py +4 -1
  245. mindspore/ops_generate/gen_ops.py +4 -26
  246. mindspore/ops_generate/gen_pyboost_func.py +12 -41
  247. mindspore/ops_generate/gen_utils.py +0 -21
  248. mindspore/ops_generate/pyboost_utils.py +2 -7
  249. mindspore/ops_generate/template.py +0 -1
  250. mindspore/parallel/_auto_parallel_context.py +1 -21
  251. mindspore/parallel/_tensor.py +5 -0
  252. mindspore/parallel/_transformer/transformer.py +1 -1
  253. mindspore/parallel/_utils.py +1 -15
  254. mindspore/parallel/algo_parameter_config.py +3 -1
  255. mindspore/parallel/checkpoint_transform.py +9 -12
  256. mindspore/parallel/cluster/process_entity/_api.py +29 -28
  257. mindspore/parallel/cluster/process_entity/_utils.py +3 -13
  258. mindspore/parallel/cluster/run.py +16 -13
  259. mindspore/parallel/parameter_broadcast.py +2 -2
  260. mindspore/parallel/shard.py +17 -31
  261. mindspore/profiler/__init__.py +2 -3
  262. mindspore/profiler/common/util.py +2 -107
  263. mindspore/profiler/envprofiling.py +1 -1
  264. mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
  265. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
  266. mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
  267. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
  268. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
  269. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
  271. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
  272. mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
  273. mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
  274. mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
  275. mindspore/profiler/parser/minddata_parser.py +3 -72
  276. mindspore/profiler/profiling.py +59 -176
  277. mindspore/rewrite/api/node.py +1 -1
  278. mindspore/rewrite/common/namespace.py +5 -5
  279. mindspore/rewrite/parsers/assign_parser.py +0 -2
  280. mindspore/rewrite/parsers/class_def_parser.py +4 -8
  281. mindspore/run_check/_check_version.py +1 -1
  282. mindspore/scipy/fft.py +3 -1
  283. mindspore/scipy/linalg.py +3 -2
  284. mindspore/scipy/ops.py +3 -5
  285. mindspore/scipy/optimize/__init__.py +2 -2
  286. mindspore/train/__init__.py +4 -4
  287. mindspore/train/anf_ir_pb2.py +2 -8
  288. mindspore/train/callback/__init__.py +2 -5
  289. mindspore/train/callback/_backup_and_restore.py +2 -2
  290. mindspore/train/callback/_checkpoint.py +16 -104
  291. mindspore/train/callback/_landscape.py +1 -1
  292. mindspore/train/callback/_time_monitor.py +1 -1
  293. mindspore/train/data_sink.py +4 -5
  294. mindspore/train/dataset_helper.py +20 -45
  295. mindspore/train/model.py +38 -266
  296. mindspore/train/serialization.py +105 -256
  297. mindspore/train/summary/_summary_adapter.py +1 -1
  298. mindspore/version.py +1 -1
  299. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
  300. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
  301. mindspore/_extends/pijit/__init__.py +0 -23
  302. mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
  303. mindspore/common/file_system.py +0 -48
  304. mindspore/common/generator.py +0 -260
  305. mindspore/common/no_inline.py +0 -54
  306. mindspore/common/np_dtype.py +0 -25
  307. mindspore/communication/comm_func.py +0 -1140
  308. mindspore/hal/memory.py +0 -326
  309. mindspore/lib/libavcodec.so.59 +0 -0
  310. mindspore/lib/libavdevice.so.59 +0 -0
  311. mindspore/lib/libavfilter.so.8 +0 -0
  312. mindspore/lib/libavformat.so.59 +0 -0
  313. mindspore/lib/libavutil.so.57 +0 -0
  314. mindspore/lib/libmindspore_np_dtype.so +0 -0
  315. mindspore/lib/libswresample.so.4 +0 -0
  316. mindspore/lib/libswscale.so.6 +0 -0
  317. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
  318. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
  319. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
  320. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
  321. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
  322. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
  323. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
  324. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
  325. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
  326. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
  327. mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
  328. mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
  329. mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
  330. mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
  331. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  332. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
  333. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
  334. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
  335. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
  336. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
  337. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
  338. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  339. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  340. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
  341. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
  342. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
  343. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
  344. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
  345. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  346. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
  347. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
  348. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
  349. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
  350. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  351. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  352. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  353. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  354. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  355. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  356. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  357. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  358. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  359. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  360. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  361. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  362. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  363. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  364. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  365. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  366. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  367. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  368. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  369. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  370. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  371. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  372. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  373. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  374. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  375. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  376. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  377. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  378. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  379. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  380. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  381. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  382. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  383. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  384. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  385. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  386. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
  387. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
  388. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
  389. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
  390. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
  391. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
  392. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  393. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  394. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  395. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
  396. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
  397. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  398. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
  399. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
  400. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
  401. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
  402. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
  403. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
  404. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
  405. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
  406. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
  407. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  408. mindspore/mint/linalg/__init__.py +0 -22
  409. mindspore/nn/layer/embedding_service.py +0 -531
  410. mindspore/nn/layer/embedding_service_layer.py +0 -393
  411. mindspore/ops/function/reshard_func.py +0 -102
  412. mindspore/ops/operations/_infer_ops.py +0 -19
  413. mindspore/ops/operations/reshard_ops.py +0 -53
  414. mindspore/profiler/common/process_pool.py +0 -41
  415. mindspore/profiler/common/singleton.py +0 -28
  416. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  417. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  418. mindspore/train/callback/_cluster_monitor.py +0 -201
  419. mindspore/train/callback/_flops_collector.py +0 -238
  420. mindspore/train/callback/_mindio_ttp.py +0 -443
  421. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  422. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  423. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/context.py CHANGED
@@ -34,7 +34,6 @@ from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context
34
34
  from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context, \
35
35
  _need_reset_device_target_for_ps
36
36
  from mindspore.parallel._offload_context import _set_offload_context, _get_offload_context
37
- from mindspore.hal.device import is_initialized
38
37
 
39
38
  __all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'STRICT', 'COMPATIBLE', 'LAX', 'set_context', 'get_context',
40
39
  'set_auto_parallel_context', 'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode',
@@ -166,9 +165,7 @@ class _Context:
166
165
  self._context_switches = _ContextSwitchInfo(False)
167
166
  self._context_handle = MSContext.get_instance()
168
167
  self._support_binary = False
169
- self.enable_compile_cache = None
170
168
  self._mode = PYNATIVE_MODE
171
- self._jit_config = {}
172
169
 
173
170
  def __getattribute__(self, attr):
174
171
  value = object.__getattribute__(self, attr)
@@ -186,10 +183,6 @@ class _Context:
186
183
  """Get current mode."""
187
184
  return self._mode
188
185
 
189
- def get_jit_config(self):
190
- """Get current jit_config."""
191
- return self._jit_config
192
-
193
186
  def set_mode(self, mode):
194
187
  """
195
188
  Switch between Graph mode and PyNative mode.
@@ -232,6 +225,7 @@ class _Context:
232
225
  f"or context.DEBUG, but got {level}.")
233
226
  self.set_param(ms_ctx_param.debug_level, level)
234
227
 
228
+
235
229
  def set_memory_optimize_level(self, memory_optimize_level):
236
230
  """
237
231
  The memory optimize level, support "O0", "O1".
@@ -289,9 +283,6 @@ class _Context:
289
283
  - jit_compile (bool): ``False`` and ``True``.
290
284
  - atomic_clean_policy (int): ``0`` and ``1``. Default: ``1`` .
291
285
  - op_precision_mode (str): precision mode config file path.
292
- - op_debug_option (str): Enable debugging options for Ascend operators,
293
- default not enabled, only supports ``"oom"`` currently.
294
- ``"oom"``: Detect memory out of bounds.
295
286
  - ge_options (dict): Global or session CANN options.
296
287
  - exception_dump (str): Enable exception dump for Ascend operators. ``"0"`` , ``"1"`` and ``"2"``.
297
288
  Default: ``"2"`` .
@@ -312,12 +303,7 @@ class _Context:
312
303
  'ge_options': (dict,),
313
304
  'parallel_speed_up_json_path': (str, None),
314
305
  'host_scheduling_max_threshold': (int,),
315
- 'cur_step_num': (int,),
316
- 'save_checkpoint_steps': (int,),
317
- 'need_ckpt': (bool,),
318
- 'last_triggered_step': (int,),
319
- 'topo_order': (dict,),
320
- 'op_debug_option': (str, None),
306
+ 'topo_order': (dict,)
321
307
  }
322
308
  ascend_cfg_setters = {
323
309
  'precision_mode': self._get_ascend_config_setter('precision_mode'),
@@ -326,15 +312,10 @@ class _Context:
326
312
  'matmul_allow_hf32': self._get_ascend_config_setter('matmul_allow_hf32', lambda v: "1" if v else "0"),
327
313
  'conv_allow_hf32': self._get_ascend_config_setter('conv_allow_hf32', lambda v: "1" if v else "0"),
328
314
  'exception_dump': self._get_ascend_config_setter('exception_dump'),
329
- 'op_debug_option': self._set_op_debug_option,
330
315
  'op_precision_mode': self._set_op_precision_mode,
331
316
  'ge_options': self._set_ge_options,
332
317
  'parallel_speed_up_json_path': self._set_speedup_config_path,
333
318
  'host_scheduling_max_threshold': self._get_ascend_config_setter('host_scheduling_max_threshold', str),
334
- 'cur_step_num': self._set_cur_step_num,
335
- 'save_checkpoint_steps': self._set_save_checkpoint_steps,
336
- 'need_ckpt': self._set_need_ckpt,
337
- 'last_triggered_step': self._set_last_triggered_step,
338
319
  'topo_order': self._set_topo_order
339
320
  }
340
321
  ascend_cfg_set = tuple(ascend_cfg_modes.keys())
@@ -393,31 +374,6 @@ class _Context:
393
374
  if gpu_key == 'matmul_allow_tf32':
394
375
  self.set_param(ms_ctx_param.matmul_allow_tf32, gpu_config[gpu_key])
395
376
 
396
- def set_jit_config(self, jit_config):
397
- """
398
- Enable jit config.
399
-
400
- Args:
401
- jit_config (dict):
402
-
403
- - jit_level (str): "O0", "O1" or "O2" to control the compilation optimization level.
404
- """
405
- jit_cfgs = {'jit_level': ["O0", "O1", "O2"], 'infer_boost': ["on", "off"]}
406
- key_args_map = {'jit_level': ms_ctx_param.jit_level, 'infer_boost': ms_ctx_param.infer_boost}
407
- for jit_key in jit_config:
408
- if jit_key not in jit_cfgs:
409
- raise ValueError(f"For 'context.set_context', the key of argument 'jit_config' must be one of "
410
- f"{jit_cfgs}, but got {jit_key}.")
411
- supported_value = jit_cfgs.get(jit_key)
412
- if jit_config[jit_key] not in supported_value:
413
- raise ValueError(f"For 'jit_cfgs', the value of argument {jit_key} must be one of "
414
- f"{supported_value}, but got {jit_config[jit_key]}.")
415
- self._jit_config = jit_config
416
- self.set_param(key_args_map[jit_key], jit_config[jit_key])
417
-
418
- if 'infer_boost' in jit_config and jit_config['infer_boost'] == "on" and jit_config['jit_level'] != "O0":
419
- raise ValueError(f"Only jit_level set O0 can set infer_boost to on.")
420
-
421
377
  def set_backend_policy(self, policy):
422
378
  success = self._context_handle.set_backend_policy(policy)
423
379
  if not success:
@@ -539,13 +495,10 @@ class _Context:
539
495
 
540
496
  def set_mempool_block_size(self, mempool_block_size):
541
497
  """Set the block size of memory pool."""
542
- global_jit_config = get_jit_config()
543
- is_force_kbk = False
544
- if global_jit_config:
545
- is_force_kbk = global_jit_config.get('jit_level') == "O0" or global_jit_config.get('jit_level') == "O1"
546
- if _get_mode() == GRAPH_MODE and not is_force_kbk:
498
+ is_force_kbk = os.getenv("GRAPH_OP_RUN")
499
+ if _get_mode() == GRAPH_MODE and is_force_kbk != "1":
547
500
  logger.warning("Graph mode doesn't support to set parameter 'mempool_block_size' of context currently, "
548
- "you can use context.set_context to set pynative mode or set jit_level=O0/O1.")
501
+ "you can use context.set_context to set pynative mode or set env GRAPH_OP_RUN=1.")
549
502
  return
550
503
  if not Validator.check_str_by_regular(mempool_block_size, _RE_PATTERN):
551
504
  raise ValueError("For 'context.set_context', the argument 'mempool_block_size' should be in "
@@ -634,7 +587,6 @@ class _Context:
634
587
  'debug_level': set_debug_level,
635
588
  'gpu_config': set_gpu_config,
636
589
  'aoe_config': set_aoe_config,
637
- 'jit_config': set_jit_config,
638
590
  }
639
591
 
640
592
  @property
@@ -682,16 +634,6 @@ class _Context:
682
634
  trans_fn = lambda x: x
683
635
  return _config_setter
684
636
 
685
- def _set_op_debug_option(self, option_value):
686
- valid_order = {'oom'}
687
- if not isinstance(option_value, str):
688
- raise TypeError(f"For 'ascend_config', the type of 'op_debug_option' must be str, "
689
- f"but got {type(option_value)}.")
690
- if option_value not in valid_order:
691
- raise ValueError(f"For 'ascend_config', the 'op_debug_option' supports being set to 'oom' currently, "
692
- f"but got {option_value}.")
693
- self.set_param(ms_ctx_param.op_debug_option, option_value)
694
-
695
637
  def _set_op_precision_mode(self, ascend_value):
696
638
  op_precision_path = ascend_value
697
639
  real_path = os.path.realpath(op_precision_path)
@@ -744,30 +686,6 @@ class _Context:
744
686
  options_str = json.dumps(topo_order)
745
687
  self.set_param(ms_ctx_param.topo_order, options_str)
746
688
 
747
- def _set_need_ckpt(self, need_ckpt):
748
- """Set need ckpt flag"""
749
- if not isinstance(need_ckpt, bool):
750
- raise TypeError(f"For step num, the value type should be int, but got {type(need_ckpt)}, {need_ckpt}")
751
- self.set_param(ms_ctx_param.need_ckpt, need_ckpt)
752
-
753
- def _set_cur_step_num(self, step_num):
754
- """set current step num at every step begin"""
755
- if not isinstance(step_num, int):
756
- raise TypeError(f"For step num, the value type should be int, but got {type(step_num)}, {step_num}")
757
- self.set_param(ms_ctx_param.cur_step_num, step_num)
758
-
759
- def _set_save_checkpoint_steps(self, steps):
760
- """set save checkpoint steps before run"""
761
- if not isinstance(steps, int):
762
- raise TypeError(f"For step num, the value type should be int, but got {type(steps)}, {steps}")
763
- self.set_param(ms_ctx_param.save_checkpoint_steps, steps)
764
-
765
- def _set_last_triggered_step(self, step):
766
- """set last triggered save ckpt steps before run"""
767
- if not isinstance(step, int):
768
- raise TypeError(f"For step num, the value type should be int, but got {type(step)}, {step}")
769
- self.set_param(ms_ctx_param.last_triggered_step, step)
770
-
771
689
  def _set_speedup_config_path(self, speedup_config_path):
772
690
  """"Check and set speedup config for auto parallel."""
773
691
  if speedup_config_path is None or speedup_config_path == "":
@@ -782,8 +700,6 @@ class _Context:
782
700
  "matmul_grad_comm_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
783
701
  "enable_task_opt": (ms_ctx_param.enable_task_opt, bool),
784
702
  "enable_grad_comm_opt": (ms_ctx_param.enable_grad_comm_opt, bool),
785
- "recompute_allgather_overlap_fagrad":
786
- (ms_ctx_param.recompute_allgather_overlap_fagrad, bool),
787
703
  "interleaved_matmul_comm": (ms_ctx_param.interleaved_matmul_comm, bool),
788
704
  "bias_add_comm_swap": (ms_ctx_param.bias_add_comm_swap, bool),
789
705
  "enable_opt_shard_comm_opt": (ms_ctx_param.enable_opt_shard_comm_opt, bool),
@@ -875,7 +791,6 @@ def set_auto_parallel_context(**kwargs):
875
791
  \ comm_fusion
876
792
  \ strategy_ckpt_config
877
793
  \ group_ckpt_save_file
878
- \ auto_pipeline
879
794
  =========================== ===========================
880
795
 
881
796
  Args:
@@ -1024,9 +939,6 @@ def set_auto_parallel_context(**kwargs):
1024
939
  - only_trainable_params (bool): Only save/load the strategy information for trainable parameter.
1025
940
  Default: ``True`` .
1026
941
  group_ckpt_save_file (str): The path to save parallel group checkpoint.
1027
- auto_pipeline (bool): Set the pipeline stage number to automatic. Its value will be selected between 1 and the
1028
- parameter `pipeline_stages`. This option requires the `parallel_mode` to be ``auto_parallel``
1029
- and the `search_mode` to be ``recursive_programming``. Default: ``False`` .
1030
942
 
1031
943
  Raises:
1032
944
  ValueError: If input key is not attribute in auto parallel context.
@@ -1102,7 +1014,6 @@ def reset_auto_parallel_context():
1102
1014
  - pipeline_stages: 1.
1103
1015
  - pipeline_result_broadcast: False.
1104
1016
  - fusion_threshold: 64.
1105
- - auto_pipeline: False.
1106
1017
 
1107
1018
  Examples:
1108
1019
  >>> import mindspore as ms
@@ -1192,23 +1103,6 @@ def _check_target_specific_cfgs(device, arg_key):
1192
1103
  return False
1193
1104
 
1194
1105
 
1195
- def _check_ascend_device_context_initialized(device_target, settings):
1196
- if device_target == 'Ascend' and is_initialized(device_target):
1197
- for key, _ in settings.items():
1198
- if key in ('ascend_config', 'deterministic', 'jit_compile', 'exception_dump', 'device_id'):
1199
- logger.warning(f"For 'context.set_context' in Ascend backend, the backend is already initialized, "
1200
- "please set it before the definition of any Tensor and Parameter, and the "
1201
- "instantiation and execution of any operation and net, otherwise the settings may not "
1202
- "take effect. ")
1203
- break
1204
-
1205
-
1206
- def _check_key(key):
1207
- if key in ('precision_mode', 'jit_compile', 'atomic_clean_policy', 'matmul_allow_hf32', 'conv_allow_hf32',
1208
- 'op_precision_mode', 'host_scheduling_max_threshold', 'ge_options', 'op_debug_option'):
1209
- raise ValueError(f"Please set '{key}' through parameter ascend_config")
1210
-
1211
-
1212
1106
  @args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=(bool, int),
1213
1107
  save_graphs_path=str, enable_dump=bool, aoe_tune_mode=str, aoe_config=dict,
1214
1108
  save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
@@ -1218,7 +1112,7 @@ def _check_key(key):
1218
1112
  graph_kernel_flags=str, save_compile_cache=bool, runtime_num_threads=int, load_compile_cache=bool,
1219
1113
  grad_for_scalar=bool, pynative_synchronize=bool, mempool_block_size=str, disable_format_transform=bool,
1220
1114
  op_timeout=int, deterministic=str, ascend_config=dict, jit_syntax_level=int, debug_level=int,
1221
- jit_enable_inplace_ops=bool, gpu_config=dict, jit_config=dict, enable_compile_cache=bool)
1115
+ jit_enable_inplace_ops=bool, gpu_config=dict)
1222
1116
  def set_context(**kwargs):
1223
1117
  """
1224
1118
  Set context for running environment.
@@ -1309,8 +1203,6 @@ def set_context(**kwargs):
1309
1203
  | | jit_syntax_level | CPU/GPU/Ascend |
1310
1204
  | +------------------------------+----------------------------+
1311
1205
  | | gpu_config | GPU |
1312
- | +------------------------------+----------------------------+
1313
- | | jit_config | CPU/GPU/Ascend |
1314
1206
  +-------------------------+------------------------------+----------------------------+
1315
1207
 
1316
1208
  Args:
@@ -1323,15 +1215,15 @@ def set_context(**kwargs):
1323
1215
  and max_device_memory. 'max_device_memory' should be set before the program runs.
1324
1216
  variable_memory_max_size (str): This parameter is deprecated, and will be removed in a future version.
1325
1217
  Please use parameter 'max_device_memory' instead.
1326
- mempool_block_size (str): Set the size of the memory pool block in PyNative mode or jit level is 'O0'/'O1'
1327
- for devices. The format is "xxGB". Default: ``"1GB"`` . Minimum size is "1G". The actual used memory block
1328
- size is the minimum of the available memory of the device and mempool_block_size.
1218
+ mempool_block_size (str): Set the size of the memory pool block in PyNative mode or GRAPH_OP_RUN=1 for devices.
1219
+ The format is "xxGB". Default: ``"1GB"`` . Minimum size is "1G". The actual used memory block size is the
1220
+ minimum of the available memory of the device and mempool_block_size.
1329
1221
  op_timeout (int): Set the maximum duration of executing an operator in seconds.
1330
1222
  If the execution time exceeds this value, system will terminate the task.
1331
1223
  0 means endless wait. The defaults for AI Core and AICPU operators vary on different hardware.
1332
1224
  For more information,
1333
- please refer to `Ascend Community document about aclrtSetOpExecuteTimeOut
1334
- <https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/infacldevg/aclcppdevg/aclcppdevg_03_0069.html>`_.
1225
+ please refer to `Ascend Community
1226
+ <https://www.hiascend.com/>`_.
1335
1227
  Default: ``900`` .
1336
1228
  save_graphs (bool or int): Whether to save intermediate compilation graphs. Default: ``0`` .
1337
1229
  Available values are:
@@ -1341,9 +1233,6 @@ def set_context(**kwargs):
1341
1233
  - True or 2: Generate more ir files related to backend process.
1342
1234
  - 3: Generate visualization computing graphs and detailed frontend ir graphs.
1343
1235
 
1344
- When the network structure is complex, setting `save_graphs` attribute to ``2`` or ``3`` may take too long.
1345
- If you need quick problem locating, you can switch to ``1`` first.
1346
-
1347
1236
  When the `save_graphs` attribute is set as ``True`` , ``1`` , ``2`` or ``3`` , attribute of
1348
1237
  `save_graphs_path` is used to set the intermediate compilation graph storage path. By default, the graphs
1349
1238
  are saved in the current directory.
@@ -1367,8 +1256,6 @@ def set_context(**kwargs):
1367
1256
  If the saved file already exists, the timestamp suffix will be added to the file. Saving data to a file
1368
1257
  solves the problem of data loss in screen printing when a large amount of data is generated.
1369
1258
  If it is not set, an error will be reported: prompt to set the upper absolute path.
1370
- When print data to file, the total output bytes of single print must be less then 2GB(limited by
1371
- protobuf).
1372
1259
  env_config_path (str): Config path for DFX.
1373
1260
  Through mindspore.set_context(env_config_path="./mindspore_config.json")
1374
1261
 
@@ -1544,17 +1431,10 @@ def set_context(**kwargs):
1544
1431
  For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
1545
1432
  - exception_dump (str): Enable exception dump for Ascend operators, providing the input and output data for
1546
1433
  failing Ascend operators. The value can be ``"0"`` , ``"1"`` and ``"2"``. For ``"0"`` , exception dump is
1547
- turned off; for ``"1"``, all inputs and outputs will be dumped for AICore exception operators;
1548
- for ``"2"``, inputs will be dumped for AICore exception operators, reducing the saved information
1549
- but improving performance. Default: ``"2"`` .
1434
+ turned off; for ``"1"``, all inputs and outputs will be dumped for AICore and AICPU exception operators;
1435
+ for ``"2"``, inputs will be dumped for AICore exception operators. Default: ``"2"`` .
1550
1436
  - op_precision_mode (str): Path to config file of op precision mode. For detailed information, please refer
1551
1437
  to `Ascend community <https://www.hiascend.com/>`_ .
1552
- - op_debug_option (str): Enable debugging options for Ascend operators, default not enabled.
1553
- The value currently only supports being set to ``"oom"``.
1554
-
1555
- - ``"oom"``: When there is a memory out of bounds during the execution of an operator,
1556
- AscendCL will return an error code of ``EZ9999``.
1557
-
1558
1438
  - ge_options (dict): Set options for CANN. The options are divided into two categories: global and session.
1559
1439
  This is an experimental prototype that is subject to change and/or deletion.
1560
1440
  For detailed information, please refer to `Ascend community <https://www.hiascend.com/document/detail/zh/canncommercial/70RC1/inferapplicationdev/graphdevg/atlasgeapi_07_0119.html>`_ .
@@ -1574,10 +1454,7 @@ def set_context(**kwargs):
1574
1454
  Default: False.
1575
1455
  - matmul_grad_comm_overlap (bool): Enable overlap between dw matmul and
1576
1456
  tensor parallel communication ops if True. Default: False.
1577
- - recompute_allgather_overlap_fagrad (bool): Enable overlap between duplicated allgather by recomputing
1578
- in sequence parallel and flashattentionscoregrad ops if True. Default: False.
1579
- - enable_task_opt (bool): Enable communication fusion to optimize the number of communication operator
1580
- tasks if True.
1457
+ - enable_task_opt (bool): Enable the optimization of the number of tasks for each communication if True.
1581
1458
  Default: False.
1582
1459
  - enable_grad_comm_opt (bool): Enable overlap between dx ops and data parallel communication ops if True.
1583
1460
  Currently, do not support
@@ -1597,8 +1474,6 @@ def set_context(**kwargs):
1597
1474
  - 2: Apply fusion to backward nodes.
1598
1475
 
1599
1476
  - 3: Apply fusion to all nodes.
1600
- - bias_add_comm_swap (bool): Enable node execution order swap communication operators and add operators
1601
- if ``True``. Only 1-dimension bias node is supported. Default: ``False``.
1602
1477
  - host_scheduling_max_threshold(int): The max threshold to control whether the dynamic shape process is
1603
1478
  used when run the static graph, the default value is 0. When the number of operations in the static graph
1604
1479
  is less than the max threshold, this graph will be executed in dynamic shape process. In large model
@@ -1691,26 +1566,6 @@ def set_context(**kwargs):
1691
1566
  - matmul_allow_tf32 (bool): The flag below controls to allow Tensor core TF32 computation on CUBLAS and the
1692
1567
  default value is ``False``.
1693
1568
 
1694
- jit_config (dict): Set the global jit config for compile, take effect in network defined in Cell or jit
1695
- decorators. It is not set by default.
1696
- The setting in context is the global jit config, while JitConfig is the local network's jit config.
1697
- When both exist simultaneously, the global jit config will not overwrite the local network's jit config.
1698
-
1699
- - jit_level (str): Used to control the compilation optimization level. Default: ``""`` , The framework
1700
- automatically selects the execution method based on product, Altas training product is O2, and all other
1701
- products are O0. The value range is as follows:
1702
-
1703
- - ``"O0"``: Except for optimizations that may affect functionality, all other optimizations are turned
1704
- off, adopt KernelByKernel execution mode.
1705
- - ``"O1"``: Using commonly used optimizations and automatic operator fusion optimizations,
1706
- adopt KernelByKernel execution mode.
1707
- - ``"O2"``: Ultimate performance optimization, adopt Sink execution mode.
1708
-
1709
- - infer_boost (str): Used to control the infer mode. Default: ``"off"`` . The value range is as follows:
1710
-
1711
- - ``"on"``: Enable infer mode, get better infer performance.
1712
- - ``"off"``: Disable infer mode, use forward to infer, performance is not good.
1713
-
1714
1569
  Raises:
1715
1570
  ValueError: If input key is not an attribute in context.
1716
1571
 
@@ -1745,22 +1600,18 @@ def set_context(**kwargs):
1745
1600
  >>> ms.set_context(deterministic='ON')
1746
1601
  >>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True,
1747
1602
  ... "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file",
1748
- ... "op_debug_option": "oom",
1749
1603
  ... "ge_options": {"global": {"ge.opSelectImplmode": "high_precision"},
1750
1604
  ... "session": {"ge.exec.atomicCleanPolicy": "0"}}})
1751
1605
  >>> ms.set_context(jit_syntax_level=ms.STRICT)
1752
- >>> ms.set_context(debug_level=ms.context.DEBUG)
1606
+ >>> ms.set_context(debug_level=ms.DEBUG)
1753
1607
  >>> ms.set_context(gpu_config={"conv_fprop_algo": "performance", "conv_allow_tf32": True,
1754
1608
  ... "matmul_allow_tf32": True})
1755
- >>> ms.set_context(jit_config={"jit_level": "O0"})
1756
1609
  """
1757
1610
  ctx = _context()
1758
1611
  # set device target first
1759
1612
  if 'device_target' in kwargs:
1760
1613
  ctx.set_device_target(kwargs['device_target'])
1761
1614
  device = ctx.get_param(ms_ctx_param.device_target)
1762
- _check_ascend_device_context_initialized(device, kwargs)
1763
-
1764
1615
  for key, value in kwargs.items():
1765
1616
  if key in ('enable_sparse', 'auto_tune_mode'):
1766
1617
  logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated, "
@@ -1770,7 +1621,9 @@ def set_context(**kwargs):
1770
1621
  logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated. "
1771
1622
  "For details, please see the interface parameter API comments")
1772
1623
  continue
1773
- _check_key(key)
1624
+ if key in ('precision_mode', 'jit_compile', 'atomic_clean_policy', 'matmul_allow_hf32', 'conv_allow_hf32',
1625
+ 'op_precision_mode', 'host_scheduling_max_threshold', 'ge_options'):
1626
+ raise ValueError(f"Please set '{key}' through parameter ascend_config")
1774
1627
  if key == 'save_graphs':
1775
1628
  if value is True:
1776
1629
  value = 2
@@ -1784,10 +1637,6 @@ def set_context(**kwargs):
1784
1637
  if key == 'debug_level' and value not in (RELEASE, DEBUG):
1785
1638
  raise ValueError(f"For 'debug_level', the value should be context.DEBUG"
1786
1639
  f" or context.RELEASE, but got {value}.")
1787
- if key == 'enable_compile_cache':
1788
- setattr(ctx, key, value)
1789
- ctx.set_param(ms_ctx_param.__members__[key], int(value))
1790
- continue
1791
1640
  if not _check_target_specific_cfgs(device, key):
1792
1641
  continue
1793
1642
  if hasattr(ctx, key):
@@ -1845,17 +1694,6 @@ def _get_mode():
1845
1694
  return ctx.get_mode()
1846
1695
 
1847
1696
 
1848
- def get_jit_config():
1849
- """
1850
- Get global jit config.
1851
-
1852
- Returns:
1853
- Object: The Value of jit config.
1854
- """
1855
- ctx = _context()
1856
- return ctx.get_jit_config()
1857
-
1858
-
1859
1697
  class ParallelMode:
1860
1698
  """
1861
1699
  Parallel mode options.