mindspore-2.3.0-cp39-none-any.whl → mindspore-2.3.0rc2-cp39-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (423)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
  3. mindspore/__init__.py +1 -2
  4. mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
  6. mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
  7. mindspore/_checkparam.py +25 -5
  8. mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +0 -29
  11. mindspore/_extends/parse/namespace.py +2 -2
  12. mindspore/_extends/parse/parser.py +5 -21
  13. mindspore/_extends/parse/resources.py +7 -5
  14. mindspore/_extends/parse/standard_method.py +59 -40
  15. mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
  16. mindspore/amp.py +5 -26
  17. mindspore/bin/cache_admin +0 -0
  18. mindspore/bin/cache_server +0 -0
  19. mindspore/boost/adasum.py +1 -1
  20. mindspore/boost/base.py +1 -1
  21. mindspore/boost/boost_cell_wrapper.py +1 -1
  22. mindspore/boost/grad_freeze.py +2 -2
  23. mindspore/boost/less_batch_normalization.py +6 -9
  24. mindspore/common/__init__.py +1 -8
  25. mindspore/common/_register_for_tensor.py +9 -8
  26. mindspore/common/api.py +65 -275
  27. mindspore/common/dtype.py +4 -8
  28. mindspore/common/dump.py +5 -2
  29. mindspore/common/jit_config.py +1 -1
  30. mindspore/common/lazy_inline.py +2 -14
  31. mindspore/common/parameter.py +15 -14
  32. mindspore/common/recompute.py +5 -20
  33. mindspore/common/sparse_tensor.py +6 -21
  34. mindspore/common/tensor.py +52 -100
  35. mindspore/communication/__init__.py +11 -6
  36. mindspore/communication/management.py +94 -92
  37. mindspore/context.py +18 -180
  38. mindspore/dataset/engine/datasets.py +46 -69
  39. mindspore/dataset/engine/datasets_user_defined.py +53 -72
  40. mindspore/dataset/engine/datasets_vision.py +2 -2
  41. mindspore/dataset/engine/queue.py +38 -56
  42. mindspore/dataset/engine/validators.py +5 -11
  43. mindspore/dataset/vision/__init__.py +5 -5
  44. mindspore/dataset/vision/c_transforms.py +5 -5
  45. mindspore/dataset/vision/py_transforms_util.py +1 -1
  46. mindspore/dataset/vision/transforms.py +46 -591
  47. mindspore/dataset/vision/utils.py +1 -121
  48. mindspore/dataset/vision/validators.py +3 -9
  49. mindspore/hal/__init__.py +1 -7
  50. mindspore/hal/device.py +1 -1
  51. mindspore/include/api/model.h +0 -3
  52. mindspore/include/dataset/vision.h +2 -54
  53. mindspore/include/mindapi/base/types.h +0 -1
  54. mindspore/lib/libdnnl.so.2 +0 -0
  55. mindspore/lib/libmindspore.so +0 -0
  56. mindspore/lib/libmindspore_backend.so +0 -0
  57. mindspore/lib/libmindspore_common.so +0 -0
  58. mindspore/lib/libmindspore_core.so +0 -0
  59. mindspore/lib/libmindspore_glog.so.0 +0 -0
  60. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  61. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  62. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  63. mindspore/lib/libmindspore_shared_lib.so +0 -0
  64. mindspore/lib/libmpi_adapter.so +0 -0
  65. mindspore/lib/libmpi_collective.so +0 -0
  66. mindspore/lib/libnnacl.so +0 -0
  67. mindspore/lib/libopencv_core.so.4.5 +0 -0
  68. mindspore/lib/libps_cache.so +0 -0
  69. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
  70. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  71. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  72. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  73. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
  76. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
  77. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
  78. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  79. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
  80. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
  81. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
  82. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  83. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  84. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
  85. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
  86. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  87. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  88. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
  89. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  90. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  91. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  92. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  93. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  94. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  164. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  165. mindspore/mindrecord/filewriter.py +2 -2
  166. mindspore/mint/__init__.py +40 -720
  167. mindspore/mint/nn/__init__.py +7 -89
  168. mindspore/mint/nn/functional.py +16 -165
  169. mindspore/mint/optim/adamw.py +16 -15
  170. mindspore/nn/__init__.py +2 -0
  171. mindspore/nn/cell.py +98 -97
  172. mindspore/nn/extend/basic.py +2 -2
  173. mindspore/nn/extend/embedding.py +1 -1
  174. mindspore/nn/extend/layer/normalization.py +5 -7
  175. mindspore/nn/generator.py +297 -0
  176. mindspore/nn/layer/activation.py +3 -4
  177. mindspore/nn/layer/basic.py +16 -79
  178. mindspore/nn/layer/conv.py +8 -17
  179. mindspore/nn/layer/embedding.py +4 -1
  180. mindspore/nn/layer/math.py +1 -1
  181. mindspore/nn/layer/normalization.py +1 -1
  182. mindspore/nn/layer/pooling.py +0 -5
  183. mindspore/nn/layer/rnn_cells.py +2 -2
  184. mindspore/nn/loss/loss.py +19 -19
  185. mindspore/nn/optim/adasum.py +1 -1
  186. mindspore/nn/optim/sgd.py +2 -3
  187. mindspore/nn/probability/distribution/exponential.py +1 -1
  188. mindspore/nn/probability/distribution/geometric.py +1 -1
  189. mindspore/nn/probability/distribution/logistic.py +1 -1
  190. mindspore/nn/wrap/cell_wrapper.py +1 -25
  191. mindspore/nn/wrap/loss_scale.py +1 -24
  192. mindspore/numpy/array_ops.py +1 -5
  193. mindspore/numpy/dtypes.py +3 -3
  194. mindspore/numpy/math_ops.py +8 -8
  195. mindspore/ops/__init__.py +1 -1
  196. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
  197. mindspore/ops/_vmap/vmap_array_ops.py +0 -27
  198. mindspore/ops/_vmap/vmap_math_ops.py +1 -29
  199. mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
  200. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
  201. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
  202. mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
  203. mindspore/ops/auto_generate/gen_extend_func.py +27 -603
  204. mindspore/ops/auto_generate/gen_ops_def.py +203 -993
  205. mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
  206. mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
  207. mindspore/ops/composite/base.py +6 -3
  208. mindspore/ops/composite/math_ops.py +1 -1
  209. mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
  210. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  211. mindspore/ops/extend/__init__.py +3 -2
  212. mindspore/ops/extend/array_func.py +51 -10
  213. mindspore/ops/extend/nn_func.py +78 -2
  214. mindspore/ops/function/__init__.py +13 -8
  215. mindspore/ops/function/array_func.py +179 -455
  216. mindspore/ops/function/clip_func.py +1 -1
  217. mindspore/ops/function/grad/grad_func.py +3 -3
  218. mindspore/ops/function/math_func.py +103 -117
  219. mindspore/ops/function/nn_func.py +163 -275
  220. mindspore/ops/function/other_func.py +2 -2
  221. mindspore/ops/function/random_func.py +69 -202
  222. mindspore/ops/function/sparse_func.py +4 -4
  223. mindspore/ops/functional.py +327 -332
  224. mindspore/ops/operations/__init__.py +3 -13
  225. mindspore/ops/operations/_grad_ops.py +27 -3
  226. mindspore/ops/operations/_inner_ops.py +356 -53
  227. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  228. mindspore/ops/operations/_tensor_array.py +8 -8
  229. mindspore/ops/operations/array_ops.py +65 -82
  230. mindspore/ops/operations/comm_ops.py +93 -784
  231. mindspore/ops/operations/custom_ops.py +28 -51
  232. mindspore/ops/operations/debug_ops.py +4 -4
  233. mindspore/ops/operations/inner_ops.py +2 -2
  234. mindspore/ops/operations/manually_defined/ops_def.py +4 -304
  235. mindspore/ops/operations/math_ops.py +50 -3
  236. mindspore/ops/operations/nn_ops.py +247 -14
  237. mindspore/ops/operations/other_ops.py +3 -3
  238. mindspore/ops/operations/random_ops.py +1 -1
  239. mindspore/ops/operations/sparse_ops.py +1 -1
  240. mindspore/ops/primitive.py +8 -9
  241. mindspore/ops/silent_check.py +5 -5
  242. mindspore/ops_generate/arg_dtype_cast.py +9 -2
  243. mindspore/ops_generate/arg_handler.py +0 -26
  244. mindspore/ops_generate/gen_aclnn_implement.py +4 -1
  245. mindspore/ops_generate/gen_ops.py +4 -26
  246. mindspore/ops_generate/gen_pyboost_func.py +12 -41
  247. mindspore/ops_generate/gen_utils.py +0 -21
  248. mindspore/ops_generate/pyboost_utils.py +2 -7
  249. mindspore/ops_generate/template.py +0 -1
  250. mindspore/parallel/_auto_parallel_context.py +1 -21
  251. mindspore/parallel/_tensor.py +5 -0
  252. mindspore/parallel/_transformer/transformer.py +1 -1
  253. mindspore/parallel/_utils.py +1 -15
  254. mindspore/parallel/algo_parameter_config.py +3 -1
  255. mindspore/parallel/checkpoint_transform.py +9 -12
  256. mindspore/parallel/cluster/process_entity/_api.py +29 -28
  257. mindspore/parallel/cluster/process_entity/_utils.py +3 -13
  258. mindspore/parallel/cluster/run.py +16 -13
  259. mindspore/parallel/parameter_broadcast.py +2 -2
  260. mindspore/parallel/shard.py +17 -31
  261. mindspore/profiler/__init__.py +2 -3
  262. mindspore/profiler/common/util.py +2 -107
  263. mindspore/profiler/envprofiling.py +1 -1
  264. mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
  265. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
  266. mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
  267. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
  268. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
  269. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
  271. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
  272. mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
  273. mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
  274. mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
  275. mindspore/profiler/parser/minddata_parser.py +3 -72
  276. mindspore/profiler/profiling.py +59 -176
  277. mindspore/rewrite/api/node.py +1 -1
  278. mindspore/rewrite/common/namespace.py +5 -5
  279. mindspore/rewrite/parsers/assign_parser.py +0 -2
  280. mindspore/rewrite/parsers/class_def_parser.py +4 -8
  281. mindspore/run_check/_check_version.py +1 -1
  282. mindspore/scipy/fft.py +3 -1
  283. mindspore/scipy/linalg.py +3 -2
  284. mindspore/scipy/ops.py +3 -5
  285. mindspore/scipy/optimize/__init__.py +2 -2
  286. mindspore/train/__init__.py +4 -4
  287. mindspore/train/anf_ir_pb2.py +2 -8
  288. mindspore/train/callback/__init__.py +2 -5
  289. mindspore/train/callback/_backup_and_restore.py +2 -2
  290. mindspore/train/callback/_checkpoint.py +16 -104
  291. mindspore/train/callback/_landscape.py +1 -1
  292. mindspore/train/callback/_time_monitor.py +1 -1
  293. mindspore/train/data_sink.py +4 -5
  294. mindspore/train/dataset_helper.py +20 -45
  295. mindspore/train/model.py +38 -266
  296. mindspore/train/serialization.py +105 -256
  297. mindspore/train/summary/_summary_adapter.py +1 -1
  298. mindspore/version.py +1 -1
  299. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
  300. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
  301. mindspore/_extends/pijit/__init__.py +0 -23
  302. mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
  303. mindspore/common/file_system.py +0 -48
  304. mindspore/common/generator.py +0 -260
  305. mindspore/common/no_inline.py +0 -54
  306. mindspore/common/np_dtype.py +0 -25
  307. mindspore/communication/comm_func.py +0 -1140
  308. mindspore/hal/memory.py +0 -326
  309. mindspore/lib/libavcodec.so.59 +0 -0
  310. mindspore/lib/libavdevice.so.59 +0 -0
  311. mindspore/lib/libavfilter.so.8 +0 -0
  312. mindspore/lib/libavformat.so.59 +0 -0
  313. mindspore/lib/libavutil.so.57 +0 -0
  314. mindspore/lib/libmindspore_np_dtype.so +0 -0
  315. mindspore/lib/libswresample.so.4 +0 -0
  316. mindspore/lib/libswscale.so.6 +0 -0
  317. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
  318. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
  319. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
  320. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
  321. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
  322. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
  323. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
  324. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
  325. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
  326. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
  327. mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
  328. mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
  329. mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
  330. mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
  331. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  332. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
  333. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
  334. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
  335. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
  336. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
  337. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
  338. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  339. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  340. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
  341. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
  342. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
  343. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
  344. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
  345. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  346. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
  347. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
  348. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
  349. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
  350. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  351. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  352. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  353. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  354. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  355. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  356. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  357. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  358. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  359. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  360. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  361. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  362. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  363. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  364. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  365. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  366. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  367. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  368. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  369. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  370. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  371. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  372. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  373. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  374. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  375. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  376. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  377. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  378. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  379. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  380. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  381. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  382. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  383. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  384. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  385. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  386. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
  387. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
  388. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
  389. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
  390. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
  391. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
  392. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  393. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  394. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  395. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
  396. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
  397. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  398. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
  399. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
  400. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
  401. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
  402. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
  403. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
  404. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
  405. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
  406. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
  407. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  408. mindspore/mint/linalg/__init__.py +0 -22
  409. mindspore/nn/layer/embedding_service.py +0 -531
  410. mindspore/nn/layer/embedding_service_layer.py +0 -393
  411. mindspore/ops/function/reshard_func.py +0 -102
  412. mindspore/ops/operations/_infer_ops.py +0 -19
  413. mindspore/ops/operations/reshard_ops.py +0 -53
  414. mindspore/profiler/common/process_pool.py +0 -41
  415. mindspore/profiler/common/singleton.py +0 -28
  416. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  417. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  418. mindspore/train/callback/_cluster_monitor.py +0 -201
  419. mindspore/train/callback/_flops_collector.py +0 -238
  420. mindspore/train/callback/_mindio_ttp.py +0 -443
  421. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  422. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  423. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/dataset/engine/datasets.py

@@ -71,8 +71,7 @@ from .queue import _SharedQueue, _Queue
 from .validators import check_batch, check_shuffle, check_map, check_filter, check_repeat, check_skip, check_zip, \
     check_rename, check_device_send, check_take, check_output_shape, check_project, \
     check_sync_wait, check_zip_dataset, check_add_column, check_concat, check_split, check_bucket_batch_by_length, \
-    check_save, check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send, check_padded_batch, \
-    check_total_batch
+    check_save, check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send, check_padded_batch
 from ..core.config import get_callback_timeout, _init_device_info, get_enable_shared_mem, get_num_parallel_workers, \
     get_enable_watchdog, get_seed, set_seed, get_debug_mode, get_multiprocessing_timeout_interval, _get_debug_hook_list
 from ..core.datatypes import mstype_to_detype
@@ -605,9 +604,8 @@ class Dataset:
 
         - max_rowsize(Union[int, list[int]], optional): Maximum size of row in MB that is used for shared memory
           allocation to copy data between processes, the total occupied shared memory will increase as
-          ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. If set
-          to -1, shared memory will be dynamically allocated with the actual size of data. This is only used if
-          ``python_multiprocessing`` is set to True. If it is an int value, it represents
+          ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+          used if python_multiprocessing is set to True. If it is an int value, it represents
           ``input_columns`` and ``output_columns`` use this value as the unit to create shared memory.
           If it is a list, the first element represents the ``input_columns`` use this value as the unit to
           create shared memory, and the second element represents ``output_columns`` use this value as the unit
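Note: this hunk (repeated for several APIs below) removes the 2.3.0 documentation for `max_rowsize=-1`, which allocated shared memory dynamically from the actual row size; rc2 documents only fixed per-row budgets. A minimal usage sketch of the parameter on `Dataset.map` (the transform and data here are illustrative, not from the diff):

```python
import numpy as np
import mindspore.dataset as ds

def double(col):
    # a Python transform heavy enough to justify process-based parallelism
    return col * 2

data = ds.NumpySlicesDataset(np.ones((100, 224, 224, 3), dtype=np.float32),
                             column_names=["image"])
# max_rowsize is the per-row shared-memory budget in MB; it only matters
# when python_multiprocessing=True. In 2.3.0, max_rowsize=-1 additionally
# requested dynamic allocation instead of a fixed budget.
data = data.map(operations=double, input_columns=["image"],
                python_multiprocessing=True, num_parallel_workers=4,
                max_rowsize=16)
```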
@@ -899,9 +897,8 @@ class Dataset:
 
         - max_rowsize (Union[int, list[int]], optional): Maximum size of row in MB that is used for shared
           memory allocation to copy data between processes, the total occupied shared memory will increase as
-          ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. If set
-          to -1, shared memory will be dynamically allocated with the actual size of data. This is only used if
-          ``python_multiprocessing`` is set to True. If it is an int value, it represents
+          ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+          used if python_multiprocessing is set to True. If it is an int value, it represents
           ``input_columns`` and ``output_columns`` use this value as the unit to create shared memory.
           If it is a list, the first element represents the ``input_columns`` use this value as the unit to
           create shared memory, and the second element represents ``output_columns`` use this value as the unit
@@ -1571,34 +1568,32 @@ class Dataset:
     @check_tuple_iterator
     def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
         """
-        Create an iterator over the dataset that yields samples of type list, whose elements are
-        the data for each column.
+        Create an iterator over the dataset. The datatype retrieved back will be a list of `numpy.ndarray` .
+
+        To specify which columns to list and the order needed, use columns_list. If columns_list
+        is not provided, the order of the columns will remain unchanged.
 
         Args:
-            columns (list[str], optional): Specify the output columns and the order.
-                Default: ``None``, keep all the output columns and their original order.
-            num_epochs (int, optional): The number of epochs to iterate over the entire dataset.
-                Default: ``-1`` , the dataset can be iterated indefinitely.
-            output_numpy (bool, optional): Whether to keep the output data as NumPy ndarray, or
-                convert it to Tensor. Default: ``False`` .
-            do_copy (bool, optional): Whether to copy the data when converting output to Tensor,
-                or reuse the buffer for better performance, only works when `output_numpy` is ``False`` .
-                Default: ``True`` .
+            columns (list[str], optional): List of columns to be used to specify the order of columns.
+                Default: ``None``, means all columns.
+            num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.
+                Default: ``-1``, iterator can be iterated infinite number of epochs.
+            output_numpy (bool, optional): Whether or not to output NumPy datatype.
+                If `output_numpy` is ``False``, iterator will output MSTensor. Default: ``False``.
+            do_copy (bool, optional): When output data type is :class:`mindspore.Tensor`,
+                use this param to select the conversion method, only take False for better performance.
+                Default: ``True``.
 
         Returns:
-            Iterator, a dataset iterator that yields samples of type list.
+            Iterator, a dataset iterator that returns data of type Tuple.
 
         Examples:
             >>> import mindspore.dataset as ds
-            >>>
-            >>> dataset = ds.GeneratorDataset([i for i in range(10)], "data")
-            >>> num_epochs = 3
-            >>> iterator = dataset.create_tuple_iterator(num_epochs=num_epochs)
-            >>> for epoch in range(num_epochs):
-            ...     for item in iterator:
-            ...         # output is of type tuple
-            ...         print(type(item))
-            ...         break
+            >>> dataset = ds.GeneratorDataset([i for i in range(10)], "column1")
+            >>> iterator = dataset.create_tuple_iterator()
+            >>> for item in iterator:
+            ...     # item is a list
+            ...     print(type(item))
             ...     break
             <class 'list'>
         """
@@ -1612,32 +1607,27 @@ class Dataset:
     @check_dict_iterator
     def create_dict_iterator(self, num_epochs=-1, output_numpy=False, do_copy=True):
         """
-        Create an iterator over the dataset that yields samples of type dict,
-        while the key is the column name and the value is the data.
+        Create an iterator over the dataset. The data retrieved will be a dictionary datatype.
 
         Args:
-            num_epochs (int, optional): The number of epochs to iterate over the entire dataset.
-                Default: ``-1`` , the dataset can be iterated indefinitely.
-            output_numpy (bool, optional): Whether to keep the output data as NumPy ndarray, or
-                convert it to Tensor. Default: ``False`` .
-            do_copy (bool, optional): Whether to copy the data when converting output to Tensor,
-                or reuse the buffer for better performance, only works when `output_numpy` is ``False`` .
+            num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.
+                Default: ``-1`` , iterator can be iterated infinite number of epochs.
+            output_numpy (bool, optional): Whether or not to output NumPy datatype,
+                if `output_numpy` is ``False``, iterator will output MSTensor. Default: ``False`` .
+            do_copy (bool, optional): When output data type is :class:`mindspore.Tensor`,
+                use this param to select the conversion method, only take False for better performance.
                 Default: ``True`` .
 
         Returns:
-            Iterator, a dataset iterator that yields samples of type dict.
+            Iterator, a dataset iterator that returns data of type Dict.
 
         Examples:
            >>> import mindspore.dataset as ds
-            >>>
-            >>> dataset = ds.GeneratorDataset([i for i in range(10)], "data")
-            >>> num_epochs = 3
-            >>> iterator = dataset.create_dict_iterator(num_epochs=num_epochs)
-            >>> for epoch in range(num_epochs):
-            ...     for item in iterator:
-            ...         # output is of type dict
-            ...         print(type(item))
-            ...         break
+            >>> dataset = ds.GeneratorDataset([i for i in range(10)], "column1")
+            >>> iterator = dataset.create_dict_iterator()
+            >>> for item in iterator:
+            ...     # item is a dict
+            ...     print(type(item))
             ...     break
             <class 'dict'>
         """
@@ -2574,8 +2564,7 @@ def _check_shm_usage(num_worker, queue_size, in_rowsize, out_rowsize):
     when training in parallel mode.
     """
     threshold_ratio = 0.8
-    # Verify available size only when using static shared memory on Linux
-    if platform.system().lower() not in {"windows", "darwin"} and in_rowsize != -1 and out_rowsize != -1:
+    if platform.system().lower() not in {"windows", "darwin"}:
         device_num = _get_device_num()
         # In the cluster, _get_device_num indicates the number of the entire cluster. The maximum number of cards
         # on the ascend server is 8.
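For orientation, a self-contained sketch of the kind of budget check a function like this performs. The helper name and the exact formula below are illustrative assumptions, not MindSpore's actual implementation:

```python
import shutil

def shm_within_budget(num_worker, queue_size, in_rowsize, out_rowsize,
                      device_num=1, threshold_ratio=0.8):
    """Illustrative: assume the worst case where every queue slot on every
    worker holds one max-size input row and one max-size output row (MB)."""
    needed_mb = device_num * num_worker * queue_size * (in_rowsize + out_rowsize)
    # /dev/shm backs POSIX shared memory on Linux
    free_mb = shutil.disk_usage("/dev/shm").free / (1024 * 1024)
    return needed_mb <= threshold_ratio * free_mb
```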
@@ -2623,9 +2612,8 @@ class BatchDataset(UnionBaseDataset):
             name as the input columns, i.e., the columns will be replaced.
         max_rowsize(Union[int, list[int]], optional): Maximum size of row in MB that is used for shared memory
             allocation to copy data between processes, the total occupied shared memory will increase as
-            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. If set to -1,
-            shared memory will be dynamically allocated with the actual size of data. This is only used if
-            ``python_multiprocessing`` is set to True. If it is an int value, it represents
+            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+            used if python_multiprocessing is set to True. If it is an int value, it represents
             ``input_columns`` and ``output_columns`` use this value as the unit to create shared memory.
             If it is a list, the first element represents the ``input_columns`` use this value as the unit to
             create shared memory, and the second element represents ``output_columns`` use this value as the unit
@@ -2656,7 +2644,7 @@ class BatchDataset(UnionBaseDataset):
         self.python_multiprocessing = python_multiprocessing
         self.process_pool = None
         if isinstance(max_rowsize, int):
-            self.max_rowsize = [max_rowsize * self.batch_size] * 2 if max_rowsize != -1 else [max_rowsize, max_rowsize]
+            self.max_rowsize = [max_rowsize * self.batch_size] * 2
         else:
            self.max_rowsize = [max_rowsize[0] * self.batch_size, max_rowsize[1] * self.batch_size]
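The int branch scales the per-sample budget into a per-batch budget for both the input and output queues; a quick worked example of the rc2 branch:

```python
batch_size = 32
max_rowsize = 16                           # MB per sample, as passed by the user
per_batch = [max_rowsize * batch_size] * 2
print(per_batch)                           # [512, 512] -> MB for input and output rows
```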
@@ -3188,8 +3176,7 @@ class _MPWorker(multiprocessing.Process):
         shared_memory = get_enable_shared_mem()
         self.pipe = Pipe(warning_ctl, shared_memory=shared_memory, max_rowsize=max_rowsize)
         self.check_interval = get_multiprocessing_timeout_interval()
-        super().__init__(target=worker_target(operations, worker_id), name="MapWorker" + str(worker_id),
-                         args=(self.pipe,), daemon=True)
+        super().__init__(target=worker_target(operations, worker_id), args=(self.pipe,), daemon=True)
 
     def execute(self, idx, *args):
         """Acquiring data from a worker in an infinite loop"""
@@ -3216,14 +3203,6 @@ class _MPWorker(multiprocessing.Process):
                 logger.warning("Please `pip install py-spy` to get the stacks of the stuck process.")
             try:
                 res = self.pipe.master_receive()
-                # Because there is no need to copy when creating Tensors in the C++ layer, it reduces the time
-                # from np.ndarray to C++ Tensor creation. However, when using shared memory in multiple processes,
-                # the address of the shared memory will always be passed to subsequent nodes in the dataset pipeline,
-                # and the shared memory will also be written by the current node, causing dirty data to be accessed
-                # by subsequent nodes in the pipeline. So make a memory copy here to solve the problem of
-                # shared memory being contaminated.
-                if get_enable_shared_mem():
-                    res = copy.deepcopy(res)
             except queue.Empty:
                 continue
             if res is None:
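The deleted comment explains the copy being removed: with a zero-copy handoff, downstream pipeline nodes keep a pointer into a shared-memory buffer that this node will overwrite for the next row, so 2.3.0 deep-copied rows fetched over shared memory. A standalone sketch of that hazard (standard library only, not MindSpore code):

```python
import copy
import numpy as np
from multiprocessing import shared_memory

shm = shared_memory.SharedMemory(create=True, size=4 * 4)
buf = np.ndarray((4,), dtype=np.int32, buffer=shm.buf)

buf[:] = [1, 2, 3, 4]
alias = buf                    # "zero-copy" handoff: downstream sees the buffer itself
snapshot = copy.deepcopy(buf)  # defensive copy, as 2.3.0 did

buf[:] = [9, 9, 9, 9]          # producer reuses the buffer for the next row
print(alias)                   # [9 9 9 9] -> dirty data observed downstream
print(snapshot)                # [1 2 3 4] -> the copy stays valid

shm.close()
shm.unlink()
```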
@@ -3584,16 +3563,16 @@ class _PythonMultiprocessing(cde.PythonMultiprocessingRuntime):
         if platform.system().lower() != 'windows':
             self.eof = multiprocessing.Event()
             self.cleaning_process = multiprocessing.Process(target=self._clean_process,
-                                                            name="MapCleanProcess",
                                                             args=(self.ppid, self.workers, self.eof),
+                                                            name="OrphanCleaner",
                                                             daemon=True)
             self.cleaning_process.start()
 
             if get_enable_watchdog():
                 self.eot = threading.Event()
                 self.watch_dog = threading.Thread(target=self._watch_dog,
-                                                  name="MapWatchDog",
                                                   args=(self.eot, self.workers + [self.cleaning_process]),
+                                                  name="WatchDog",
                                                   daemon=True)
                 self.watch_dog.start()
@@ -3672,9 +3651,8 @@ class MapDataset(UnionBaseDataset):
         callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called. Default: ``None``.
         max_rowsize(Union[int, list[int]], optional): Maximum size of row in MB that is used for shared memory
             allocation to copy data between processes, the total occupied shared memory will increase as
-            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. If set to -1,
-            shared memory will be dynamically allocated with the actual size of data. This is only used if
-            ``python_multiprocessing`` is set to True. If it is an int value, it represents ``input_columns`` and
+            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+            used if python_multiprocessing is set to True. If it is an int value, it represents ``input_columns`` and
             ``output_columns`` use this value as the unit to create shared memory. If it is a list, the first element
             represents the ``input_columns`` use this value as the unit to create shared memory, and the second element
             represents ``output_columns`` use this value as the unit to create shared memory. Default: 16.
@@ -4325,7 +4303,6 @@ class TransferDataset(Dataset):
         total_batch = 0
         if hasattr(self.children[0], "__total_batch__"):
             total_batch = self.children[0].__total_batch__
-            check_total_batch(total_batch)
         return cde.DataQueueNode(children[0], self.queue_name, self.device_type, self.device_id, self._send_epoch_end,
                                  total_batch, self._create_data_info_queue)
mindspore/dataset/engine/datasets_user_defined.py

@@ -19,7 +19,6 @@ After declaring the dataset object, you can further apply dataset operations
 (e.g. filter, skip, concat, map, batch) on it.
 """
 import builtins
-import copy
 import errno
 import math
 import os
@@ -116,28 +115,33 @@ def _cpp_sampler_fn_mp(sample_ids, sample_fn):
     return sample_fn.process(sample_ids)
 
 
-def _fill_worker_indices(workers, indices, idx_cursor, worker_to_quit):
+def _fill_worker_indices(workers, indices, idx):
     """
-    Worker index queue filler, fill worker index queue in round robin order or QUIT flag.
+    Worker index queue filler, fill worker index queue in round robin order.
     """
     num_worker = len(workers)
-    if idx_cursor < len(indices):
-        while idx_cursor < len(indices):
+    while idx < len(indices):
+        try:
+            workers[idx % num_worker].put(indices[idx])
+            idx += 1
+        except queue.Full:
+            break
+    return idx
+
+
+def _fill_worker_quit_flag(workers, worker_to_quit):
+    """
+    Worker index queue filler, fill worker index queue with QUIT flag.
+    """
+    num_worker = len(workers)
+    for i in range(num_worker):
+        # just put only one QUIT flag to the sub-thread / sub-process
+        if str(i) not in worker_to_quit:
             try:
-                workers[idx_cursor % num_worker].put(indices[idx_cursor])
-                idx_cursor += 1
+                workers[i].put("QUIT")
+                worker_to_quit[str(i)] = "QUIT"
             except queue.Full:
-                break
-    else:
-        for i in range(num_worker):
-            # just put only one QUIT flag to the sub-thread / sub-process
-            if str(i) not in worker_to_quit:
-                try:
-                    workers[i].put("QUIT")
-                    worker_to_quit[str(i)] = "QUIT"
-                except queue.Full:
-                    continue
-    return idx_cursor, worker_to_quit
+                continue
 
 
 def _convert_row(row):
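The refactor splits index filling and QUIT signalling into two helpers. A quick standalone demo of the round-robin filler; plain `queue.Queue` stands in for the worker index queues, whose `put` in the real code is non-blocking (hence the `queue.Full` handling):

```python
import queue

def fill_worker_indices(workers, indices, idx):
    # same round-robin logic as _fill_worker_indices above
    num_worker = len(workers)
    while idx < len(indices):
        try:
            workers[idx % num_worker].put_nowait(indices[idx])
            idx += 1
        except queue.Full:
            break
    return idx

workers = [queue.Queue(maxsize=2) for _ in range(3)]
idx = fill_worker_indices(workers, list(range(7)), 0)
print(idx)                                # 6: the seventh index must wait
print([list(w.queue) for w in workers])   # [[0, 3], [1, 4], [2, 5]]
```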
@@ -217,11 +221,10 @@ class SamplerFn:
         # res_queue uses shared memory, so its size is max_rowsize, which is defined by the user.
         _check_shm_usage(num_worker, queue_size, 0, max_rowsize)
         self.count = multiprocessing.Value('i', 0)
-        for worker_id in range(num_worker):
+        for _ in range(num_worker):
             if multi_process is True:
                 try:
-                    worker = _GeneratorWorkerMp(dataset, self.eof, max_rowsize, queue_size, self.ppid, self.count,
-                                                worker_id)
+                    worker = _GeneratorWorkerMp(dataset, self.eof, max_rowsize, queue_size, self.ppid, self.count)
                     worker.daemon = True
                     # When multi processes fork a subprocess, the lock of the main process is copied to the subprocess,
                     # which may cause deadlock. Therefore, the subprocess startup is performed in the initialization
@@ -240,18 +243,11 @@ class SamplerFn:
                     self.pids.append(worker.pid)
                     self.need_join = True
             else:
-                worker = _GeneratorWorkerMt(dataset, self.eof, worker_id)
+                worker = _GeneratorWorkerMt(dataset, self.eof)
                 worker.daemon = True
             self.workers.append(worker)
         self._launch_cleanup_worker(multi_process=multi_process)
 
-    def _interval_log(self, i, start_time, wait_count):
-        cost_time = int(time.time()) - start_time
-        if cost_time / self.check_interval >= wait_count:
-            wait_count += 1
-            self._log_stuck_warning(self.workers[i % self.num_worker], cost_time)
-        return wait_count
-
     def process(self, indices):
         """
         The main process, start the child process or child thread, and fill the index queue.
@@ -262,21 +258,19 @@ class SamplerFn:
             if not w.queue_empty():
                 # in failover reset scenario the QUIT flag should be pop first
                 while w.idx_queue.qsize() > 0:
-                    try:
-                        result = w.idx_queue.get(timeout=1)
-                        if result != "QUIT":
-                            raise Exception("The queue of the subprocess is not empty.")
-                    except queue.Empty:
-                        continue
+                    result = w.idx_queue.get()
+                    if result != "QUIT":
+                        raise Exception("The queue of the subprocess is not empty.")
             # Start all workers
             if not w.is_alive():
                 w.start()
 
         # Fill initial index queues
         idx_cursor = 0
+        idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
+
         # worker to quit
         worker_to_quit = {}
-        idx_cursor, worker_to_quit = _fill_worker_indices(self.workers, indices, idx_cursor, worker_to_quit)
 
         # Fetch results
         for i in range(len(indices)):
@@ -298,16 +292,11 @@ class SamplerFn:
                     self._stop_subprocess()
                     return
                 time.sleep(0.1)
-                wait_count = self._interval_log(i, start_time, wait_count)
+                cost_time = int(time.time()) - start_time
+                if cost_time / self.check_interval >= wait_count:
+                    wait_count += 1
+                    self._log_stuck_warning(self.workers[i % self.num_worker], cost_time)
                 result = self.workers[i % self.num_worker].get()
-                # Because there is no need to copy when creating Tensors in the C++ layer, it reduces the time
-                # from np.ndarray to C++ Tensor creation. However, when using shared memory in multiple processes,
-                # the address of the shared memory will always be passed to subsequent nodes in the dataset pipeline,
-                # and the shared memory will also be written by the current node, causing dirty data to be accessed
-                # by subsequent nodes in the pipeline. So make a memory copy here to solve the problem of
-                # shared memory being contaminated.
-                if self.multi_process is True and get_enable_shared_mem():
-                    result = copy.deepcopy(result)
                 if isinstance(result, ExceptionHandler):
                     result.reraise()
             except queue.Empty:
@@ -319,9 +308,11 @@ class SamplerFn:
             if self.eof.is_set():
                 self._stop_subprocess()
                 return
-
-            idx_cursor, worker_to_quit = _fill_worker_indices(self.workers, indices, idx_cursor, worker_to_quit)
-
+            if idx_cursor < len(indices):
+                idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
+            else:
+                # send QUIT flag to workers
+                _fill_worker_quit_flag(self.workers, worker_to_quit)
             yield _convert_row(result)
 
     def _log_stuck_warning(self, worker, waiting_time):
@@ -370,7 +361,6 @@ class SamplerFn:
         if multi_process is True and platform.system().lower() != 'windows':
             _clean_worker_func = _PythonMultiprocessing._clean_process  # pylint: disable=W0212
             self.cleaning_process = multiprocessing.Process(target=_clean_worker_func,
-                                                            name="GeneratorCleanProcess",
                                                             args=(self.ppid, self.workers, self.eof))
             self.cleaning_process.daemon = True
             self.cleaning_process.start()
@@ -378,7 +368,6 @@
         if get_enable_watchdog():
             self.eot = threading.Event()
             self.watch_dog = threading.Thread(target=_PythonMultiprocessing._watch_dog,  # pylint: disable=W0212
-                                              name="GeneratorWatchDog",
                                               args=(self.eot, self.workers + [self.cleaning_process]))
             self.watch_dog.daemon = True
             self.watch_dog.start()
@@ -393,7 +382,7 @@ class SamplerFn:
     def _stop_subprocess(self):
         """Only the main process can call join."""
         if self.need_join is True and self.ppid == os.getpid():
-            if hasattr(self, 'eof') and self.eof is not None:
+            if hasattr(self, 'eof') and self.eof is not None and not self.eof.is_set():
                 self.eof.set()
                 # close the watch dog first
                 self._abort_watchdog()
@@ -405,12 +394,10 @@
                     del w.res_queue
                     del w.idx_queue
 
-                    # let the quit event notify the worker process to exit
-                    w.join(timeout=5)
-                    if w.is_alive():
-                        # if the worker process did not exit, it may hang, try to terminate it
-                        w.terminate()
-                    w.close()
+                    # close all the subprocess workers
+                    w.terminate()
+                    w.join()
+                    w.close()
                 except Exception:  # pylint: disable=W0703
                     # Block all errors when join
                     continue
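The two sides differ in shutdown strategy: 2.3.0 lets the eof event ask workers to exit and only terminates stragglers after a timed join, while rc2 terminates immediately. A generic sketch of the graceful-then-forceful pattern (standard library only, not MindSpore code):

```python
import multiprocessing
import time

def worker(eof):
    while not eof.is_set():      # exit cooperatively when asked
        time.sleep(0.1)

if __name__ == "__main__":
    eof = multiprocessing.Event()
    w = multiprocessing.Process(target=worker, args=(eof,), daemon=True)
    w.start()

    eof.set()                    # ask nicely first
    w.join(timeout=5)            # give the worker a chance to exit
    if w.is_alive():             # it may be stuck in uninterruptible code
        w.terminate()            # then force it
    w.close()
```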
@@ -442,15 +429,10 @@
         self.workers = None
 
     def _abort_watchdog(self):
-        """Let watchdog quit."""
         if hasattr(self, 'eot') and self.eot is not None and not self.eot.is_set():
             self.eot.set()
         if hasattr(self, 'cleaning_process') and self.cleaning_process is not None:
-            # let the quit event notify the cleaning process to exit
-            self.cleaning_process.join(timeout=5)
-            if self.cleaning_process.is_alive():
-                # if the cleaning process did not exit, it may hang, try to terminate it
-                _PythonMultiprocessing._terminate_processes([self.cleaning_process])  # pylint: disable=W0212
+            _PythonMultiprocessing._terminate_processes([self.cleaning_process])  # pylint: disable=W0212
             del self.cleaning_process
         if hasattr(self, 'count'):
             del self.count
@@ -505,7 +487,7 @@ def _generator_worker_loop(dataset, idx_queue, result_queue, eof, is_multiproces
     if is_multiprocessing:
         result_queue.cancel_join_thread()  # Ensure that the process does not hang when exiting
         signal.signal(signal.SIGTERM, partial(_subprocess_handle, eof))
-    while not eof.is_set():
+    while True:
         _ignore_sigint(is_multiprocessing=is_multiprocessing)
 
         # Fetch index, block
@@ -540,7 +522,7 @@
         except Exception:  # pylint: disable=broad-except
             result = ExceptionHandler(where="in GeneratorDataset worker process")
         # Send data, block
-        while not eof.is_set():
+        while True:
             try:
                 result_queue.put(result, timeout=5)
             except queue.Full:
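In 2.3.0 both loops re-check the eof event on every pass, so a worker blocked on a full queue notices shutdown within one put timeout; with `while True`, rc2 relies on the QUIT flag or SIGTERM instead. A condensed sketch of the event-checked variant (illustrative helper, standard library only):

```python
import queue

def send_with_shutdown_check(result_queue, result, eof):
    # Re-checking eof each iteration bounds shutdown latency to one timeout.
    while not eof.is_set():
        try:
            result_queue.put(result, timeout=5)
            return True          # delivered
        except queue.Full:
            continue             # queue still full; check eof and retry
    return False                 # shutting down, drop the row
```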
@@ -559,11 +541,11 @@ class _GeneratorWorkerMt(threading.Thread):
     Worker process for multi-thread Generator.
     """
 
-    def __init__(self, dataset, eof, worker_id):
+    def __init__(self, dataset, eof):
         self.idx_queue = queue.Queue(16)
         self.res_queue = queue.Queue(16)
         super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, False),
-                         name="GeneratorWorkerThread" + str(worker_id))
+                         name="GeneratorWorkerThread")
 
     def put(self, item):
         """
@@ -592,7 +574,7 @@ class _GeneratorWorkerMp(multiprocessing.Process):
     Worker process for multiprocess Generator.
     """
 
-    def __init__(self, dataset, eof, max_rowsize, queue_size, ppid, count, worker_id):
+    def __init__(self, dataset, eof, max_rowsize, queue_size, ppid, count):
         self.idx_queue = multiprocessing.Queue(queue_size)
         if get_enable_shared_mem():
             self.res_queue = _SharedQueue(queue_size, count, max_rowsize=max_rowsize)
@@ -600,7 +582,7 @@ class _GeneratorWorkerMp(multiprocessing.Process):
             self.res_queue = multiprocessing.Queue(queue_size)
         self.idx_queue.cancel_join_thread()  # Ensure that the process does not hang when exiting
         super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, True, ppid),
-                         name="GeneratorWorkerProcess" + str(worker_id))
+                         name="GeneratorWorkerProcess")
 
     def put(self, item):
         """
@@ -673,9 +655,8 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
             option could be beneficial if the Python operation is computational heavy. Default: ``True``.
         max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory
             allocation to copy data between processes, the total occupied shared memory will increase as
-            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. If set to -1,
-            shared memory will be dynamically allocated with the actual size of data. This is only used if
-            ``python_multiprocessing`` is set to True. Default: 16.
+            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+            used if python_multiprocessing is set to True. Default: 16.
 
     Raises:
         RuntimeError: If source raises an exception during execution.
mindspore/dataset/engine/datasets_vision.py

@@ -2799,8 +2799,8 @@ class ManifestDataset(MappableDataset, VisionBaseDataset):
 
     Manifest file contains a list of files included in a dataset, including basic file info such as File name and File
     ID, along with extended file metadata. Manifest is a data format file supported by Huawei Modelarts. For details,
-    see `Specifications for Importing the Manifest File <https://support.huaweicloud.com/intl/en-us/dataprepare-modelarts/
-    dataprepare-modelarts-0015.html>`_ .
+    see `Specifications for Importing the Manifest File <https://support.huaweicloud.com/engineers-modelarts/
+    modelarts_23_0009.html>`_ .
 
     .. code-block::