mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (423)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
  3. mindspore/__init__.py +1 -2
  4. mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
  6. mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
  7. mindspore/_checkparam.py +25 -5
  8. mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +0 -29
  11. mindspore/_extends/parse/namespace.py +2 -2
  12. mindspore/_extends/parse/parser.py +5 -21
  13. mindspore/_extends/parse/resources.py +7 -5
  14. mindspore/_extends/parse/standard_method.py +59 -40
  15. mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
  16. mindspore/amp.py +5 -26
  17. mindspore/bin/cache_admin +0 -0
  18. mindspore/bin/cache_server +0 -0
  19. mindspore/boost/adasum.py +1 -1
  20. mindspore/boost/base.py +1 -1
  21. mindspore/boost/boost_cell_wrapper.py +1 -1
  22. mindspore/boost/grad_freeze.py +2 -2
  23. mindspore/boost/less_batch_normalization.py +6 -9
  24. mindspore/common/__init__.py +1 -8
  25. mindspore/common/_register_for_tensor.py +9 -8
  26. mindspore/common/api.py +65 -275
  27. mindspore/common/dtype.py +4 -8
  28. mindspore/common/dump.py +5 -2
  29. mindspore/common/jit_config.py +1 -1
  30. mindspore/common/lazy_inline.py +2 -14
  31. mindspore/common/parameter.py +15 -14
  32. mindspore/common/recompute.py +5 -20
  33. mindspore/common/sparse_tensor.py +6 -21
  34. mindspore/common/tensor.py +52 -100
  35. mindspore/communication/__init__.py +11 -6
  36. mindspore/communication/management.py +94 -92
  37. mindspore/context.py +18 -180
  38. mindspore/dataset/engine/datasets.py +46 -69
  39. mindspore/dataset/engine/datasets_user_defined.py +53 -72
  40. mindspore/dataset/engine/datasets_vision.py +2 -2
  41. mindspore/dataset/engine/queue.py +38 -56
  42. mindspore/dataset/engine/validators.py +5 -11
  43. mindspore/dataset/vision/__init__.py +5 -5
  44. mindspore/dataset/vision/c_transforms.py +5 -5
  45. mindspore/dataset/vision/py_transforms_util.py +1 -1
  46. mindspore/dataset/vision/transforms.py +46 -591
  47. mindspore/dataset/vision/utils.py +1 -121
  48. mindspore/dataset/vision/validators.py +3 -9
  49. mindspore/hal/__init__.py +1 -7
  50. mindspore/hal/device.py +1 -1
  51. mindspore/include/api/model.h +0 -3
  52. mindspore/include/dataset/vision.h +2 -54
  53. mindspore/include/mindapi/base/types.h +0 -1
  54. mindspore/lib/libdnnl.so.2 +0 -0
  55. mindspore/lib/libmindspore.so +0 -0
  56. mindspore/lib/libmindspore_backend.so +0 -0
  57. mindspore/lib/libmindspore_common.so +0 -0
  58. mindspore/lib/libmindspore_core.so +0 -0
  59. mindspore/lib/libmindspore_glog.so.0 +0 -0
  60. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  61. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  62. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  63. mindspore/lib/libmindspore_shared_lib.so +0 -0
  64. mindspore/lib/libmpi_adapter.so +0 -0
  65. mindspore/lib/libmpi_collective.so +0 -0
  66. mindspore/lib/libnnacl.so +0 -0
  67. mindspore/lib/libopencv_core.so.4.5 +0 -0
  68. mindspore/lib/libps_cache.so +0 -0
  69. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
  70. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  71. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  72. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  73. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
  76. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
  77. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
  78. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  79. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
  80. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
  81. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
  82. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  83. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  84. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
  85. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
  86. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  87. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  88. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
  89. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  90. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  91. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  92. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  93. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  94. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  164. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  165. mindspore/mindrecord/filewriter.py +2 -2
  166. mindspore/mint/__init__.py +40 -720
  167. mindspore/mint/nn/__init__.py +7 -89
  168. mindspore/mint/nn/functional.py +16 -165
  169. mindspore/mint/optim/adamw.py +16 -15
  170. mindspore/nn/__init__.py +2 -0
  171. mindspore/nn/cell.py +98 -97
  172. mindspore/nn/extend/basic.py +2 -2
  173. mindspore/nn/extend/embedding.py +1 -1
  174. mindspore/nn/extend/layer/normalization.py +5 -7
  175. mindspore/nn/generator.py +297 -0
  176. mindspore/nn/layer/activation.py +3 -4
  177. mindspore/nn/layer/basic.py +16 -79
  178. mindspore/nn/layer/conv.py +8 -17
  179. mindspore/nn/layer/embedding.py +4 -1
  180. mindspore/nn/layer/math.py +1 -1
  181. mindspore/nn/layer/normalization.py +1 -1
  182. mindspore/nn/layer/pooling.py +0 -5
  183. mindspore/nn/layer/rnn_cells.py +2 -2
  184. mindspore/nn/loss/loss.py +19 -19
  185. mindspore/nn/optim/adasum.py +1 -1
  186. mindspore/nn/optim/sgd.py +2 -3
  187. mindspore/nn/probability/distribution/exponential.py +1 -1
  188. mindspore/nn/probability/distribution/geometric.py +1 -1
  189. mindspore/nn/probability/distribution/logistic.py +1 -1
  190. mindspore/nn/wrap/cell_wrapper.py +1 -25
  191. mindspore/nn/wrap/loss_scale.py +1 -24
  192. mindspore/numpy/array_ops.py +1 -5
  193. mindspore/numpy/dtypes.py +3 -3
  194. mindspore/numpy/math_ops.py +8 -8
  195. mindspore/ops/__init__.py +1 -1
  196. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
  197. mindspore/ops/_vmap/vmap_array_ops.py +0 -27
  198. mindspore/ops/_vmap/vmap_math_ops.py +1 -29
  199. mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
  200. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
  201. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
  202. mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
  203. mindspore/ops/auto_generate/gen_extend_func.py +27 -603
  204. mindspore/ops/auto_generate/gen_ops_def.py +203 -993
  205. mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
  206. mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
  207. mindspore/ops/composite/base.py +6 -3
  208. mindspore/ops/composite/math_ops.py +1 -1
  209. mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
  210. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  211. mindspore/ops/extend/__init__.py +3 -2
  212. mindspore/ops/extend/array_func.py +51 -10
  213. mindspore/ops/extend/nn_func.py +78 -2
  214. mindspore/ops/function/__init__.py +13 -8
  215. mindspore/ops/function/array_func.py +179 -455
  216. mindspore/ops/function/clip_func.py +1 -1
  217. mindspore/ops/function/grad/grad_func.py +3 -3
  218. mindspore/ops/function/math_func.py +103 -117
  219. mindspore/ops/function/nn_func.py +163 -275
  220. mindspore/ops/function/other_func.py +2 -2
  221. mindspore/ops/function/random_func.py +69 -202
  222. mindspore/ops/function/sparse_func.py +4 -4
  223. mindspore/ops/functional.py +327 -332
  224. mindspore/ops/operations/__init__.py +3 -13
  225. mindspore/ops/operations/_grad_ops.py +27 -3
  226. mindspore/ops/operations/_inner_ops.py +356 -53
  227. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  228. mindspore/ops/operations/_tensor_array.py +8 -8
  229. mindspore/ops/operations/array_ops.py +65 -82
  230. mindspore/ops/operations/comm_ops.py +93 -784
  231. mindspore/ops/operations/custom_ops.py +28 -51
  232. mindspore/ops/operations/debug_ops.py +4 -4
  233. mindspore/ops/operations/inner_ops.py +2 -2
  234. mindspore/ops/operations/manually_defined/ops_def.py +4 -304
  235. mindspore/ops/operations/math_ops.py +50 -3
  236. mindspore/ops/operations/nn_ops.py +247 -14
  237. mindspore/ops/operations/other_ops.py +3 -3
  238. mindspore/ops/operations/random_ops.py +1 -1
  239. mindspore/ops/operations/sparse_ops.py +1 -1
  240. mindspore/ops/primitive.py +8 -9
  241. mindspore/ops/silent_check.py +5 -5
  242. mindspore/ops_generate/arg_dtype_cast.py +9 -2
  243. mindspore/ops_generate/arg_handler.py +0 -26
  244. mindspore/ops_generate/gen_aclnn_implement.py +4 -1
  245. mindspore/ops_generate/gen_ops.py +4 -26
  246. mindspore/ops_generate/gen_pyboost_func.py +12 -41
  247. mindspore/ops_generate/gen_utils.py +0 -21
  248. mindspore/ops_generate/pyboost_utils.py +2 -7
  249. mindspore/ops_generate/template.py +0 -1
  250. mindspore/parallel/_auto_parallel_context.py +1 -21
  251. mindspore/parallel/_tensor.py +5 -0
  252. mindspore/parallel/_transformer/transformer.py +1 -1
  253. mindspore/parallel/_utils.py +1 -15
  254. mindspore/parallel/algo_parameter_config.py +3 -1
  255. mindspore/parallel/checkpoint_transform.py +9 -12
  256. mindspore/parallel/cluster/process_entity/_api.py +29 -28
  257. mindspore/parallel/cluster/process_entity/_utils.py +3 -13
  258. mindspore/parallel/cluster/run.py +16 -13
  259. mindspore/parallel/parameter_broadcast.py +2 -2
  260. mindspore/parallel/shard.py +17 -31
  261. mindspore/profiler/__init__.py +2 -3
  262. mindspore/profiler/common/util.py +2 -107
  263. mindspore/profiler/envprofiling.py +1 -1
  264. mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
  265. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
  266. mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
  267. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
  268. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
  269. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
  271. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
  272. mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
  273. mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
  274. mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
  275. mindspore/profiler/parser/minddata_parser.py +3 -72
  276. mindspore/profiler/profiling.py +59 -176
  277. mindspore/rewrite/api/node.py +1 -1
  278. mindspore/rewrite/common/namespace.py +5 -5
  279. mindspore/rewrite/parsers/assign_parser.py +0 -2
  280. mindspore/rewrite/parsers/class_def_parser.py +4 -8
  281. mindspore/run_check/_check_version.py +1 -1
  282. mindspore/scipy/fft.py +3 -1
  283. mindspore/scipy/linalg.py +3 -2
  284. mindspore/scipy/ops.py +3 -5
  285. mindspore/scipy/optimize/__init__.py +2 -2
  286. mindspore/train/__init__.py +4 -4
  287. mindspore/train/anf_ir_pb2.py +2 -8
  288. mindspore/train/callback/__init__.py +2 -5
  289. mindspore/train/callback/_backup_and_restore.py +2 -2
  290. mindspore/train/callback/_checkpoint.py +16 -104
  291. mindspore/train/callback/_landscape.py +1 -1
  292. mindspore/train/callback/_time_monitor.py +1 -1
  293. mindspore/train/data_sink.py +4 -5
  294. mindspore/train/dataset_helper.py +20 -45
  295. mindspore/train/model.py +38 -266
  296. mindspore/train/serialization.py +105 -256
  297. mindspore/train/summary/_summary_adapter.py +1 -1
  298. mindspore/version.py +1 -1
  299. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
  300. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
  301. mindspore/_extends/pijit/__init__.py +0 -23
  302. mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
  303. mindspore/common/file_system.py +0 -48
  304. mindspore/common/generator.py +0 -260
  305. mindspore/common/no_inline.py +0 -54
  306. mindspore/common/np_dtype.py +0 -25
  307. mindspore/communication/comm_func.py +0 -1140
  308. mindspore/hal/memory.py +0 -326
  309. mindspore/lib/libavcodec.so.59 +0 -0
  310. mindspore/lib/libavdevice.so.59 +0 -0
  311. mindspore/lib/libavfilter.so.8 +0 -0
  312. mindspore/lib/libavformat.so.59 +0 -0
  313. mindspore/lib/libavutil.so.57 +0 -0
  314. mindspore/lib/libmindspore_np_dtype.so +0 -0
  315. mindspore/lib/libswresample.so.4 +0 -0
  316. mindspore/lib/libswscale.so.6 +0 -0
  317. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
  318. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
  319. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
  320. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
  321. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
  322. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
  323. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
  324. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
  325. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
  326. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
  327. mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
  328. mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
  329. mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
  330. mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
  331. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  332. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
  333. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
  334. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
  335. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
  336. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
  337. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
  338. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  339. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  340. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
  341. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
  342. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
  343. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
  344. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
  345. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  346. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
  347. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
  348. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
  349. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
  350. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  351. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  352. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  353. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  354. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  355. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  356. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  357. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  358. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  359. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  360. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  361. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  362. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  363. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  364. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  365. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  366. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  367. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  368. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  369. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  370. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  371. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  372. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  373. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  374. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  375. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  376. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  377. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  378. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  379. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  380. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  381. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  382. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  383. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  384. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  385. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  386. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
  387. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
  388. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
  389. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
  390. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
  391. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
  392. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  393. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  394. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  395. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
  396. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
  397. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  398. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
  399. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
  400. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
  401. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
  402. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
  403. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
  404. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
  405. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
  406. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
  407. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  408. mindspore/mint/linalg/__init__.py +0 -22
  409. mindspore/nn/layer/embedding_service.py +0 -531
  410. mindspore/nn/layer/embedding_service_layer.py +0 -393
  411. mindspore/ops/function/reshard_func.py +0 -102
  412. mindspore/ops/operations/_infer_ops.py +0 -19
  413. mindspore/ops/operations/reshard_ops.py +0 -53
  414. mindspore/profiler/common/process_pool.py +0 -41
  415. mindspore/profiler/common/singleton.py +0 -28
  416. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  417. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  418. mindspore/train/callback/_cluster_monitor.py +0 -201
  419. mindspore/train/callback/_flops_collector.py +0 -238
  420. mindspore/train/callback/_mindio_ttp.py +0 -443
  421. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  422. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  423. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
1
- # Copyright 2019-2024 Huawei Technologies Co., Ltd
1
+ # Copyright 2019-2022 Huawei Technologies Co., Ltd
2
2
  #
3
3
  # Licensed under the Apache License, Version 2.0 (the "License");
4
4
  # you may not use this file except in compliance with the License.
@@ -101,22 +101,6 @@ class ImageTensorOperation(TensorOperation):
101
101
  raise NotImplementedError("ImageTensorOperation has to implement parse() method.")
102
102
 
103
103
 
104
- class VideoTensorOperation(TensorOperation):
105
- """
106
- Base class of Video Tensor Ops
107
- """
108
-
109
- def __call__(self, *input_tensor_list):
110
- for tensor in input_tensor_list:
111
- if not isinstance(tensor, np.ndarray):
112
- raise TypeError(
113
- "Input should be ndarray, got {}.".format(type(tensor)))
114
- return super().__call__(*input_tensor_list)
115
-
116
- def parse(self):
117
- raise NotImplementedError("VideoTensorOperation has to implement parse() method.")
118
-
119
-
120
104
  class AdjustBrightness(ImageTensorOperation, PyTensorOperation):
121
105
  """
122
106
  Adjust the brightness of the input image.
@@ -647,8 +631,6 @@ class AdjustSharpness(ImageTensorOperation):
647
631
  """
648
632
  Adjust the sharpness of the input image.
649
633
 
650
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
651
-
652
634
  Args:
653
635
  sharpness_factor (float): How much to adjust the sharpness, must be
654
636
  non negative. ``0`` gives a blurred image, ``1`` gives the
@@ -660,7 +642,7 @@ class AdjustSharpness(ImageTensorOperation):
660
642
  RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.
661
643
 
662
644
  Supported Platforms:
663
- ``CPU`` ``Ascend``
645
+ ``CPU``
664
646
 
665
647
  Examples:
666
648
  >>> import numpy as np
@@ -695,56 +677,8 @@ class AdjustSharpness(ImageTensorOperation):
695
677
  self.sharpness_factor = sharpness_factor
696
678
  self.implementation = Implementation.C
697
679
 
698
- @check_device_target
699
- def device(self, device_target="CPU"):
700
- """
701
- Set the device for the current operator execution.
702
-
703
- - When the device is Ascend, input type supports `uint8` or `float32` , input channel supports 1 and 3.
704
- The input data has a height limit of [4, 8192] and a width limit of [6, 4096].
705
-
706
- Args:
707
- device_target (str, optional): The operator will be executed on this device. Currently supports
708
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
709
-
710
- Raises:
711
- TypeError: If `device_target` is not of type str.
712
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
713
-
714
- Supported Platforms:
715
- ``CPU`` ``Ascend``
716
-
717
- Examples:
718
- >>> import numpy as np
719
- >>> import mindspore.dataset as ds
720
- >>> import mindspore.dataset.vision as vision
721
- >>>
722
- >>> # Use the transform in dataset pipeline mode
723
- >>> # create a dataset that reads all files in dataset_dir with 8 threads
724
- >>> data = np.random.randint(0, 255, size=(1, 100, 100, 3)).astype(np.uint8)
725
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
726
- >>> transforms_list = [vision.AdjustSharpness(sharpness_factor=2.0).device("Ascend")]
727
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
728
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
729
- ... print(item["image"].shape, item["image"].dtype)
730
- ... break
731
- (100, 100, 3) uint8
732
- >>>
733
- >>> # Use the transform in eager mode
734
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
735
- >>> output = vision.AdjustSharpness(sharpness_factor=0).device("Ascend")(data)
736
- >>> print(output.shape, output.dtype)
737
- (100, 100, 3) uint8
738
-
739
- Tutorial Examples:
740
- - `Illustration of vision transforms
741
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
742
- """
743
- self.device_target = device_target
744
- return self
745
-
746
680
  def parse(self):
747
- return cde.AdjustSharpnessOperation(self.sharpness_factor, self.device_target)
681
+ return cde.AdjustSharpnessOperation(self.sharpness_factor)
748
682
 
749
683
 
750
684
  class Affine(ImageTensorOperation):
@@ -778,7 +712,7 @@ class Affine(ImageTensorOperation):
778
712
  RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.
779
713
 
780
714
  Supported Platforms:
781
- ``CPU`` ``Ascend``
715
+ ``CPU``
782
716
 
783
717
  Examples:
784
718
  >>> import numpy as np
@@ -874,8 +808,6 @@ class Affine(ImageTensorOperation):
874
808
  <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
875
809
  """
876
810
  self.device_target = device_target
877
- if self.resample not in [Inter.BILINEAR, Inter.NEAREST] and self.device_target == "Ascend":
878
- raise RuntimeError("Invalid interpolation mode, only support BILINEAR and NEAREST.")
879
811
  return self
880
812
 
881
813
  def parse(self):
@@ -966,8 +898,6 @@ class AutoContrast(ImageTensorOperation, PyTensorOperation):
966
898
  Apply automatic contrast on input image. This operation calculates histogram of image, reassign cutoff percent
967
899
  of the lightest pixels from histogram to 255, and reassign cutoff percent of the darkest pixels from histogram to 0.
968
900
 
969
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
970
-
971
901
  Args:
972
902
  cutoff (float, optional): Percent of lightest and darkest pixels to cut off from
973
903
  the histogram of input image. The value must be in the range [0.0, 50.0]. Default: ``0.0``.
@@ -982,7 +912,7 @@ class AutoContrast(ImageTensorOperation, PyTensorOperation):
982
912
  RuntimeError: If given tensor shape is not <H, W> or <H, W, C>.
983
913
 
984
914
  Supported Platforms:
985
- ``CPU`` ``Ascend``
915
+ ``CPU``
986
916
 
987
917
  Examples:
988
918
  >>> import numpy as np
@@ -1021,56 +951,8 @@ class AutoContrast(ImageTensorOperation, PyTensorOperation):
1021
951
  self.ignore = ignore
1022
952
  self.random = False
1023
953
 
1024
- @check_device_target
1025
- def device(self, device_target="CPU"):
1026
- """
1027
- Set the device for the current operator execution.
1028
-
1029
- - When the device is Ascend, input type supports `uint8` or `float32` , input channel supports 1 and 3.
1030
- If the data type is float32, the expected input value is in the range [0, 1].
1031
- The input data has a height limit of [4, 8192] and a width limit of [6, 4096].
1032
-
1033
- Args:
1034
- device_target (str, optional): The operator will be executed on this device. Currently supports
1035
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
1036
-
1037
- Raises:
1038
- TypeError: If `device_target` is not of type str.
1039
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
1040
-
1041
- Supported Platforms:
1042
- ``CPU`` ``Ascend``
1043
-
1044
- Examples:
1045
- >>> import numpy as np
1046
- >>> import mindspore.dataset as ds
1047
- >>> import mindspore.dataset.vision as vision
1048
- >>>
1049
- >>> # Use the transform in dataset pipeline mode
1050
- >>> data = np.random.randint(0, 255, size=(1, 100, 100, 3)).astype(np.uint8)
1051
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
1052
- >>> transforms_list = [vision.AutoContrast(cutoff=10.0, ignore=[10, 20]).device("Ascend")]
1053
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
1054
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1055
- ... print(item["image"].shape, item["image"].dtype)
1056
- ... break
1057
- (100, 100, 3) uint8
1058
- >>>
1059
- >>> # Use the transform in eager mode
1060
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
1061
- >>> output = vision.AutoContrast(cutoff=10.0, ignore=[10, 20]).device("Ascend")(data)
1062
- >>> print(output.shape, output.dtype)
1063
- (100, 100, 3) uint8
1064
-
1065
- Tutorial Examples:
1066
- - `Illustration of vision transforms
1067
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
1068
- """
1069
- self.device_target = device_target
1070
- return self
1071
-
1072
954
  def parse(self):
1073
- return cde.AutoContrastOperation(self.cutoff, self.ignore, self.device_target)
955
+ return cde.AutoContrastOperation(self.cutoff, self.ignore)
1074
956
 
1075
957
  def _execute_py(self, img):
1076
958
  """
@@ -1244,8 +1126,6 @@ class ConvertColor(ImageTensorOperation):
1244
1126
  """
1245
1127
  Change the color space of the image.
1246
1128
 
1247
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
1248
-
1249
1129
  Args:
1250
1130
  convert_mode (ConvertMode): The mode of image channel conversion.
1251
1131
 
@@ -1294,7 +1174,7 @@ class ConvertColor(ImageTensorOperation):
1294
1174
  RuntimeError: If given tensor shape is not <H, W> or <H, W, C>.
1295
1175
 
1296
1176
  Supported Platforms:
1297
- ``CPU`` ``Ascend``
1177
+ ``CPU``
1298
1178
 
1299
1179
  Examples:
1300
1180
  >>> import numpy as np
@@ -1322,10 +1202,10 @@ class ConvertColor(ImageTensorOperation):
1322
1202
  (100, 100, 3) uint8
1323
1203
  >>>
1324
1204
  >>> # Use the transform in eager mode
1325
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
1205
+ >>> data = np.array([[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], dtype=np.uint8).reshape((2, 2, 3))
1326
1206
  >>> output = vision.ConvertColor(vision.ConvertMode.COLOR_RGB2GRAY)(data)
1327
1207
  >>> print(output.shape, output.dtype)
1328
- (100, 100) uint8
1208
+ (2, 2) uint8
1329
1209
 
1330
1210
  Tutorial Examples:
1331
1211
  - `Illustration of vision transforms
@@ -1338,55 +1218,8 @@ class ConvertColor(ImageTensorOperation):
1338
1218
  self.convert_mode = convert_mode
1339
1219
  self.implementation = Implementation.C
1340
1220
 
1341
- @check_device_target
1342
- def device(self, device_target="CPU"):
1343
- """
1344
- Set the device for the current operator execution.
1345
-
1346
- - When the device is Ascend, input type only supports `uint8` , input channel supports 1 and 3.
1347
- The input data has a height limit of [4, 8192] and a width limit of [6, 4096].
1348
-
1349
- Args:
1350
- device_target (str, optional): The operator will be executed on this device. Currently supports
1351
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
1352
-
1353
- Raises:
1354
- TypeError: If `device_target` is not of type str.
1355
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
1356
-
1357
- Supported Platforms:
1358
- ``CPU`` ``Ascend``
1359
-
1360
- Examples:
1361
- >>> import numpy as np
1362
- >>> import mindspore.dataset as ds
1363
- >>> import mindspore.dataset.vision as vision
1364
- >>>
1365
- >>> # Use the transform in dataset pipeline mode
1366
- >>> data = np.random.randint(0, 255, size=(1, 100, 100, 3)).astype(np.uint8)
1367
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
1368
- >>> transforms_list = [vision.ConvertColor(vision.ConvertMode.COLOR_RGB2BGR).device("Ascend")]
1369
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
1370
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1371
- ... print(item["image"].shape, item["image"].dtype)
1372
- ... break
1373
- (100, 100, 3) uint8
1374
- >>>
1375
- >>> # Use the transform in eager mode
1376
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
1377
- >>> output = vision.ConvertColor(vision.ConvertMode.COLOR_RGB2BGR).device("Ascend")(data)
1378
- >>> print(output.shape, output.dtype)
1379
- (100, 100, 3) uint8
1380
-
1381
- Tutorial Examples:
1382
- - `Illustration of vision transforms
1383
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
1384
- """
1385
- self.device_target = device_target
1386
- return self
1387
-
1388
1221
  def parse(self):
1389
- return cde.ConvertColorOperation(ConvertMode.to_c_type(self.convert_mode), self.device_target)
1222
+ return cde.ConvertColorOperation(ConvertMode.to_c_type(self.convert_mode))
1390
1223
 
1391
1224
 
1392
1225
  class Crop(ImageTensorOperation):
@@ -1411,7 +1244,7 @@ class Crop(ImageTensorOperation):
1411
1244
  RuntimeError: If given tensor shape is not <H, W> or <H, W, C>.
1412
1245
 
1413
1246
  Supported Platforms:
1414
- ``CPU`` ``Ascend``
1247
+ ``CPU``
1415
1248
 
1416
1249
  Examples:
1417
1250
  >>> import numpy as np
@@ -1821,67 +1654,15 @@ class Decode(ImageTensorOperation, PyTensorOperation):
1821
1654
  return util.decode(img)
1822
1655
 
1823
1656
 
1824
- class DecodeVideo(VideoTensorOperation):
1825
- """
1826
- Decode the input raw video bytes.
1827
-
1828
- Supported video formats: AVI, H264, H265, MOV, MP4, WMV.
1829
-
1830
- Raises:
1831
- RuntimeError: If the input ndarray is not 1D array.
1832
- RuntimeError: If data type of the elements is not uint8.
1833
- RuntimeError: If the input ndarray is empty.
1834
-
1835
- Supported Platforms:
1836
- ``CPU``
1837
-
1838
- Examples:
1839
- >>> import numpy as np
1840
- >>> import mindspore.dataset as ds
1841
- >>> import mindspore.dataset.vision as vision
1842
- >>>
1843
- >>> # Use the transform in dataset pipeline mode
1844
- >>> # Custom class to generate and read video dataset
1845
- >>> class VideoDataset:
1846
- ... def __init__(self, file_list):
1847
- ... self.file_list = file_list
1848
- ...
1849
- ... def __getitem__(self, index):
1850
- ... filename = self.file_list[index]
1851
- ... return np.fromfile(filename, np.uint8)
1852
- ...
1853
- ... def __len__(self):
1854
- ... return len(self.file_list)
1855
- >>>
1856
- >>> dataset = ds.GeneratorDataset(VideoDataset(["/path/to/video/file"]), ["data"])
1857
- >>> decode_video = vision.DecodeVideo()
1858
- >>> dataset = dataset.map(operations=[decode_video], input_columns=["data"], output_columns=["video", "audio"])
1859
- >>>
1860
- >>> # Use the transform in eager mode
1861
- >>> filename = "/path/to/video/file"
1862
- >>> raw_ndarray = np.fromfile(filename, np.uint8)
1863
- >>> mindspore_output = vision.DecodeVideo()(raw_ndarray)
1864
- """
1865
-
1866
- def __init__(self):
1867
- super().__init__()
1868
- self.implementation = Implementation.C
1869
-
1870
- def parse(self):
1871
- return cde.DecodeVideoOperation()
1872
-
1873
-
1874
1657
  class Equalize(ImageTensorOperation, PyTensorOperation):
1875
1658
  """
1876
1659
  Apply histogram equalization on input image.
1877
1660
 
1878
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
1879
-
1880
1661
  Raises:
1881
1662
  RuntimeError: If given tensor shape is not <H, W> or <H, W, C>.
1882
1663
 
1883
1664
  Supported Platforms:
1884
- ``CPU`` ``Ascend``
1665
+ ``CPU``
1885
1666
 
1886
1667
  Examples:
1887
1668
  >>> import numpy as np
@@ -1913,55 +1694,8 @@ class Equalize(ImageTensorOperation, PyTensorOperation):
1913
1694
  super().__init__()
1914
1695
  self.random = False
1915
1696
 
1916
- @check_device_target
1917
- def device(self, device_target="CPU"):
1918
- """
1919
- Set the device for the current operator execution.
1920
-
1921
- - When the device is Ascend, input type only supports `uint8` , input channel supports 1 and 3.
1922
- The input data has a height limit of [4, 8192] and a width limit of [6, 4096].
1923
-
1924
- Args:
1925
- device_target (str, optional): The operator will be executed on this device. Currently supports
1926
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
1927
-
1928
- Raises:
1929
- TypeError: If `device_target` is not of type str.
1930
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
1931
-
1932
- Supported Platforms:
1933
- ``CPU`` ``Ascend``
1934
-
1935
- Examples:
1936
- >>> import numpy as np
1937
- >>> import mindspore.dataset as ds
1938
- >>> import mindspore.dataset.vision as vision
1939
- >>>
1940
- >>> # Use the transform in dataset pipeline mode
1941
- >>> data = np.random.randint(0, 255, size=(1, 100, 100, 3)).astype(np.uint8)
1942
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
1943
- >>> transforms_list = [vision.Equalize().device("Ascend")]
1944
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
1945
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1946
- ... print(item["image"].shape, item["image"].dtype)
1947
- ... break
1948
- (100, 100, 3) uint8
1949
- >>>
1950
- >>> # Use the transform in eager mode
1951
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
1952
- >>> output = vision.Equalize().device("Ascend")(data)
1953
- >>> print(output.shape, output.dtype)
1954
- (100, 100, 3) uint8
1955
-
1956
- Tutorial Examples:
1957
- - `Illustration of vision transforms
1958
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
1959
- """
1960
- self.device_target = device_target
1961
- return self
1962
-
1963
1697
  def parse(self):
1964
- return cde.EqualizeOperation(self.device_target)
1698
+ return cde.EqualizeOperation()
1965
1699
 
1966
1700
  def _execute_py(self, img):
1967
1701
  """
@@ -1981,16 +1715,14 @@ class Erase(ImageTensorOperation):
1981
1715
  """
1982
1716
  Erase the input image with given value.
1983
1717
 
1984
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
1985
-
1986
1718
  Args:
1987
1719
  top (int): Vertical ordinate of the upper left corner of erased region.
1988
1720
  left (int): Horizontal ordinate of the upper left corner of erased region.
1989
1721
  height (int): Height of erased region.
1990
1722
  width (int): Width of erased region.
1991
- value (Union[float, Sequence[float, float, float]], optional): Pixel value used to pad the erased area.
1992
- Default: ``0``. If float is provided, it will be used for all RGB channels.
1993
- If Sequence[float, float, float] is provided, it will be used for R, G, B channels respectively.
1723
+ value (Union[int, Sequence[int, int, int]], optional): Pixel value used to pad the erased area.
1724
+ Default: ``0``. If int is provided, it will be used for all RGB channels.
1725
+ If Sequence[int, int, int] is provided, it will be used for R, G, B channels respectively.
1994
1726
  inplace (bool, optional): Whether to apply erasing inplace. Default: ``False``.
1995
1727
 
1996
1728
  Raises:
@@ -2002,13 +1734,13 @@ class Erase(ImageTensorOperation):
2002
1734
  ValueError: If `height` is not positive.
2003
1735
  TypeError: If `width` is not of type int.
2004
1736
  ValueError: If `width` is not positive.
2005
- TypeError: If `value` is not of type float or Sequence[float, float, float].
1737
+ TypeError: If `value` is not of type int or Sequence[int, int, int].
2006
1738
  ValueError: If `value` is not in range of [0, 255].
2007
1739
  TypeError: If `inplace` is not of type bool.
2008
1740
  RuntimeError: If shape of the input image is not <H, W, C>.
2009
1741
 
2010
1742
  Supported Platforms:
2011
- ``CPU`` ``Ascend``
1743
+ ``CPU``
2012
1744
 
2013
1745
  Examples:
2014
1746
  >>> import numpy as np
@@ -2026,10 +1758,10 @@ class Erase(ImageTensorOperation):
2026
1758
  (100, 100, 3) uint8
2027
1759
  >>>
2028
1760
  >>> # Use the transform in eager mode
2029
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
2030
- >>> output = vision.Erase(10, 10, 10, 10)(data)
1761
+ >>> data = np.array([[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], dtype=np.uint8).reshape((2, 2, 3))
1762
+ >>> output = vision.Erase(0, 0, 2, 1)(data)
2031
1763
  >>> print(output.shape, output.dtype)
2032
- (100, 100, 3) uint8
1764
+ (2, 2, 3) uint8
2033
1765
 
2034
1766
  Tutorial Examples:
2035
1767
  - `Illustration of vision transforms
@@ -2043,62 +1775,13 @@ class Erase(ImageTensorOperation):
2043
1775
  self.left = left
2044
1776
  self.height = height
2045
1777
  self.width = width
2046
- if isinstance(value, (int, float)):
2047
- value = tuple([value])
1778
+ if isinstance(value, int):
1779
+ value = tuple([value] * 3)
2048
1780
  self.value = value
2049
1781
  self.inplace = inplace
2050
1782
 
2051
- @check_device_target
2052
- def device(self, device_target="CPU"):
2053
- """
2054
- Set the device for the current operator execution.
2055
-
2056
- - When the device is Ascend, input type supports `uint8` or `float32` , input channel supports 1 and 3.
2057
- The input data has a height limit of [4, 8192] and a width limit of [6, 4096].
2058
- The inplace parameter is not supported.
2059
-
2060
- Args:
2061
- device_target (str, optional): The operator will be executed on this device. Currently supports
2062
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
2063
-
2064
- Raises:
2065
- TypeError: If `device_target` is not of type str.
2066
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
2067
-
2068
- Supported Platforms:
2069
- ``CPU`` ``Ascend``
2070
-
2071
- Examples:
2072
- >>> import numpy as np
2073
- >>> import mindspore.dataset as ds
2074
- >>> import mindspore.dataset.vision as vision
2075
- >>>
2076
- >>> # Use the transform in dataset pipeline mode
2077
- >>> data = np.random.randint(0, 255, size=(1, 100, 100, 3)).astype(np.uint8)
2078
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
2079
- >>> transforms_list = [vision.Erase(10, 10, 10, 10, (100, 100, 100)).device("Ascend")]
2080
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
2081
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
2082
- ... print(item["image"].shape, item["image"].dtype)
2083
- ... break
2084
- (100, 100, 3) uint8
2085
- >>>
2086
- >>> # Use the transform in eager mode
2087
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
2088
- >>> output = vision.Erase(10, 10, 10, 10, (100, 100, 100)).device("Ascend")(data)
2089
- >>> print(output.shape, output.dtype)
2090
- (100, 100, 3) uint8
2091
-
2092
- Tutorial Examples:
2093
- - `Illustration of vision transforms
2094
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
2095
- """
2096
- self.device_target = device_target
2097
- return self
2098
-
2099
1783
  def parse(self):
2100
- return cde.EraseOperation(self.top, self.left, self.height, self.width, self.value, self.inplace,
2101
- self.device_target)
1784
+ return cde.EraseOperation(self.top, self.left, self.height, self.width, self.value, self.inplace)
2102
1785
 
2103
1786
 
2104
1787
  class FiveCrop(PyTensorOperation):
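
The Erase hunks above show two behavioral differences between the compared versions: the `value` argument is documented as an int (or a sequence of three ints) rather than a float, a scalar value is now broadcast to all three channels via `tuple([value] * 3)`, and the Ascend `device()` method is absent, so `parse()` builds `EraseOperation` without a `device_target`. A minimal eager-mode sketch based on the rc2 docstring example in the hunk (shapes and dtypes come from the diff; the array contents are only illustrative):

    import numpy as np
    import mindspore.dataset.vision as vision

    # 2x2 RGB image, matching the rc2 docstring example above.
    data = np.array([[0, 1, 2, 3, 4, 5],
                     [0, 1, 2, 3, 4, 5]], dtype=np.uint8).reshape((2, 2, 3))

    # Erase a 2x1 region at the top-left corner; an int `value`
    # is expanded to (value, value, value) for the three channels.
    output = vision.Erase(top=0, left=0, height=2, width=1, value=0)(data)
    print(output.shape, output.dtype)   # (2, 2, 3) uint8
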
@@ -2163,10 +1846,8 @@ class FiveCrop(PyTensorOperation):
2163
1846
  >>> img.save("./2.jpg")
2164
1847
  >>> data = Image.open("./2.jpg")
2165
1848
  >>> output = vision.FiveCrop(size=20)(data)
2166
- >>> for cropped_img in output:
2167
- ... print(cropped_img.size)
2168
- ... break
2169
- (20, 20)
1849
+ >>> print(np.array(output).shape, np.array(output).dtype)
1850
+ (5,) object
2170
1851
  >>> os.remove("./2.jpg")
2171
1852
 
2172
1853
 
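
The FiveCrop hunk only swaps the doctest output: instead of printing the size of the first crop, the rc2 docstring converts the result to a NumPy object array of length 5, one entry per crop (four corners plus the center). A short sketch of the same call, assuming a PIL image input as in the docstring:

    import numpy as np
    from PIL import Image
    import mindspore.dataset.vision as vision

    # A 32x32 RGB PIL image; FiveCrop operates on PIL images.
    img = Image.fromarray(np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8))

    crops = vision.FiveCrop(size=20)(img)       # four corner crops + center crop
    print(len(crops))                            # 5
    print(np.array(crops, dtype=object).shape)   # (5,), matching the rc2 doctest
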
@@ -2223,7 +1904,7 @@ class GaussianBlur(ImageTensorOperation):
2223
1904
  RuntimeError: If given tensor shape is not <H, W> or <H, W, C>.
2224
1905
 
2225
1906
  Supported Platforms:
2226
- ``CPU`` ``Ascend``
1907
+ ``CPU``
2227
1908
 
2228
1909
  Examples:
2229
1910
  >>> import numpy as np
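
The GaussianBlur hunk above only narrows the documented platforms from CPU and Ascend to CPU; a CPU-only eager call is unchanged between the two versions. A minimal sketch (the kernel size of 3 and sigma of 3 are arbitrary illustrative values):

    import numpy as np
    import mindspore.dataset.vision as vision

    data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
    # Blur with a 3x3 Gaussian kernel and sigma=3; shape and dtype are preserved.
    output = vision.GaussianBlur(3, 3)(data)
    print(output.shape, output.dtype)   # (100, 100, 3) uint8
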
@@ -2413,13 +2094,11 @@ class HorizontalFlip(ImageTensorOperation):
2413
2094
  """
2414
2095
  Flip the input image horizontally.
2415
2096
 
2416
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
2417
-
2418
2097
  Raises:
2419
2098
  RuntimeError: If given tensor shape is not <H, W> or <..., H, W, C>.
2420
2099
 
2421
2100
  Supported Platforms:
2422
- ``CPU`` ``Ascend``
2101
+ ``CPU``
2423
2102
 
2424
2103
  Examples:
2425
2104
  >>> import numpy as np
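
The HorizontalFlip hunk above drops the Ascend acceleration note, and the hunk that follows rewords the `device()` shape limits. On CPU the transform remains a plain left-right mirror; a quick eager-mode check against `numpy.flip` (my own sanity check, not taken from the package docs):

    import numpy as np
    import mindspore.dataset.vision as vision

    data = np.random.randint(0, 255, size=(4, 6, 3)).astype(np.uint8)
    output = vision.HorizontalFlip()(data)

    # A horizontal flip mirrors the width axis (axis=1 for an <H, W, C> image).
    print(np.array_equal(output, np.flip(data, axis=1)))   # True
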
@@ -2456,9 +2135,7 @@ class HorizontalFlip(ImageTensorOperation):
2456
2135
  """
2457
2136
  Set the device for the current operator execution.
2458
2137
 
2459
- - When the device is Ascend, input type supports `uint8` and `float32`,
2460
- input channel supports 1 and 3. The input data has a height limit of [4, 8192]
2461
- and a width limit of [6, 4096].
2138
+ - When the device is Ascend, input shape should be limited from [4, 6] to [8192, 4096].
2462
2139
 
2463
2140
  Args:
2464
2141
  device_target (str, optional): The operator will be executed on this device. Currently supports
@@ -2627,13 +2304,11 @@ class Invert(ImageTensorOperation, PyTensorOperation):
2627
2304
  For each pixel in the image, if the original pixel value is `pixel`,
2628
2305
  the inverted pixel value will be `255 - pixel`.
2629
2306
 
2630
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
2631
-
2632
2307
  Raises:
2633
2308
  RuntimeError: If the input image is not in shape of <H, W, C>.
2634
2309
 
2635
2310
  Supported Platforms:
2636
- ``CPU`` ``Ascend``
2311
+ ``CPU``
2637
2312
 
2638
2313
  Examples:
2639
2314
  >>> import numpy as np
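
The Invert docstring above defines the operation as `255 - pixel` for every pixel, and the hunk that follows removes the Ascend `device()` method so `parse()` constructs `InvertOperation()` with no device target. A short eager sketch checking the documented identity (the NumPy comparison is my own verification of that description):

    import numpy as np
    import mindspore.dataset.vision as vision

    data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
    output = vision.Invert()(data)

    # Per the docstring, each inverted pixel equals 255 minus the original pixel.
    print(np.array_equal(output, 255 - data))   # True
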
@@ -2665,58 +2340,8 @@ class Invert(ImageTensorOperation, PyTensorOperation):
2665
2340
  super().__init__()
2666
2341
  self.random = False
2667
2342
 
2668
- @check_device_target
2669
- def device(self, device_target="CPU"):
2670
- """
2671
- Set the device for the current operator execution.
2672
-
2673
- - When the device is CPU, input type only support `uint8` , input channel support 1/2/3.
2674
- - When the device is Ascend, input type supports `uint8`/`float32`, input channel supports 1/3.
2675
- input shape should be limited from [4, 6] to [8192, 4096].
2676
-
2677
- Args:
2678
- device_target (str, optional): The operator will be executed on this device. Currently supports
2679
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
2680
-
2681
- Raises:
2682
- TypeError: If `device_target` is not of type str.
2683
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
2684
-
2685
- Supported Platforms:
2686
- ``CPU`` ``Ascend``
2687
-
2688
- Examples:
2689
- >>> import numpy as np
2690
- >>> import mindspore.dataset as ds
2691
- >>> import mindspore.dataset.vision as vision
2692
- >>> from mindspore.dataset.vision import Inter
2693
- >>>
2694
- >>> # Use the transform in dataset pipeline mode
2695
- >>> data = np.random.randint(0, 255, size=(1, 100, 100, 3)).astype(np.uint8)
2696
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
2697
- >>> invert_op = vision.Invert()
2698
- >>> transforms_list = [invert_op]
2699
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
2700
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
2701
- ... print(item["image"].shape, item["image"].dtype)
2702
- ... break
2703
- (100, 100, 3) uint8
2704
- >>>
2705
- >>> # Use the transform in eager mode
2706
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
2707
- >>> output = vision.Invert().device("Ascend")(data)
2708
- >>> print(output.shape, output.dtype)
2709
- (100, 100, 3) uint8
2710
-
2711
- Tutorial Examples:
2712
- - `Illustration of vision transforms
2713
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
2714
- """
2715
- self.device_target = device_target
2716
- return self
2717
-
2718
2343
  def parse(self):
2719
- return cde.InvertOperation(self.device_target)
2344
+ return cde.InvertOperation()
2720
2345
 
2721
2346
  def _execute_py(self, img):
2722
2347
  """
@@ -3206,7 +2831,7 @@ class Pad(ImageTensorOperation, PyTensorOperation):
3206
2831
  RuntimeError: If given tensor shape is not <H, W> or <H, W, C>.
3207
2832
 
3208
2833
  Supported Platforms:
3209
- ``CPU`` ``Ascend``
2834
+ ``CPU``
3210
2835
 
3211
2836
  Examples:
3212
2837
  >>> import numpy as np
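
The Pad hunk above is again only a Supported Platforms change. For reference, a CPU eager sketch assuming the usual `Pad(padding)` form in which a single int pads all four borders (that padding semantics is my assumption; it is not spelled out in this hunk):

    import numpy as np
    import mindspore.dataset.vision as vision

    data = np.random.randint(0, 255, size=(50, 60, 3)).astype(np.uint8)
    # Pad 2 pixels on every border, so height and width each grow by 4.
    output = vision.Pad(2)(data)
    print(output.shape, output.dtype)   # (54, 64, 3) uint8
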
@@ -3391,8 +3016,6 @@ class Perspective(ImageTensorOperation, PyTensorOperation):
3391
3016
  """
3392
3017
  Apply perspective transformation on input image.
3393
3018
 
3394
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
3395
-
3396
3019
  Args:
3397
3020
  start_points (Sequence[Sequence[int, int]]): Sequence of the starting point coordinates, containing four
3398
3021
  two-element subsequences, corresponding to [top-left, top-right, bottom-right, bottom-left] of the
@@ -3410,7 +3033,7 @@ class Perspective(ImageTensorOperation, PyTensorOperation):
3410
3033
  RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.
3411
3034
 
3412
3035
  Supported Platforms:
3413
- ``CPU`` ``Ascend``
3036
+ ``CPU``
3414
3037
 
3415
3038
  Examples:
3416
3039
  >>> import numpy as np
@@ -3461,9 +3084,7 @@ class Perspective(ImageTensorOperation, PyTensorOperation):
3461
3084
  """
3462
3085
  Set the device for the current operator execution.
3463
3086
 
3464
- - When the device is Ascend, input type supports `uint8` and `float32`,
3465
- input channel supports 1 and 3. The input data has a height limit of [6, 8192]
3466
- and a width limit of [10, 4096].
3087
+ - When the device is Ascend, input shape should be limited from [6, 10] to [8192, 4096].
3467
3088
 
3468
3089
  Args:
3469
3090
  device_target (str, optional): The operator will be executed on this device. Currently supports
@@ -3508,8 +3129,6 @@ class Perspective(ImageTensorOperation, PyTensorOperation):
3508
3129
  <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
3509
3130
  """
3510
3131
  self.device_target = device_target
3511
- if self.interpolation not in [Inter.BILINEAR, Inter.NEAREST] and self.device_target == "Ascend":
3512
- raise RuntimeError("Invalid interpolation mode, only support BILINEAR and NEAREST.")
3513
3132
  return self
3514
3133
 
3515
3134
  def parse(self):
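
For Perspective, rc2 drops both the Ascend acceleration note and the `device()`-time check that restricted interpolation to BILINEAR and NEAREST on Ascend. A CPU-only sketch using an identity mapping (start and end points equal), so the output simply reproduces the input size; the corner coordinates are my own illustrative choice:

    import numpy as np
    import mindspore.dataset.vision as vision
    from mindspore.dataset.vision import Inter

    data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)

    # Corners listed as [top-left, top-right, bottom-right, bottom-left],
    # following the Args description above.
    corners = [[0, 0], [99, 0], [99, 99], [0, 99]]
    output = vision.Perspective(corners, corners, Inter.BILINEAR)(data)
    print(output.shape, output.dtype)   # (100, 100, 3) uint8
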
@@ -3538,8 +3157,6 @@ class Posterize(ImageTensorOperation):
3538
3157
  Reduce the bit depth of the color channels of image to create a high contrast and vivid color effect,
3539
3158
  similar to that seen in posters or printed materials.
3540
3159
 
3541
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
3542
-
3543
3160
  Args:
3544
3161
  bits (int): The number of bits to keep for each channel, should be in range of [0, 8].
3545
3162
 
@@ -3580,56 +3197,8 @@ class Posterize(ImageTensorOperation):
3580
3197
  self.bits = bits
3581
3198
  self.implementation = Implementation.C
3582
3199
 
3583
- @check_device_target
3584
- def device(self, device_target="CPU"):
3585
- """
3586
- Set the device for the current operator execution.
3587
-
3588
- - When the device is Ascend, input type supports `uint8`/`float32`, input channel supports 1 and 3.
3589
- The input data has a height limit of [4, 8192] and a width limit of [6, 4096].
3590
-
3591
- Args:
3592
- device_target (str, optional): The operator will be executed on this device. Currently supports
3593
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
3594
-
3595
- Raises:
3596
- TypeError: If `device_target` is not of type str.
3597
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
3598
-
3599
- Supported Platforms:
3600
- ``CPU`` ``Ascend``
3601
-
3602
- Examples:
3603
- >>> import numpy as np
3604
- >>> import mindspore.dataset as ds
3605
- >>> import mindspore.dataset.vision as vision
3606
- >>>
3607
- >>> # Use the transform in dataset pipeline mode
3608
- >>> data = np.random.randint(0, 255, size=(1, 100, 100, 3)).astype(np.uint8)
3609
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
3610
- >>> posterize_op = vision.Posterize(4).device("Ascend")
3611
- >>> transforms_list = [posterize_op]
3612
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
3613
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
3614
- ... print(item["image"].shape, item["image"].dtype)
3615
- ... break
3616
- (100, 100, 3) uint8
3617
- >>>
3618
- >>> # Use the transform in eager mode
3619
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
3620
- >>> output = vision.Posterize(4).device("Ascend")(data)
3621
- >>> print(output.shape, output.dtype)
3622
- (100, 100, 3) uint8
3623
-
3624
- Tutorial Examples:
3625
- - `Illustration of vision transforms
3626
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
3627
- """
3628
- self.device_target = device_target
3629
- return self
3630
-
3631
3200
  def parse(self):
3632
- return cde.PosterizeOperation(self.bits, self.device_target)
3201
+ return cde.PosterizeOperation(self.bits)
3633
3202
 
3634
3203
 
3635
3204
  class RandAugment(ImageTensorOperation):
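
The Posterize hunks remove the Ascend `device()` method, leaving `cde.PosterizeOperation(self.bits)` with no device argument. A CPU eager sketch with `bits=4`, mirroring the removed docstring example minus the `.device("Ascend")` call (keeping the four most significant bits per channel is the conventional posterize behavior; only the shape and dtype below come from the diff):

    import numpy as np
    import mindspore.dataset.vision as vision

    data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
    output = vision.Posterize(4)(data)   # keep 4 bits per color channel
    print(output.shape, output.dtype)    # (100, 100, 3) uint8
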
@@ -6096,8 +5665,9 @@ class Resize(ImageTensorOperation, PyTensorOperation):
6096
5665
  <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
6097
5666
  """
6098
5667
  self.device_target = device_target
6099
- if self.interpolation not in [Inter.BILINEAR, Inter.CUBIC, Inter.NEAREST] and self.device_target == "Ascend":
6100
- raise RuntimeError("Invalid interpolation mode, only support BILINEAR, CUBIC and NEAREST.")
5668
+ if self.interpolation == Inter.ANTIALIAS and self.device_target == "Ascend":
5669
+ raise ValueError("The current InterpolationMode is not supported by DVPP. It is {}."
5670
+ .format(self.interpolation))
6101
5671
  return self
6102
5672
 
6103
5673
  def parse(self):
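
The Resize hunk swaps the Ascend-side interpolation guard: 2.3.0 rejects anything outside BILINEAR, CUBIC, and NEAREST with a RuntimeError, while rc2 only rejects `Inter.ANTIALIAS` and raises a ValueError. On CPU the call is unaffected; a minimal sketch (the target size is an arbitrary illustrative value given as (height, width)):

    import numpy as np
    import mindspore.dataset.vision as vision
    from mindspore.dataset.vision import Inter

    data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
    output = vision.Resize((50, 75), interpolation=Inter.BILINEAR)(data)
    print(output.shape, output.dtype)   # (50, 75, 3) uint8
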
@@ -6124,8 +5694,6 @@ class ResizedCrop(ImageTensorOperation):
6124
5694
  """
6125
5695
  Crop the input image at a specific region and resize it to desired size.
6126
5696
 
6127
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
6128
-
6129
5697
  Args:
6130
5698
  top (int): Horizontal ordinate of the upper left corner of the crop region.
6131
5699
  left (int): Vertical ordinate of the upper left corner of the crop region.
@@ -6153,7 +5721,7 @@ class ResizedCrop(ImageTensorOperation):
6153
5721
  RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.
6154
5722
 
6155
5723
  Supported Platforms:
6156
- ``CPU`` ``Ascend``
5724
+ ``CPU``
6157
5725
 
6158
5726
  Examples:
6159
5727
  >>> import numpy as np
@@ -6201,9 +5769,7 @@ class ResizedCrop(ImageTensorOperation):
6201
5769
  """
6202
5770
  Set the device for the current operator execution.
6203
5771
 
6204
- - When the device is Ascend, input type supports `uint8` and `float32`,
6205
- input channel supports 1 and 3. The input data has a height limit of [4, 32768]
6206
- and a width limit of [6, 32768].
5772
+ - When the device is Ascend, input/output shape should be limited from [4, 6] to [32768, 32768].
6207
5773
 
6208
5774
  Args:
6209
5775
  device_target (str, optional): The operator will be executed on this device. Currently supports
@@ -6244,8 +5810,6 @@ class ResizedCrop(ImageTensorOperation):
6244
5810
  <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
6245
5811
  """
6246
5812
  self.device_target = device_target
6247
- if self.interpolation not in [Inter.BILINEAR, Inter.CUBIC, Inter.NEAREST] and self.device_target == "Ascend":
6248
- raise RuntimeError("Invalid interpolation mode, only support BILINEAR, CUBIC and NEAREST.")
6249
5813
  return self
6250
5814
 
6251
5815
  def parse(self):
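
For ResizedCrop, rc2 drops the Ascend acceleration note, rewords the `device()` shape limits, and removes the `device()`-time interpolation check. A CPU eager sketch cropping a 60x60 region from the top-left corner and resizing it to 32x32; the coordinates and sizes are my own illustrative values, with the argument order following the Args list above (top, left, height, width, size):

    import numpy as np
    import mindspore.dataset.vision as vision

    data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
    # Crop the 60x60 region at (top=0, left=0), then resize it to (32, 32).
    output = vision.ResizedCrop(0, 0, 60, 60, (32, 32))(data)
    print(output.shape, output.dtype)   # (32, 32, 3) uint8
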
@@ -6394,8 +5958,6 @@ class Rotate(ImageTensorOperation):
6394
5958
  """
6395
5959
  Rotate the input image by specified degrees.
6396
5960
 
6397
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
6398
-
6399
5961
  Args:
6400
5962
  degrees (Union[int, float]): Rotation degrees.
6401
5963
  resample (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
@@ -6421,7 +5983,7 @@ class Rotate(ImageTensorOperation):
6421
5983
  RuntimeError: If given tensor shape is not <H, W> or <..., H, W, C>.
6422
5984
 
6423
5985
  Supported Platforms:
6424
- ``CPU`` ``Ascend``
5986
+ ``CPU``
6425
5987
 
6426
5988
  Examples:
6427
5989
  >>> import numpy as np
@@ -6466,62 +6028,9 @@ class Rotate(ImageTensorOperation):
6466
6028
  self.fill_value = fill_value
6467
6029
  self.implementation = Implementation.C
6468
6030
 
6469
- @check_device_target
6470
- def device(self, device_target="CPU"):
6471
- """
6472
- Set the device for the current operator execution.
6473
-
6474
- - When the device is Ascend, input type supports `uint8`/`float32`, input channel supports 1 and 3.
6475
- The input data has a height limit of [4, 8192] and a width limit of [6, 4096].
6476
- - When the device is Ascend and `expand` is True, `center` does not take effect
6477
- and the image is rotated according to the center of the image.
6478
-
6479
- Args:
6480
- device_target (str, optional): The operator will be executed on this device. Currently supports
6481
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
6482
-
6483
- Raises:
6484
- TypeError: If `device_target` is not of type str.
6485
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
6486
-
6487
- Supported Platforms:
6488
- ``CPU`` ``Ascend``
6489
-
6490
- Examples:
6491
- >>> import numpy as np
6492
- >>> import mindspore.dataset as ds
6493
- >>> import mindspore.dataset.vision as vision
6494
- >>> from mindspore.dataset.vision import Inter
6495
- >>>
6496
- >>> # Use the transform in dataset pipeline mode
6497
- >>> data = np.random.randint(0, 255, size=(1, 300, 400, 3)).astype(np.uint8)
6498
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
6499
- >>> rotate_op = vision.Rotate(degrees=90.0, resample=Inter.NEAREST, expand=True).device("Ascend")
6500
- >>> transforms_list = [rotate_op]
6501
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
6502
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
6503
- ... print(item["image"].shape, item["image"].dtype)
6504
- ... break
6505
- (400, 300, 3) uint8
6506
- >>>
6507
- >>> # Use the transform in eager mode
6508
- >>> data = np.random.randint(0, 255, size=(300, 400, 3)).astype(np.uint8)
6509
- >>> output = vision.Rotate(degrees=90.0, resample=Inter.NEAREST, expand=True).device("Ascend")(data)
6510
- >>> print(output.shape, output.dtype)
6511
- (400, 300, 3) uint8
6512
-
6513
- Tutorial Examples:
6514
- - `Illustration of vision transforms
6515
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
6516
- """
6517
- self.device_target = device_target
6518
- if self.resample not in [Inter.BILINEAR, Inter.NEAREST] and self.device_target == "Ascend":
6519
- raise RuntimeError("Invalid interpolation mode, only support BILINEAR and NEAREST.")
6520
- return self
6521
-
6522
6031
  def parse(self):
6523
6032
  return cde.RotateOperation(self.degrees, Inter.to_c_type(self.resample), self.expand, self.center,
6524
- self.fill_value, self.device_target)
6033
+ self.fill_value)
6525
6034
 
6526
6035
 
6527
6036
  class SlicePatches(ImageTensorOperation):
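
The Rotate hunks remove the entire Ascend `device()` method together with its examples, so `parse()` builds `RotateOperation` without a device target. The removed example still documents the CPU-visible behavior: rotating a 300x400 image by 90 degrees with `expand=True` transposes the spatial dimensions. A CPU-only sketch of the same call:

    import numpy as np
    import mindspore.dataset.vision as vision
    from mindspore.dataset.vision import Inter

    data = np.random.randint(0, 255, size=(300, 400, 3)).astype(np.uint8)
    # expand=True enlarges the output canvas so the whole rotated image fits.
    output = vision.Rotate(degrees=90.0, resample=Inter.NEAREST, expand=True)(data)
    print(output.shape, output.dtype)   # (400, 300, 3) uint8
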
@@ -6606,8 +6115,6 @@ class Solarize(ImageTensorOperation):
6606
6115
  """
6607
6116
  Solarize the image by inverting all pixel values within the threshold.
6608
6117
 
6609
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
6610
-
6611
6118
  Args:
6612
6119
  threshold (Union[float, Sequence[float, float]]): Range of solarize threshold, should always
6613
6120
  be in (min, max) format, where min and max are integers in range of [0, 255], and min <= max.
@@ -6619,7 +6126,7 @@ class Solarize(ImageTensorOperation):
6619
6126
  ValueError: If `threshold` is not in range of [0, 255].
6620
6127
 
6621
6128
  Supported Platforms:
6622
- ``CPU`` ``Ascend``
6129
+ ``CPU``
6623
6130
 
6624
6131
  Examples:
6625
6132
  >>> import numpy as np
@@ -6655,56 +6162,8 @@ class Solarize(ImageTensorOperation):
6655
6162
  self.threshold = threshold
6656
6163
  self.implementation = Implementation.C
6657
6164
 
6658
- @check_device_target
6659
- def device(self, device_target="CPU"):
6660
- """
6661
- Set the device for the current operator execution.
6662
-
6663
- - When the device is Ascend, input type only supports `uint8` , input channel supports 1 and 3.
6664
- The input data has a height limit of [4, 8192] and a width limit of [6, 4096].
6665
-
6666
- Args:
6667
- device_target (str, optional): The operator will be executed on this device. Currently supports
6668
- ``CPU`` and ``Ascend`` . Default: ``CPU`` .
6669
-
6670
- Raises:
6671
- TypeError: If `device_target` is not of type str.
6672
- ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
6673
-
6674
- Supported Platforms:
6675
- ``CPU`` ``Ascend``
6676
-
6677
- Examples:
6678
- >>> import numpy as np
6679
- >>> import mindspore.dataset as ds
6680
- >>> import mindspore.dataset.vision as vision
6681
- >>>
6682
- >>> # Use the transform in dataset pipeline mode
6683
- >>> data = np.random.randint(0, 255, size=(1, 100, 100, 3)).astype(np.uint8)
6684
- >>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["image"])
6685
- >>> solarize_op = vision.Solarize(threshold=(10, 100)).device("Ascend")
6686
- >>> transforms_list = [solarize_op]
6687
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms_list, input_columns=["image"])
6688
- >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
6689
- ... print(item["image"].shape, item["image"].dtype)
6690
- ... break
6691
- (100, 100, 3) uint8
6692
- >>>
6693
- >>> # Use the transform in eager mode
6694
- >>> data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
6695
- >>> output = vision.Solarize(threshold=(10, 100)).device("Ascend")(data)
6696
- >>> print(output.shape, output.dtype)
6697
- (100, 100, 3) uint8
6698
-
6699
- Tutorial Examples:
6700
- - `Illustration of vision transforms
6701
- <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/vision_gallery.html>`_
6702
- """
6703
- self.device_target = device_target
6704
- return self
6705
-
6706
6165
  def parse(self):
6707
- return cde.SolarizeOperation(self.threshold, self.device_target)
6166
+ return cde.SolarizeOperation(self.threshold)
6708
6167
 
6709
6168
 
6710
6169
  class TenCrop(PyTensorOperation):
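
Solarize likewise loses its Ascend `device()` method in rc2, so `parse()` passes only the threshold. The docstring describes the operation as inverting every pixel whose value falls inside the `(min, max)` threshold range. A CPU eager sketch mirroring the removed example; the NumPy reference at the end is my own reading of that description, not a documented guarantee:

    import numpy as np
    import mindspore.dataset.vision as vision

    data = np.random.randint(0, 255, size=(100, 100, 3)).astype(np.uint8)
    output = vision.Solarize(threshold=(10, 100))(data)
    print(output.shape, output.dtype)   # (100, 100, 3) uint8

    # Reference: pixels within [10, 100] are expected to become 255 - pixel.
    reference = np.where((data >= 10) & (data <= 100), 255 - data, data)
    print(np.array_equal(output, reference))
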
@@ -7195,13 +6654,11 @@ class VerticalFlip(ImageTensorOperation):
7195
6654
  """
7196
6655
  Flip the input image vertically.
7197
6656
 
7198
- Supports Ascend hardware acceleration and can be enabled through the `.device("Ascend")` method.
7199
-
7200
6657
  Raises:
7201
6658
  RuntimeError: If given tensor shape is not <H, W> or <..., H, W, C>.
7202
6659
 
7203
6660
  Supported Platforms:
7204
- ``CPU`` ``Ascend``
6661
+ ``CPU``
7205
6662
 
7206
6663
  Examples:
7207
6664
  >>> import numpy as np
@@ -7238,9 +6695,7 @@ class VerticalFlip(ImageTensorOperation):
7238
6695
  """
7239
6696
  Set the device for the current operator execution.
7240
6697
 
7241
- - When the device is Ascend, input type supports `uint8` and `float32`,
7242
- input channel supports 1 and 3. The input data has a height limit of [4, 8192]
7243
- and a width limit of [6, 4096].
6698
+ - When the device is Ascend, input shape should be limited from [4, 6] to [8192, 4096].
7244
6699
 
7245
6700
  Args:
7246
6701
  device_target (str, optional): The operator will be executed on this device. Currently supports