mindspore-2.0.0rc1-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.2.0-cp38-cp38-manylinux1_x86_64.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore has been flagged as possibly problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +2 -2
- mindspore/__init__.py +5 -2
- mindspore/_akg/akg/build_module.py +5 -6
- mindspore/_akg/akg/composite/build_module.py +49 -16
- mindspore/_akg/akg/composite/split_stitch.py +10 -11
- mindspore/_akg/akg/config/repository.json +195 -0
- mindspore/_akg/akg/global_configs.py +5 -1
- mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
- mindspore/_akg/akg/tvm/api.py +4 -3
- mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
- mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
- mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
- mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
- mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
- mindspore/_akg/akg/tvm/build_module.py +16 -1
- mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
- mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
- mindspore/_akg/akg/tvm/ir_builder.py +1 -1
- mindspore/_akg/akg/tvm/module.py +1 -2
- mindspore/_akg/akg/tvm/stmt.py +2 -2
- mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
- mindspore/_akg/akg/utils/kernel_exec.py +58 -260
- mindspore/_akg/akg/utils/op_dsl.py +17 -1
- mindspore/_akg/akg/utils/result_analysis.py +4 -24
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +5 -1
- mindspore/_checkparam.py +79 -62
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +1 -9
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +19 -17
- mindspore/_extends/parse/namespace.py +7 -36
- mindspore/_extends/parse/parser.py +375 -189
- mindspore/_extends/parse/resources.py +36 -41
- mindspore/_extends/parse/standard_method.py +350 -245
- mindspore/_extends/parse/trope.py +2 -12
- mindspore/_extends/remote/kernel_build_server.py +24 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/amp.py +85 -19
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/base.py +2 -2
- mindspore/boost/boost.py +27 -32
- mindspore/boost/boost_cell_wrapper.py +37 -13
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/grad_freeze.py +34 -6
- mindspore/boost/group_loss_scale_manager.py +15 -14
- mindspore/boost/less_batch_normalization.py +28 -3
- mindspore/common/__init__.py +15 -11
- mindspore/common/_auto_dynamic.py +68 -0
- mindspore/common/_jit_fallback_utils.py +111 -0
- mindspore/common/_register_for_adapter.py +17 -5
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +18 -15
- mindspore/common/_utils.py +31 -7
- mindspore/common/api.py +269 -101
- mindspore/common/auto_dynamic_shape.py +498 -0
- mindspore/common/dtype.py +61 -21
- mindspore/common/dump.py +9 -7
- mindspore/common/initializer.py +106 -76
- mindspore/common/jit_config.py +35 -14
- mindspore/common/lazy_inline.py +187 -0
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/mutable.py +10 -13
- mindspore/common/parameter.py +246 -55
- mindspore/common/seed.py +13 -7
- mindspore/common/sparse_tensor.py +29 -33
- mindspore/common/tensor.py +907 -251
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +84 -4
- mindspore/communication/management.py +160 -88
- mindspore/config/op_info.config +99 -75
- mindspore/config/super_bar_config.json +36 -4
- mindspore/context.py +526 -219
- mindspore/dataset/__init__.py +9 -46
- mindspore/dataset/audio/__init__.py +4 -19
- mindspore/dataset/audio/transforms.py +545 -233
- mindspore/dataset/audio/utils.py +21 -18
- mindspore/dataset/callback/ds_callback.py +42 -13
- mindspore/dataset/core/config.py +158 -100
- mindspore/dataset/core/validator_helpers.py +1 -63
- mindspore/dataset/debug/debug_hook.py +45 -13
- mindspore/dataset/debug/pre_defined_hook.py +5 -5
- mindspore/dataset/engine/__init__.py +0 -5
- mindspore/dataset/engine/cache_client.py +38 -15
- mindspore/dataset/engine/datasets.py +615 -278
- mindspore/dataset/engine/datasets_audio.py +154 -283
- mindspore/dataset/engine/datasets_standard_format.py +104 -116
- mindspore/dataset/engine/datasets_text.py +443 -326
- mindspore/dataset/engine/datasets_user_defined.py +251 -164
- mindspore/dataset/engine/datasets_vision.py +839 -1443
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/offload.py +6 -6
- mindspore/dataset/engine/queue.py +15 -14
- mindspore/dataset/engine/samplers.py +39 -23
- mindspore/dataset/engine/serializer_deserializer.py +22 -6
- mindspore/dataset/engine/validators.py +21 -331
- mindspore/dataset/text/__init__.py +5 -33
- mindspore/dataset/text/transforms.py +334 -165
- mindspore/dataset/text/utils.py +215 -145
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/c_transforms.py +3 -2
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +174 -71
- mindspore/dataset/utils/browse_dataset.py +25 -17
- mindspore/dataset/utils/line_reader.py +24 -21
- mindspore/dataset/vision/__init__.py +5 -26
- mindspore/dataset/vision/c_transforms.py +177 -165
- mindspore/dataset/vision/py_transforms.py +114 -119
- mindspore/dataset/vision/py_transforms_util.py +54 -51
- mindspore/dataset/vision/transforms.py +1127 -381
- mindspore/dataset/vision/utils.py +54 -38
- mindspore/dataset/vision/validators.py +12 -2
- mindspore/experimental/map_parameter.py +38 -4
- mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
- mindspore/experimental/optim/adam.py +192 -0
- mindspore/experimental/optim/adamw.py +181 -0
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/experimental/optim/optimizer.py +252 -0
- mindspore/experimental/optim/sgd.py +147 -0
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +1 -2
- mindspore/include/api/context.h +21 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +29 -42
- mindspore/include/api/model_group.h +14 -3
- mindspore/include/api/model_parallel_runner.h +18 -2
- mindspore/include/api/serialization.h +26 -0
- mindspore/include/api/status.h +1 -0
- mindspore/include/api/types.h +38 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/c_api/status_c.h +3 -0
- mindspore/include/dataset/constants.h +6 -12
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +25 -31
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/include/mindapi/base/format.h +0 -1
- mindspore/include/mindapi/base/type_id.h +2 -1
- mindspore/include/mindapi/base/types.h +5 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libjemalloc.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +9 -6
- mindspore/mindrecord/filereader.py +33 -4
- mindspore/mindrecord/filewriter.py +70 -35
- mindspore/mindrecord/mindpage.py +40 -34
- mindspore/mindrecord/shardreader.py +1 -1
- mindspore/mindrecord/shardsegment.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
- mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
- mindspore/mindrecord/tools/csv_to_mr.py +29 -13
- mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
- mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
- mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
- mindspore/nn/cell.py +463 -169
- mindspore/nn/dynamic_lr.py +47 -43
- mindspore/nn/layer/activation.py +225 -82
- mindspore/nn/layer/basic.py +121 -79
- mindspore/nn/layer/channel_shuffle.py +21 -21
- mindspore/nn/layer/combined.py +33 -26
- mindspore/nn/layer/container.py +277 -22
- mindspore/nn/layer/conv.py +441 -304
- mindspore/nn/layer/dense.py +19 -13
- mindspore/nn/layer/embedding.py +62 -49
- mindspore/nn/layer/flash_attention.py +264 -0
- mindspore/nn/layer/image.py +50 -39
- mindspore/nn/layer/math.py +62 -51
- mindspore/nn/layer/normalization.py +219 -167
- mindspore/nn/layer/padding.py +58 -70
- mindspore/nn/layer/pooling.py +334 -287
- mindspore/nn/layer/rnn_cells.py +53 -38
- mindspore/nn/layer/rnns.py +59 -56
- mindspore/nn/layer/thor_layer.py +52 -44
- mindspore/nn/layer/timedistributed.py +6 -4
- mindspore/nn/layer/transformer.py +284 -164
- mindspore/nn/learning_rate_schedule.py +34 -25
- mindspore/nn/loss/__init__.py +3 -2
- mindspore/nn/loss/loss.py +554 -311
- mindspore/nn/optim/ada_grad.py +12 -9
- mindspore/nn/optim/adadelta.py +14 -11
- mindspore/nn/optim/adafactor.py +19 -16
- mindspore/nn/optim/adam.py +62 -47
- mindspore/nn/optim/adamax.py +13 -10
- mindspore/nn/optim/adasum.py +12 -8
- mindspore/nn/optim/asgd.py +10 -9
- mindspore/nn/optim/ftrl.py +20 -17
- mindspore/nn/optim/lamb.py +16 -12
- mindspore/nn/optim/lars.py +8 -6
- mindspore/nn/optim/lazyadam.py +25 -20
- mindspore/nn/optim/momentum.py +10 -7
- mindspore/nn/optim/optimizer.py +61 -9
- mindspore/nn/optim/proximal_ada_grad.py +14 -13
- mindspore/nn/optim/rmsprop.py +17 -13
- mindspore/nn/optim/rprop.py +30 -17
- mindspore/nn/optim/sgd.py +40 -23
- mindspore/nn/optim/thor.py +24 -26
- mindspore/nn/probability/bijector/bijector.py +11 -11
- mindspore/nn/probability/bijector/exp.py +1 -1
- mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/power_transform.py +29 -29
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +5 -5
- mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
- mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
- mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
- mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
- mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
- mindspore/nn/probability/distribution/_utils/utils.py +1 -1
- mindspore/nn/probability/distribution/bernoulli.py +9 -9
- mindspore/nn/probability/distribution/beta.py +8 -8
- mindspore/nn/probability/distribution/categorical.py +23 -15
- mindspore/nn/probability/distribution/cauchy.py +5 -6
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/exponential.py +4 -4
- mindspore/nn/probability/distribution/gamma.py +10 -10
- mindspore/nn/probability/distribution/geometric.py +8 -8
- mindspore/nn/probability/distribution/gumbel.py +8 -9
- mindspore/nn/probability/distribution/half_normal.py +5 -5
- mindspore/nn/probability/distribution/laplace.py +5 -5
- mindspore/nn/probability/distribution/log_normal.py +12 -11
- mindspore/nn/probability/distribution/logistic.py +8 -8
- mindspore/nn/probability/distribution/normal.py +6 -5
- mindspore/nn/probability/distribution/poisson.py +10 -11
- mindspore/nn/probability/distribution/student_t.py +8 -9
- mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
- mindspore/nn/probability/distribution/uniform.py +11 -11
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +9 -9
- mindspore/nn/wrap/cell_wrapper.py +188 -63
- mindspore/nn/wrap/grad_reducer.py +21 -12
- mindspore/nn/wrap/loss_scale.py +136 -49
- mindspore/numpy/__init__.py +4 -4
- mindspore/numpy/array_creations.py +55 -56
- mindspore/numpy/array_ops.py +134 -35
- mindspore/numpy/logic_ops.py +66 -20
- mindspore/numpy/math_ops.py +142 -139
- mindspore/numpy/utils_const.py +2 -2
- mindspore/offline_debug/convert_async.py +2 -2
- mindspore/ops/_grad_experimental/__init__.py +7 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
- mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
- mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
- mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
- mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
- mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
- mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
- mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
- mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
- mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
- mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
- mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/cast.py +52 -0
- mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
- mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/eye.py +4 -4
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
- mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
- mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu.py +39 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
- mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
- mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
- mindspore/ops/_op_impl/aicpu/median.py +1 -0
- mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
- mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
- mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
- mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
- mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
- mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
- mindspore/ops/_op_impl/tbe/__init__.py +6 -4
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
- mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
- mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
- mindspore/ops/_op_impl/tbe/im2col.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +241 -0
- mindspore/ops/_utils/utils.py +10 -2
- mindspore/ops/_vmap/vmap_array_ops.py +5 -3
- mindspore/ops/_vmap/vmap_base.py +5 -4
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/__init__.py +7 -5
- mindspore/ops/composite/base.py +78 -34
- mindspore/ops/composite/math_ops.py +5 -695
- mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
- mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +41 -4
- mindspore/ops/function/array_func.py +1108 -467
- mindspore/ops/function/clip_func.py +94 -27
- mindspore/ops/function/debug_func.py +3 -1
- mindspore/ops/function/grad/grad_func.py +82 -73
- mindspore/ops/function/image_func.py +28 -12
- mindspore/ops/function/linalg_func.py +135 -39
- mindspore/ops/function/math_func.py +3779 -894
- mindspore/ops/function/nn_func.py +1584 -657
- mindspore/ops/function/parameter_func.py +13 -3
- mindspore/ops/function/random_func.py +247 -153
- mindspore/ops/function/sparse_func.py +14 -11
- mindspore/ops/function/sparse_unary_func.py +173 -47
- mindspore/ops/function/spectral_func.py +8 -4
- mindspore/ops/function/vmap_func.py +8 -7
- mindspore/ops/functional.py +47 -16
- mindspore/ops/op_info_register.py +346 -86
- mindspore/ops/operations/__init__.py +38 -22
- mindspore/ops/operations/_grad_ops.py +145 -149
- mindspore/ops/operations/_inner_ops.py +298 -56
- mindspore/ops/operations/_ms_kernel.py +3 -3
- mindspore/ops/operations/_quant_ops.py +24 -28
- mindspore/ops/operations/_rl_inner_ops.py +9 -7
- mindspore/ops/operations/_scalar_ops.py +115 -0
- mindspore/ops/operations/_sequence_ops.py +148 -10
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/_thor_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +1239 -561
- mindspore/ops/operations/comm_ops.py +166 -90
- mindspore/ops/operations/control_ops.py +3 -3
- mindspore/ops/operations/custom_ops.py +124 -102
- mindspore/ops/operations/debug_ops.py +24 -11
- mindspore/ops/operations/image_ops.py +86 -71
- mindspore/ops/operations/inner_ops.py +18 -13
- mindspore/ops/operations/linalg_ops.py +30 -11
- mindspore/ops/operations/math_ops.py +1730 -435
- mindspore/ops/operations/nn_ops.py +1953 -943
- mindspore/ops/operations/other_ops.py +65 -43
- mindspore/ops/operations/random_ops.py +258 -98
- mindspore/ops/operations/rl_ops.py +4 -36
- mindspore/ops/operations/sparse_ops.py +38 -33
- mindspore/ops/operations/spectral_ops.py +8 -4
- mindspore/ops/primitive.py +66 -44
- mindspore/ops/signature.py +5 -5
- mindspore/parallel/_auto_parallel_context.py +80 -19
- mindspore/parallel/_cost_model_context.py +42 -0
- mindspore/parallel/_offload_context.py +162 -72
- mindspore/parallel/_parallel_serialization.py +2 -2
- mindspore/parallel/_ps_context.py +16 -4
- mindspore/parallel/_recovery_context.py +2 -1
- mindspore/parallel/_tensor.py +15 -13
- mindspore/parallel/_transformer/layers.py +8 -6
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +7 -7
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +34 -14
- mindspore/parallel/_utils.py +36 -14
- mindspore/parallel/algo_parameter_config.py +114 -20
- mindspore/parallel/checkpoint_transform.py +16 -18
- mindspore/parallel/shard.py +16 -13
- mindspore/profiler/__init__.py +1 -1
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +3 -2
- mindspore/profiler/envprofiling.py +11 -4
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +94 -0
- mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
- mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
- mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
- mindspore/profiler/parser/ascend_op_generator.py +276 -0
- mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
- mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
- mindspore/profiler/parser/base_timeline_generator.py +11 -7
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +92 -73
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +36 -11
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +4 -5
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +678 -377
- mindspore/rewrite/api/node.py +211 -54
- mindspore/rewrite/api/node_type.py +5 -0
- mindspore/rewrite/api/pattern_engine.py +22 -23
- mindspore/rewrite/api/scoped_value.py +20 -17
- mindspore/rewrite/api/symbol_tree.py +252 -106
- mindspore/rewrite/api/tree_node_helper.py +3 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +51 -51
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +313 -247
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/node/node_topological_manager.py +243 -0
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +225 -239
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +179 -218
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +36 -15
- mindspore/rewrite/parsers/function_def_parser.py +23 -20
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +202 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/sparsify.py +4 -1
- mindspore/rewrite/sparsify/utils.py +11 -5
- mindspore/rewrite/symbol_tree.py +577 -732
- mindspore/rewrite/symbol_tree_builder.py +9 -175
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +46 -39
- mindspore/run_check/run_check.py +3 -2
- mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
- mindspore/safeguard/rewrite_obfuscation.py +517 -0
- mindspore/scipy/__init__.py +1 -1
- mindspore/scipy/linalg.py +67 -61
- mindspore/scipy/ops.py +5 -41
- mindspore/scipy/ops_grad.py +3 -2
- mindspore/scipy/ops_wrapper.py +5 -5
- mindspore/scipy/optimize/line_search.py +8 -8
- mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
- mindspore/scipy/optimize/minimize.py +16 -12
- mindspore/scipy/utils.py +1 -52
- mindspore/scipy/utils_const.py +4 -4
- mindspore/train/__init__.py +4 -4
- mindspore/train/_utils.py +13 -5
- mindspore/train/amp.py +410 -148
- mindspore/train/anf_ir_pb2.py +16 -4
- mindspore/train/callback/_backup_and_restore.py +8 -11
- mindspore/train/callback/_callback.py +80 -3
- mindspore/train/callback/_checkpoint.py +82 -51
- mindspore/train/callback/_early_stop.py +12 -15
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_lambda_callback.py +13 -13
- mindspore/train/callback/_landscape.py +21 -17
- mindspore/train/callback/_loss_monitor.py +9 -10
- mindspore/train/callback/_on_request_exit.py +16 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
- mindspore/train/callback/_summary_collector.py +44 -30
- mindspore/train/callback/_time_monitor.py +62 -12
- mindspore/train/data_sink.py +10 -16
- mindspore/train/dataset_helper.py +154 -86
- mindspore/train/loss_scale_manager.py +14 -9
- mindspore/train/metrics/__init__.py +10 -2
- mindspore/train/metrics/accuracy.py +1 -1
- mindspore/train/metrics/auc.py +1 -1
- mindspore/train/metrics/bleu_score.py +2 -2
- mindspore/train/metrics/confusion_matrix.py +14 -14
- mindspore/train/metrics/cosine_similarity.py +3 -3
- mindspore/train/metrics/dice.py +1 -1
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +8 -6
- mindspore/train/metrics/mean_surface_distance.py +5 -4
- mindspore/train/metrics/metric.py +49 -17
- mindspore/train/metrics/occlusion_sensitivity.py +4 -4
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +2 -2
- mindspore/train/metrics/recall.py +2 -3
- mindspore/train/metrics/roc.py +7 -7
- mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
- mindspore/train/metrics/topk.py +7 -4
- mindspore/train/mind_ir_pb2.py +193 -48
- mindspore/train/model.py +377 -133
- mindspore/train/serialization.py +697 -245
- mindspore/train/summary/_summary_adapter.py +5 -2
- mindspore/train/summary/_writer_pool.py +4 -3
- mindspore/train/summary/summary_record.py +25 -23
- mindspore/train/train_thor/convert_utils.py +39 -23
- mindspore/train/train_thor/dataset_helper.py +4 -3
- mindspore/train/train_thor/model_thor.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +647 -818
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
- mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
- mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
- mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
- mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
- mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
- mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
- mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
- mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
- mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
- mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
- mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
- mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
- mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
- mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
- mindspore/_akg/akg/tvm/rpc/base.py +0 -182
- mindspore/_akg/akg/tvm/rpc/client.py +0 -436
- mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
- mindspore/_akg/akg/tvm/rpc/server.py +0 -413
- mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
- mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/_extends/parse/jit_fallback_modules.py +0 -51
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/dataset/engine/graphdata.py +0 -1586
- mindspore/include/api/net.h +0 -142
- mindspore/ops/_grad/grad_array_ops.py +0 -1347
- mindspore/ops/_grad/grad_clip_ops.py +0 -84
- mindspore/ops/_grad/grad_debug_ops.py +0 -68
- mindspore/ops/_grad/grad_inner_ops.py +0 -235
- mindspore/ops/_grad/grad_math_ops.py +0 -1684
- mindspore/ops/_grad/grad_nn_ops.py +0 -1529
- mindspore/ops/_grad/grad_other_ops.py +0 -89
- mindspore/ops/_grad/grad_sequence_ops.py +0 -296
- mindspore/ops/_grad/grad_sparse.py +0 -323
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
- mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
- mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- mindspore/rewrite/topological_manager.py +0 -203
- mindspore/scipy/sparse/linalg.py +0 -192
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
|
@@ -31,7 +31,11 @@ The Python implementation is mainly based on PIL.
|
|
|
31
31
|
class attributes (self.xxx) to support save() and load().
|
|
32
32
|
|
|
33
33
|
Examples:
|
|
34
|
+
>>> import mindspore.dataset as ds
|
|
35
|
+
>>> import mindspore.dataset.vision as vision
|
|
34
36
|
>>> from mindspore.dataset.vision import Border, Inter
|
|
37
|
+
>>> import mindspore.dataset.transforms as transforms
|
|
38
|
+
>>>
|
|
35
39
|
>>> image_folder_dataset_dir = "/path/to/image_folder_dataset_directory"
|
|
36
40
|
>>> # create a dataset that reads all files in dataset_dir with 8 threads
|
|
37
41
|
>>> image_folder_dataset = ds.ImageFolderDataset(image_folder_dataset_dir,
|
|
@@ -74,7 +78,7 @@ from .validators import check_adjust_brightness, check_adjust_contrast, check_ad
|
|
|
74
78
|
check_random_select_subpolicy_op, check_random_solarize, check_range, check_rescale, check_resize, \
|
|
75
79
|
check_resize_interpolation, check_resized_crop, check_rgb_to_hsv, check_rotate, check_slice_patches, \
|
|
76
80
|
check_solarize, check_ten_crop, check_trivial_augment_wide, check_uniform_augment, check_to_tensor, \
|
|
77
|
-
FLOAT_MAX_INTEGER
|
|
81
|
+
check_device_target, FLOAT_MAX_INTEGER
|
|
78
82
|
from ..core.datatypes import mstype_to_detype, nptype_to_detype
|
|
79
83
|
from ..transforms.py_transforms_util import Implementation
|
|
80
84
|
from ..transforms.transforms import CompoundOperation, PyTensorOperation, TensorOperation, TypeCast
|
|
@@ -82,7 +86,7 @@ from ..transforms.transforms import CompoundOperation, PyTensorOperation, Tensor
|
|
|
82
86
|
|
|
83
87
|
class ImageTensorOperation(TensorOperation):
|
|
84
88
|
"""
|
|
85
|
-
Base class of Image Tensor Ops
|
|
89
|
+
Base class of Image Tensor Ops.
|
|
86
90
|
"""
|
|
87
91
|
|
|
88
92
|
def __call__(self, *input_tensor_list):
|
|
@@ -103,8 +107,8 @@ class AdjustBrightness(ImageTensorOperation, PyTensorOperation):
|
|
|
103
107
|
|
|
104
108
|
Args:
|
|
105
109
|
brightness_factor (float): How much to adjust the brightness, must be non negative.
|
|
106
|
-
0 gives a black image, 1 gives the original image,
|
|
107
|
-
while 2 increases the brightness by a factor of 2.
|
|
110
|
+
``0`` gives a black image, ``1`` gives the original image,
|
|
111
|
+
while ``2`` increases the brightness by a factor of 2.
|
|
108
112
|
|
|
109
113
|
Raises:
|
|
110
114
|
TypeError: If `brightness_factor` is not of type float.
|
|
@@ -112,12 +116,20 @@ class AdjustBrightness(ImageTensorOperation, PyTensorOperation):
|
|
|
112
116
|
RuntimeError: If shape of the input image is not <H, W, C>.
|
|
113
117
|
|
|
114
118
|
Supported Platforms:
|
|
115
|
-
``CPU``
|
|
119
|
+
``CPU`` ``Ascend``
|
|
116
120
|
|
|
117
121
|
Examples:
|
|
122
|
+
>>> import mindspore.dataset as ds
|
|
123
|
+
>>> import mindspore.dataset.vision as vision
|
|
124
|
+
>>>
|
|
125
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
118
126
|
>>> transforms_list = [vision.Decode(), vision.AdjustBrightness(brightness_factor=2.0)]
|
|
119
127
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
120
128
|
... input_columns=["image"])
|
|
129
|
+
|
|
130
|
+
Tutorial Examples:
|
|
131
|
+
- `Illustration of vision transforms
|
|
132
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
121
133
|
"""
|
|
122
134
|
|
|
123
135
|
@check_adjust_brightness
|
|
@@ -125,8 +137,39 @@ class AdjustBrightness(ImageTensorOperation, PyTensorOperation):
|
|
|
125
137
|
super().__init__()
|
|
126
138
|
self.brightness_factor = brightness_factor
|
|
127
139
|
|
|
140
|
+
@check_device_target
|
|
141
|
+
def device(self, device_target="CPU"):
|
|
142
|
+
"""
|
|
143
|
+
Set the device for the current operator execution.
|
|
144
|
+
|
|
145
|
+
Args:
|
|
146
|
+
device_target (str, optional): The operator will be executed on this device. Currently supports
|
|
147
|
+
``CPU`` and ``Ascend`` , where ``Ascend`` refers to Ascend910B device. Default: ``CPU`` .
|
|
148
|
+
|
|
149
|
+
Raises:
|
|
150
|
+
TypeError: If `device_target` is not of type str.
|
|
151
|
+
ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
|
|
152
|
+
|
|
153
|
+
Supported Platforms:
|
|
154
|
+
``CPU`` ``Ascend``
|
|
155
|
+
|
|
156
|
+
Examples:
|
|
157
|
+
>>> import mindspore.dataset as ds
|
|
158
|
+
>>> import mindspore.dataset.vision as vision
|
|
159
|
+
>>>
|
|
160
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
161
|
+
>>> transforms_list = [vision.Decode().device("CPU"), vision.AdjustBrightness(2.0).device("Ascend")]
|
|
162
|
+
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list, input_columns=["image"])
|
|
163
|
+
|
|
164
|
+
Tutorial Examples:
|
|
165
|
+
- `Illustration of vision transforms
|
|
166
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
167
|
+
"""
|
|
168
|
+
self.device_target = device_target
|
|
169
|
+
return self
|
|
170
|
+
|
|
128
171
|
def parse(self):
|
|
129
|
-
return cde.AdjustBrightnessOperation(self.brightness_factor)
|
|
172
|
+
return cde.AdjustBrightnessOperation(self.brightness_factor, self.device_target)
|
|
130
173
|
|
|
131
174
|
def _execute_py(self, img):
|
|
132
175
|
"""
|
|
@@ -147,8 +190,8 @@ class AdjustContrast(ImageTensorOperation, PyTensorOperation):
 
     Args:
         contrast_factor (float): How much to adjust the contrast, must be non negative.
-            0 gives a solid gray image, 1 gives the original image,
-            while 2 increases the contrast by a factor of 2.
+            ``0`` gives a solid gray image, ``1`` gives the original image,
+            while ``2`` increases the contrast by a factor of 2.
 
     Raises:
         TypeError: If `contrast_factor` is not of type float.
@@ -156,12 +199,20 @@ class AdjustContrast(ImageTensorOperation, PyTensorOperation):
         RuntimeError: If shape of the input image is not <H, W, C>.
 
     Supported Platforms:
-        ``CPU``
+        ``CPU`` ``Ascend``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.AdjustContrast(contrast_factor=2.0)]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_adjust_contrast
@@ -169,8 +220,39 @@ class AdjustContrast(ImageTensorOperation, PyTensorOperation):
         super().__init__()
         self.contrast_factor = contrast_factor
 
+    @check_device_target
+    def device(self, device_target="CPU"):
+        """
+        Set the device for the current operator execution.
+
+        Args:
+            device_target (str, optional): The operator will be executed on this device. Currently supports
+                ``CPU`` and ``Ascend`` , where ``Ascend`` refers to Ascend910B device. Default: ``CPU`` .
+
+        Raises:
+            TypeError: If `device_target` is not of type str.
+            ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
+
+        Supported Platforms:
+            ``CPU`` ``Ascend``
+
+        Examples:
+            >>> import mindspore.dataset as ds
+            >>> import mindspore.dataset.vision as vision
+            >>>
+            >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+            >>> transforms_list = [vision.Decode().device("CPU"), vision.AdjustContrast(0).device("Ascend")]
+            >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list, input_columns=["image"])
+
+        Tutorial Examples:
+            - `Illustration of vision transforms
+              <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
+        """
+        self.device_target = device_target
+        return self
+
     def parse(self):
-        return cde.AdjustContrastOperation(self.contrast_factor)
+        return cde.AdjustContrastOperation(self.contrast_factor, self.device_target)
 
     def _execute_py(self, img):
         """
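Note on the hunks above: 2.2.0 lets several vision ops run on Ascend through the new chainable `device()` setter added here. Below is a minimal sketch of that usage pattern; the dataset path is a placeholder, and the "Ascend" branch assumes an Ascend 910B host as the docstring states.

    import mindspore.dataset as ds
    import mindspore.dataset.vision as vision

    # Placeholder path; replace with a real image folder.
    dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
    # Decode on CPU, then adjust contrast on Ascend (requires Ascend 910B hardware).
    ops = [vision.Decode().device("CPU"),
           vision.AdjustContrast(contrast_factor=2.0).device("Ascend")]
    dataset = dataset.map(operations=ops, input_columns=["image"])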
@@ -201,7 +283,7 @@ class AdjustGamma(ImageTensorOperation, PyTensorOperation):
             The output image pixel value is exponentially related to the input image pixel value.
             gamma larger than 1 make the shadows darker,
             while gamma smaller than 1 make dark regions lighter.
-        gain (float, optional): The constant multiplier. Default: 1.0
+        gain (float, optional): The constant multiplier. Default: ``1.0``.
 
     Raises:
         TypeError: If `gain` is not of type float.
@@ -213,9 +295,17 @@ class AdjustGamma(ImageTensorOperation, PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.AdjustGamma(gamma=10.0, gain=1.0)]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_adjust_gamma
@@ -255,12 +345,20 @@ class AdjustHue(ImageTensorOperation, PyTensorOperation):
         RuntimeError: If shape of the input image is not <H, W, C>.
 
     Supported Platforms:
-        ``CPU``
+        ``CPU`` ``Ascend``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.AdjustHue(hue_factor=0.2)]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_adjust_hue
@@ -268,8 +366,39 @@ class AdjustHue(ImageTensorOperation, PyTensorOperation):
         super().__init__()
         self.hue_factor = hue_factor
 
+    @check_device_target
+    def device(self, device_target="CPU"):
+        """
+        Set the device for the current operator execution.
+
+        Args:
+            device_target (str, optional): The operator will be executed on this device. Currently supports
+                ``CPU`` and ``Ascend`` , where ``Ascend`` refers to Ascend910B device. Default: ``CPU`` .
+
+        Raises:
+            TypeError: If `device_target` is not of type str.
+            ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
+
+        Supported Platforms:
+            ``CPU`` ``Ascend``
+
+        Examples:
+            >>> import mindspore.dataset as ds
+            >>> import mindspore.dataset.vision as vision
+            >>>
+            >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+            >>> transforms_list = [vision.Decode().device("CPU"), vision.AdjustHue(0.5).device("Ascend")]
+            >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list, input_columns=["image"])
+
+        Tutorial Examples:
+            - `Illustration of vision transforms
+              <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
+        """
+        self.device_target = device_target
+        return self
+
     def parse(self):
-        return cde.AdjustHueOperation(self.hue_factor)
+        return cde.AdjustHueOperation(self.hue_factor, self.device_target)
 
     def _execute_py(self, img):
         """
@@ -290,7 +419,8 @@ class AdjustSaturation(ImageTensorOperation, PyTensorOperation):
 
     Args:
         saturation_factor (float): How much to adjust the saturation, must be non negative.
-            0 gives a black image, 1 gives the original image
+            ``0`` gives a black image, ``1`` gives the original image
+            while ``2`` increases the saturation by a factor of 2.
 
     Raises:
         TypeError: If `saturation_factor` is not of type float.
@@ -299,12 +429,20 @@ class AdjustSaturation(ImageTensorOperation, PyTensorOperation):
         RuntimeError: If channel of the input image is not 3.
 
     Supported Platforms:
-        ``CPU``
+        ``CPU`` ``Ascend``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.AdjustSaturation(saturation_factor=2.0)]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_adjust_saturation
@@ -312,8 +450,39 @@ class AdjustSaturation(ImageTensorOperation, PyTensorOperation):
         super().__init__()
         self.saturation_factor = saturation_factor
 
+    @check_device_target
+    def device(self, device_target="CPU"):
+        """
+        Set the device for the current operator execution.
+
+        Args:
+            device_target (str, optional): The operator will be executed on this device. Currently supports
+                ``CPU`` and ``Ascend`` , where ``Ascend`` refers to Ascend910B device. Default: ``CPU`` .
+
+        Raises:
+            TypeError: If `device_target` is not of type str.
+            ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
+
+        Supported Platforms:
+            ``CPU`` ``Ascend``
+
+        Examples:
+            >>> import mindspore.dataset as ds
+            >>> import mindspore.dataset.vision as vision
+            >>>
+            >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+            >>> transforms_list = [vision.Decode().device("CPU"), vision.AdjustSaturation(2.0).device("Ascend")]
+            >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list, input_columns=["image"])
+
+        Tutorial Examples:
+            - `Illustration of vision transforms
+              <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
+        """
+        self.device_target = device_target
+        return self
+
     def parse(self):
-        return cde.AdjustSaturationOperation(self.saturation_factor)
+        return cde.AdjustSaturationOperation(self.saturation_factor, self.device_target)
 
     def _execute_py(self, img):
         """
@@ -334,8 +503,8 @@ class AdjustSharpness(ImageTensorOperation):
 
     Args:
         sharpness_factor (float): How much to adjust the sharpness, must be
-            non negative. 0 gives a blurred image, 1 gives the
-            original image while 2 increases the sharpness by a factor of 2.
+            non negative. ``0`` gives a blurred image, ``1`` gives the
+            original image while ``2`` increases the sharpness by a factor of 2.
 
     Raises:
         TypeError: If `sharpness_factor` is not of type float.
@@ -346,9 +515,17 @@ class AdjustSharpness(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.AdjustSharpness(sharpness_factor=2.0)]
        >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
        ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_adjust_sharpness
@@ -372,20 +549,11 @@ class Affine(ImageTensorOperation):
         shear (Union[float, Sequence[float, float]]): Shear angle value in degrees between -180 to 180.
             If float is provided, shear along the x axis with this value, without shearing along the y axis;
             If Sequence[float, float] is provided, shear along the x axis and y axis with these two values separately.
-        resample (Inter, optional):
-
-
-            - Inter.BILINEAR, means resample method is bilinear interpolation.
-
-            - Inter.NEAREST, means resample method is nearest-neighbor interpolation.
-
-            - Inter.BICUBIC, means resample method is bicubic interpolation.
-
-            - Inter.AREA, means resample method is pixel area interpolation.
-
-        fill_value (Union[int, tuple[int, int, int]], optional): Optional fill_value to fill the area
+        resample (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+            Default: ``Inter.NEAREST``.
+        fill_value (Union[int, tuple[int, int, int]], optional): Optional `fill_value` to fill the area
             outside the transform in the output image. There must be three elements in tuple and the value
-            of single element is [0, 255]. Default: 0
+            of single element is [0, 255]. Default: ``0``.
 
     Raises:
         TypeError: If `degrees` is not of type float.
@@ -393,7 +561,7 @@ class Affine(ImageTensorOperation):
         TypeError: If `scale` is not of type float.
         ValueError: If `scale` is non positive.
         TypeError: If `shear` is not of float or Sequence[float, float].
-        TypeError: If `resample` is not of type :class
+        TypeError: If `resample` is not of type :class:`~.vision.Inter` .
         TypeError: If `fill_value` is not of type int or tuple[int, int, int].
         RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.
 
@@ -401,13 +569,21 @@ class Affine(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
         >>> from mindspore.dataset.vision import Inter
         >>>
         >>> decode_op = vision.Decode()
         >>> affine_op = vision.Affine(degrees=15, translate=[0.2, 0.2], scale=1.1, shear=[1.0, 1.0],
         ...                           resample=Inter.BILINEAR)
         >>> affine_list = [decode_op, affine_op]
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> image_folder_dataset = image_folder_dataset.map(operations=affine_list, input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_affine
@@ -441,35 +617,26 @@ class AutoAugment(ImageTensorOperation):
 
     Args:
         policy (AutoAugmentPolicy, optional): AutoAugment policies learned on different datasets.
-            Default: AutoAugmentPolicy.IMAGENET
-            It can be
+            Default: ``AutoAugmentPolicy.IMAGENET``.
+            It can be ``AutoAugmentPolicy.IMAGENET``, ``AutoAugmentPolicy.CIFAR10``, ``AutoAugmentPolicy.SVHN``.
             Randomly apply 2 operations from a candidate set. See auto augmentation details in AutoAugmentPolicy.
 
-            - AutoAugmentPolicy.IMAGENET
-
-            - AutoAugmentPolicy.CIFAR10, means to apply AutoAugment learned on Cifar10 dataset.
-
-            - AutoAugmentPolicy.SVHN, means to apply AutoAugment learned on SVHN dataset.
+            - ``AutoAugmentPolicy.IMAGENET``, means to apply AutoAugment learned on ImageNet dataset.
 
-
-            It can be any of [Inter.NEAREST, Inter.BILINEAR, Inter.BICUBIC, Inter.AREA].
+            - ``AutoAugmentPolicy.CIFAR10``, means to apply AutoAugment learned on Cifar10 dataset.
 
-            -
-
-            - Inter.BILINEAR: means interpolation method is bilinear interpolation.
-
-            - Inter.BICUBIC: means the interpolation method is bicubic interpolation.
-
-            - Inter.AREA: means the interpolation method is pixel area interpolation.
+            - ``AutoAugmentPolicy.SVHN``, means to apply AutoAugment learned on SVHN dataset.
 
+        interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+            Default: ``Inter.NEAREST``.
         fill_value (Union[int, tuple[int]], optional): Pixel fill value for the area outside the transformed image.
             It can be an int or a 3-tuple. If it is a 3-tuple, it is used to fill R, G, B channels respectively.
             If it is an integer, it is used for all RGB channels. The fill_value values must be in range [0, 255].
-            Default: 0
+            Default: ``0``.
 
     Raises:
         TypeError: If `policy` is not of type :class:`mindspore.dataset.vision.AutoAugmentPolicy` .
-        TypeError: If `interpolation` is not of type :class
+        TypeError: If `interpolation` is not of type :class:`~.vision.Inter` .
         TypeError: If `fill_value` is not an integer or a tuple of length 3.
         RuntimeError: If given tensor shape is not <H, W, C>.
 
@@ -477,13 +644,21 @@ class AutoAugment(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
         >>> from mindspore.dataset.vision import AutoAugmentPolicy, Inter
         >>>
         >>> transforms_list = [vision.Decode(), vision.AutoAugment(policy=AutoAugmentPolicy.IMAGENET,
         ...                                                        interpolation=Inter.NEAREST,
         ...                                                        fill_value=0)]
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_auto_augment
@@ -508,9 +683,9 @@ class AutoContrast(ImageTensorOperation, PyTensorOperation):
 
     Args:
         cutoff (float, optional): Percent of lightest and darkest pixels to cut off from
-            the histogram of input image. The value must be in the range [0.0, 50.0]. Default: 0.0
+            the histogram of input image. The value must be in the range [0.0, 50.0]. Default: ``0.0``.
         ignore (Union[int, sequence], optional): The background pixel values to ignore,
-            The ignore values must be in range [0, 255]. Default: None
+            The ignore values must be in range [0, 255]. Default: ``None``.
 
     Raises:
         TypeError: If `cutoff` is not of type float.
@@ -523,9 +698,17 @@ class AutoContrast(ImageTensorOperation, PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.AutoContrast(cutoff=10.0, ignore=[10, 20])]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_auto_contrast
@@ -563,7 +746,7 @@ class BoundingBoxAugment(ImageTensorOperation):
         transform (TensorOperation): Transformation operation to be applied on random selection
             of bounding box regions of a given image.
         ratio (float, optional): Ratio of bounding boxes to apply augmentation on.
-            Range: [0.0, 1.0]. Default: 0.3
+            Range: [0.0, 1.0]. Default: ``0.3``.
 
     Raises:
         TypeError: If `transform` is an image processing operation in `mindspore.dataset.vision` .
@@ -575,12 +758,20 @@ class BoundingBoxAugment(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> # set bounding box operation with ratio of 1 to apply rotation on all bounding boxes
         >>> bbox_aug_op = vision.BoundingBoxAugment(vision.RandomRotation(90), 1)
         >>> # map to apply ops
         >>> image_folder_dataset = image_folder_dataset.map(operations=[bbox_aug_op],
         ...                                                 input_columns=["image", "bbox"],
         ...                                                 output_columns=["image", "bbox"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_bounding_box_augment_cpp
@@ -618,14 +809,23 @@ class CenterCrop(ImageTensorOperation, PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+        >>>
         >>> # crop image to a square
         >>> transforms_list1 = [vision.Decode(), vision.CenterCrop(50)]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list1,
         ...                                                 input_columns=["image"])
         >>> # crop image to portrait style
         >>> transforms_list2 = [vision.Decode(), vision.CenterCrop((60, 40))]
-        >>>
-        ...
+        >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list2,
+        ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_center_crop
@@ -707,15 +907,23 @@ class ConvertColor(ImageTensorOperation):
         ``CPU``
 
     Examples:
-        >>> import mindspore.dataset
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+        >>>
         >>> # Convert RGB images to GRAY images
-        >>> convert_op = vision.ConvertColor(
+        >>> convert_op = vision.ConvertColor(vision.ConvertMode.COLOR_RGB2GRAY)
         >>> image_folder_dataset = image_folder_dataset.map(operations=convert_op,
         ...                                                 input_columns=["image"])
         >>> # Convert RGB images to BGR images
-        >>> convert_op = vision.ConvertColor(
-        >>>
-        ...
+        >>> convert_op = vision.ConvertColor(vision.ConvertMode.COLOR_RGB2BGR)
+        >>> image_folder_dataset = image_folder_dataset.map(operations=convert_op,
+        ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_convert_color
@@ -751,11 +959,19 @@ class Crop(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> decode_op = vision.Decode()
         >>> crop_op = vision.Crop((0, 0), 32)
         >>> transforms_list = [decode_op, crop_op]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_crop
@@ -779,9 +995,9 @@ class CutMixBatch(ImageTensorOperation):
     Args:
         image_batch_format (ImageBatchFormat): The method of padding. Can be any of
             [ImageBatchFormat.NHWC, ImageBatchFormat.NCHW].
-        alpha (float, optional): Hyperparameter of beta distribution, must be larger than 0. Default: 1.0
+        alpha (float, optional): Hyperparameter of beta distribution, must be larger than 0. Default: ``1.0``.
         prob (float, optional): The probability by which CutMix is applied to each image,
-            which must be in range: [0.0, 1.0]. Default: 1.0
+            which must be in range: [0.0, 1.0]. Default: ``1.0``.
 
     Raises:
         TypeError: If `image_batch_format` is not of type :class:`mindspore.dataset.vision.ImageBatchFormat` .
@@ -795,7 +1011,12 @@ class CutMixBatch(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>> import mindspore.dataset.transforms as transforms
         >>> from mindspore.dataset.vision import ImageBatchFormat
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> onehot_op = transforms.OneHot(num_classes=10)
         >>> image_folder_dataset= image_folder_dataset.map(operations=onehot_op,
         ...                                                input_columns=["label"])
@@ -803,6 +1024,10 @@ class CutMixBatch(ImageTensorOperation):
         >>> image_folder_dataset = image_folder_dataset.batch(5)
         >>> image_folder_dataset = image_folder_dataset.map(operations=cutmix_batch_op,
         ...                                                 input_columns=["image", "label"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_cut_mix_batch_c
@@ -823,9 +1048,9 @@ class CutOut(ImageTensorOperation):
 
     Args:
         length (int): The side length of each square patch, must be larger than 0.
-        num_patches (int, optional): Number of patches to be cut out of an image, must be larger than 0. Default: 1
+        num_patches (int, optional): Number of patches to be cut out of an image, must be larger than 0. Default: ``1``.
         is_hwc (bool, optional): Whether the input image is in HWC format.
-            True - HWC format, False - CHW format. Default: True
+            ``True`` - HWC format, ``False`` - CHW format. Default: ``True``.
 
     Raises:
         TypeError: If `length` is not of type integer.
@@ -839,9 +1064,17 @@ class CutOut(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.CutOut(80, num_patches=10)]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_cutout_new
@@ -863,8 +1096,9 @@ class Decode(ImageTensorOperation, PyTensorOperation):
     Supported image formats: JPEG, BMP, PNG, TIFF, GIF(need `to_pil=True` ), WEBP(need `to_pil=True` ).
 
     Args:
-        to_pil (bool, optional): Whether to decode the image to the PIL data type. If True
-            to the PIL data type, otherwise it will be decoded to the
+        to_pil (bool, optional): Whether to decode the image to the PIL data type. If ``True``,
+            the image will be decoded to the PIL data type, otherwise it will be decoded to the
+            NumPy data type. Default: ``False``.
 
     Raises:
         RuntimeError: If given tensor is not a 1D sequence.
@@ -872,18 +1106,26 @@ class Decode(ImageTensorOperation, PyTensorOperation):
         RuntimeError: If the input image is already decoded.
 
     Supported Platforms:
-        ``CPU``
+        ``CPU`` ``Ascend``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
         >>> # Eager usage
         >>> import numpy as np
         >>> raw_image = np.fromfile("/path/to/image/file", np.uint8)
         >>> decoded_image = vision.Decode()(raw_image)
         >>>
         >>> # Pipeline usage
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.RandomHorizontalFlip()]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_decode
@@ -910,8 +1152,43 @@ class Decode(ImageTensorOperation, PyTensorOperation):
                              "but got {0}.".format(img.ndim))
         return super().__call__(img)
 
+    @check_device_target
+    def device(self, device_target="CPU"):
+        """
+        Set the device for the current operator execution.
+
+        Args:
+            device_target (str, optional): The operator will be executed on this device. Currently supports
+                ``CPU`` and ``Ascend`` , where ``Ascend`` refers to Ascend910B device. Default: ``CPU`` .
+
+        Raises:
+            TypeError: If `device_target` is not of type str.
+            ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
+
+        Supported Platforms:
+            ``CPU`` ``Ascend``
+
+        Examples:
+            >>> import mindspore.dataset as ds
+            >>> import mindspore.dataset.vision as vision
+            >>> from mindspore.dataset.vision import Inter
+            >>>
+            >>> decode_op = vision.Decode().device("Ascend")
+            >>> resize_op = vision.Resize([100, 75], Inter.BICUBIC)
+            >>> transforms_list = [decode_op, resize_op]
+            >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+            >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
+            ...                                                 input_columns=["image"])
+
+        Tutorial Examples:
+            - `Illustration of vision transforms
+              <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
+        """
+        self.device_target = device_target
+        return self
+
     def parse(self):
-        return cde.DecodeOperation(True)
+        return cde.DecodeOperation(True, self.device_target)
 
     def _execute_py(self, img):
         """
@@ -937,9 +1214,17 @@ class Equalize(ImageTensorOperation, PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.Equalize()]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     def __init__(self):
@@ -972,10 +1257,10 @@ class Erase(ImageTensorOperation):
         left (int): Horizontal ordinate of the upper left corner of erased region.
         height (int): Height of erased region.
         width (int): Width of erased region.
-        value (Union[int, Sequence[int, int, int]], optional): Pixel value used to pad the erased area.
-            If int is provided, it will be used for all RGB channels.
+        value (Union[int, Sequence[int, int, int]], optional): Pixel value used to pad the erased area.
+            Default: ``0``. If int is provided, it will be used for all RGB channels.
             If Sequence[int, int, int] is provided, it will be used for R, G, B channels respectively.
-        inplace (bool, optional): Whether to apply erasing inplace. Default: False
+        inplace (bool, optional): Whether to apply erasing inplace. Default: ``False``.
 
     Raises:
         TypeError: If `top` is not of type int.
@@ -995,9 +1280,17 @@ class Erase(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.Erase(10,10,10,10)]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_erase
@@ -1033,6 +1326,8 @@ class FiveCrop(PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
         >>> import numpy
         >>> from mindspore.dataset.transforms import Compose
         >>>
@@ -1041,8 +1336,13 @@ class FiveCrop(PyTensorOperation):
         ...                           # 4D stack of 5 images
         ...                           lambda *images: numpy.stack([vision.ToTensor()(image) for image in images])])
         >>> # apply the transform to dataset through map function
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns="image")
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_five_crop
@@ -1066,17 +1366,22 @@ class FiveCrop(PyTensorOperation):
 
 
 class GaussianBlur(ImageTensorOperation):
-    """
+    r"""
     Blur input image with the specified Gaussian kernel.
 
     Args:
-        kernel_size (Union[int, Sequence[int]]):
-            If
-            is
-
-
-
-            If
+        kernel_size (Union[int, Sequence[int, int]]): The size of the Gaussian kernel. Must be positive and odd.
+            If the input type is int, the value will be used as both the width and height of the Gaussian kernel.
+            If the input type is Sequence[int, int], the two elements will be used as the width and height of the
+            Gaussian kernel respectively.
+        sigma (Union[float, Sequence[float, float]], optional): The standard deviation of the Gaussian kernel.
+            Must be positive.
+            If the input type is float, the value will be used as the standard deviation of both the width and
+            height of the Gaussian kernel.
+            If the input type is Sequence[float, float], the two elements will be used as the standard deviation
+            of the width and height of the Gaussian kernel respectively.
+            Default: ``None`` , the standard deviation of the Gaussian kernel will be obtained by the
+            formula :math:`((kernel\_size - 1) * 0.5 - 1) * 0.3 + 0.8` .
 
     Raises:
         TypeError: If `kernel_size` is not of type int or Sequence[int].
@@ -1089,9 +1394,17 @@ class GaussianBlur(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(to_pil=True), vision.GaussianBlur(3, 3)]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_gaussian_blur
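The rewritten GaussianBlur docstring above documents the fallback used when `sigma` is ``None``. A small sketch of that formula for reference; the helper name is ours, not part of the MindSpore API.

    def default_gaussian_sigma(kernel_size: int) -> float:
        # Documented fallback: ((kernel_size - 1) * 0.5 - 1) * 0.3 + 0.8
        return ((kernel_size - 1) * 0.5 - 1) * 0.3 + 0.8

    print(default_gaussian_sigma(3))  # 0.8
    print(default_gaussian_sigma(5))  # 1.1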
@@ -1116,25 +1429,32 @@ class Grayscale(PyTensorOperation):
     Convert the input PIL Image to grayscale.
 
     Args:
-        num_output_channels (int): The number of channels desired for the output image, must be 1 or 3
-            If 3 is provided, the returned image will have 3 identical RGB channels. Default: 1
+        num_output_channels (int): The number of channels desired for the output image, must be ``1`` or ``3``.
+            If ``3`` is provided, the returned image will have 3 identical RGB channels. Default: ``1``.
 
     Raises:
         TypeError: If `num_output_channels` is not of type integer.
-        ValueError: If `num_output_channels` is not 1 or 3
+        ValueError: If `num_output_channels` is not ``1`` or ``3``.
 
     Supported Platforms:
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
         >>> from mindspore.dataset.transforms import Compose
         >>>
         >>> transforms_list = Compose([vision.Decode(to_pil=True),
         ...                            vision.Grayscale(3),
         ...                            vision.ToTensor()])
         >>> # apply the transform to dataset through map function
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns="image")
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_num_channels
@@ -1168,9 +1488,17 @@ class HorizontalFlip(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(to_pil=True), vision.HorizontalFlip()]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     def __init__(self):
@@ -1186,8 +1514,8 @@ class HsvToRgb(PyTensorOperation):
     Convert the input numpy.ndarray images from HSV to RGB.
 
     Args:
-        is_hwc (bool): If True
-            Otherwise, it is in shape of <C, H, W> or <N, C, H, W>. Default: False
+        is_hwc (bool): If ``True``, means the input image is in shape of <H, W, C> or <N, H, W, C>.
+            Otherwise, it is in shape of <C, H, W> or <N, C, H, W>. Default: ``False``.
 
     Raises:
         TypeError: If `is_hwc` is not of type bool.
@@ -1196,6 +1524,8 @@ class HsvToRgb(PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
         >>> from mindspore.dataset.transforms import Compose
         >>>
         >>> transforms_list = Compose([vision.Decode(to_pil=True),
@@ -1203,8 +1533,13 @@ class HsvToRgb(PyTensorOperation):
         ...                            vision.ToTensor(),
         ...                            vision.HsvToRgb()])
         >>> # apply the transform to dataset through map function
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns="image")
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_hsv_to_rgb
@@ -1233,21 +1568,30 @@ class HWC2CHW(ImageTensorOperation):
     If the input image is of shape <H, W>, it will remain unchanged.
 
     Note:
-        This operation
+        This operation is executed on the CPU by default, but it is also supported
+        to be executed on the GPU or Ascend via heterogeneous acceleration.
 
     Raises:
         RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.
 
     Supported Platforms:
-        ``CPU``
+        ``CPU`` ``GPU`` ``Ascend``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(),
         ...                    vision.RandomHorizontalFlip(0.75),
         ...                    vision.RandomCrop(512),
         ...                    vision.HWC2CHW()]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     def __init__(self):
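As a quick illustration of the layout change HWC2CHW performs, here is a NumPy sketch of the equivalent rearrangement (not the operator itself):

    import numpy as np

    img_hwc = np.zeros((224, 224, 3), dtype=np.uint8)   # <H, W, C> input
    img_chw = np.transpose(img_hwc, (2, 0, 1))          # the same rearrangement HWC2CHW applies
    print(img_chw.shape)                                 # (3, 224, 224)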
@@ -1261,18 +1605,29 @@ class HWC2CHW(ImageTensorOperation):
 
 class Invert(ImageTensorOperation, PyTensorOperation):
     """
-
+    Invert the colors of the input RGB image.
+
+    For each pixel in the image, if the original pixel value is `pixel`,
+    the inverted pixel value will be `255 - pixel`.
 
     Raises:
-        RuntimeError: If
+        RuntimeError: If the input image is not in shape of <H, W, C>.
 
     Supported Platforms:
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
        >>> transforms_list = [vision.Decode(), vision.Invert()]
        >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
        ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     def __init__(self):
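The rewritten Invert docstring above now states the per-pixel rule explicitly; a one-line NumPy sketch of the same arithmetic:

    import numpy as np

    pixels = np.array([0, 128, 255], dtype=np.uint8)
    inverted = 255 - pixels          # per the docstring: inverted value is 255 - pixel
    print(inverted)                  # [255 127   0]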
@@ -1316,6 +1671,8 @@ class LinearTransformation(PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
         >>> import numpy as np
         >>> from mindspore.dataset.transforms import Compose
         >>>
@@ -1328,8 +1685,13 @@ class LinearTransformation(PyTensorOperation):
         ...                            vision.ToTensor(),
         ...                            vision.LinearTransformation(transformation_matrix, mean_vector)])
         >>> # apply the transform to dataset through map function
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns="image")
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_linear_transform
@@ -1364,9 +1726,9 @@ class MixUp(PyTensorOperation):
     Args:
         batch_size (int): The number of images in a batch.
         alpha (float): The alpha and beta parameter for the Beta distribution.
-        is_single (bool, optional): If True
+        is_single (bool, optional): If ``True``, it will randomly mix up [img0, ..., img(n-1), img(n)] with
             [img1, ..., img(n), img0] in each batch. Otherwise, it will randomly mix up images with the
-            output of the previous batch. Default: True
+            output of the previous batch. Default: ``True``.
 
     Raises:
         TypeError: If `batch_size` is not of type integer.
@@ -1379,6 +1741,11 @@ class MixUp(PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>> import mindspore.dataset.transforms as transforms
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> # first decode the image
         >>> image_folder_dataset = image_folder_dataset.map(operations=vision.Decode(),
         ...                                                 input_columns="image")
@@ -1390,8 +1757,12 @@ class MixUp(PyTensorOperation):
         >>> image_folder_dataset = image_folder_dataset.batch(batch_size=batch_size)
         >>> # finally mix up the images and labels
         >>> image_folder_dataset = image_folder_dataset.map(
-        ...     operations=
+        ...     operations=vision.MixUp(batch_size=batch_size, alpha=0.2),
         ...     input_columns=["image", "label"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_mix_up
@@ -1437,7 +1808,8 @@ class MixUpBatch(ImageTensorOperation):
     Note that you need to make labels into one-hot format and batched before calling this operation.
 
     Args:
-        alpha (float, optional): Hyperparameter of beta distribution. The value must be positive.
+        alpha (float, optional): Hyperparameter of beta distribution. The value must be positive.
+            Default: ``1.0``.
 
     Raises:
         TypeError: If `alpha` is not of type float.
@@ -1448,6 +1820,11 @@ class MixUpBatch(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>> import mindspore.dataset.transforms as transforms
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> onehot_op = transforms.OneHot(num_classes=10)
         >>> image_folder_dataset= image_folder_dataset.map(operations=onehot_op,
         ...                                                input_columns=["label"])
@@ -1455,6 +1832,10 @@ class MixUpBatch(ImageTensorOperation):
         >>> image_folder_dataset = image_folder_dataset.batch(5)
         >>> image_folder_dataset = image_folder_dataset.map(operations=mixup_batch_op,
         ...                                                 input_columns=["image", "label"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_mix_up_batch_c
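MixUpBatch (like CutMixBatch above) expects one-hot labels and a batched dataset before it is mapped. A minimal ordering sketch under those assumptions; the path is a placeholder and a Resize step is added so decoded images share a shape and can be batched:

    import mindspore.dataset as ds
    import mindspore.dataset.transforms as transforms
    import mindspore.dataset.vision as vision

    dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")      # placeholder path
    dataset = dataset.map(operations=[vision.Decode(), vision.Resize((224, 224))],  # decode + equal sizes
                          input_columns=["image"])
    dataset = dataset.map(operations=transforms.OneHot(num_classes=10),             # one-hot labels first
                          input_columns=["label"])
    dataset = dataset.batch(5)                                                       # then batch
    dataset = dataset.map(operations=vision.MixUpBatch(alpha=1.0),                  # finally mix up the batch
                          input_columns=["image", "label"])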
@@ -1473,7 +1854,8 @@ class Normalize(ImageTensorOperation):
     the input image with: output[channel] = (input[channel] - mean[channel]) / std[channel], where channel >= 1.
 
     Note:
-        This operation
+        This operation is executed on the CPU by default, but it is also supported
+        to be executed on the GPU or Ascend via heterogeneous acceleration.
 
     Args:
         mean (sequence): List or tuple of mean values for each channel, with respect to channel order.
@@ -1481,7 +1863,7 @@ class Normalize(ImageTensorOperation):
         std (sequence): List or tuple of standard deviations for each channel, with respect to channel order.
             The standard deviation values must be in range (0.0, 255.0].
         is_hwc (bool, optional): Whether the input image is HWC.
-            True - HWC format, False - CHW format. Default: True
+            ``True`` - HWC format, ``False`` - CHW format. Default: ``True``.
 
     Raises:
         TypeError: If `mean` is not of type sequence.
@@ -1492,14 +1874,22 @@ class Normalize(ImageTensorOperation):
         RuntimeError: If given tensor format is not <H, W> or <..., H, W, C>.
 
     Supported Platforms:
-        ``CPU``
+        ``CPU`` ``GPU`` ``Ascend``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> decode_op = vision.Decode() ## Decode output is expected to be HWC format
         >>> normalize_op = vision.Normalize(mean=[121.0, 115.0, 100.0], std=[70.0, 68.0, 71.0], is_hwc=True)
         >>> transforms_list = [decode_op, normalize_op]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_normalize
@@ -1511,8 +1901,45 @@ class Normalize(ImageTensorOperation):
         self.random = False
         self.implementation = Implementation.C
 
+    @check_device_target
+    def device(self, device_target="CPU"):
+        """
+        Set the device for the current operator execution.
+
+        Args:
+            device_target (str, optional): The operator will be executed on this device. Currently supports
+                ``CPU`` and ``Ascend`` , where ``Ascend`` refers to Ascend910B device. Default: ``CPU`` .
+
+        Raises:
+            TypeError: If `device_target` is not of type str.
+            ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
+
+        Supported Platforms:
+            ``CPU`` ``Ascend``
+
+        Examples:
+            >>> import mindspore.dataset as ds
+            >>> import mindspore.dataset.vision as vision
+            >>> from mindspore.dataset.vision import Inter
+            >>>
+            >>> decode_op = vision.Decode()
+            >>> resize_op = vision.Resize([100, 75], Inter.BICUBIC)
+            >>> transforms_list = [decode_op, resize_op]
+            >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+            >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
+            ...                                                 input_columns=["image"])
+            >>> normalize_op = vision.Normalize(mean=[121.0, 115.0, 100.0], std=[70.0, 68.0, 71.0]).device("Ascend")
+            >>> image_folder_dataset = image_folder_dataset.map(operations=normalize_op, input_columns=["image"])
+
+        Tutorial Examples:
+            - `Illustration of vision transforms
+              <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
+        """
+        self.device_target = device_target
+        return self
+
     def parse(self):
-        return cde.NormalizeOperation(self.mean, self.std, self.is_hwc)
+        return cde.NormalizeOperation(self.mean, self.std, self.is_hwc, self.device_target)
 
 
 class NormalizePad(ImageTensorOperation):
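The Normalize docstring keeps the per-channel formula output[channel] = (input[channel] - mean[channel]) / std[channel]. A NumPy sketch of that computation for an HWC image, for illustration only, reusing the mean/std values from the example:

    import numpy as np

    image = np.random.randint(0, 256, size=(4, 4, 3)).astype(np.float32)   # hypothetical HWC input
    mean = np.array([121.0, 115.0, 100.0], dtype=np.float32)
    std = np.array([70.0, 68.0, 71.0], dtype=np.float32)
    normalized = (image - mean) / std   # broadcasts over the channel axis, matching is_hwc=True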
@@ -1524,9 +1951,9 @@ class NormalizePad(ImageTensorOperation):
             The mean values must be in range (0.0, 255.0].
         std (sequence): List or tuple of standard deviations for each channel, with respect to channel order.
             The standard deviation values must be in range (0.0, 255.0].
-        dtype (str, optional): Set the output data type of normalized image. Default: "float32"
+        dtype (str, optional): Set the output data type of normalized image. Default: ``"float32"``.
         is_hwc (bool, optional): Whether the input image is HWC.
-            True - HWC format, False - CHW format. Default: True
+            ``True`` - HWC format, ``False`` - CHW format. Default: ``True``.
 
     Raises:
         TypeError: If `mean` is not of type sequence.
@@ -1541,6 +1968,10 @@ class NormalizePad(ImageTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> decode_op = vision.Decode()
         >>> normalize_pad_op = vision.NormalizePad(mean=[121.0, 115.0, 100.0],
         ...                                        std=[70.0, 68.0, 71.0],
@@ -1577,20 +2008,20 @@ class Pad(ImageTensorOperation, PyTensorOperation):
             If 4 values are provided as a list or tuple, it pads the left, top, right and bottom respectively.
             The pad values must be non-negative.
         fill_value (Union[int, tuple[int]], optional): The pixel intensity of the borders, only valid for
-            padding_mode Border.CONSTANT
+            `padding_mode` ``Border.CONSTANT``. If it is a 3-tuple, it is used to fill R, G, B channels respectively.
             If it is an integer, it is used for all RGB channels.
-            The fill_value values must be in range [0, 255]. Default: 0
-        padding_mode (Border, optional): The method of padding. Default: Border.CONSTANT
-
+            The fill_value values must be in range [0, 255]. Default: ``0``.
+        padding_mode (Border, optional): The method of padding. Default: ``Border.CONSTANT``. Can be
+            ``Border.CONSTANT``, ``Border.EDGE``, ``Border.REFLECT``, ``Border.SYMMETRIC``.
 
-            - Border.CONSTANT, means it fills the border with constant values.
+            - ``Border.CONSTANT`` , means it fills the border with constant values.
 
-            - Border.EDGE, means it pads with the last value on the edge.
+            - ``Border.EDGE`` , means it pads with the last value on the edge.
 
-            - Border.REFLECT, means it reflects the values on the edge omitting the last
+            - ``Border.REFLECT`` , means it reflects the values on the edge omitting the last
               value of edge.
 
-            - Border.SYMMETRIC, means it reflects the values on the edge repeating the last
+            - ``Border.SYMMETRIC`` , means it reflects the values on the edge repeating the last
               value of edge.
 
     Raises:
@@ -1605,9 +2036,17 @@ class Pad(ImageTensorOperation, PyTensorOperation):
         ``CPU``
 
     Examples:
+        >>> import mindspore.dataset as ds
+        >>> import mindspore.dataset.vision as vision
+        >>>
+        >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
         >>> transforms_list = [vision.Decode(), vision.Pad([100, 100, 100, 100])]
         >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
         ...                                                 input_columns=["image"])
+
+    Tutorial Examples:
+        - `Illustration of vision transforms
+          <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
     """
 
     @check_pad
@@ -1648,18 +2087,18 @@ class PadToSize(ImageTensorOperation):
|
|
|
1648
2087
|
offset (Union[int, Sequence[int, int]], optional): The lengths to pad on the top and left.
|
|
1649
2088
|
If int is provided, pad both top and left borders with this value.
|
|
1650
2089
|
If Sequence[int, int] is provided, is should be in order of [top, left].
|
|
1651
|
-
Default: None
|
|
2090
|
+
Default: ``None``, means to pad symmetrically, keeping the original image in center.
|
|
1652
2091
|
fill_value (Union[int, tuple[int, int, int]], optional): Pixel value used to pad the borders,
|
|
1653
|
-
only valid when `padding_mode` is Border.CONSTANT
|
|
2092
|
+
only valid when `padding_mode` is ``Border.CONSTANT``.
|
|
1654
2093
|
If int is provided, it will be used for all RGB channels.
|
|
1655
2094
|
If tuple[int, int, int] is provided, it will be used for R, G, B channels respectively. Default: 0.
|
|
1656
|
-
padding_mode (Border, optional): Method of padding. It can be Border.CONSTANT
|
|
1657
|
-
or Border.SYMMETRIC. Default: Border.CONSTANT
|
|
2095
|
+
padding_mode (Border, optional): Method of padding. It can be ``Border.CONSTANT``, ``Border.EDGE``,
|
|
2096
|
+
``Border.REFLECT`` or Border.SYMMETRIC. Default: ``Border.CONSTANT``.
|
|
1658
2097
|
|
|
1659
|
-
- Border.CONSTANT, pads with a constant value.
|
|
1660
|
-
- Border.EDGE, pads with the last value at the edge of the image.
|
|
1661
|
-
- Border.REFLECT, pads with reflection of the image omitting the last value on the edge.
|
|
1662
|
-
- Border.SYMMETRIC, pads with reflection of the image repeating the last value on the edge.
|
|
2098
|
+
- ``Border.CONSTANT`` , pads with a constant value.
|
|
2099
|
+
- ``Border.EDGE`` , pads with the last value at the edge of the image.
|
|
2100
|
+
- ``Border.REFLECT`` , pads with reflection of the image omitting the last value on the edge.
|
|
2101
|
+
- ``Border.SYMMETRIC`` , pads with reflection of the image repeating the last value on the edge.
|
|
1663
2102
|
|
|
1664
2103
|
Raises:
|
|
1665
2104
|
TypeError: If `size` is not of type int or Sequence[int, int].
|
|
@@ -1675,9 +2114,17 @@ class PadToSize(ImageTensorOperation):
|
|
|
1675
2114
|
``CPU``
|
|
1676
2115
|
|
|
1677
2116
|
Examples:
|
|
2117
|
+
>>> import mindspore.dataset as ds
|
|
2118
|
+
>>> import mindspore.dataset.vision as vision
|
|
2119
|
+
>>>
|
|
2120
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
1678
2121
|
>>> transforms_list = [vision.Decode(), vision.PadToSize([256, 256])]
|
|
1679
2122
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
1680
2123
|
... input_columns=["image"])
|
|
2124
|
+
|
|
2125
|
+
Tutorial Examples:
|
|
2126
|
+
- `Illustration of vision transforms
|
|
2127
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
1681
2128
|
"""
|
|
1682
2129
|
|
|
1683
2130
|
@check_pad_to_size
|
|
@@ -1707,28 +2154,21 @@ class Perspective(ImageTensorOperation, PyTensorOperation):
|
|
|
1707
2154
|
end_points (Sequence[Sequence[int, int]]): Sequence of the ending point coordinates, containing four
|
|
1708
2155
|
two-element subsequences, corresponding to [top-left, top-right, bottom-right, bottom-left] of the
|
|
1709
2156
|
quadrilateral in the target image.
|
|
1710
|
-
interpolation (Inter, optional):
|
|
1711
|
-
|
|
1712
|
-
|
|
1713
|
-
- Inter.BILINEAR, bilinear interpolation.
|
|
1714
|
-
- Inter.LINEAR, linear interpolation, the same as Inter.BILINEAR.
|
|
1715
|
-
- Inter.NEAREST, nearest-neighbor interpolation.
|
|
1716
|
-
- Inter.BICUBIC, bicubic interpolation.
|
|
1717
|
-
- Inter.CUBIC, cubic interpolation, the same as Inter.BICUBIC.
|
|
1718
|
-
- Inter.PILCUBIC, cubic interpolation based on the implementation of Pillow,
|
|
1719
|
-
only numpy.ndarray input is supported.
|
|
1720
|
-
- Inter.AREA, pixel area interpolation, only numpy.ndarray input is supported.
|
|
2157
|
+
interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
|
|
2158
|
+
Default: ``Inter.BILINEAR``.
|
|
1721
2159
|
|
|
1722
2160
|
Raises:
|
|
1723
2161
|
TypeError: If `start_points` is not of type Sequence[Sequence[int, int]].
|
|
1724
2162
|
TypeError: If `end_points` is not of type Sequence[Sequence[int, int]].
|
|
1725
|
-
TypeError: If `interpolation` is not of type :class
|
|
2163
|
+
TypeError: If `interpolation` is not of type :class:`~.vision.Inter` .
|
|
1726
2164
|
RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.
|
|
1727
2165
|
|
|
1728
2166
|
Supported Platforms:
|
|
1729
2167
|
``CPU``
|
|
1730
2168
|
|
|
1731
2169
|
Examples:
|
|
2170
|
+
>>> import mindspore.dataset as ds
|
|
2171
|
+
>>> import mindspore.dataset.vision as vision
|
|
1732
2172
|
>>> from mindspore.dataset.transforms import Compose
|
|
1733
2173
|
>>> from mindspore.dataset.vision import Inter
|
|
1734
2174
|
>>>
|
|
@@ -1737,8 +2177,13 @@ class Perspective(ImageTensorOperation, PyTensorOperation):
|
|
|
1737
2177
|
>>> transforms_list = Compose([vision.Decode(),
|
|
1738
2178
|
... vision.Perspective(start_points, end_points, Inter.BILINEAR)])
|
|
1739
2179
|
>>> # apply the transform to dataset through map function
|
|
2180
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
1740
2181
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
1741
2182
|
... input_columns="image")
|
|
2183
|
+
|
|
2184
|
+
Tutorial Examples:
|
|
2185
|
+
- `Illustration of vision transforms
|
|
2186
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
1742
2187
|
"""
|
|
1743
2188
|
|
|
1744
2189
|
@check_perspective
|
|
@@ -1785,6 +2230,19 @@ class Posterize(ImageTensorOperation):
|
|
|
1785
2230
|
TypeError: If `bits` is not of type int.
|
|
1786
2231
|
ValueError: If `bits` is not in range [0, 8].
|
|
1787
2232
|
RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.
|
|
2233
|
+
|
|
2234
|
+
Examples:
|
|
2235
|
+
>>> import mindspore.dataset as ds
|
|
2236
|
+
>>> import mindspore.dataset.vision as vision
|
|
2237
|
+
>>>
|
|
2238
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2239
|
+
>>> transforms_list = [vision.Decode(), vision.Posterize(4)]
|
|
2240
|
+
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2241
|
+
... input_columns=["image"])
|
|
2242
|
+
|
|
2243
|
+
Tutorial Examples:
|
|
2244
|
+
- `Illustration of vision transforms
|
|
2245
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
1788
2246
|
"""
|
|
1789
2247
|
|
|
1790
2248
|
@check_posterize
|
|
@@ -1806,21 +2264,15 @@ class RandAugment(ImageTensorOperation):
|
|
|
1806
2264
|
Only support 3-channel RGB image.
|
|
1807
2265
|
|
|
1808
2266
|
Args:
|
|
1809
|
-
num_ops (int, optional): Number of augmentation transformations to apply sequentially. Default: 2
|
|
2267
|
+
num_ops (int, optional): Number of augmentation transformations to apply sequentially. Default: ``2``.
|
|
1810
2268
|
magnitude (int, optional): Magnitude for all the transformations, must be smaller than
|
|
1811
|
-
`num_magnitude_bins`. Default: 9
|
|
2269
|
+
`num_magnitude_bins`. Default: ``9``.
|
|
1812
2270
|
num_magnitude_bins (int, optional): The number of different magnitude values,
|
|
1813
|
-
must be no less than 2. Default: 31
|
|
1814
|
-
interpolation (Inter, optional): Image interpolation method
|
|
1815
|
-
|
|
1816
|
-
|
|
1817
|
-
- Inter.NEAREST, nearest-neighbor interpolation.
|
|
1818
|
-
- Inter.BILINEAR, bilinear interpolation.
|
|
1819
|
-
- Inter.BICUBIC, bicubic interpolation.
|
|
1820
|
-
- Inter.AREA, pixel area interpolation.
|
|
1821
|
-
|
|
2271
|
+
must be no less than 2. Default: ``31``.
|
|
2272
|
+
interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
|
|
2273
|
+
Default: ``Inter.NEAREST``.
|
|
1822
2274
|
fill_value (Union[int, tuple[int, int, int]], optional): Pixel fill value for the area outside the
|
|
1823
|
-
transformed image, must be in range of [0, 255]. Default: 0
|
|
2275
|
+
transformed image, must be in range of [0, 255]. Default: ``0``.
|
|
1824
2276
|
If int is provided, pad all RGB channels with this value.
|
|
1825
2277
|
If tuple[int, int, int] is provided, pad R, G, B channels respectively.
|
|
1826
2278
|
|
|
@@ -1831,7 +2283,7 @@ class RandAugment(ImageTensorOperation):
|
|
|
1831
2283
|
ValueError: If `magnitude` is not positive.
|
|
1832
2284
|
TypeError: If `num_magnitude_bins` is not of type int.
|
|
1833
2285
|
ValueError: If `num_magnitude_bins` is less than 2.
|
|
1834
|
-
TypeError: If `interpolation` not of type :class
|
|
2286
|
+
TypeError: If `interpolation` not of type :class:`~.vision.Inter` .
|
|
1835
2287
|
TypeError: If `fill_value` is not of type int or tuple[int, int, int].
|
|
1836
2288
|
ValueError: If `fill_value` is not in range of [0, 255].
|
|
1837
2289
|
RuntimeError: If shape of the input image is not <H, W, C>.
|
|
@@ -1840,8 +2292,16 @@ class RandAugment(ImageTensorOperation):
|
|
|
1840
2292
|
``CPU``
|
|
1841
2293
|
|
|
1842
2294
|
Examples:
|
|
2295
|
+
>>> import mindspore.dataset as ds
|
|
2296
|
+
>>> import mindspore.dataset.vision as vision
|
|
2297
|
+
>>>
|
|
2298
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
1843
2299
|
>>> transforms_list = [vision.Decode(), vision.RandAugment()]
|
|
1844
2300
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list, input_columns=["image"])
|
|
2301
|
+
|
|
2302
|
+
Tutorial Examples:
|
|
2303
|
+
- `Illustration of vision transforms
|
|
2304
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
1845
2305
|
"""
|
|
1846
2306
|
|
|
1847
2307
|
@check_rand_augment
|
|
@@ -1867,10 +2327,10 @@ class RandomAdjustSharpness(ImageTensorOperation):
|
|
|
1867
2327
|
|
|
1868
2328
|
Args:
|
|
1869
2329
|
degree (float): Sharpness adjustment degree, which must be non negative.
|
|
1870
|
-
Degree of 0.0 gives a blurred image, degree of 1.0 gives the original image,
|
|
1871
|
-
and degree of 2.0 increases the sharpness by a factor of 2.
|
|
2330
|
+
Degree of ``0.0`` gives a blurred image, degree of ``1.0`` gives the original image,
|
|
2331
|
+
and degree of ``2.0`` increases the sharpness by a factor of 2.
|
|
1872
2332
|
prob (float, optional): Probability of the image being sharpness adjusted, which
|
|
1873
|
-
must be in range of [0.0, 1.0]. Default: 0.5
|
|
2333
|
+
must be in range of [0.0, 1.0]. Default: ``0.5``.
|
|
1874
2334
|
|
|
1875
2335
|
Raises:
|
|
1876
2336
|
TypeError: If `degree` is not of type float.
|
|
@@ -1883,9 +2343,17 @@ class RandomAdjustSharpness(ImageTensorOperation):
|
|
|
1883
2343
|
``CPU``
|
|
1884
2344
|
|
|
1885
2345
|
Examples:
|
|
2346
|
+
>>> import mindspore.dataset as ds
|
|
2347
|
+
>>> import mindspore.dataset.vision as vision
|
|
2348
|
+
>>>
|
|
2349
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
1886
2350
|
>>> transforms_list = [vision.Decode(), vision.RandomAdjustSharpness(2.0, 0.5)]
|
|
1887
2351
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
1888
2352
|
... input_columns=["image"])
|
|
2353
|
+
|
|
2354
|
+
Tutorial Examples:
|
|
2355
|
+
- `Illustration of vision transforms
|
|
2356
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
1889
2357
|
"""
|
|
1890
2358
|
|
|
1891
2359
|
@check_random_adjust_sharpness
|
|
@@ -1908,7 +2376,7 @@ class RandomAffine(ImageTensorOperation, PyTensorOperation):
|
|
|
1908
2376
|
If `degrees` is a number, the range will be (-degrees, degrees).
|
|
1909
2377
|
If `degrees` is a sequence, it should be (min, max).
|
|
1910
2378
|
translate (sequence, optional): Sequence (tx_min, tx_max, ty_min, ty_max) of minimum/maximum translation in
|
|
1911
|
-
x(horizontal) and y(vertical) directions, range [-1.0, 1.0]. Default: None
|
|
2379
|
+
x(horizontal) and y(vertical) directions, range [-1.0, 1.0]. Default: ``None``.
|
|
1912
2380
|
The horizontal and vertical shift is selected randomly from the range:
|
|
1913
2381
|
(tx_min*width, tx_max*width) and (ty_min*height, ty_max*height), respectively.
|
|
1914
2382
|
If a tuple or list of size 2, then a translate parallel to the X axis in the range of
|
|
@@ -1916,9 +2384,9 @@ class RandomAffine(ImageTensorOperation, PyTensorOperation):
|
|
|
1916
2384
|
If a tuple or list of size 4, then a translate parallel to the X axis in the range of
|
|
1917
2385
|
(translate[0], translate[1]) and a translate parallel to the Y axis in the range of
|
|
1918
2386
|
(translate[2], translate[3]) are applied.
|
|
1919
|
-
If None
|
|
2387
|
+
If ``None``, no translation is applied.
|
|
1920
2388
|
scale (sequence, optional): Scaling factor interval, which must be non negative.
|
|
1921
|
-
Default: None
|
|
2389
|
+
Default: ``None``, original scale is used.
|
|
1922
2390
|
shear (Union[float, Sequence[float, float], Sequence[float, float, float, float]], optional):
|
|
1923
2391
|
Range of shear factor to select from.
|
|
1924
2392
|
If float is provided, a shearing parallel to X axis with a factor selected from
|
|
@@ -1927,28 +2395,19 @@ class RandomAffine(ImageTensorOperation, PyTensorOperation):
|
|
|
1927
2395
|
from ( `shear` [0], `shear` [1]) will be applied.
|
|
1928
2396
|
If Sequence[float, float, float, float] is provided, a shearing parallel to X axis with a factor selected
|
|
1929
2397
|
from ( `shear` [0], `shear` [1]) and a shearing parallel to Y axis with a factor selected from
|
|
1930
|
-
( `shear` [2], `shear` [3]) will be applied. Default: None
|
|
1931
|
-
resample (Inter, optional):
|
|
1932
|
-
|
|
1933
|
-
|
|
1934
|
-
- Inter.BILINEAR, means resample method is bilinear interpolation.
|
|
1935
|
-
|
|
1936
|
-
- Inter.NEAREST, means resample method is nearest-neighbor interpolation.
|
|
1937
|
-
|
|
1938
|
-
- Inter.BICUBIC, means resample method is bicubic interpolation.
|
|
1939
|
-
|
|
1940
|
-
- Inter.AREA, means resample method is pixel area interpolation.
|
|
1941
|
-
|
|
2398
|
+
( `shear` [2], `shear` [3]) will be applied. Default: ``None``, means no shearing.
|
|
2399
|
+
resample (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
|
|
2400
|
+
Default: ``Inter.NEAREST``.
|
|
1942
2401
|
fill_value (Union[int, tuple[int]], optional): Optional fill_value to fill the area outside the transform
|
|
1943
2402
|
in the output image. There must be three elements in tuple and the value of single element is [0, 255].
|
|
1944
|
-
Default: 0
|
|
2403
|
+
Default: ``0``, filling is performed.
|
|
1945
2404
|
|
|
1946
2405
|
Raises:
|
|
1947
2406
|
TypeError: If `degrees` is not of type int, float or sequence.
|
|
1948
2407
|
TypeError: If `translate` is not of type sequence.
|
|
1949
2408
|
TypeError: If `scale` is not of type sequence.
|
|
1950
2409
|
TypeError: If `shear` is not of type int, float or sequence.
|
|
1951
|
-
TypeError: If `resample` is not of type :class
|
|
2410
|
+
TypeError: If `resample` is not of type :class:`~.vision.Inter` .
|
|
1952
2411
|
TypeError: If `fill_value` is not of type int or tuple[int].
|
|
1953
2412
|
ValueError: If `degrees` is negative.
|
|
1954
2413
|
ValueError: If `translate` is not in range [-1.0, 1.0].
|
|
@@ -1960,15 +2419,23 @@ class RandomAffine(ImageTensorOperation, PyTensorOperation):
|
|
|
1960
2419
|
``CPU``
|
|
1961
2420
|
|
|
1962
2421
|
Examples:
|
|
2422
|
+
>>> import mindspore.dataset as ds
|
|
2423
|
+
>>> import mindspore.dataset.vision as vision
|
|
1963
2424
|
>>> from mindspore.dataset.vision import Inter
|
|
2425
|
+
>>>
|
|
1964
2426
|
>>> decode_op = vision.Decode()
|
|
1965
2427
|
>>> random_affine_op = vision.RandomAffine(degrees=15,
|
|
1966
2428
|
... translate=(-0.1, 0.1, 0, 0),
|
|
1967
2429
|
... scale=(0.9, 1.1),
|
|
1968
2430
|
... resample=Inter.NEAREST)
|
|
1969
2431
|
>>> transforms_list = [decode_op, random_affine_op]
|
|
2432
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
1970
2433
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
1971
2434
|
... input_columns=["image"])
|
|
2435
|
+
|
|
2436
|
+
Tutorial Examples:
|
|
2437
|
+
- `Illustration of vision transforms
|
|
2438
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
1972
2439
|
"""
|
|
1973
2440
|
|
|
1974
2441
|
@check_random_affine
|
|
@@ -2046,11 +2513,11 @@ class RandomAutoContrast(ImageTensorOperation):
|
|
|
2046
2513
|
|
|
2047
2514
|
Args:
|
|
2048
2515
|
cutoff (float, optional): Percent of the lightest and darkest pixels to be cut off from
|
|
2049
|
-
the histogram of the input image. The value must be in range of [0.0, 50.0]. Default: 0.0
|
|
2516
|
+
the histogram of the input image. The value must be in range of [0.0, 50.0]. Default: ``0.0``.
|
|
2050
2517
|
ignore (Union[int, sequence], optional): The background pixel values to be ignored, each of
|
|
2051
|
-
which must be in range of [0, 255]. Default: None
|
|
2518
|
+
which must be in range of [0, 255]. Default: ``None``.
|
|
2052
2519
|
prob (float, optional): Probability of the image being automatically contrasted, which
|
|
2053
|
-
must be in range of [0.0, 1.0]. Default: 0.5
|
|
2520
|
+
must be in range of [0.0, 1.0]. Default: ``0.5``.
|
|
2054
2521
|
|
|
2055
2522
|
Raises:
|
|
2056
2523
|
TypeError: If `cutoff` is not of type float.
|
|
@@ -2065,9 +2532,17 @@ class RandomAutoContrast(ImageTensorOperation):
|
|
|
2065
2532
|
``CPU``
|
|
2066
2533
|
|
|
2067
2534
|
Examples:
|
|
2535
|
+
>>> import mindspore.dataset as ds
|
|
2536
|
+
>>> import mindspore.dataset.vision as vision
|
|
2537
|
+
>>>
|
|
2538
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2068
2539
|
>>> transforms_list = [vision.Decode(), vision.RandomAutoContrast(cutoff=0.0, ignore=None, prob=0.5)]
|
|
2069
2540
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2070
2541
|
... input_columns=["image"])
|
|
2542
|
+
|
|
2543
|
+
Tutorial Examples:
|
|
2544
|
+
- `Illustration of vision transforms
|
|
2545
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2071
2546
|
"""
|
|
2072
2547
|
|
|
2073
2548
|
@check_random_auto_contrast
|
|
@@ -2094,7 +2569,7 @@ class RandomColor(ImageTensorOperation, PyTensorOperation):
|
|
|
2094
2569
|
Args:
|
|
2095
2570
|
degrees (Sequence[float], optional): Range of random color adjustment degrees, which must be non-negative.
|
|
2096
2571
|
It should be in (min, max) format. If min=max, then it is a
|
|
2097
|
-
single fixed magnitude operation. Default: (0.1, 1.9)
|
|
2572
|
+
single fixed magnitude operation. Default: ``(0.1, 1.9)``.
|
|
2098
2573
|
|
|
2099
2574
|
Raises:
|
|
2100
2575
|
TypeError: If `degrees` is not of type Sequence[float].
|
|
@@ -2105,9 +2580,17 @@ class RandomColor(ImageTensorOperation, PyTensorOperation):
|
|
|
2105
2580
|
``CPU``
|
|
2106
2581
|
|
|
2107
2582
|
Examples:
|
|
2583
|
+
>>> import mindspore.dataset as ds
|
|
2584
|
+
>>> import mindspore.dataset.vision as vision
|
|
2585
|
+
>>>
|
|
2586
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2108
2587
|
>>> transforms_list = [vision.Decode(), vision.RandomColor((0.5, 2.0))]
|
|
2109
2588
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2110
2589
|
... input_columns=["image"])
|
|
2590
|
+
|
|
2591
|
+
Tutorial Examples:
|
|
2592
|
+
- `Illustration of vision transforms
|
|
2593
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2111
2594
|
"""
|
|
2112
2595
|
|
|
2113
2596
|
@check_positive_degrees
|
|
@@ -2137,22 +2620,23 @@ class RandomColorAdjust(ImageTensorOperation, PyTensorOperation):
|
|
|
2137
2620
|
Randomly adjust the brightness, contrast, saturation, and hue of the input image.
|
|
2138
2621
|
|
|
2139
2622
|
Note:
|
|
2140
|
-
This operation
|
|
2623
|
+
This operation is executed on the CPU by default, but it is also supported
|
|
2624
|
+
to be executed on the GPU or Ascend via heterogeneous acceleration.
|
|
2141
2625
|
|
|
2142
2626
|
Args:
|
|
2143
|
-
brightness (Union[float, Sequence[float]], optional): Brightness adjustment factor. Default: (1, 1)
|
|
2627
|
+
brightness (Union[float, Sequence[float]], optional): Brightness adjustment factor. Default: ``(1, 1)``.
|
|
2144
2628
|
Cannot be negative.
|
|
2145
2629
|
If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness), 1+brightness].
|
|
2146
2630
|
If it is a sequence, it should be [min, max] for the range.
|
|
2147
|
-
contrast (Union[float, Sequence[float]], optional): Contrast adjustment factor. Default: (1, 1)
|
|
2631
|
+
contrast (Union[float, Sequence[float]], optional): Contrast adjustment factor. Default: ``(1, 1)``.
|
|
2148
2632
|
Cannot be negative.
|
|
2149
2633
|
If it is a float, the factor is uniformly chosen from the range [max(0, 1-contrast), 1+contrast].
|
|
2150
2634
|
If it is a sequence, it should be [min, max] for the range.
|
|
2151
|
-
saturation (Union[float, Sequence[float]], optional): Saturation adjustment factor. Default: (1, 1)
|
|
2635
|
+
saturation (Union[float, Sequence[float]], optional): Saturation adjustment factor. Default: ``(1, 1)``.
|
|
2152
2636
|
Cannot be negative.
|
|
2153
2637
|
If it is a float, the factor is uniformly chosen from the range [max(0, 1-saturation), 1+saturation].
|
|
2154
2638
|
If it is a sequence, it should be [min, max] for the range.
|
|
2155
|
-
hue (Union[float, Sequence[float]], optional): Hue adjustment factor. Default: (0, 0)
|
|
2639
|
+
hue (Union[float, Sequence[float]], optional): Hue adjustment factor. Default: ``(0, 0)``.
|
|
2156
2640
|
If it is a float, the range will be [-hue, hue]. Value should be 0 <= hue <= 0.5.
|
|
2157
2641
|
If it is a sequence, it should be [min, max] where -0.5 <= min <= max <= 0.5.
|
|
2158
2642
|
|
|
@@ -2168,9 +2652,13 @@ class RandomColorAdjust(ImageTensorOperation, PyTensorOperation):
|
|
|
2168
2652
|
RuntimeError: If given tensor shape is not <H, W, C>.
|
|
2169
2653
|
|
|
2170
2654
|
Supported Platforms:
|
|
2171
|
-
``CPU``
|
|
2655
|
+
``CPU`` ``GPU`` ``Ascend``
|
|
2172
2656
|
|
|
2173
2657
|
Examples:
|
|
2658
|
+
>>> import mindspore.dataset as ds
|
|
2659
|
+
>>> import mindspore.dataset.vision as vision
|
|
2660
|
+
>>>
|
|
2661
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2174
2662
|
>>> decode_op = vision.Decode()
|
|
2175
2663
|
>>> transform_op = vision.RandomColorAdjust(brightness=(0.5, 1),
|
|
2176
2664
|
... contrast=(0.4, 1),
|
|
@@ -2178,6 +2666,10 @@ class RandomColorAdjust(ImageTensorOperation, PyTensorOperation):
|
|
|
2178
2666
|
>>> transforms_list = [decode_op, transform_op]
|
|
2179
2667
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2180
2668
|
... input_columns=["image"])
|
|
2669
|
+
|
|
2670
|
+
Tutorial Examples:
|
|
2671
|
+
- `Illustration of vision transforms
|
|
2672
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2181
2673
|
"""
|
|
2182
2674
|
|
|
2183
2675
|
@check_random_color_adjust
|
|
@@ -2233,30 +2725,30 @@ class RandomCrop(ImageTensorOperation, PyTensorOperation):
|
|
|
2233
2725
|
If size is an integer, a square crop of size (size, size) is returned.
|
|
2234
2726
|
If size is a sequence of length 2, an image of size (height, width) will be cropped.
|
|
2235
2727
|
padding (Union[int, Sequence[int]], optional): The number of pixels to pad each border of the image.
|
|
2236
|
-
The padding value(s) must be non-negative. Default: None
|
|
2237
|
-
If padding is not None
|
|
2728
|
+
The padding value(s) must be non-negative. Default: ``None``.
|
|
2729
|
+
If `padding` is not ``None``, pad image first with padding values.
|
|
2238
2730
|
If a single number is provided, pad all borders with this value.
|
|
2239
2731
|
If a tuple or lists of 2 values are provided, pad the (left and right)
|
|
2240
2732
|
with the first value and (top and bottom) with the second value.
|
|
2241
2733
|
If 4 values are provided as a list or tuple,
|
|
2242
2734
|
pad the left, top, right and bottom respectively.
|
|
2243
2735
|
pad_if_needed (bool, optional): Pad the image if either side is smaller than
|
|
2244
|
-
the given output size. Default: False
|
|
2736
|
+
the given output size. Default: ``False``.
|
|
2245
2737
|
fill_value (Union[int, tuple[int]], optional): The pixel intensity of the borders, only valid for
|
|
2246
2738
|
padding_mode Border.CONSTANT. If it is a 3-tuple, it is used to fill R, G, B channels respectively.
|
|
2247
2739
|
If it is an integer, it is used for all RGB channels.
|
|
2248
|
-
The fill_value values must be in range [0, 255]. Default: 0
|
|
2249
|
-
padding_mode (Border, optional): The method of padding. Default: Border.CONSTANT
|
|
2250
|
-
|
|
2740
|
+
The fill_value values must be in range [0, 255]. Default: ``0``.
|
|
2741
|
+
padding_mode (Border, optional): The method of padding. Default: ``Border.CONSTANT``. It can be any of
|
|
2742
|
+
``Border.CONSTANT``, ``Border.EDGE``, ``Border.REFLECT``, ``Border.SYMMETRIC``.
|
|
2251
2743
|
|
|
2252
|
-
- Border.CONSTANT, means it fills the border with constant values.
|
|
2744
|
+
- ``Border.CONSTANT`` , means it fills the border with constant values.
|
|
2253
2745
|
|
|
2254
|
-
- Border.EDGE, means it pads with the last value on the edge.
|
|
2746
|
+
- ``Border.EDGE`` , means it pads with the last value on the edge.
|
|
2255
2747
|
|
|
2256
|
-
- Border.REFLECT, means it reflects the values on the edge omitting the last
|
|
2748
|
+
- ``Border.REFLECT`` , means it reflects the values on the edge omitting the last
|
|
2257
2749
|
value of edge.
|
|
2258
2750
|
|
|
2259
|
-
- Border.SYMMETRIC, means it reflects the values on the edge repeating the last
|
|
2751
|
+
- ``Border.SYMMETRIC`` , means it reflects the values on the edge repeating the last
|
|
2260
2752
|
value of edge.
|
|
2261
2753
|
|
|
2262
2754
|
Raises:
|
|
@@ -2274,12 +2766,20 @@ class RandomCrop(ImageTensorOperation, PyTensorOperation):
|
|
|
2274
2766
|
``CPU``
|
|
2275
2767
|
|
|
2276
2768
|
Examples:
|
|
2769
|
+
>>> import mindspore.dataset as ds
|
|
2770
|
+
>>> import mindspore.dataset.vision as vision
|
|
2277
2771
|
>>> from mindspore.dataset.vision import Border
|
|
2772
|
+
>>>
|
|
2278
2773
|
>>> decode_op = vision.Decode()
|
|
2279
2774
|
>>> random_crop_op = vision.RandomCrop(512, [200, 200, 200, 200], padding_mode=Border.EDGE)
|
|
2280
2775
|
>>> transforms_list = [decode_op, random_crop_op]
|
|
2776
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2281
2777
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2282
2778
|
... input_columns=["image"])
|
|
2779
|
+
|
|
2780
|
+
Tutorial Examples:
|
|
2781
|
+
- `Illustration of vision transforms
|
|
2782
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2283
2783
|
"""
|
|
2284
2784
|
|
|
2285
2785
|
@check_random_crop
|
|
@@ -2328,31 +2828,19 @@ class RandomCropDecodeResize(ImageTensorOperation):
|
|
|
2328
2828
|
If size is an integer, a square crop of size (size, size) is returned.
|
|
2329
2829
|
If size is a sequence of length 2, it should be (height, width).
|
|
2330
2830
|
scale (Union[list, tuple], optional): Range [min, max) of respective size of the
|
|
2331
|
-
original size to be cropped, which must be non-negative. Default: (0.08, 1.0)
|
|
2831
|
+
original size to be cropped, which must be non-negative. Default: ``(0.08, 1.0)``.
|
|
2332
2832
|
ratio (Union[list, tuple], optional): Range [min, max) of aspect ratio to be
|
|
2333
|
-
cropped, which must be non-negative. Default: (3. / 4., 4. / 3.)
|
|
2334
|
-
interpolation (Inter, optional): Image interpolation
|
|
2335
|
-
|
|
2336
|
-
|
|
2337
|
-
|
|
2338
|
-
|
|
2339
|
-
- Inter.NEAREST, means interpolation method is nearest-neighbor interpolation.
|
|
2340
|
-
|
|
2341
|
-
- Inter.BICUBIC, means interpolation method is bicubic interpolation.
|
|
2342
|
-
|
|
2343
|
-
- Inter.AREA, means interpolation method is pixel area interpolation.
|
|
2344
|
-
|
|
2345
|
-
- Inter.PILCUBIC, means interpolation method is bicubic interpolation like implemented in pillow, input
|
|
2346
|
-
should be in 3 channels format.
|
|
2347
|
-
|
|
2348
|
-
max_attempts (int, optional): The maximum number of attempts to propose a valid crop_area. Default: 10.
|
|
2349
|
-
If exceeded, fall back to use center_crop instead. The max_attempts value must be positive.
|
|
2833
|
+
cropped, which must be non-negative. Default: ``(3. / 4., 4. / 3.)``.
|
|
2834
|
+
interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
|
|
2835
|
+
Default: ``Inter.BILINEAR``.
|
|
2836
|
+
max_attempts (int, optional): The maximum number of attempts to propose a valid crop_area. Default: ``10``.
|
|
2837
|
+
If exceeded, fall back to use center_crop instead. The `max_attempts` value must be positive.
|
|
2350
2838
|
|
|
2351
2839
|
Raises:
|
|
2352
2840
|
TypeError: If `size` is not of type int or Sequence[int].
|
|
2353
2841
|
TypeError: If `scale` is not of type tuple.
|
|
2354
2842
|
TypeError: If `ratio` is not of type tuple.
|
|
2355
|
-
TypeError: If `interpolation` is not of type :class
|
|
2843
|
+
TypeError: If `interpolation` is not of type :class:`~.vision.Inter` .
|
|
2356
2844
|
TypeError: If `max_attempts` is not of type integer.
|
|
2357
2845
|
ValueError: If `size` is not positive.
|
|
2358
2846
|
ValueError: If `scale` is negative.
|
|
@@ -2364,14 +2852,22 @@ class RandomCropDecodeResize(ImageTensorOperation):
|
|
|
2364
2852
|
``CPU``
|
|
2365
2853
|
|
|
2366
2854
|
Examples:
|
|
2855
|
+
>>> import mindspore.dataset as ds
|
|
2856
|
+
>>> import mindspore.dataset.vision as vision
|
|
2367
2857
|
>>> from mindspore.dataset.vision import Inter
|
|
2858
|
+
>>>
|
|
2368
2859
|
>>> resize_crop_decode_op = vision.RandomCropDecodeResize(size=(50, 75),
|
|
2369
2860
|
... scale=(0.25, 0.5),
|
|
2370
2861
|
... interpolation=Inter.NEAREST,
|
|
2371
2862
|
... max_attempts=5)
|
|
2372
2863
|
>>> transforms_list = [resize_crop_decode_op]
|
|
2864
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2373
2865
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2374
2866
|
... input_columns=["image"])
|
|
2867
|
+
|
|
2868
|
+
Tutorial Examples:
|
|
2869
|
+
- `Illustration of vision transforms
|
|
2870
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2375
2871
|
"""
|
|
2376
2872
|
|
|
2377
2873
|
@check_random_resize_crop
|
|
@@ -2411,29 +2907,29 @@ class RandomCropWithBBox(ImageTensorOperation):
|
|
|
2411
2907
|
If size is an integer, a square crop of size (size, size) is returned.
|
|
2412
2908
|
If size is a sequence of length 2, an image of size (height, width) will be cropped.
|
|
2413
2909
|
padding (Union[int, Sequence[int]], optional): The number of pixels to pad the image
|
|
2414
|
-
The padding value(s) must be non-negative. Default: None
|
|
2415
|
-
If padding is not None
|
|
2910
|
+
The padding value(s) must be non-negative. Default: ``None``.
|
|
2911
|
+
If `padding` is not ``None``, first pad image with padding values.
|
|
2416
2912
|
If a single number is provided, pad all borders with this value.
|
|
2417
2913
|
If a tuple or lists of 2 values are provided, pad the (left and right)
|
|
2418
2914
|
with the first value and (top and bottom) with the second value.
|
|
2419
2915
|
If 4 values are provided as a list or tuple, pad the left, top, right and bottom respectively.
|
|
2420
2916
|
pad_if_needed (bool, optional): Pad the image if either side is smaller than
|
|
2421
|
-
the given output size. Default: False
|
|
2917
|
+
the given output size. Default: ``False``.
|
|
2422
2918
|
fill_value (Union[int, tuple[int]], optional): The pixel intensity of the borders, only valid for
|
|
2423
2919
|
padding_mode Border.CONSTANT. If it is a 3-tuple, it is used to fill R, G, B channels respectively.
|
|
2424
2920
|
If it is an integer, it is used for all RGB channels.
|
|
2425
|
-
The fill_value values must be in range [0, 255]. Default: 0
|
|
2426
|
-
padding_mode (Border, optional): The method of padding. Default: Border.CONSTANT
|
|
2427
|
-
|
|
2921
|
+
The fill_value values must be in range [0, 255]. Default: ``0``.
|
|
2922
|
+
padding_mode (Border, optional): The method of padding. Default: ``Border.CONSTANT``. It can be any of
|
|
2923
|
+
``Border.CONSTANT``, ``Border.EDGE``, ``Border.REFLECT``, ``Border.SYMMETRIC``.
|
|
2428
2924
|
|
|
2429
|
-
- Border.CONSTANT, means it fills the border with constant values.
|
|
2925
|
+
- ``Border.CONSTANT`` , means it fills the border with constant values.
|
|
2430
2926
|
|
|
2431
|
-
- Border.EDGE, means it pads with the last value on the edge.
|
|
2927
|
+
- ``Border.EDGE`` , means it pads with the last value on the edge.
|
|
2432
2928
|
|
|
2433
|
-
- Border.REFLECT, means it reflects the values on the edge omitting the last
|
|
2929
|
+
- ``Border.REFLECT`` , means it reflects the values on the edge omitting the last
|
|
2434
2930
|
value of edge.
|
|
2435
2931
|
|
|
2436
|
-
- Border.SYMMETRIC, means it reflects the values on the edge repeating the last
|
|
2932
|
+
- ``Border.SYMMETRIC`` , means it reflects the values on the edge repeating the last
|
|
2437
2933
|
|
|
2438
2934
|
value of edge.
|
|
2439
2935
|
|
|
@@ -2452,11 +2948,19 @@ class RandomCropWithBBox(ImageTensorOperation):
|
|
|
2452
2948
|
``CPU``
|
|
2453
2949
|
|
|
2454
2950
|
Examples:
|
|
2951
|
+
>>> import mindspore.dataset as ds
|
|
2952
|
+
>>> import mindspore.dataset.vision as vision
|
|
2953
|
+
>>>
|
|
2954
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2455
2955
|
>>> decode_op = vision.Decode()
|
|
2456
2956
|
>>> random_crop_with_bbox_op = vision.RandomCropWithBBox([512, 512], [200, 200, 200, 200])
|
|
2457
2957
|
>>> transforms_list = [decode_op, random_crop_with_bbox_op]
|
|
2458
2958
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2459
2959
|
... input_columns=["image"])
|
|
2960
|
+
|
|
2961
|
+
Tutorial Examples:
|
|
2962
|
+
- `Illustration of vision transforms
|
|
2963
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2460
2964
|
"""
|
|
2461
2965
|
|
|
2462
2966
|
@check_random_crop
|
|
@@ -2491,7 +2995,7 @@ class RandomEqualize(ImageTensorOperation):
|
|
|
2491
2995
|
|
|
2492
2996
|
Args:
|
|
2493
2997
|
prob (float, optional): Probability of the image being equalized, which
|
|
2494
|
-
must be in range of [0.0, 1.0]. Default: 0.5
|
|
2998
|
+
must be in range of [0.0, 1.0]. Default: ``0.5``.
|
|
2495
2999
|
|
|
2496
3000
|
Raises:
|
|
2497
3001
|
TypeError: If `prob` is not of type float.
|
|
@@ -2502,9 +3006,17 @@ class RandomEqualize(ImageTensorOperation):
|
|
|
2502
3006
|
``CPU``
|
|
2503
3007
|
|
|
2504
3008
|
Examples:
|
|
3009
|
+
>>> import mindspore.dataset as ds
|
|
3010
|
+
>>> import mindspore.dataset.vision as vision
|
|
3011
|
+
>>>
|
|
3012
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2505
3013
|
>>> transforms_list = [vision.Decode(), vision.RandomEqualize(0.5)]
|
|
2506
3014
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2507
3015
|
... input_columns=["image"])
|
|
3016
|
+
|
|
3017
|
+
Tutorial Examples:
|
|
3018
|
+
- `Illustration of vision transforms
|
|
3019
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2508
3020
|
"""
|
|
2509
3021
|
|
|
2510
3022
|
@check_prob
|
|
@@ -2525,20 +3037,20 @@ class RandomErasing(PyTensorOperation):
|
|
|
2525
3037
|
|
|
2526
3038
|
Args:
|
|
2527
3039
|
prob (float, optional): Probability of performing erasing, which
|
|
2528
|
-
must be in range of [0.0, 1.0]. Default: 0.5
|
|
3040
|
+
must be in range of [0.0, 1.0]. Default: ``0.5``.
|
|
2529
3041
|
scale (Sequence[float, float], optional): Range of area scale of the erased area relative
|
|
2530
3042
|
to the original image to select from, arranged in order of (min, max).
|
|
2531
|
-
Default: (0.02, 0.33)
|
|
3043
|
+
Default: ``(0.02, 0.33)``.
|
|
2532
3044
|
ratio (Sequence[float, float], optional): Range of aspect ratio of the erased area to select
|
|
2533
|
-
from, arraged in order of (min, max). Default: (0.3, 3.3)
|
|
3045
|
+
from, arraged in order of (min, max). Default: ``(0.3, 3.3)``.
|
|
2534
3046
|
value (Union[int, str, Sequence[int, int, int]]): Pixel value used to pad the erased area.
|
|
2535
3047
|
If a single integer is provided, it will be used for all RGB channels.
|
|
2536
3048
|
If a sequence of length 3 is provided, it will be used for R, G, B channels respectively.
|
|
2537
|
-
If a string of 'random' is provided, each pixel will be erased with a random value obtained
|
|
2538
|
-
from a standard normal distribution. Default: 0
|
|
2539
|
-
inplace (bool, optional): Whether to apply erasing inplace. Default: False
|
|
3049
|
+
If a string of ``'random'`` is provided, each pixel will be erased with a random value obtained
|
|
3050
|
+
from a standard normal distribution. Default: ``0``.
|
|
3051
|
+
inplace (bool, optional): Whether to apply erasing inplace. Default: ``False``.
|
|
2540
3052
|
max_attempts (int, optional): The maximum number of attempts to propose a valid
|
|
2541
|
-
erased area, beyond which the original image will be returned. Default: 10
|
|
3053
|
+
erased area, beyond which the original image will be returned. Default: ``10``.
|
|
2542
3054
|
|
|
2543
3055
|
Raises:
|
|
2544
3056
|
TypeError: If `prob` is not of type float.
|
|
@@ -2557,14 +3069,21 @@ class RandomErasing(PyTensorOperation):
|
|
|
2557
3069
|
``CPU``
|
|
2558
3070
|
|
|
2559
3071
|
Examples:
|
|
3072
|
+
>>> import mindspore.dataset as ds
|
|
3073
|
+
>>> import mindspore.dataset.vision as vision
|
|
2560
3074
|
>>> from mindspore.dataset.transforms import Compose
|
|
2561
3075
|
>>>
|
|
2562
3076
|
>>> transforms_list = Compose([vision.Decode(to_pil=True),
|
|
2563
3077
|
... vision.ToTensor(),
|
|
2564
3078
|
... vision.RandomErasing(value='random')])
|
|
2565
3079
|
>>> # apply the transform to dataset through map function
|
|
3080
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2566
3081
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2567
3082
|
... input_columns="image")
|
|
3083
|
+
|
|
3084
|
+
Tutorial Examples:
|
|
3085
|
+
- `Illustration of vision transforms
|
|
3086
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2568
3087
|
"""
|
|
2569
3088
|
|
|
2570
3089
|
@check_random_erasing
|
|
@@ -2602,7 +3121,7 @@ class RandomGrayscale(PyTensorOperation):
|
|
|
2602
3121
|
|
|
2603
3122
|
Args:
|
|
2604
3123
|
prob (float, optional): Probability of performing grayscale conversion,
|
|
2605
|
-
which must be in range of [0.0, 1.0]. Default: 0.1
|
|
3124
|
+
which must be in range of [0.0, 1.0]. Default: ``0.1``.
|
|
2606
3125
|
|
|
2607
3126
|
Raises:
|
|
2608
3127
|
TypeError: If `prob` is not of type float.
|
|
@@ -2612,14 +3131,21 @@ class RandomGrayscale(PyTensorOperation):
|
|
|
2612
3131
|
``CPU``
|
|
2613
3132
|
|
|
2614
3133
|
Examples:
|
|
3134
|
+
>>> import mindspore.dataset as ds
|
|
3135
|
+
>>> import mindspore.dataset.vision as vision
|
|
2615
3136
|
>>> from mindspore.dataset.transforms import Compose
|
|
2616
3137
|
>>>
|
|
2617
3138
|
>>> transforms_list = Compose([vision.Decode(to_pil=True),
|
|
2618
3139
|
... vision.RandomGrayscale(0.3),
|
|
2619
3140
|
... vision.ToTensor()])
|
|
2620
3141
|
>>> # apply the transform to dataset through map function
|
|
3142
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2621
3143
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2622
3144
|
... input_columns="image")
|
|
3145
|
+
|
|
3146
|
+
Tutorial Examples:
|
|
3147
|
+
- `Illustration of vision transforms
|
|
3148
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2623
3149
|
"""
|
|
2624
3150
|
|
|
2625
3151
|
@check_prob
|
|
@@ -2656,7 +3182,7 @@ class RandomHorizontalFlip(ImageTensorOperation, PyTensorOperation):
|
|
|
2656
3182
|
|
|
2657
3183
|
Args:
|
|
2658
3184
|
prob (float, optional): Probability of the image being flipped,
|
|
2659
|
-
which must be in range of [0.0, 1.0]. Default: 0.5
|
|
3185
|
+
which must be in range of [0.0, 1.0]. Default: ``0.5``.
|
|
2660
3186
|
|
|
2661
3187
|
Raises:
|
|
2662
3188
|
TypeError: If `prob` is not of type float.
|
|
@@ -2667,9 +3193,17 @@ class RandomHorizontalFlip(ImageTensorOperation, PyTensorOperation):
|
|
|
2667
3193
|
``CPU``
|
|
2668
3194
|
|
|
2669
3195
|
Examples:
|
|
3196
|
+
>>> import mindspore.dataset as ds
|
|
3197
|
+
>>> import mindspore.dataset.vision as vision
|
|
3198
|
+
>>>
|
|
3199
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2670
3200
|
>>> transforms_list = [vision.Decode(), vision.RandomHorizontalFlip(0.75)]
|
|
2671
3201
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2672
3202
|
... input_columns=["image"])
|
|
3203
|
+
|
|
3204
|
+
Tutorial Examples:
|
|
3205
|
+
- `Illustration of vision transforms
|
|
3206
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2673
3207
|
"""
|
|
2674
3208
|
|
|
2675
3209
|
@check_prob
|
|
@@ -2695,11 +3229,11 @@ class RandomHorizontalFlip(ImageTensorOperation, PyTensorOperation):
|
|
|
2695
3229
|
|
|
2696
3230
|
class RandomHorizontalFlipWithBBox(ImageTensorOperation):
|
|
2697
3231
|
"""
|
|
2698
|
-
|
|
3232
|
+
Randomly flip the input image and its bounding box horizontally with a given probability.
|
|
2699
3233
|
|
|
2700
3234
|
Args:
|
|
2701
3235
|
prob (float, optional): Probability of the image being flipped,
|
|
2702
|
-
which must be in range of [0.0, 1.0]. Default: 0.5
|
|
3236
|
+
which must be in range of [0.0, 1.0]. Default: ``0.5``.
|
|
2703
3237
|
|
|
2704
3238
|
Raises:
|
|
2705
3239
|
TypeError: If `prob` is not of type float.
|
|
@@ -2710,9 +3244,17 @@ class RandomHorizontalFlipWithBBox(ImageTensorOperation):
|
|
|
2710
3244
|
``CPU``
|
|
2711
3245
|
|
|
2712
3246
|
Examples:
|
|
3247
|
+
>>> import mindspore.dataset as ds
|
|
3248
|
+
>>> import mindspore.dataset.vision as vision
|
|
3249
|
+
>>>
|
|
3250
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2713
3251
|
>>> transforms_list = [vision.Decode(), vision.RandomHorizontalFlipWithBBox(0.70)]
|
|
2714
3252
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2715
3253
|
... input_columns=["image"])
|
|
3254
|
+
|
|
3255
|
+
Tutorial Examples:
|
|
3256
|
+
- `Illustration of vision transforms
|
|
3257
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2716
3258
|
"""
|
|
2717
3259
|
|
|
2718
3260
|
@check_prob
|
|
@@ -2731,7 +3273,7 @@ class RandomInvert(ImageTensorOperation):
|
|
|
2731
3273
|
|
|
2732
3274
|
Args:
|
|
2733
3275
|
prob (float, optional): Probability of the image being inverted,
|
|
2734
|
-
which must be in range of [0.0, 1.0]. Default: 0.5
|
|
3276
|
+
which must be in range of [0.0, 1.0]. Default: ``0.5``.
|
|
2735
3277
|
|
|
2736
3278
|
Raises:
|
|
2737
3279
|
TypeError: If `prob` is not of type float.
|
|
@@ -2742,9 +3284,17 @@ class RandomInvert(ImageTensorOperation):
|
|
|
2742
3284
|
``CPU``
|
|
2743
3285
|
|
|
2744
3286
|
Examples:
|
|
3287
|
+
>>> import mindspore.dataset as ds
|
|
3288
|
+
>>> import mindspore.dataset.vision as vision
|
|
3289
|
+
>>>
|
|
3290
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2745
3291
|
>>> transforms_list = [vision.Decode(), vision.RandomInvert(0.5)]
|
|
2746
3292
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2747
3293
|
... input_columns=["image"])
|
|
3294
|
+
|
|
3295
|
+
Tutorial Examples:
|
|
3296
|
+
- `Illustration of vision transforms
|
|
3297
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2748
3298
|
"""
|
|
2749
3299
|
|
|
2750
3300
|
@check_prob
|
|
@@ -2763,7 +3313,7 @@ class RandomLighting(ImageTensorOperation, PyTensorOperation):
|
|
|
2763
3313
|
calculated from the imagenet dataset.
|
|
2764
3314
|
|
|
2765
3315
|
Args:
|
|
2766
|
-
alpha (float, optional): Intensity of the image, which must be non-negative. Default: 0.05
|
|
3316
|
+
alpha (float, optional): Intensity of the image, which must be non-negative. Default: ``0.05``.
|
|
2767
3317
|
|
|
2768
3318
|
Raises:
|
|
2769
3319
|
TypeError: If `alpha` is not of type float.
|
|
@@ -2774,9 +3324,17 @@ class RandomLighting(ImageTensorOperation, PyTensorOperation):
|
|
|
2774
3324
|
``CPU``
|
|
2775
3325
|
|
|
2776
3326
|
Examples:
|
|
3327
|
+
>>> import mindspore.dataset as ds
|
|
3328
|
+
>>> import mindspore.dataset.vision as vision
|
|
3329
|
+
>>>
|
|
3330
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2777
3331
|
>>> transforms_list = [vision.Decode(), vision.RandomLighting(0.1)]
|
|
2778
3332
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2779
3333
|
... input_columns=["image"])
|
|
3334
|
+
|
|
3335
|
+
Tutorial Examples:
|
|
3336
|
+
- `Illustration of vision transforms
|
|
3337
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2780
3338
|
"""
|
|
2781
3339
|
|
|
2782
3340
|
@check_alpha
|
|
@@ -2806,20 +3364,16 @@ class RandomPerspective(PyTensorOperation):
|
|
|
2806
3364
|
Randomly apply perspective transformation to the input PIL Image with a given probability.
|
|
2807
3365
|
|
|
2808
3366
|
Args:
|
|
2809
|
-
distortion_scale (float, optional): Scale of distortion, in range of [0.0, 1.0]. Default: 0.5
|
|
3367
|
+
distortion_scale (float, optional): Scale of distortion, in range of [0.0, 1.0]. Default: ``0.5``.
|
|
2810
3368
|
prob (float, optional): Probability of performing perspective transformation, which
|
|
2811
|
-
must be in range of [0.0, 1.0]. Default: 0.5
|
|
2812
|
-
interpolation (Inter, optional):
|
|
2813
|
-
|
|
2814
|
-
|
|
2815
|
-
- Inter.BILINEAR, bilinear interpolation.
|
|
2816
|
-
- Inter.NEAREST, nearest-neighbor interpolation.
|
|
2817
|
-
- Inter.BICUBIC, bicubic interpolation.
|
|
3369
|
+
must be in range of [0.0, 1.0]. Default: ``0.5``.
|
|
3370
|
+
interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
|
|
3371
|
+
Default: ``Inter.BICUBIC``.
|
|
2818
3372
|
|
|
2819
3373
|
Raises:
|
|
2820
3374
|
TypeError: If `distortion_scale` is not of type float.
|
|
2821
3375
|
TypeError: If `prob` is not of type float.
|
|
2822
|
-
TypeError: If `interpolation` is not of type :class
|
|
3376
|
+
TypeError: If `interpolation` is not of type :class:`~.vision.Inter` .
|
|
2823
3377
|
ValueError: If `distortion_scale` is not in range of [0.0, 1.0].
|
|
2824
3378
|
ValueError: If `prob` is not in range of [0.0, 1.0].
|
|
2825
3379
|
|
|
@@ -2827,14 +3381,21 @@ class RandomPerspective(PyTensorOperation):
|
|
|
2827
3381
|
``CPU``
|
|
2828
3382
|
|
|
2829
3383
|
Examples:
|
|
3384
|
+
>>> import mindspore.dataset as ds
|
|
3385
|
+
>>> import mindspore.dataset.vision as vision
|
|
2830
3386
|
>>> from mindspore.dataset.transforms import Compose
|
|
2831
3387
|
>>>
|
|
2832
3388
|
>>> transforms_list = Compose([vision.Decode(to_pil=True),
|
|
2833
3389
|
... vision.RandomPerspective(prob=0.1),
|
|
2834
3390
|
... vision.ToTensor()])
|
|
2835
3391
|
>>> # apply the transform to dataset through map function
|
|
3392
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2836
3393
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2837
3394
|
... input_columns="image")
|
|
3395
|
+
|
|
3396
|
+
Tutorial Examples:
|
|
3397
|
+
- `Illustration of vision transforms
|
|
3398
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2838
3399
|
"""
|
|
2839
3400
|
|
|
2840
3401
|
@check_random_perspective
|
|
@@ -2876,7 +3437,7 @@ class RandomPosterize(ImageTensorOperation):
|
|
|
2876
3437
|
Bits values must be in range of [1,8], and include at
|
|
2877
3438
|
least one integer value in the given range. It must be in
|
|
2878
3439
|
(min, max) or integer format. If min=max, then it is a single fixed
|
|
2879
|
-
magnitude operation. Default: (8, 8)
|
|
3440
|
+
magnitude operation. Default: ``(8, 8)``.
|
|
2880
3441
|
|
|
2881
3442
|
Raises:
|
|
2882
3443
|
TypeError: If `bits` is not of type integer or sequence of integer.
|
|
@@ -2887,9 +3448,17 @@ class RandomPosterize(ImageTensorOperation):
|
|
|
2887
3448
|
``CPU``
|
|
2888
3449
|
|
|
2889
3450
|
Examples:
|
|
3451
|
+
>>> import mindspore.dataset as ds
|
|
3452
|
+
>>> import mindspore.dataset.vision as vision
|
|
3453
|
+
>>>
|
|
3454
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2890
3455
|
>>> transforms_list = [vision.Decode(), vision.RandomPosterize((6, 8))]
|
|
2891
3456
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2892
3457
|
... input_columns=["image"])
|
|
3458
|
+
|
|
3459
|
+
Tutorial Examples:
|
|
3460
|
+
- `Illustration of vision transforms
|
|
3461
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2893
3462
|
"""
|
|
2894
3463
|
|
|
2895
3464
|
@check_random_posterize
|
|
@@ -2908,7 +3477,7 @@ class RandomPosterize(ImageTensorOperation):
|
|
|
2908
3477
|
class RandomResizedCrop(ImageTensorOperation, PyTensorOperation):
|
|
2909
3478
|
"""
|
|
2910
3479
|
This operation will crop the input image randomly,
|
|
2911
|
-
and resize the cropped image using a selected interpolation mode :class
|
|
3480
|
+
and resize the cropped image using a selected interpolation mode :class:`~.vision.Inter` .
|
|
2912
3481
|
|
|
2913
3482
|
Note:
|
|
2914
3483
|
If the input image is more than one, then make sure that the image size is the same.
|
|
@@ -2918,33 +3487,19 @@ class RandomResizedCrop(ImageTensorOperation, PyTensorOperation):
|
|
|
2918
3487
|
If size is an integer, a square of size (size, size) will be cropped with this value.
|
|
2919
3488
|
If size is a sequence of length 2, an image of size (height, width) will be cropped.
|
|
2920
3489
|
scale (Union[list, tuple], optional): Range [min, max) of respective size of the original
|
|
2921
|
-
size to be cropped, which must be non-negative. Default: (0.08, 1.0)
|
|
3490
|
+
size to be cropped, which must be non-negative. Default: ``(0.08, 1.0)``.
|
|
2922
3491
|
ratio (Union[list, tuple], optional): Range [min, max) of aspect ratio to be
|
|
2923
|
-
cropped, which must be non-negative. Default: (3. / 4., 4. / 3.)
|
|
2924
|
-
interpolation (Inter, optional):
|
|
2925
|
-
|
|
2926
|
-
|
|
2927
|
-
- Inter.BILINEAR, means interpolation method is bilinear interpolation.
|
|
2928
|
-
|
|
2929
|
-
- Inter.NEAREST, means interpolation method is nearest-neighbor interpolation.
|
|
2930
|
-
|
|
2931
|
-
- Inter.BICUBIC, means interpolation method is bicubic interpolation.
|
|
2932
|
-
|
|
2933
|
-
- Inter.AREA, means interpolation method is pixel area interpolation.
|
|
2934
|
-
|
|
2935
|
-
- Inter.PILCUBIC, means interpolation method is bicubic interpolation like implemented in pillow, input
|
|
2936
|
-
should be in 3 channels format.
|
|
2937
|
-
|
|
2938
|
-
- Inter.ANTIALIAS, means the interpolation method is antialias interpolation.
|
|
2939
|
-
|
|
3492
|
+
cropped, which must be non-negative. Default: ``(3. / 4., 4. / 3.)``.
|
|
3493
|
+
interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
|
|
3494
|
+
Default: ``Inter.BILINEAR``.
|
|
2940
3495
|
max_attempts (int, optional): The maximum number of attempts to propose a valid
|
|
2941
|
-
crop_area. Default: 10
|
|
3496
|
+
crop_area. Default: ``10``. If exceeded, fall back to use center_crop instead.
|
|
2942
3497
|
|
|
2943
3498
|
Raises:
|
|
2944
3499
|
TypeError: If `size` is not of type int or Sequence[int].
|
|
2945
3500
|
TypeError: If `scale` is not of type tuple or list.
|
|
2946
3501
|
TypeError: If `ratio` is not of type tuple or list.
|
|
2947
|
-
TypeError: If `interpolation` is not of type :class
|
|
3502
|
+
TypeError: If `interpolation` is not of type :class:`~.vision.Inter` .
|
|
2948
3503
|
TypeError: If `max_attempts` is not of type int.
|
|
2949
3504
|
ValueError: If `size` is not positive.
|
|
2950
3505
|
ValueError: If `scale` is negative.
|
|
@@ -2955,13 +3510,21 @@ class RandomResizedCrop(ImageTensorOperation, PyTensorOperation):
|
|
|
2955
3510
|
``CPU``
|
|
2956
3511
|
|
|
2957
3512
|
Examples:
|
|
3513
|
+
>>> import mindspore.dataset as ds
|
|
3514
|
+
>>> import mindspore.dataset.vision as vision
|
|
2958
3515
|
>>> from mindspore.dataset.vision import Inter
|
|
3516
|
+
>>>
|
|
2959
3517
|
>>> decode_op = vision.Decode()
|
|
2960
3518
|
>>> resize_crop_op = vision.RandomResizedCrop(size=(50, 75), scale=(0.25, 0.5),
|
|
2961
3519
|
... interpolation=Inter.BILINEAR)
|
|
2962
3520
|
>>> transforms_list = [decode_op, resize_crop_op]
|
|
3521
|
+
>>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
|
|
2963
3522
|
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
|
|
2964
3523
|
... input_columns=["image"])
|
|
3524
|
+
|
|
3525
|
+
Tutorial Examples:
|
|
3526
|
+
- `Illustration of vision transforms
|
|
3527
|
+
<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
|
|
2965
3528
|
"""
|
|
2966
3529
|
|
|
2967
3530
|
@check_random_resize_crop
|
|
@@ -3011,20 +3574,13 @@ class RandomResizedCropWithBBox(ImageTensorOperation):
  If size is an integer, a square crop of size (size, size) is returned.
  If size is a sequence of length 2, it should be (height, width).
  scale (Union[list, tuple], optional): Range (min, max) of respective size of the original
- size to be cropped, which must be non-negative. Default: (0.08, 1.0)
+ size to be cropped, which must be non-negative. Default: ``(0.08, 1.0)``.
  ratio (Union[list, tuple], optional): Range (min, max) of aspect ratio to be
- cropped, which must be non-negative. Default: (3. / 4., 4. / 3.)
- interpolation (Inter, optional): Image interpolation
-
-
- - Inter.BILINEAR, means interpolation method is bilinear interpolation.
-
- - Inter.NEAREST, means interpolation method is nearest-neighbor interpolation.
-
- - Inter.BICUBIC, means interpolation method is bicubic interpolation.
-
+ cropped, which must be non-negative. Default: ``(3. / 4., 4. / 3.)``.
+ interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+ Default: ``Inter.BILINEAR``.
  max_attempts (int, optional): The maximum number of attempts to propose a valid
- crop area. Default: 10
+ crop area. Default: ``10``. If exceeded, fall back to use center crop instead.

  Raises:
  TypeError: If `size` is not of type int or Sequence[int].

@@ -3042,12 +3598,20 @@ class RandomResizedCropWithBBox(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.vision import Inter
+ >>>
  >>> decode_op = vision.Decode()
  >>> bbox_op = vision.RandomResizedCropWithBBox(size=50, interpolation=Inter.NEAREST)
  >>> transforms_list = [decode_op, bbox_op]
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_random_resize_crop

@@ -3070,7 +3634,7 @@ class RandomResizedCropWithBBox(ImageTensorOperation):

  class RandomResize(ImageTensorOperation):
  """
- Resize the input image using :class
+ Resize the input image using :class:`~.vision.Inter` , a randomly selected interpolation mode.

  Args:
  size (Union[int, Sequence[int]]): The output size of the resized image. The size value(s) must be positive.

@@ -3087,14 +3651,22 @@ class RandomResize(ImageTensorOperation):
  ``CPU``

  Examples:
- >>>
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+ >>> # 1) randomly resize image, keeping aspect ratio
  >>> transforms_list1 = [vision.Decode(), vision.RandomResize(50)]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list1,
  ... input_columns=["image"])
- >>> # randomly resize image to landscape style
+ >>> # 2) randomly resize image to landscape style
  >>> transforms_list2 = [vision.Decode(), vision.RandomResize((40, 60))]
- >>>
- ...
+ >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list2,
+ ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_resize

@@ -3113,7 +3685,7 @@ class RandomResize(ImageTensorOperation):
  class RandomResizeWithBBox(ImageTensorOperation):
  """
  Tensor operation to resize the input image
- using a randomly selected interpolation mode :class
+ using a randomly selected interpolation mode :class:`~.vision.Inter` and adjust
  bounding boxes accordingly.

  Args:

@@ -3131,14 +3703,24 @@ class RandomResizeWithBBox(ImageTensorOperation):
  ``CPU``

  Examples:
- >>>
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+ >>>
+ >>> # 1) randomly resize image with bounding boxes, keeping aspect ratio
  >>> transforms_list1 = [vision.Decode(), vision.RandomResizeWithBBox(60)]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list1,
  ... input_columns=["image"])
- >>>
+ >>>
+ >>> # 2) randomly resize image with bounding boxes to portrait style
  >>> transforms_list2 = [vision.Decode(), vision.RandomResizeWithBBox((80, 60))]
- >>>
- ...
+ >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list2,
+ ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_resize

@@ -3162,27 +3744,18 @@ class RandomRotation(ImageTensorOperation, PyTensorOperation):
  degrees (Union[int, float, sequence]): Range of random rotation degrees.
  If `degrees` is a number, the range will be converted to (-degrees, degrees).
  If `degrees` is a sequence, it should be (min, max).
- resample (Inter, optional):
-
-
-
-
- - Inter.NEAREST, means resample method is nearest-neighbor interpolation.
-
- - Inter.BICUBIC, means resample method is bicubic interpolation.
-
- - Inter.AREA, means the interpolation method is pixel area interpolation.
-
- expand (bool, optional): Optional expansion flag. Default: False. If set to True, expand the output
- image to make it large enough to hold the entire rotated image.
- If set to False or omitted, make the output image the same size as the input.
+ resample (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+ Default: ``Inter.NEAREST``.
+ expand (bool, optional): Optional expansion flag. Default: ``False``. If set to ``True``,
+ expand the output image to make it large enough to hold the entire rotated image.
+ If set to ``False`` or omitted, make the output image the same size as the input.
  Note that the expand flag assumes rotation around the center and no translation.
- center (tuple, optional): Optional center of rotation (a 2-tuple). Default: None
- Origin is the top left corner. None sets to the center of the image.
+ center (tuple, optional): Optional center of rotation (a 2-tuple). Default: ``None``.
+ Origin is the top left corner. ``None`` sets to the center of the image.
  fill_value (Union[int, tuple[int]], optional): Optional fill color for the area outside the rotated image.
  If it is a 3-tuple, it is used to fill R, G, B channels respectively.
  If it is an integer, it is used for all RGB channels.
- The fill_value values must be in range [0, 255]. Default: 0
+ The fill_value values must be in range [0, 255]. Default: ``0``.

  Raises:
  TypeError: If `degrees` is not of type integer, float or sequence.

@@ -3197,13 +3770,21 @@ class RandomRotation(ImageTensorOperation, PyTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.vision import Inter
+ >>>
  >>> transforms_list = [vision.Decode(),
  ... vision.RandomRotation(degrees=5.0,
  ... resample=Inter.NEAREST,
  ... expand=True)]
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_random_rotation
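The rewritten RandomRotation (and, further down, Rotate) docstring keeps the statement that ``expand=True`` grows the output so it can hold the entire rotated image, assuming rotation about the center with no translation. A back-of-the-envelope sketch of that geometry; `expanded_size` is a hypothetical helper for illustration, not a MindSpore API:

    import math

    # Smallest axis-aligned canvas that holds a w x h image rotated by `deg`
    # degrees about its center (no translation), per the expand=True note above.
    def expanded_size(w, h, deg):
        t = math.radians(deg)
        new_w = abs(w * math.cos(t)) + abs(h * math.sin(t))
        new_h = abs(w * math.sin(t)) + abs(h * math.cos(t))
        return math.ceil(new_w), math.ceil(new_h)

    print(expanded_size(640, 480, 30))  # (795, 736): larger than the 640 x 480 input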
@@ -3274,6 +3855,10 @@ class RandomSelectSubpolicy(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> policy = [[(vision.RandomRotation((45, 45)), 0.5),
  ... (vision.RandomVerticalFlip(), 1),
  ... (vision.RandomColorAdjust(), 0.8)],

@@ -3281,6 +3866,10 @@ class RandomSelectSubpolicy(ImageTensorOperation):
  ... (vision.RandomColorAdjust(), 0.2)]]
  >>> image_folder_dataset = image_folder_dataset.map(operations=vision.RandomSelectSubpolicy(policy),
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_random_select_subpolicy_op

@@ -3310,7 +3899,7 @@ class RandomSharpness(ImageTensorOperation, PyTensorOperation):
  Args:
  degrees (Union[list, tuple], optional): Range of random sharpness adjustment degrees,
  which must be non-negative. It should be in (min, max) format. If min=max, then
- it is a single fixed magnitude operation. Default: (0.1, 1.9)
+ it is a single fixed magnitude operation. Default: ``(0.1, 1.9)``.

  Raises:
  TypeError : If `degrees` is not a list or a tuple.

@@ -3321,9 +3910,17 @@ class RandomSharpness(ImageTensorOperation, PyTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> transforms_list = [vision.Decode(), vision.RandomSharpness(degrees=(0.2, 1.9))]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_positive_degrees

@@ -3354,7 +3951,7 @@ class RandomSolarize(ImageTensorOperation):
  the subrange to (255 - pixel).

  Args:
- threshold (tuple, optional): Range of random solarize threshold. Default: (0, 255)
+ threshold (tuple, optional): Range of random solarize threshold. Default: ``(0, 255)``.
  Threshold values should always be in (min, max) format,
  where min and max are integers in the range [0, 255], and min <= max.
  If min=max, then invert all pixel values above min(max).

@@ -3367,9 +3964,17 @@ class RandomSolarize(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> transforms_list = [vision.Decode(), vision.RandomSolarize(threshold=(10,100))]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_random_solarize
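The RandomSolarize docstring above describes the effect as inverting the selected pixel subrange to (255 - pixel). A minimal NumPy sketch of that rule with a fixed threshold range; `solarize_fixed` is a hypothetical helper for illustration only, whereas the operator itself draws the range from its `threshold` argument:

    import numpy as np

    # Invert pixel values that fall inside [lo, hi] to (255 - pixel),
    # mirroring the rule quoted in the docstring above.
    def solarize_fixed(img, lo=10, hi=100):
        out = img.copy()
        mask = (out >= lo) & (out <= hi)
        out[mask] = 255 - out[mask]
        return out

    print(solarize_fixed(np.array([5, 50, 200], dtype=np.uint8)))  # [  5 205 200]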
@@ -3388,7 +3993,7 @@ class RandomVerticalFlip(ImageTensorOperation, PyTensorOperation):

  Args:
  prob (float, optional): Probability of the image being flipped, which
- must be in range of [0.0, 1.0]. Default: 0.5
+ must be in range of [0.0, 1.0]. Default: ``0.5``.

  Raises:
  TypeError: If `prob` is not of type float.

@@ -3399,9 +4004,17 @@ class RandomVerticalFlip(ImageTensorOperation, PyTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> transforms_list = [vision.Decode(), vision.RandomVerticalFlip(0.25)]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_prob

@@ -3431,7 +4044,7 @@ class RandomVerticalFlipWithBBox(ImageTensorOperation):

  Args:
  prob (float, optional): Probability of the image being flipped,
- which must be in range of [0.0, 1.0]. Default: 0.5
+ which must be in range of [0.0, 1.0]. Default: ``0.5``.

  Raises:
  TypeError: If `prob` is not of type float.

@@ -3442,9 +4055,17 @@ class RandomVerticalFlipWithBBox(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> transforms_list = [vision.Decode(), vision.RandomVerticalFlipWithBBox(0.20)]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_prob

@@ -3463,7 +4084,8 @@ class Rescale(ImageTensorOperation):
  with: output = image * rescale + shift.

  Note:
- This operation
+ This operation is executed on the CPU by default, but it is also supported
+ to be executed on the GPU or Ascend via heterogeneous acceleration.

  Args:
  rescale (float): Rescale factor.

@@ -3474,12 +4096,20 @@ class Rescale(ImageTensorOperation):
  TypeError: If `shift` is not of type float.

  Supported Platforms:
- ``
+ ``CPU`` ``GPU`` ``Ascend``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> transforms_list = [vision.Decode(), vision.Rescale(1.0 / 255.0, -1.0)]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_rescale
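The Rescale docstring keeps the formula output = image * rescale + shift, and the new example uses Rescale(1.0 / 255.0, -1.0). A plain NumPy sketch of what those factors do to a few pixel values (the operator applies the same arithmetic element-wise):

    import numpy as np

    # output = image * rescale + shift, with the same factors as the
    # Rescale(1.0 / 255.0, -1.0) example shown in the diff above.
    rescale, shift = 1.0 / 255.0, -1.0
    pixels = np.array([0, 128, 255], dtype=np.float32)
    print(pixels * rescale + shift)  # roughly [-1.0, -0.498, 0.0]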
@@ -3495,42 +4125,40 @@ class Rescale(ImageTensorOperation):

  class Resize(ImageTensorOperation, PyTensorOperation):
  """
- Resize the input image to the given size with a given interpolation mode :class
+ Resize the input image to the given size with a given interpolation mode :class:`~.vision.Inter` .

  Args:
  size (Union[int, Sequence[int]]): The output size of the resized image. The size value(s) must be positive.
  If size is an integer, the smaller edge of the image will be resized to this value with
  the same image aspect ratio.
  If size is a sequence of length 2, it should be (height, width).
- interpolation (Inter, optional): Image interpolation
-
- Inter.ANTIALIAS].
-
- - Inter.BILINEAR, bilinear interpolation.
- - Inter.LINEAR, bilinear interpolation, here is the same as Inter.BILINEAR.
- - Inter.NEAREST, nearest-neighbor interpolation.
- - Inter.BICUBIC, bicubic interpolation.
- - Inter.AREA, pixel area interpolation.
- - Inter.PILCUBIC, bicubic interpolation like implemented in Pillow, only valid when the input is
- a 3-channel image in the numpy.ndarray format.
- - Inter.ANTIALIAS, antialias interpolation.
+ interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+ Default: ``Inter.LINEAR``.

  Raises:
  TypeError: If `size` is not of type int or Sequence[int].
- TypeError: If `interpolation` is not of type :class
+ TypeError: If `interpolation` is not of type :class:`~.vision.Inter` .
  ValueError: If `size` is not positive.
  RuntimeError: If given tensor shape is not <H, W> or <H, W, C>.

  Supported Platforms:
- ``CPU``
+ ``CPU`` ``Ascend``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.vision import Inter
+ >>>
  >>> decode_op = vision.Decode()
  >>> resize_op = vision.Resize([100, 75], Inter.BICUBIC)
  >>> transforms_list = [decode_op, resize_op]
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_resize_interpolation

@@ -3547,10 +4175,48 @@ class Resize(ImageTensorOperation, PyTensorOperation):
  self.implementation = Implementation.PY
  self.random = False

+ @check_device_target
+ def device(self, device_target="CPU"):
+ """
+ Set the device for the current operator execution.
+
+ Args:
+ device_target (str, optional): The operator will be executed on this device. Currently supports
+ ``CPU`` and ``Ascend`` , where ``Ascend`` refers to Ascend910B device. Default: ``CPU`` .
+
+ Raises:
+ TypeError: If `device_target` is not of type str.
+ ValueError: If `device_target` is not within the valid set of ['CPU', 'Ascend'].
+
+ Supported Platforms:
+ ``CPU`` ``Ascend``
+
+ Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>> from mindspore.dataset.vision import Inter
+ >>>
+ >>> decode_op = vision.Decode()
+ >>> resize_op = vision.Resize([100, 75], Inter.BICUBIC).device("Ascend")
+ >>> transforms_list = [decode_op, resize_op]
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
+ >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
+ ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
+ """
+ self.device_target = device_target
+ if self.interpolation == Inter.ANTIALIAS and self.device_target == "Ascend":
+ raise ValueError("The current InterpolationMode is not supported by DVPP. It is {}."
+ .format(self.interpolation))
+ return self
+
  def parse(self):
  if self.interpolation == Inter.ANTIALIAS:
- raise TypeError("
- return cde.ResizeOperation(self.c_size, Inter.to_c_type(self.interpolation))
+ raise TypeError("The current InterpolationMode is not supported with NumPy input.")
+ return cde.ResizeOperation(self.c_size, Inter.to_c_type(self.interpolation), self.device_target)

  def _execute_py(self, img):
  """
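A usage note on the device() method added above: offloading Resize to Ascend is a chained call, and the added guard rejects Inter.ANTIALIAS for the Ascend (DVPP) backend at the point where device() is called. A hedged sketch, assuming MindSpore 2.2 with the vision module importable; actually running the op on Ascend additionally requires an Ascend910B environment:

    import mindspore.dataset.vision as vision
    from mindspore.dataset.vision import Inter

    # BICUBIC can be offloaded; ANTIALIAS is refused by the new device() check.
    resize_ascend = vision.Resize([100, 75], Inter.BICUBIC).device("Ascend")

    try:
        vision.Resize([100, 75], Inter.ANTIALIAS).device("Ascend")
    except ValueError as err:
        print(err)  # "The current InterpolationMode is not supported by DVPP. ..."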
@@ -3580,14 +4246,8 @@ class ResizedCrop(ImageTensorOperation):
  If int is provided, the smaller edge of the image will be resized to this value,
  keeping the image aspect ratio the same.
  If Sequence[int, int] is provided, it should be (height, width).
- interpolation (Inter, optional): Image interpolation method
-
-
- - Inter.LINEAR, bilinear interpolation.
- - Inter.NEAREST, nearest-neighbor interpolation.
- - Inter.BICUBIC, bicubic interpolation.
- - Inter.AREA, pixel area interpolation.
- - Inter.PILCUBIC, cubic interpolation based on the implementation of Pillow
+ interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+ Default: ``Inter.BILINEAR``.

  Raises:
  TypeError: If `top` is not of type int.

@@ -3600,17 +4260,25 @@ class ResizedCrop(ImageTensorOperation):
  ValueError: If `width` is not positive.
  TypeError: If `size` is not of type int or Sequence[int, int].
  ValueError: If `size` is not posotive.
- TypeError: If `interpolation` is not of type :class
+ TypeError: If `interpolation` is not of type :class:`~.vision.Inter` .
  RuntimeError: If shape of the input image is not <H, W> or <H, W, C>.

  Supported Platforms:
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.vision import Inter
+ >>>
  >>> transforms_list = [vision.Decode(), vision.ResizedCrop(0, 0, 128, 128, (100, 75), Inter.BILINEAR)]
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_resized_crop

@@ -3641,18 +4309,12 @@ class ResizeWithBBox(ImageTensorOperation):
  If size is an integer, smaller edge of the image will be resized to this value with
  the same image aspect ratio.
  If size is a sequence of length 2, it should be (height, width).
- interpolation (Inter, optional): Image interpolation
-
-
- - Inter.LINEAR, means interpolation method is bilinear interpolation.
-
- - Inter.NEAREST, means interpolation method is nearest-neighbor interpolation.
-
- - Inter.BICUBIC, means interpolation method is bicubic interpolation.
+ interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+ Default: ``Inter.LINEAR``.

  Raises:
  TypeError: If `size` is not of type int or Sequence[int].
- TypeError: If `interpolation` is not of type :class
+ TypeError: If `interpolation` is not of type :class:`~.vision.Inter` .
  ValueError: If `size` is not positive.
  RuntimeError: If given tensor shape is not <H, W> or <H, W, C>.


@@ -3660,12 +4322,20 @@ class ResizeWithBBox(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.vision import Inter
+ >>>
  >>> decode_op = vision.Decode()
  >>> bbox_op = vision.ResizeWithBBox(50, Inter.NEAREST)
  >>> transforms_list = [decode_op, bbox_op]
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_resize_interpolation

@@ -3687,8 +4357,8 @@ class RgbToHsv(PyTensorOperation):
  Convert the input numpy.ndarray images from RGB to HSV.

  Args:
- is_hwc (bool): If True
- Otherwise, it is in shape of <C, H, W> or <N, C, H, W>. Default: False
+ is_hwc (bool): If ``True``, means the input image is in shape of <H, W, C> or <N, H, W, C>.
+ Otherwise, it is in shape of <C, H, W> or <N, C, H, W>. Default: ``False``.

  Raises:
  TypeError: If `is_hwc` is not of type bool.

@@ -3697,6 +4367,8 @@ class RgbToHsv(PyTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.transforms import Compose
  >>>
  >>> transforms_list = Compose([vision.Decode(to_pil=True),

@@ -3704,8 +4376,13 @@ class RgbToHsv(PyTensorOperation):
  ... vision.ToTensor(),
  ... vision.RgbToHsv()])
  >>> # apply the transform to dataset through map function
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns="image")
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_rgb_to_hsv

@@ -3734,28 +4411,22 @@ class Rotate(ImageTensorOperation):

  Args:
  degrees (Union[int, float]): Rotation degrees.
-
-
-
-
-
- - Inter.NEAREST, means resample method is nearest-neighbor interpolation.
- - Inter.BICUBIC, means resample method is bicubic interpolation.
-
- expand (bool, optional): Optional expansion flag. Default: False. If set to True, expand the output
- image to make it large enough to hold the entire rotated image.
- If set to False or omitted, make the output image the same size as the input.
+ resample (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+ Default: ``Inter.NEAREST``.
+ expand (bool, optional): Optional expansion flag. Default: ``False``. If set to ``True``,
+ expand the output image to make it large enough to hold the entire rotated image.
+ If set to ``False`` or omitted, make the output image the same size as the input.
  Note that the expand flag assumes rotation around the center and no translation.
- center (tuple, optional): Optional center of rotation (a 2-tuple). Default: None
- Origin is the top left corner. None sets to the center of the image.
+ center (tuple, optional): Optional center of rotation (a 2-tuple). Default: ``None``.
+ Origin is the top left corner. ``None`` sets to the center of the image.
  fill_value (Union[int, tuple[int]], optional): Optional fill color for the area outside the rotated image.
  If it is a 3-tuple, it is used to fill R, G, B channels respectively.
  If it is an integer, it is used for all RGB channels.
- The fill_value values must be in range [0, 255]. Default: 0
+ The fill_value values must be in range [0, 255]. Default: ``0``.

  Raises:
  TypeError: If `degrees` is not of type integer, float or sequence.
- TypeError: If `resample` is not of type :class
+ TypeError: If `resample` is not of type :class:`~.vision.Inter` .
  TypeError: If `expand` is not of type bool.
  TypeError: If `center` is not of type tuple.
  TypeError: If `fill_value` is not of type int or tuple[int].

@@ -3766,13 +4437,21 @@ class Rotate(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.vision import Inter
+ >>>
  >>> transforms_list = [vision.Decode(),
  ... vision.Rotate(degrees=30.0,
  ... resample=Inter.NEAREST,
  ... expand=True)]
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_rotate

@@ -3805,13 +4484,14 @@ class SlicePatches(ImageTensorOperation):
  number of output tensors is equal to num_height*num_width.

  Args:
- num_height (int, optional): The number of patches in vertical direction, which must be positive. Default: 1
- num_width (int, optional): The number of patches in horizontal direction, which must be positive.
-
-
+ num_height (int, optional): The number of patches in vertical direction, which must be positive. Default: ``1``.
+ num_width (int, optional): The number of patches in horizontal direction, which must be positive.
+ Default: ``1``.
+ slice_mode (SliceMode, optional): A mode represents pad or drop. Default: ``SliceMode.PAD``.
+ It can be ``SliceMode.PAD``, ``SliceMode.DROP``.
  fill_value (int, optional): The border width in number of pixels in
  right and bottom direction if slice_mode is set to be SliceMode.PAD.
- The `fill_value` must be in range [0, 255]. Default: 0
+ The `fill_value` must be in range [0, 255]. Default: ``0``.

  Raises:
  TypeError: If `num_height` is not of type integer.

@@ -3827,15 +4507,24 @@ class SlicePatches(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
  >>> # default padding mode
  >>> decode_op = vision.Decode()
  >>> num_h, num_w = (1, 4)
  >>> slice_patches_op = vision.SlicePatches(num_h, num_w)
  >>> transforms_list = [decode_op, slice_patches_op]
  >>> cols = ['img' + str(x) for x in range(num_h*num_w)]
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"],
  ... output_columns=cols)
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_slice_patches

@@ -3869,9 +4558,17 @@ class Solarize(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> transforms_list = [vision.Decode(), vision.Solarize(threshold=(10, 100))]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_solarize

@@ -3894,8 +4591,8 @@ class TenCrop(PyTensorOperation):
  size (Union[int, Sequence[int, int]]): The size of the cropped image.
  If a single integer is provided, a square of size (size, size) will be cropped with this value.
  If a sequence of length 2 is provided, an image of size (height, width) will be cropped.
- use_vertical_flip (bool, optional): If True
- horizontally. Default: False
+ use_vertical_flip (bool, optional): If ``True``, flip the images vertically. Otherwise, flip them
+ horizontally. Default: ``False``.

  Raises:
  TypeError: If `size` is not of type integer or sequence of integer.

@@ -3906,6 +4603,8 @@ class TenCrop(PyTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> import numpy
  >>> from mindspore.dataset.transforms import Compose
  >>>

@@ -3914,8 +4613,13 @@ class TenCrop(PyTensorOperation):
  ... # 4D stack of 10 images
  ... lambda *images: numpy.stack([vision.ToTensor()(image) for image in images])])
  >>> # apply the transform to dataset through map function
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns="image")
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_ten_crop

@@ -3950,6 +4654,8 @@ class ToNumpy(PyTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.transforms import Compose
  >>>
  >>> # Use ToNumpy to explicitly select C++ implementation of subsequent op

@@ -3958,8 +4664,13 @@ class ToNumpy(PyTensorOperation):
  ... vision.ToNumpy(),
  ... vision.Resize((100, 120))])
  >>> # apply the transform to dataset through map function
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns="image")
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  def __init__(self):

@@ -3985,9 +4696,6 @@ class ToPIL(PyTensorOperation):
  """
  Convert the input decoded numpy.ndarray image to PIL Image.

- Note:
- The conversion mode will be determined by the data type using `PIL.Image.fromarray` .
-
  Raises:
  TypeError: If the input image is not of type :class:`numpy.ndarray` or `PIL.Image.Image` .


@@ -3995,6 +4703,8 @@ class ToPIL(PyTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.transforms import Compose
  >>>
  >>> # data is already decoded, but not in PIL Image format

@@ -4002,8 +4712,13 @@ class ToPIL(PyTensorOperation):
  ... vision.RandomHorizontalFlip(0.5),
  ... vision.ToTensor()])
  >>> # apply the transform to dataset through map function
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns="image")
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  def __init__(self):

@@ -4031,7 +4746,7 @@ class ToTensor(ImageTensorOperation):

  Args:
  output_type (Union[mindspore.dtype, numpy.dtype], optional): The desired dtype of the output image.
- Default:
+ Default: ``np.float32`` .

  Raises:
  TypeError: If the input image is not of type `PIL.Image.Image` or :class:`numpy.ndarray` .

@@ -4041,6 +4756,8 @@ class ToTensor(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.transforms import Compose
  >>>
  >>> # create a list of transformations to be applied to the "image" column of each data row

@@ -4048,8 +4765,13 @@ class ToTensor(ImageTensorOperation):
  ... vision.RandomHorizontalFlip(0.5),
  ... vision.ToTensor()])
  >>> # apply the transform to dataset through map function
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns="image")
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_to_tensor

@@ -4074,19 +4796,22 @@ class ToType(TypeCast):
  It is the same as that of :class:`mindspore.dataset.transforms.TypeCast` .

  Note:
- This operation
+ This operation is executed on the CPU by default, but it is also supported
+ to be executed on the GPU or Ascend via heterogeneous acceleration.

  Args:
  data_type (Union[mindspore.dtype, numpy.dtype]): The desired data type of the output image,
- such as
+ such as ``numpy.float32`` .

  Raises:
  TypeError: If `data_type` is not of type :class:`mindspore.dtype` or :class:`numpy.dtype` .

  Supported Platforms:
- ``
+ ``CPU`` ``GPU`` ``Ascend``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> import numpy as np
  >>> from mindspore.dataset.transforms import Compose
  >>>

@@ -4095,8 +4820,13 @@ class ToType(TypeCast):
  ... vision.ToTensor(),
  ... vision.ToType(np.float32)])
  >>> # apply the transform to dataset through map function
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns="image")
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

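The ToType docstring above states it is the same operation as transforms.TypeCast, now also documented for GPU/Ascend via heterogeneous acceleration. A minimal NumPy-side sketch of what the cast amounts to, for orientation only (the operator applies the equivalent conversion inside the dataset pipeline):

    import numpy as np

    # Only the element dtype changes; shape and values stay as they are,
    # roughly what ToType(np.float32) / TypeCast(np.float32) produce per sample.
    img = np.arange(6, dtype=np.uint8).reshape(2, 3)
    cast = img.astype(np.float32)
    print(cast.dtype, cast.shape)  # float32 (2, 3)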
@@ -4111,24 +4841,18 @@ class TrivialAugmentWide(ImageTensorOperation):

  Args:
  num_magnitude_bins (int, optional): The number of different magnitude values,
- must be greater than or equal to 2. Default: 31
- interpolation (Inter, optional): Image interpolation method
-
-
- - Inter.NEAREST, nearest-neighbor interpolation.
- - Inter.BILINEAR, bilinear interpolation.
- - Inter.BICUBIC, bicubic interpolation.
- - Inter.AREA, pixel area interpolation.
-
+ must be greater than or equal to 2. Default: ``31``.
+ interpolation (Inter, optional): Image interpolation method defined by :class:`~.vision.Inter` .
+ Default: ``Inter.NEAREST``.
  fill_value (Union[int, tuple[int, int, int]], optional): Pixel fill value for the area outside the
- transformed image, must be in range of [0, 255]. Default: 0
+ transformed image, must be in range of [0, 255]. Default: ``0``.
  If int is provided, pad all RGB channels with this value.
  If tuple[int, int, int] is provided, pad R, G, B channels respectively.

  Raises:
  TypeError: If `num_magnitude_bins` is not of type int.
  ValueError: If `num_magnitude_bins` is less than 2.
- TypeError: If `interpolation` not of type :class
+ TypeError: If `interpolation` not of type :class:`~.vision.Inter` .
  TypeError: If `fill_value` is not of type int or tuple[int, int, int].
  ValueError: If `fill_value` is not in range of [0, 255].
  RuntimeError: If shape of the input image is not <H, W, C>.

@@ -4137,14 +4861,20 @@ class TrivialAugmentWide(ImageTensorOperation):
  ``CPU``

  Examples:
- >>>
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>> from mindspore.dataset.vision import Inter
  >>>
- >>> transforms_list = [vision.Decode(),
- ...
- ...
-
+ >>> transforms_list = [vision.Decode(), vision.TrivialAugmentWide(num_magnitude_bins=31,
+ ... interpolation=Inter.NEAREST,
+ ... fill_value=0)]
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_trivial_augment_wide

@@ -4173,7 +4903,8 @@ class UniformAugment(CompoundOperation):

  Args:
  transforms (Sequence): Sequence of transformations to select from.
- num_ops (int, optional): Number of transformations to be sequentially and randomly applied.
+ num_ops (int, optional): Number of transformations to be sequentially and randomly applied.
+ Default: ``2``.

  Raises:
  TypeError: If `transforms` is not a sequence of data processing operations.

@@ -4184,6 +4915,8 @@ class UniformAugment(CompoundOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
  >>> from mindspore.dataset.transforms import Compose
  >>>
  >>> transforms = [vision.CenterCrop(64),

@@ -4194,8 +4927,13 @@ class UniformAugment(CompoundOperation):
  ... vision.UniformAugment(transforms),
  ... vision.ToTensor()])
  >>> # apply the transform to dataset through map function
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns="image")
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  @check_uniform_augment

@@ -4232,9 +4970,17 @@ class VerticalFlip(ImageTensorOperation):
  ``CPU``

  Examples:
+ >>> import mindspore.dataset as ds
+ >>> import mindspore.dataset.vision as vision
+ >>>
+ >>> image_folder_dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory")
  >>> transforms_list = [vision.Decode(), vision.VerticalFlip()]
  >>> image_folder_dataset = image_folder_dataset.map(operations=transforms_list,
  ... input_columns=["image"])
+
+ Tutorial Examples:
+ - `Illustration of vision transforms
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/vision_gallery.html>`_
  """

  def __init__(self):