mindspore-2.1.0-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.2.0-cp38-cp38-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -1
- mindspore/_akg/akg/build_module.py +5 -6
- mindspore/_akg/akg/composite/build_module.py +49 -16
- mindspore/_akg/akg/composite/split_stitch.py +10 -11
- mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
- mindspore/_akg/akg/tvm/api.py +4 -3
- mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
- mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
- mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
- mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
- mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
- mindspore/_akg/akg/tvm/build_module.py +16 -1
- mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
- mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
- mindspore/_akg/akg/tvm/ir_builder.py +1 -1
- mindspore/_akg/akg/tvm/module.py +1 -2
- mindspore/_akg/akg/tvm/stmt.py +2 -2
- mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
- mindspore/_akg/akg/utils/kernel_exec.py +58 -260
- mindspore/_akg/akg/utils/result_analysis.py +4 -24
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -1
- mindspore/_checkparam.py +26 -32
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +1 -9
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +122 -15
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +2 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +12 -15
- mindspore/_extends/parse/namespace.py +7 -33
- mindspore/_extends/parse/parser.py +61 -71
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +72 -95
- mindspore/_extends/parse/trope.py +1 -1
- mindspore/_extends/remote/kernel_build_server.py +24 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/amp.py +47 -11
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost.py +1 -8
- mindspore/boost/boost_cell_wrapper.py +3 -2
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +8 -7
- mindspore/common/__init__.py +5 -3
- mindspore/common/_jit_fallback_utils.py +6 -0
- mindspore/common/_register_for_adapter.py +2 -0
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +13 -0
- mindspore/common/_utils.py +13 -0
- mindspore/common/api.py +173 -258
- mindspore/common/auto_dynamic_shape.py +498 -0
- mindspore/common/dtype.py +18 -11
- mindspore/common/dump.py +6 -4
- mindspore/common/initializer.py +14 -14
- mindspore/common/jit_config.py +33 -15
- mindspore/common/lazy_inline.py +126 -7
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/parameter.py +51 -41
- mindspore/common/seed.py +4 -4
- mindspore/common/sparse_tensor.py +13 -14
- mindspore/common/tensor.py +240 -145
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +83 -4
- mindspore/communication/management.py +152 -84
- mindspore/config/op_info.config +13 -2
- mindspore/config/super_bar_config.json +4 -2
- mindspore/context.py +143 -59
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +2 -2
- mindspore/dataset/audio/transforms.py +52 -52
- mindspore/dataset/callback/ds_callback.py +16 -2
- mindspore/dataset/core/config.py +68 -51
- mindspore/dataset/engine/cache_client.py +28 -5
- mindspore/dataset/engine/datasets.py +250 -112
- mindspore/dataset/engine/datasets_audio.py +43 -211
- mindspore/dataset/engine/datasets_standard_format.py +11 -35
- mindspore/dataset/engine/datasets_text.py +43 -67
- mindspore/dataset/engine/datasets_user_defined.py +86 -100
- mindspore/dataset/engine/datasets_vision.py +219 -1029
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +4 -0
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/samplers.py +1 -1
- mindspore/dataset/engine/validators.py +19 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +101 -127
- mindspore/dataset/text/utils.py +205 -138
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +95 -40
- mindspore/dataset/utils/browse_dataset.py +8 -2
- mindspore/dataset/utils/line_reader.py +17 -19
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +6 -3
- mindspore/dataset/vision/transforms.py +409 -287
- mindspore/dataset/vision/utils.py +13 -14
- mindspore/dataset/vision/validators.py +11 -1
- mindspore/experimental/map_parameter.py +14 -0
- mindspore/{nn/optim_ex → experimental/optim}/__init__.py +30 -29
- mindspore/{nn/optim_ex → experimental/optim}/adam.py +59 -66
- mindspore/{nn/optim_ex → experimental/optim}/adamw.py +181 -203
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/{nn/optim_ex → experimental/optim}/optimizer.py +252 -259
- mindspore/{nn/optim_ex → experimental/optim}/sgd.py +147 -152
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +0 -14
- mindspore/include/api/types.h +37 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/dataset/constants.h +6 -5
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +13 -13
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/include/mindapi/base/type_id.h +1 -0
- mindspore/include/mindapi/base/types.h +1 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libjemalloc.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/nn/__init__.py +0 -2
- mindspore/nn/cell.py +316 -74
- mindspore/nn/dynamic_lr.py +21 -21
- mindspore/nn/layer/activation.py +21 -28
- mindspore/nn/layer/basic.py +15 -13
- mindspore/nn/layer/channel_shuffle.py +1 -1
- mindspore/nn/layer/container.py +271 -9
- mindspore/nn/layer/conv.py +310 -207
- mindspore/nn/layer/dense.py +8 -5
- mindspore/nn/layer/embedding.py +33 -27
- mindspore/nn/layer/flash_attention.py +82 -41
- mindspore/nn/layer/image.py +8 -6
- mindspore/nn/layer/math.py +13 -18
- mindspore/nn/layer/normalization.py +107 -66
- mindspore/nn/layer/padding.py +1 -1
- mindspore/nn/layer/pooling.py +131 -109
- mindspore/nn/layer/rnn_cells.py +22 -17
- mindspore/nn/layer/rnns.py +13 -16
- mindspore/nn/layer/thor_layer.py +1 -1
- mindspore/nn/layer/transformer.py +221 -154
- mindspore/nn/learning_rate_schedule.py +9 -1
- mindspore/nn/loss/loss.py +235 -174
- mindspore/nn/optim/ada_grad.py +2 -1
- mindspore/nn/optim/adadelta.py +1 -0
- mindspore/nn/optim/adafactor.py +2 -1
- mindspore/nn/optim/adam.py +7 -4
- mindspore/nn/optim/adamax.py +3 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -3
- mindspore/nn/optim/ftrl.py +6 -5
- mindspore/nn/optim/lamb.py +7 -4
- mindspore/nn/optim/lars.py +1 -1
- mindspore/nn/optim/lazyadam.py +5 -3
- mindspore/nn/optim/momentum.py +2 -1
- mindspore/nn/optim/optimizer.py +53 -4
- mindspore/nn/optim/proximal_ada_grad.py +3 -4
- mindspore/nn/optim/rmsprop.py +4 -3
- mindspore/nn/optim/rprop.py +23 -12
- mindspore/nn/optim/sgd.py +26 -11
- mindspore/nn/optim/thor.py +9 -7
- mindspore/nn/probability/bijector/bijector.py +5 -5
- mindspore/nn/probability/bijector/power_transform.py +27 -27
- mindspore/nn/probability/bijector/softplus.py +3 -3
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -3
- mindspore/nn/probability/distribution/bernoulli.py +5 -5
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +7 -7
- mindspore/nn/probability/distribution/cauchy.py +0 -1
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +4 -4
- mindspore/nn/probability/distribution/gumbel.py +4 -4
- mindspore/nn/probability/distribution/log_normal.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +4 -4
- mindspore/nn/probability/distribution/transformed_distribution.py +3 -3
- mindspore/nn/probability/distribution/uniform.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +78 -34
- mindspore/nn/wrap/grad_reducer.py +8 -5
- mindspore/nn/wrap/loss_scale.py +105 -42
- mindspore/numpy/array_creations.py +1 -2
- mindspore/numpy/array_ops.py +3 -2
- mindspore/offline_debug/convert_async.py +2 -2
- mindspore/ops/_grad_experimental/__init__.py +0 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +1 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +15 -2
- mindspore/ops/_grad_experimental/grad_debug_ops.py +0 -37
- mindspore/ops/_grad_experimental/grad_implementations.py +10 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +2 -216
- mindspore/ops/_grad_experimental/grad_math_ops.py +0 -181
- mindspore/ops/_grad_experimental/grad_sparse.py +15 -0
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +165 -109
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +144 -86
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +172 -187
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +51 -57
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +6 -17
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +14 -2
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +3 -3
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -5
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -5
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/tbe/__init__.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +45 -13
- mindspore/ops/_utils/utils.py +4 -1
- mindspore/ops/_vmap/vmap_array_ops.py +3 -3
- mindspore/ops/_vmap/vmap_base.py +3 -3
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +61 -7
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/base.py +37 -10
- mindspore/ops/composite/math_ops.py +5 -4
- mindspore/ops/composite/multitype_ops/_compile_utils.py +273 -72
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +16 -9
- mindspore/ops/composite/multitype_ops/add_impl.py +43 -4
- mindspore/ops/composite/multitype_ops/getitem_impl.py +40 -2
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/array_func.py +167 -189
- mindspore/ops/function/clip_func.py +81 -13
- mindspore/ops/function/debug_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +18 -8
- mindspore/ops/function/image_func.py +10 -4
- mindspore/ops/function/linalg_func.py +5 -5
- mindspore/ops/function/math_func.py +575 -386
- mindspore/ops/function/nn_func.py +470 -251
- mindspore/ops/function/random_func.py +86 -56
- mindspore/ops/function/sparse_func.py +1 -1
- mindspore/ops/function/sparse_unary_func.py +14 -12
- mindspore/ops/function/vmap_func.py +6 -5
- mindspore/ops/functional.py +15 -10
- mindspore/ops/op_info_register.py +235 -19
- mindspore/ops/operations/__init__.py +25 -17
- mindspore/ops/operations/_grad_ops.py +52 -7
- mindspore/ops/operations/_inner_ops.py +213 -12
- mindspore/ops/operations/_quant_ops.py +4 -8
- mindspore/ops/operations/_sequence_ops.py +42 -0
- mindspore/ops/operations/array_ops.py +64 -280
- mindspore/ops/operations/comm_ops.py +105 -57
- mindspore/ops/operations/custom_ops.py +10 -3
- mindspore/ops/operations/debug_ops.py +8 -4
- mindspore/ops/operations/image_ops.py +18 -12
- mindspore/ops/operations/math_ops.py +185 -138
- mindspore/ops/operations/nn_ops.py +716 -492
- mindspore/ops/operations/other_ops.py +0 -22
- mindspore/ops/operations/random_ops.py +53 -111
- mindspore/ops/operations/sparse_ops.py +3 -1
- mindspore/ops/primitive.py +24 -18
- mindspore/parallel/_auto_parallel_context.py +68 -8
- mindspore/parallel/_cost_model_context.py +2 -2
- mindspore/parallel/_offload_context.py +17 -3
- mindspore/parallel/_parallel_serialization.py +2 -2
- mindspore/parallel/_ps_context.py +12 -0
- mindspore/parallel/_tensor.py +14 -12
- mindspore/parallel/_transformer/layers.py +5 -3
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +2 -2
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +23 -3
- mindspore/parallel/_utils.py +11 -7
- mindspore/parallel/algo_parameter_config.py +85 -5
- mindspore/parallel/checkpoint_transform.py +6 -10
- mindspore/parallel/shard.py +4 -4
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +3 -2
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +2 -2
- mindspore/profiler/parser/ascend_fpbp_generator.py +1 -1
- mindspore/profiler/parser/ascend_hccl_generator.py +17 -12
- mindspore/profiler/parser/ascend_msprof_exporter.py +104 -252
- mindspore/profiler/parser/ascend_msprof_generator.py +8 -8
- mindspore/profiler/parser/ascend_op_generator.py +5 -5
- mindspore/profiler/parser/ascend_steptrace_generator.py +6 -4
- mindspore/profiler/parser/ascend_timeline_generator.py +9 -6
- mindspore/profiler/parser/base_timeline_generator.py +9 -7
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +14 -10
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +37 -21
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +2 -2
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +2 -2
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +139 -71
- mindspore/rewrite/api/node.py +102 -19
- mindspore/rewrite/api/node_type.py +5 -1
- mindspore/rewrite/api/scoped_value.py +9 -17
- mindspore/rewrite/api/symbol_tree.py +131 -47
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +93 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +33 -24
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{_extends/graph_kernel/expanders/complex → rewrite/node}/__init__.py +9 -9
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +273 -234
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/{topological_manager.py → node/node_topological_manager.py} +13 -46
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +216 -221
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +174 -113
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +36 -15
- mindspore/rewrite/parsers/function_def_parser.py +24 -16
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +196 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree.py +525 -577
- mindspore/rewrite/symbol_tree_builder.py +9 -193
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +2 -2
- mindspore/{ops/bprop_mindir → safeguard}/__init__.py +4 -3
- mindspore/safeguard/rewrite_obfuscation.py +517 -0
- mindspore/scipy/linalg.py +1 -1
- mindspore/scipy/optimize/minimize.py +7 -3
- mindspore/train/_utils.py +7 -3
- mindspore/train/amp.py +323 -123
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/_backup_and_restore.py +2 -12
- mindspore/train/callback/_callback.py +29 -4
- mindspore/train/callback/_checkpoint.py +23 -8
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +4 -4
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +3 -4
- mindspore/train/callback/_summary_collector.py +14 -7
- mindspore/train/callback/_time_monitor.py +58 -5
- mindspore/train/data_sink.py +5 -11
- mindspore/train/dataset_helper.py +83 -57
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/__init__.py +3 -3
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +3 -2
- mindspore/train/metrics/mean_surface_distance.py +3 -2
- mindspore/train/metrics/metric.py +39 -19
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +4 -3
- mindspore/train/mind_ir_pb2.py +85 -36
- mindspore/train/model.py +185 -45
- mindspore/train/serialization.py +390 -150
- mindspore/train/summary/_writer_pool.py +3 -2
- mindspore/train/summary/summary_record.py +14 -10
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/train/train_thor/dataset_helper.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/METADATA +6 -7
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/RECORD +458 -518
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
- mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
- mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
- mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
- mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
- mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
- mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
- mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
- mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
- mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
- mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
- mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
- mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
- mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
- mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
- mindspore/_akg/akg/tvm/rpc/base.py +0 -182
- mindspore/_akg/akg/tvm/rpc/client.py +0 -436
- mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
- mindspore/_akg/akg/tvm/rpc/server.py +0 -413
- mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
- mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -54
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/dataset/datapreprocess/__init__.py +0 -20
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/include/api/net.h +0 -142
- mindspore/nn/lr_scheduler.py +0 -262
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -248
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -181
- mindspore/ops/_grad_experimental/grad_other_ops.py +0 -72
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/_grad_experimental/grad_sequence_ops.py +0 -351
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
mindspore/ops/function/array_func.py

@@ -26,8 +26,7 @@ import mindspore.common.dtype as mstype
 from mindspore.ops import operations as P
 from mindspore.ops.primitive import constexpr
 from mindspore.ops.primitive import _primexpr
-import mindspore.ops
-from mindspore.ops import functional as F
+import mindspore.ops as ops
 from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
 from mindspore.ops.operations._sequence_ops import TupleToTensor
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
@@ -65,8 +64,6 @@ from mindspore.ops._utils.utils import ms_arrange
 tuple_to_tensor_ = TupleToTensor()
 eye_ = P.Eye()
 fills_ = Fills()
-fill_ = P.Fill()
-fillv2_ = P.FillV2()
 ones_ = P.Ones()
 ones_like_ = P.OnesLike()
 tile_ = P.Tile()
@@ -115,9 +112,9 @@ reduce_min = P.ReduceMin()

 @_primexpr
 def get_x_shape(x_shape):
-    if F.is_sequence_shape_unknown(x_shape):
+    if ops.is_sequence_shape_unknown(x_shape):
         return (-2,)
-    if F.is_sequence_value_unknown(x_shape):
+    if ops.is_sequence_value_unknown(x_shape):
         return (-1,)
     s = 1
     for i in x_shape:
@@ -151,7 +148,7 @@ def _get_type(x):
     """get the dtype of input"""
     if isinstance(x, Tensor):
         return x.dtype
-    return F.typeof(x)
+    return ops.typeof(x)


 def _get_max_type(start, end, step):
@@ -240,7 +237,8 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     if start.shape != () or end.shape != () or step.shape != ():
         raise ValueError(f"For arange, the input args must be a TensorScalar,"
                          f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
-
+    range_op = _get_cache_prim(P.Range)()
+    data = range_op(start, end, step)
     if dtype is not None:
         data = cast_(data, dtype)
     return data
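
This hunk is the first of many below that route primitive construction through `_get_cache_prim` instead of building a fresh primitive (here `P.Range`) on every call. As an illustrative sketch only, not MindSpore's actual internals, the caching idea amounts to:

    # Hypothetical sketch of per-process primitive caching.
    _PRIM_CACHE = {}

    def get_cache_prim(prim_cls, *init_args):
        # Reuse one primitive instance per (class, init args) pair.
        key = (prim_cls, init_args)
        if key not in _PRIM_CACHE:
            _PRIM_CACHE[key] = prim_cls(*init_args)
        return _PRIM_CACHE[key]

This avoids re-instantiating stateless operators inside hot functional wrappers such as `arange`, `ones`, `zeros` and `split` below.
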
@@ -653,7 +651,7 @@ def _check_axis_type(axis, type_int=True, type_tuple=True, type_list=True, ops_n
         raise TypeError(f"For {ops_name}, the axis should be {type_str}, but got {type(axis)}.")


-def one_hot(indices, depth, on_value, off_value, axis=-1):
+def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
     r"""
     Computes a one-hot tensor.

@@ -665,14 +663,14 @@ def one_hot(indices, depth, on_value, off_value, axis=-1):

     Args:
         indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
-            Data type must be
+            Data type must be int32 or int64.
         depth(int): A scalar defining the depth of the one-hot dimension.
-        on_value(Union[Tensor, int, float]): A value to fill in output when `indices[j] = i`.
+        on_value(Union[Tensor, int, float], optional): A value to fill in output when `indices[j] = i`.
             Support uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64,
-            bool, complex64, complex128.
-        off_value(Union[Tensor, int, float]): A value to fill in output when `indices[j] != i`.
-            Has the same data type as `on_value`.
-        axis(int): Position to insert the value. e.g. If shape of `self` is :math:`(N, C)`, and `axis` is -1,
+            bool, complex64, complex128. Default: ``1`` .
+        off_value(Union[Tensor, int, float], optional): A value to fill in output when `indices[j] != i`.
+            Has the same data type as `on_value`. Default: ``0`` .
+        axis(int, optional): Position to insert the value. e.g. If shape of `self` is :math:`(N, C)`, and `axis` is -1,
             the output shape will be :math:`(N, C, depth)`, If `axis` is 0,
             the output shape will be :math:`(depth, N, C)`.
             Default: ``-1`` .
@@ -682,7 +680,7 @@ def one_hot(indices, depth, on_value, off_value, axis=-1):

     Raises:
         TypeError: If `axis` or `depth` is not an int.
-        TypeError: If dtype of `indices` is not
+        TypeError: If dtype of `indices` is not int32 or int64.
         TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
         ValueError: If `axis` is not in range [-1, ndim].
         ValueError: If `depth` is less than 0.
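
Since `on_value` and `off_value` now default to ``1`` and ``0``, the common case needs only the indices and the depth. A minimal usage sketch (values are hypothetical; assumes the 2.2 signature shown above):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
    out = ops.one_hot(indices, 3)  # on_value=1, off_value=0 by default
    # One row per index, with 1 at the index position:
    # [[1 0 0], [0 0 1], [0 1 0]]
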
@@ -716,8 +714,8 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name

     Args:
         type (mindspore.dtype): The specified type of output tensor. The data type only supports
-            `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ and
-            `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ .
+            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ and
+            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
         shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
         value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.

@@ -743,10 +741,11 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
         [0. 0. 0.]
         [0. 0. 0.]]
     """
-
+    value = cast_(value, type)
+    return _get_cache_prim(P.FillV2)()(shape, value)


-def full(size, fill_value, *, dtype=None):
+def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
     """
     Create a Tensor of the specified shape and fill it with the specified value.

@@ -788,7 +787,7 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-nam
         raise TypeError(f"For 'ops.full', 'dtype' must be mindspore.type, but got {dtype}.")
     if isinstance(size, list):
         size = tuple(size)
-    return
+    return ops.fill(dtype, size, fill_value)


 def full_like(input, fill_value, *, dtype=None):
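
With this change `fill` casts the value and dispatches to a cached `FillV2` primitive, and `full` simply delegates to `ops.fill`. A minimal usage sketch (hypothetical values):

    import mindspore
    from mindspore import ops

    x = ops.fill(mindspore.float32, (2, 3), 7)        # 2x3 tensor filled with 7.0
    y = ops.full((2, 3), 7, dtype=mindspore.float32)  # same result through full()
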
@@ -839,7 +838,7 @@ def chunk(input, chunks, axis=0):
     Cut the input Tensor into `chunks` sub-tensors along the specified axis.

     Note:
-        This function may return less
+        This function may return less than the specified number of chunks!

     Args:
         input (Tensor): A Tensor to be cut.
@@ -911,12 +910,12 @@ def fills(x, value):
         value_ = float(value)
     elif isinstance(value, Tensor):
         if value.ndim != 0:
-            raise ValueError("For 'ops.fills', if the argument 'value' is a tensor, the number of its dimension"
-                             " should be 0, but got {value.ndim}")
+            raise ValueError(f"For 'ops.fills', if the argument 'value' is a tensor, the number of its dimension"
+                             f" should be 0, but got {value.ndim}")
         value_ = value.astype(mstype.float32)
     else:
-        raise TypeError("For 'ops.fills', the type of argument 'value' should be int, float or Tensor,"
-                        " but got {type(value)}")
+        raise TypeError(f"For 'ops.fills', the type of argument 'value' should be int, float or Tensor,"
+                        f" but got {type(value)}")
     return fills_(x, value_)

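
The old messages were plain strings, so their `{...}` placeholders were printed literally; the added `f` prefix makes Python interpolate them:

    value_ndim = 2
    print("got {value_ndim}")   # prints: got {value_ndim}
    print(f"got {value_ndim}")  # prints: got 2
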
@@ -952,7 +951,7 @@ def ones(shape, dtype=None): # pylint: disable=redefined-outer-name
         [1. 1.]]
     """
     _dtype = mstype.float32 if dtype is None else dtype
-    ones_op = P.FillV2()
+    ones_op = _get_cache_prim(P.FillV2)()
     value = Tensor(1, _dtype)
     if isinstance(shape, int):
         shape = tuple([shape])
@@ -993,7 +992,7 @@ def ones_like(input, *, dtype=None):
         [[1 1]
         [1 1]]
     """
-    ones_like_op = P.OnesLike()
+    ones_like_op = _get_cache_prim(P.OnesLike)()
     output = ones_like_op(input)
     _dtype = input.dtype if dtype is None else dtype
     output = cast_(output, _dtype)
@@ -1028,7 +1027,7 @@ def zeros(size, dtype=None): # pylint: disable=redefined-outer-name
         [[0. 0.]
         [0. 0.]]
     """
-    zero_op = P.FillV2()
+    zero_op = _get_cache_prim(P.FillV2)()
     _dtype = mstype.float32 if dtype is None else dtype
     value = Tensor(0, _dtype)
     if isinstance(size, int):
@@ -1074,9 +1073,10 @@ def zeros_like(input, *, dtype=None):
         [0. 0.]]
     """
     _dtype = input.dtype if dtype is None else dtype
-
-
-    output =
+    _zeros_like = _get_cache_prim(P.ZerosLike)()
+    _cast = _get_cache_prim(P.Cast)()
+    output = _zeros_like(input)
+    output = _cast(output, _dtype)
     return output


@@ -1147,7 +1147,8 @@ def tile(input, multiples):
         [1. 2. 1. 2.]
         [3. 4. 3. 4.]]]
     """
-
+    tile_op = _get_cache_prim(P.Tile)()
+    return tile_op(input, multiples)


 def range(start, end, step):
@@ -1455,7 +1456,7 @@ def size(input_x):

     Args:
         input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-            `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.

     Returns:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1607,7 +1608,7 @@ def reshape(input, shape):


 def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
-    """
+    r"""
     Reverses variable length slices.

     Args:
@@ -1621,7 +1622,12 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):

     Raises:
         TypeError: If `seq_dim` or `batch_dim` is not an int.
-        ValueError: If
+        ValueError: If :math:`len(seq\_lengths) != x.shape[batch\_dim]`.
+        ValueError: If :math:`batch\_dim == seq\_dim`.
+        ValueError: If :math:`seq\_dim < 0` or :math:`seq\_dim >= len(x.shape)`.
+        ValueError: If :math:`batch\_dim < 0` or :math:`batch\_dim >= len(x.shape)`.
+        RuntimeError: If any value of `seq_lengths` is less than 0.
+        RuntimeError: If any value of `seq_lengths` is larger than `x.shape[seq_dim]`.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
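
A minimal usage sketch consistent with the constraints now documented above (hypothetical values):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
    seq_lengths = Tensor(np.array([2, 3]))  # one length per batch row, each <= x.shape[seq_dim]
    out = ops.reverse_sequence(x, seq_lengths, seq_dim=1, batch_dim=0)
    # Row 0 reverses its first 2 elements, row 1 all 3: [[2. 1. 3.], [6. 5. 4.]]
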
@@ -1724,12 +1730,12 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     if not isinstance(input, Tensor):
         raise TypeError(f"For 'flatten', argument 'input' must be Tensor.")
     if not isinstance(start_dim, int) or not isinstance(end_dim, int) or \
-
+            isinstance(start_dim, bool) or isinstance(end_dim, bool):
         raise TypeError(f"For 'flatten', both 'start_dim' and 'end_dim' must be int.")
     check_flatten_order_const(order)
     if order == 'F':
-        perm = F.make_range(0, F.rank(input))
-        new_order = F.tuple_reversed(perm)
+        perm = ops.make_range(0, ops.rank(input))
+        new_order = ops.tuple_reversed(perm)
         input = _get_cache_prim(P.Transpose)()(input, new_order)

     # Handle the default case.
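
The 'F' branch builds the reversed axis permutation with `ops` helpers and transposes before flattening; the effect matches NumPy's Fortran-order flatten. An equivalent hedged sketch of what the branch computes (hypothetical values):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.arange(6).reshape(2, 3), mindspore.float32)
    perm = tuple(reversed(range(x.ndim)))  # what make_range + tuple_reversed produce
    out = ops.reshape(ops.transpose(x, perm), (-1,))
    # Matches np.arange(6).reshape(2, 3).flatten(order='F'): [0. 3. 1. 4. 2. 5.]
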
@@ -1911,15 +1917,15 @@ def select(cond, x, y):
         input_y = cast_(input_y, mstype.float32)

     if is_x_tensor and is_y_tensor and is_cond_tensor:
-        x_shape = F.shape(x)
-        y_shape = F.shape(y)
-        cond_shape = F.shape(cond)
-        all_constant = F.isconstant(cond_shape) and F.isconstant(x_shape) and F.isconstant(y_shape)
+        x_shape = ops.shape(x)
+        y_shape = ops.shape(y)
+        cond_shape = ops.shape(cond)
+        all_constant = ops.isconstant(cond_shape) and ops.isconstant(x_shape) and ops.isconstant(y_shape)
         if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
             broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
-            new_cond = F.broadcast_to(cond, broadcast_shape)
-            new_x = F.broadcast_to(x, broadcast_shape)
-            new_y = F.broadcast_to(y, broadcast_shape)
+            new_cond = ops.broadcast_to(cond, broadcast_shape)
+            new_x = ops.broadcast_to(x, broadcast_shape)
+            new_y = ops.broadcast_to(y, broadcast_shape)
             return tensor_select_(new_cond, new_x, new_y)

     return tensor_select_(cond, input_x, input_y)
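
`select` now broadcasts `cond`, `x` and `y` to a common shape first, provided all three shapes are compile-time constants. A minimal sketch (hypothetical values):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    cond = Tensor(np.array([True, False]))
    x = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), mindspore.float32)
    y = Tensor(np.zeros((2, 2)), mindspore.float32)
    out = ops.select(cond, x, y)  # cond (2,) broadcasts against (2, 2)
    # Expected: [[1. 0.], [3. 0.]]
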
@@ -2010,9 +2016,7 @@ def strided_slice(input_x,
     Args:
         input_x (Tensor): The input Tensor to be extracted from.
         begin (tuple[int]): A tuple which represents the location where to start.
-            Only non-negative int is allowed.
         end (tuple[int]): A tuple or which represents the maximum location where to end.
-            Only non-negative int is allowed.
         strides (tuple[int]): A tuple which represents the strides is continuously added
             before reaching the maximum location. Only int is allowed, it can be negative
             which results in reversed slicing.
@@ -2156,13 +2160,13 @@ def concat(tensors, axis=0):
     Alias for :func:`mindspore.ops.cat()`.

     Tutorial Examples:
-        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.1/beginner/tensor.html#tensor-operation>`_
+        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
         - `FGSM Network Adversarial Attack - Implementing FGSM
-          <https://mindspore.cn/tutorials/application/en/r2.1/cv/fgsm.html#implementing-fgsm>`_
+          <https://mindspore.cn/tutorials/application/en/r2.2/cv/fgsm.html#implementing-fgsm>`_
         - `Vision Transformer Image Classification - Building ViT as a whole
-          <https://mindspore.cn/tutorials/application/en/r2.1/cv/vit.html#building-vit-as-a-whole>`_
+          <https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
         - `Sentiment Classification Implemented by RNN - Dense
-          <https://mindspore.cn/tutorials/application/en/r2.1/nlp/sentiment_analysis.html#dense>`_
+          <https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
     """
     return cat(tensors, axis)

@@ -2279,7 +2283,8 @@ def unbind(input, dim=0):

 def expand_dims(input_x, axis):
     """
-    Adds an additional dimension to `input_x` at the given axis
+    Adds an additional dimension to `input_x` at the given axis, the dimension
+    of `input_x` should be greater than or equal to 1.

     Note:
         If the specified axis is a negative number, the index is counted
@@ -2357,18 +2362,19 @@ def squeeze(input, axis=None):
     If `axis` is specified, it will remove the dimensions of size 1 in the given `axis`.
     For example, if the dimension is not specified :math:`axis=None`, input shape is (A, 1, B, C, 1, D),
     then the shape of the output Tensor is (A, B, C, D). If the dimension is specified, the squeeze operation
-    is only performed in the specified dimension. If input shape is (A, 1, B), input Tensor will
-
+    is only performed in the specified dimension. If input shape is (A, 1, B), input Tensor will be changed
+    to (A, B) when :math:`axis=1`, but when :math:`axis=0` or :math:`axis=2`, an error will occur.

     Note:
+        - Squeezing a dimension that is not 1 will raise an error.
         - Please note that in dynamic graph mode, the output Tensor will share data with the input Tensor,
           and there is no Tensor data copy process.
         - The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.

     Args:
         input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will
-            all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
+        axis (Union[int, tuple(int), list(int)]): Specifies the dimension indexes of shape to be removed, which will
+            remove all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
             Default: ``None`` , an empty tuple will be used.

     Returns:
@@ -2376,8 +2382,8 @@ def squeeze(input, axis=None):

     Raises:
         TypeError: If `input` is not a tensor.
-        TypeError: If `axis` is
-        TypeError: If `axis` is a tuple whose elements are not all int.
+        TypeError: If `axis` is not an int, tuple or list.
+        TypeError: If `axis` is a tuple or list whose elements are not all int.
         ValueError: If the corresponding dimension of the specified axis isn't equal to 1.

     Supported Platforms:
@@ -2396,6 +2402,8 @@ def squeeze(input, axis=None):
     """
     if axis is None:
         axis = ()
+    if isinstance(axis, list):
+        axis = tuple(axis)
     squeeze_ = _get_cache_prim(P.Squeeze)(axis)
     return squeeze_(input)

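
`squeeze` now also accepts a list for `axis`, converting it to a tuple before the primitive is constructed:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.ones((1, 2, 1, 3)), mindspore.float32)
    out = ops.squeeze(x, axis=[0, 2])  # list now accepted, same as (0, 2)
    # out.shape == (2, 3)
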
@@ -2478,7 +2486,6 @@ def scatter_mul(input_x, indices, updates):
     Tensor, the updated `input_x`, has the same shape and type as `input_x`.

     Raises:
-        TypeError: If `use_locking` is not a bool.
         TypeError: If `indices` is not an int32 or int64.
         ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
         RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
@@ -3486,11 +3493,12 @@ def gather(input_params, input_indices, axis, batch_dims=0):
     where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.

     .. note::
-        1. The value of input_indices must be in the range of `[0, input_param.shape[axis])
-           out of
+        1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`.
+           On CPU and GPU, an error is raised if an out of bound indice is found. On Ascend, the results may be
+           undefined.

         2. The data type of input_params cannot be
-           `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
+           `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
            platform currently.

     Args:
@@ -3512,6 +3520,7 @@ def gather(input_params, input_indices, axis, batch_dims=0):
         ValueError: If `axis` is a Tensor and its size is not 1.
         TypeError: If `input_params` is not a tensor.
         TypeError: If `input_indices` is not a tensor of type int.
+        RuntimeError: If `input_indices` is out of range `[0, input_param.shape[axis])` on CPU or GPU.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
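
A minimal sketch of the behavior the updated note describes (hypothetical values):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    params = Tensor(np.array([10, 20, 30, 40]), mindspore.float32)
    idx = Tensor(np.array([3, 0, 2]), mindspore.int32)
    out = ops.gather(params, idx, axis=0)  # [40. 10. 30.]
    # Index values must stay in [0, params.shape[axis]); on CPU/GPU an
    # out-of-range index raises, on Ascend the result may be undefined.
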
@@ -3976,7 +3985,7 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
     >>> reduction = "none"
     >>> output = ops.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
     >>> print(output)
-    [[
+    [[1 2 8 4 8]]
     >>> input_x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32), name="x")
     >>> indices = Tensor(np.array([[1, -1, 2], [0, 2, 1]]), mindspore.int32)
     >>> updates = Tensor(np.array([[1, 2, 2], [4, 5, 8]]), mindspore.int32)
@@ -3984,7 +3993,9 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
     >>> reduction = "add"
     >>> output = ops.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
     >>> print(output)
-    [[5
+    [[ 5  2  3]
+     [ 5  5 14]
+     [ 7 15 11]]
     """
     _tensor_scatter_elements = _get_cache_prim(TensorScatterElements)(axis, reduction)
     return _tensor_scatter_elements(input_x, indices, updates)
@@ -4048,7 +4059,7 @@ def scatter(input, axis, index, src):
     [0. 0. 0. 0. 0.]
     [0. 0. 0. 0. 0.]]
     """
-    return
+    return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)


 def _get_slice_scatter_const(x_shape, axis, start, end, step):
@@ -4087,6 +4098,7 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
     Tensor after embedding, has the same shape and type as `input` .

     Raises:
+        ValueError: The shape of `src` is not the same as the shape of `input` slice.
         TypeError: If `input` is not a Tensor.
         TypeError: If `src` is not a Tensor.
         TypeError: If `axis` or `step` is not an integer.
@@ -4115,23 +4127,13 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
     for _ in builtins.range(axis):
         index_tensor = index_tensor.expand_dims(0)

-    if index_shape
-
-
-
-
-
-    for _ in builtins.range(axis):
-        src = src.expand_dims(0)
-    if axis == input_rank - 1:
-        src = src.broadcast_to(input.shape[0:axis] + src_shape)
-    else:
-        for _ in builtins.range(len(src_shape)):
-            index_tensor = index_tensor.expand_dims(-1)
-        src = src.broadcast_to(input.shape[0:axis] + (len(index),) + src_shape)
+    if index_shape != src_shape:
+        raise ValueError(f"For slice_scatter, src shape should be equal to the slice size,"
+                         f"but got src shape {src_shape} and slice shape {index_shape}")
+    for _ in builtins.range(input_rank - axis - 1):
+        index_tensor = index_tensor.expand_dims(-1)
     index_tensor = index_tensor.broadcast_to(src.shape)
-
-    return output
+    return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)


 def select_scatter(input, src, axis, index):
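
The rewrite drops the old implicit broadcasting of `src`: it must now match the slice shape exactly, otherwise the newly documented ValueError is raised. A minimal sketch (hypothetical values):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.zeros((3, 4)), mindspore.float32)
    src = Tensor(np.ones((3, 2)), mindspore.float32)  # exactly the (3, 2) slice shape
    out = ops.slice_scatter(x, src, axis=1, start=0, end=2)
    # A src that merely broadcasts to the slice, e.g. shape (2,), now raises ValueError.
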
@@ -4148,6 +4150,7 @@ def select_scatter(input, src, axis, index):
     Tensor after embedding, has the same shape and type as `input` .

     Raises:
+        ValueError: The shape of `src` is not the same as the shape scattered over `input` .
         TypeError: If `input` is not a Tensor.
         TypeError: If `src` is not a Tensor.
         TypeError: If `axis` or `index` is not an integer.
@@ -4169,6 +4172,9 @@ def select_scatter(input, src, axis, index):
     [0. 0. 0.]]]
     """
     src = src.expand_dims(axis=axis)
+    x_rank = input.ndim
+    axis = axis if axis >= 0 else axis + x_rank
+    index = index if index >= 0 else index + x_rank
     return slice_scatter(input, src, axis, start=index, end=index + 1)

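
With the added normalization, negative `axis` and `index` are resolved against the input rank before delegating to `slice_scatter`. A minimal sketch (hypothetical values):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.zeros((2, 3)), mindspore.float32)
    src = Tensor(np.array([1.0, 2.0]), mindspore.float32)
    out = ops.select_scatter(x, src, axis=-1, index=-1)  # same as axis=1, index=2
    # Expected: [[0. 0. 1.], [0. 0. 2.]]
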
@@ -4437,7 +4443,7 @@ def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_
|
|
|
4437
4443
|
return matrix_diag_v3(x, k, num_rows, num_cols, padding_value)
|
|
4438
4444
|
|
|
4439
4445
|
|
|
4440
|
-
def matrix_diag_part(x, k
|
|
4446
|
+
def matrix_diag_part(x, k, padding_value, align="RIGHT_LEFT"):
|
|
4441
4447
|
r"""
|
|
4442
4448
|
Returns the diagonal part of input tensor.
|
|
4443
4449
|
Returns a tensor with the k[0]-th to k[1]-th diagonals of `x`. Some diagonals are shorter than
|
|
@@ -4445,13 +4451,13 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
|
|
|
4445
4451
|
|
|
4446
4452
|
Args:
|
|
4447
4453
|
x (Tensor): The input Tensor with rank r, where r >= 2.
|
|
4448
|
-
k (
|
|
4454
|
+
k (Tensor): A Tensor of type int32. Diagonal offset(s). Positive value means
|
|
4449
4455
|
superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. k can be
|
|
4450
4456
|
a single integer (for a single diagonal) or a pair of integers specifying the low and high ends
|
|
4451
4457
|
of a matrix band. k[0] must not be larger than k[1]. The value of k has restructions, meaning
|
|
4452
|
-
value of k must be in (-x.shape[-2], x.shape[-1]).
|
|
4453
|
-
padding_value (
|
|
4454
|
-
The number to fill the area outside the specified diagonal band.
|
|
4458
|
+
value of k must be in (-x.shape[-2], x.shape[-1]).
|
|
4459
|
+
padding_value (Tensor): A Tensor with only one value. Have the same dtype as x.
|
|
4460
|
+
The number to fill the area outside the specified diagonal band.
|
|
4455
4461
|
align (str, optional): An optional string from: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` ,
|
|
4456
4462
|
``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` . Align is a string specifying how superdiagonals and subdiagonals
|
|
4457
4463
|
should be aligned, respectively. ``"RIGHT_LEFT"`` aligns superdiagonals to the right (left-pads the row)
|
|
@@ -4501,7 +4507,7 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
|
|
|
4501
4507
|
return matrix_diag_part_v3(x, k, padding_value)
|
|
4502
4508
|
|
|
4503
4509
|
|
|
4504
|
-
def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"):
|
|
4510
|
+
def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"): # pylint: disable=redefined-outer-name
|
|
4505
4511
|
r"""
|
|
4506
4512
|
Returns a batched matrix tensor with new batched diagonal values.
|
|
4507
4513
|
Given x and diagonal, this operation returns a tensor with the same shape and values as x, except for the specified
|
|
@@ -4713,7 +4719,7 @@ def affine_grid(theta, size, align_corners=False):
     return affine_grid_op(theta, size)


-def broadcast_to(input, shape):
+def broadcast_to(input, shape):  # pylint: disable=redefined-outer-name
     """
     Broadcasts input tensor to a given shape. The dim of input shape must be smaller
     than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
@@ -4787,7 +4793,7 @@ def broadcast_to(input, shape):  # pylint: disable=redefined-outer-name
         [[1. 1.]
          [2. 2.]]
     """
-    if isinstance(shape, Tensor) or
+    if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
         _dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
         return _dyn_broadcast_to(input, shape)
     _broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
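`broadcast_to` now routes through `DynamicBroadcastTo` whenever the target shape is a Tensor or not fully known at compile time; a static tuple still takes the cached `P.BroadcastTo` path. A minimal usage sketch of the static path (values illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([1, 2, 3]), mindspore.float32)
out = ops.broadcast_to(x, (2, 3))  # static tuple shape: cached P.BroadcastTo path
print(out)
# [[1. 2. 3.]
#  [1. 2. 3.]]
```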
@@ -5580,10 +5586,10 @@ def _split_int(x, split_size_or_sections, axis):
     arr_shape = x.shape
     length_along_dim = arr_shape[axis]
     if split_size_or_sections > length_along_dim:
-        res = P.Split(axis, 1)(x)
+        res = _get_cache_prim(P.Split)(axis, 1)(x)
     elif length_along_dim % split_size_or_sections == 0:
         sections = length_along_dim // split_size_or_sections
-        res = P.Split(axis, sections)(x)
+        res = _get_cache_prim(P.Split)(axis, sections)(x)
     else:
         num_sections = length_along_dim // split_size_or_sections
         length1 = num_sections * split_size_or_sections
@@ -5592,8 +5598,8 @@ def _split_int(x, split_size_or_sections, axis):
     size1 = _tuple_setitem(arr_shape, axis, length1)
     start2 = _tuple_setitem(start1, axis, length1)
     size2 = _tuple_setitem(arr_shape, axis, length2)
-    res = P.Split(axis, num_sections)(tensor_slice(x, start1, size1)) + \
-          P.Split(axis, 1)(tensor_slice(x, start2, size2))
+    res = _get_cache_prim(P.Split)(axis, num_sections)(tensor_slice(x, start1, size1)) + \
+          _get_cache_prim(P.Split)(axis, 1)(tensor_slice(x, start2, size2))
     return res


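All four `P.Split` call sites now go through `_get_cache_prim`, which hands back one cached primitive per argument combination instead of constructing a fresh operator on every call. A hedged sketch of the pattern (the private import path is as it appears in MindSpore 2.x source; verify against your version):

```python
from mindspore.ops import operations as P
from mindspore.ops._primitive_cache import _get_cache_prim

# Repeated calls with the same class and constructor arguments reuse a
# cached primitive, avoiding per-call operator construction overhead.
split_op = _get_cache_prim(P.Split)(0, 2)
```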
@@ -5687,7 +5693,7 @@ def split(tensor, split_size_or_sections, axis=0):
     return tuple(res)


-def tril(input, diagonal=0):
+def tril(input, diagonal=0):  # pylint: disable=redefined-outer-name
     """
     Returns the lower triangle part of 'input' (elements that contain the diagonal and below),
     and sets the other elements to zero.
@@ -6102,7 +6108,7 @@ def dsplit(input, indices_or_sections):
     return tensor_split(input, indices_or_sections, 2)


-def _init_and_select_elem(input, initial, where, cmp_fn):
+def _init_and_select_elem(input, initial, where, cmp_fn):  # pylint: disable=redefined-outer-name
     """Initialize the input according to `initial`, and select the element according to `where`."""
     if initial is not None:
         initial = ops.fill(input.dtype, input.shape, initial)
@@ -6120,7 +6126,7 @@ def _init_and_select_elem(input, initial, where, cmp_fn):  # pylint: disable=redefined-outer-name
     return input


-def max(input, axis=None, keepdims=False, *, initial=None, where=None):
+def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     """
     Calculates the maximum value along with the given axis for the input tensor. It returns the maximum values and
     indices.
@@ -6138,7 +6144,8 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name

     Args:
         input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
-        axis (int): The dimension to reduce.
+        axis (int): The dimension to reduce. When `axis` is ``None``, the maximum value of all elements
+            in `input` is computed. Default: ``None`` .
         keepdims (bool): Whether to reduce dimension, if true, the output will keep same dimension with the input,
             the output will reduce dimension if false. Default: ``False`` .

@@ -6174,14 +6181,20 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     >>> import numpy as np
     >>> from mindspore import Tensor, ops
     >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
-    >>> output, index
+    >>> output, index = ops.max(x)
     >>> print(output, index)
     0.7 0
+    >>> y = Tensor(np.array([[0.0, 0.3, 0.4, 0.5, 0.1],
+    ...                      [3.2, 0.4, 0.1, 2.9, 4.0]]), mindspore.float32)
+    >>> output, index = ops.max(y, axis=0, keepdims=True)
+    >>> print(output, index)
+    [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
     """
     if not input.shape:
         return (input, Tensor(0, dtype=mstype.int32))
     if axis is None:
-
+        reduce_max_op = _get_cache_prim(P.ReduceMax)()
+        return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
     if initial is not None and not isinstance(initial, numbers.Number):
         raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
     if axis is not None and not isinstance(axis, int):
@@ -6237,7 +6250,7 @@ def argmax(input, dim=None, keepdim=False):
     return out


-def min(input, axis=None, keepdims=False, *, initial=None, where=None):
+def min(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     """
     Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
     indices.
@@ -6366,6 +6379,9 @@ def aminmax(input, *, axis=0, keepdims=False):
     argmax_with_value_op = P.ArgMaxWithValue(axis, keepdims)
     _, output0 = argmin_with_value_op(input)
     _, output1 = argmax_with_value_op(input)
+    if keepdims is True and input.ndim == 0:
+        output0 = ops.reshape(output0, [1])
+        output1 = ops.reshape(output1, [1])
     return output0, output1


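The new branch makes `keepdims=True` behave sensibly for 0-D inputs by promoting both outputs to shape `(1,)`. A usage sketch under that assumption:

```python
import mindspore
from mindspore import Tensor, ops

x = Tensor(3.0, mindspore.float32)      # 0-D tensor
lo, hi = ops.aminmax(x, keepdims=True)  # both reshaped to (1,) by the fix above
print(lo.shape, hi.shape)
```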
@@ -6477,7 +6493,6 @@ def unsorted_segment_sum(input_x, segment_ids, num_segments):
     return unsorted_segment_sum_(input_x, segment_ids, num_segments)


-
 def topk(input, k, dim=None, largest=True, sorted=True):
     r"""
     Finds values and indices of the `k` largest or smallest entries along a given dimension.
@@ -6500,12 +6515,8 @@ def topk(input, k, dim=None, largest=True, sorted=True):

     If the two compared elements are the same, the one with the smaller index value is returned first.

-    Note:
-        Currently, Ascend/CPU supported all common data types except bool and complex type,
-        but GPU only supports float16, float32 currently.
-
     Args:
-        input (Tensor): Input to be computed.
+        input (Tensor): Input to be computed, data type must be float16, float32 or int32.
         k (int): The number of top or bottom elements to be computed along the last dimension, constant input is needed.
         dim (int, optional): The dimension to sort along. Default: ``None`` .
         largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
@@ -6523,6 +6534,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
         TypeError: If `sorted` is not a bool.
         TypeError: If `input` is not a Tensor.
         TypeError: If `k` is not an int.
+        TypeError: If dtype of `input` is not one of the following: float16, float32 or int32.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
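A usage sketch consistent with the tightened dtype contract (values illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([0.1, 0.5, 0.3, 0.2]), mindspore.float32)  # an allowed dtype
values, indices = ops.topk(x, 2)
print(values, indices)  # expected: [0.5 0.3] [1 2]
```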
@@ -6574,50 +6586,8 @@ def topk(input, k, dim=None, largest=True, sorted=True):

 def expand(input_x, size):
     r"""
-
-
-    Note:
-        - If the `size` for a dimension is -1, it means no change for the size of that dimension.
-        - When a Tensor is expanded to a larger number of dimensions, the new ones will be appended at
-          the front, and for the new dimensions, the `size` can not be -1.
-
-    Args:
-        input_x (Tensor): A Tensor to be expanded.
-        size (Tensor): The expanded shape of `input_x`.
-
-    Returns:
-        y (Tensor) - Tensor after expansion whose shape is `size`.
-
-    Raises:
-        TypeError: If `input_x` or `size` is not Tensor.
-        TypeError: If the type of `size` is not one of the following dtype: int16, int32, int64.
-        ValueError: If the size of `size` is less than the size of `input_x.shape`.
-        ValueError: If `size` is not a 1-D tensor.
-        ValueError: If the expanded `size` is not equal to the existing shape of `input_x` at a dimension
-            that is not 1.
-        ValueError: If the expanded `size` < 0 and it is in a leading position, corresponding to
-            a non-existing dimension in `input_x`.
-        ValueError: If the number of elements of output is more than 1000000.
-
-    Supported Platforms:
-        ``Ascend`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[2], [3], [4]]), mindspore.float32)
-        >>> size = Tensor(np.array([3,4]), mindspore.int32)
-        >>> y = ops.expand(input_x, size)
-        >>> print(y)
-        [[2. 2. 2. 2.]
-         [3. 3. 3. 3.]
-         [4. 4. 4. 4.]]
-        >>> input_x = Tensor(2, mindspore.int16)
-        >>> size = Tensor(np.array([1, 1]), mindspore.int32)
-        >>> y = ops.expand(input_x, size)
-        >>> print(y)
-        [[2]]
+    :func:`mindspore.ops.expand` will be deprecated in the future.
+    Please use :func:`mindspore.ops.broadcast_to` instead.
     """
     expand_op = _get_cache_prim(Expand)()
     return expand_op(input_x, size)
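Since the docstring now marks `ops.expand` for deprecation, a migration sketch to `ops.broadcast_to` (values taken from the removed example):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([[2], [3], [4]]), mindspore.float32)
# previously: ops.expand(x, Tensor(np.array([3, 4]), mindspore.int32))
y = ops.broadcast_to(x, (3, 4))
print(y)
# [[2. 2. 2. 2.]
#  [3. 3. 3. 3.]
#  [4. 4. 4. 4.]]
```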
@@ -6636,20 +6606,30 @@ def _check_fold_param(param, param_name):
     return param


-@_primexpr
-def _check_fold_input(input):
-    """Check the rank of fold's input."""
-    if not isinstance(input, (Tensor, Tensor_)) or F.rank(input) != 3:
-        raise ValueError(
-            f"For array function 'fold', 'input' must be a 3-D tensor.")
-
-
 def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
     r"""
     Combines an array of sliding local blocks into a large containing tensor.

+    Consider a batched input tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)` ,
+    where :math:`N` is the batch dimension, :math:`C \times \prod(\text{kernel_size})` is the
+    total number of values within each block (a block has :math:`\prod(\text{kernel_size})` spatial
+    locations each containing a `C`-channeled vector), and :math:`L` is the total number of such blocks:
+
+    .. math::
+        L = \prod_d \left\lfloor\frac{\text{output_size}[d] + 2 \times \text{padding}[d] %
+            - \text{dilations}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{strides}[d]} + 1\right\rfloor,
+
+    where :math:`d` is over all spatial dimensions.
+
+    Therefore, `output_size` is the spatial shape of the large containing tensor of the sliding local blocks.
+
+    The `dilation`, `padding` and `stride` arguments specify how the sliding blocks are retrieved.
+
     .. warning::
-        - The input must be a 3-dimensional Tensor with shape
+        - The input must be a 3-dimensional Tensor with shape
+          :math:`(N, C \times \prod(\text{kernel_size}), L)` .
+        - The output must be a 4-dimensional Tensor with shape
+          :math:`(N, C, output\_size[0], output\_size[1], ...)` .

     Args:
         input (Tensor): 3-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
@@ -6664,7 +6644,7 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
         for height and width. If type is int, it means that height equals width. Default: ``1`` .

     Returns:
-        A Tensor, with same type as `input` .
+        A Tensor, with same type as `input` . And its shape is as described above.

     Raises:
         TypeError: If `kernel_size`, `dilation`, `padding`, `stride` data type is not int, tuple or list.
@@ -6687,16 +6667,15 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
     >>> print(output.shape)
     (16, 16, 8, 8)
     """
-    _check_fold_input(input)
     kernel_size = _check_fold_param(kernel_size, "kernel_size")
     dilation = _check_fold_param(dilation, "dilation")
     padding = _check_fold_param(padding, "padding")
     stride = _check_fold_param(stride, "stride")
     fold_op = _get_cache_prim(Col2Im)(kernel_size, dilation, padding, stride)
-    input_shape =
+    input_shape = ops.shape(input)
     k = kernel_size[0] * kernel_size[-1]
     r_shape = input_shape[:1] + (-1, k) + input_shape[-1:]
-    input =
+    input = ops.reshape(input, r_shape)
     return fold_op(input, output_size)


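A usage sketch that reproduces the `(16, 16, 8, 8)` docstring output; the parameter values are assumed from the public `ops.fold` example and check out against the `L` formula above (`L = 5 * 5 = 25`):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

# (N, C * prod(kernel_size), L) = (16, 16 * 2 * 2, 25)
x = Tensor(np.random.rand(16, 64, 25), mindspore.float32)
output_size = Tensor([8, 8], mindspore.int32)
out = ops.fold(x, output_size, kernel_size=[2, 2], dilation=[2, 2],
               padding=[2, 2], stride=[2, 2])
print(out.shape)  # (16, 16, 8, 8)
```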
@@ -6767,7 +6746,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
         A Tensor, with same type as `input` . And its shape is as described above.

     Raises:
-        TypeError: If any data type of `kernel_size`, `stride`, `dilation`, `
+        TypeError: If any data type of `kernel_size`, `stride`, `dilation`, `padding` is not int, tuple or list.
         ValueError: If `kernel_size`, `dilation`, `stride` value is not
             greater than zero or elements number more than `2`.
         ValueError: If `padding` value is less than zero.
@@ -6793,9 +6772,9 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
                            dilations=dilation,
                            pads=padding)
     tmp = unfold_op(input)
-    tmp_shape =
+    tmp_shape = ops.shape(tmp)
     out_shape = tmp_shape[:1] + (-1,) + tmp_shape[-1:]
-    out =
+    out = ops.reshape(tmp, out_shape)
     return out


@@ -6865,36 +6844,36 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
     x_shape = input.shape
     n, m = x_shape[-2:]

-
-    e = _get_cache_prim(P.Eye)()(n, m, dtype)
+    e = ops.eye(n, m, dtype)
     if offset >= m or offset <= -n:
-
-
+        zero_shape = x_shape[:-2] + (0,)
+        return ops.zeros(zero_shape, dtype)
+    if offset != 0:
         e = e.astype(mstype.float32)
         if offset > 0:
-            e_left =
+            e_left = ops.fill(mstype.float32, (n, offset), 0)
             e_right = e[..., 0:m - offset:1]
-            e =
+            e = ops.cat((e_left, e_right), 1).astype(dtype)
         elif offset < 0:
-            e_upper =
+            e_upper = ops.fill(mstype.float32, (-offset, m), 0)
             e_lower = e[0:n + offset:1, ...]
-            e =
-    e =
+            e = ops.cat((e_upper, e_lower), 0).astype(dtype)
+    e = ops.broadcast_to(e, x_shape)

-    prod_val =
-    res =
+    prod_val = ops.mul(input, e)
+    res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)

     begin = ()
     for _ in ms_arrange(x_ndim - 2):
         begin += (0,)
-    last_dim_begin =
+    last_dim_begin = builtins.max(0, -offset)
     begin += (last_dim_begin,)
     res_size = res.shape[:-1]
-    last_dim_end =
+    last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
     if last_dim_end <= 0:
         return Tensor([])
     res_size += (last_dim_end,)
-    res =
+    res = ops.slice(res, begin, res_size)
     return res.astype(dtype)


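A quick behavioral check of `ops.diagonal` with a positive offset (values illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([[0, 1], [2, 3]]), mindspore.float32)
print(ops.diagonal(x))            # [0. 3.]  main diagonal
print(ops.diagonal(x, offset=1))  # [1.]     one diagonal above the main
```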
@@ -7205,7 +7184,7 @@ def _check_axis_valid(axis, ndim):
     to the built-in operator (non-negative, int or tuple).
     """
     if axis is None:
-        axis =
+        axis = ops.make_range(ndim)
         return axis
     if isinstance(axis, (tuple, list)):
         axis = tuple(map(lambda x: _check_check_axis_in_range(x, ndim), axis))
@@ -7279,7 +7258,7 @@ def movedim(x, source, destination):
     >>> print(output.shape)
     (4, 3, 5)
     """
-    ndim =
+    ndim = ops.rank(x)
     source = _check_axis_valid(source, ndim)
     destination = _check_axis_valid(destination, ndim)
     if len(source) != len(destination):
@@ -7354,7 +7333,7 @@ def swapaxes(input, axis0, axis1):
     if axis0 > axis1:
         axis0, axis1 = axis1, axis0

-    perm =
+    perm = ops.make_range(0, input.ndim)
     if axis1 + 1 < input.ndim:
         new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
                    perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1] + perm[axis1 + 1:]
@@ -7395,7 +7374,7 @@ def swapdims(input, dim0, dim1):
     >>> print(output.shape)
     (4, 3, 2)
     '''
-    return
+    return ops.swapaxes(input, dim0, dim1)


 @constexpr
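`swapdims` is now an explicit alias of `swapaxes`. A quick shape check consistent with the docstring output above:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.ones((2, 3, 4)), mindspore.float32)
print(ops.swapdims(x, 0, 2).shape)  # (4, 3, 2), same as ops.swapaxes(x, 0, 2)
```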
@@ -7406,7 +7385,7 @@ def _check_is_int(arg_value, arg_name, op_name):

 @_primexpr
 def _check_positive_int(arg_value, arg_name, op_name):
-    arg_value = validator.
+    arg_value = validator.check_int_range(arg_value, 0, 2147483647, validator.INC_RIGHT, arg_name, op_name)
     return arg_value


@@ -7507,7 +7486,7 @@ def repeat_elements(x, rep, axis=0):
     [[0 0 1 1 2 2]
      [3 3 4 4 5 5]]
     """
-    const_utils.check_type_valid(
+    const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
     rep = _check_positive_int(rep, "rep", "repeat_elements")
     axis = _check_is_int(axis, "axis", "repeat_elements")
     shape_op = P.Shape()
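A usage sketch matching the docstring output shown in this hunk:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)  # a number dtype, as required
print(ops.repeat_elements(x, rep=2, axis=1))
# [[0 0 1 1 2 2]
#  [3 3 4 4 5 5]]
```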
@@ -7599,7 +7578,7 @@ def sequence_mask(lengths, maxlen=None):
     to_tensor_op = P.ScalarToTensor()
     shape_op = P.Shape()

-    const_utils.check_type_valid(
+    const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
     _check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")

     if maxlen is None:
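A usage sketch under the int32/int64 requirement enforced above (values illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

lengths = Tensor(np.array([1, 3]), mindspore.int32)  # must be int32 or int64
print(ops.sequence_mask(lengths, maxlen=4))
# [[ True False False False]
#  [ True  True  True False]]
```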
@@ -7662,7 +7641,6 @@ __all__ = [
     'matrix_band_part',
     'padding',
     'fill',
-    'fill_',
     'fills',
     'tile',
     'size',